/*
 * For the moment, the only task for the NFS clear_inode method is to
 * release the mmap and access-cache credentials
 */
static void nfs_clear_inode(struct inode *inode)
{
        struct nfs_inode_info *nfsi = NFS_I(inode);
        struct rpc_cred *cred = nfsi->mm_cred;

        if (cred)
                put_rpccred(cred);
        cred = nfsi->cache_access.cred;
        if (cred)
                put_rpccred(cred);
}
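/*
 * For reference: put_rpccred() drops one reference on an rpc_cred and
 * tears the credential down when the count reaches zero, so each pointer
 * released above must have been stored with a matching get_rpccred().
 * A minimal sketch of the acquiring side (hypothetical helper, not from
 * the source; field names as in nfs_clear_inode() above):
 */
static void example_cache_mmap_cred(struct inode *inode, struct rpc_cred *cred)
{
        /* Take our own reference before publishing the pointer */
        NFS_I(inode)->mm_cred = get_rpccred(cred);
}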
static void pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;

        put_rpccred(lo->plh_lc_cred);
        /* A driver that allocates its own header must also free it */
        if (ld->alloc_layout_hdr)
                ld->free_layout_hdr(lo);
        else
                kfree(lo);
}
/**
 * nfs_free_unlinkdata - release data from a sillydelete operation.
 * @data: pointer to unlink structure.
 */
static void nfs_free_unlinkdata(struct nfs_unlinkdata *data)
{
        iput(data->dir);
        put_rpccred(data->cred);
        kfree(data->args.name.name);
        kfree(data);
}
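/*
 * Note the pairing: nfs_free_unlinkdata() undoes exactly what the
 * async-unlink setup paths later in this section acquire - the igrab()
 * on the directory, the rpc_lookup_cred() reference, and the allocated
 * copy of the name - so it is safe to call from every completion and
 * failure path.
 */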
/*
 * For the moment, the only task for the NFS clear_inode method is to
 * release the mmap credential
 */
static void nfs_clear_inode(struct inode *inode)
{
        struct rpc_cred *cred = NFS_I(inode)->mm_cred;

        if (cred)
                put_rpccred(cred);
}
static inline int
nfs_direct_write_rpc(struct file *file, struct nfs_writeargs *arg,
                struct nfs_writeverf *verf)
{
        int result;
        struct inode *inode = file->f_dentry->d_inode;
        struct nfs_fattr fattr;
        struct rpc_message msg;
        struct nfs_writeres res = { &fattr, verf, 0 };

#ifdef CONFIG_NFS_V3
        msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ?
                                NFS3PROC_WRITE : NFSPROC_WRITE;
#else
        msg.rpc_proc = NFSPROC_WRITE;
#endif
        msg.rpc_argp = arg;
        msg.rpc_resp = &res;

        lock_kernel();
        msg.rpc_cred = get_rpccred(nfs_file_cred(file));
        fattr.valid = 0;
        result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
        nfs_write_attributes(inode, &fattr);
        put_rpccred(msg.rpc_cred);
        unlock_kernel();

#ifdef CONFIG_NFS_V3
        if (NFS_PROTO(inode)->version == 3) {
                if (result > 0) {
                        if ((arg->stable == NFS_FILE_SYNC) &&
                            (verf->committed != NFS_FILE_SYNC)) {
                                printk(KERN_ERR
                                       "%s: server didn't sync stable write request\n",
                                       __FUNCTION__);
                                return -EIO;
                        }
                        if (result != arg->count) {
                                printk(KERN_INFO
                                       "%s: short write, count=%u, result=%d\n",
                                       __FUNCTION__, arg->count, result);
                        }
                }
                return result;
        } else {
#endif
                verf->committed = NFS_FILE_SYNC;        /* NFSv2 always syncs data */
                if (result == 0)
                        return arg->count;
                return result;
#ifdef CONFIG_NFS_V3
        }
#endif
}
static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = clp->cl_machine_cred;
        clp->cl_machine_cred = NULL;
        spin_unlock(&clp->cl_lock);
        if (cred != NULL)
                put_rpccred(cred);
}
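/*
 * The shape above is deliberate: the pointer is detached while cl_lock
 * is held, but put_rpccred() runs only after the lock is dropped, since
 * releasing the last reference tears the credential down.  A hedged
 * sketch of the matching setter side (hypothetical helper, not from the
 * source), following the same detach-then-release discipline:
 */
static void example_swap_machine_cred(struct nfs_client *clp,
                struct rpc_cred *new)
{
        struct rpc_cred *old;

        spin_lock(&clp->cl_lock);
        old = clp->cl_machine_cred;
        clp->cl_machine_cred = get_rpccred(new);
        spin_unlock(&clp->cl_lock);
        if (old != NULL)
                put_rpccred(old);       /* release outside the spinlock */
}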
void nfs4_renew_state(struct work_struct *work)
{
        const struct nfs4_state_maintenance_ops *ops;
        struct nfs_client *clp =
                container_of(work, struct nfs_client, cl_renewd.work);
        struct rpc_cred *cred;
        long lease;
        unsigned long last, now;
        unsigned renew_flags = 0;

        ops = clp->cl_mvops->state_renewal_ops;
        dprintk("%s: start\n", __func__);

        if (test_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state))
                goto out;

        spin_lock(&clp->cl_lock);
        lease = clp->cl_lease_time;
        last = clp->cl_last_renewal;
        now = jiffies;
        /* Are we close to a lease timeout? */
        if (time_after(now, last + lease/3))
                renew_flags |= NFS4_RENEW_TIMEOUT;
        if (nfs_delegations_present(clp))
                renew_flags |= NFS4_RENEW_DELEGATION_CB;

        if (renew_flags != 0) {
                cred = ops->get_state_renewal_cred_locked(clp);
                spin_unlock(&clp->cl_lock);
                if (cred == NULL) {
                        if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {
                                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                                goto out;
                        }
                        nfs_expire_all_delegations(clp);
                } else {
                        /* Queue an asynchronous RENEW. */
                        ops->sched_state_renewal(clp, cred, renew_flags);
                        put_rpccred(cred);
                        goto out_exp;
                }
        } else {
                dprintk("%s: failed to call renewd. Reason: lease not expired\n",
                        __func__);
                spin_unlock(&clp->cl_lock);
        }
        nfs4_schedule_state_renewal(clp);
out_exp:
        nfs_expire_unreferenced_delegations(clp);
out:
        dprintk("%s: done\n", __func__);
}
static bool generic_key_to_expire(struct rpc_cred *cred)
{
        struct auth_cred *acred = &container_of(cred, struct generic_cred,
                                                gc_base)->acred;
        bool ret;

        get_rpccred(cred);
        ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
        put_rpccred(cred);

        return ret;
}
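/*
 * generic_key_to_expire() pins the cred with get_rpccred() only so the
 * embedded acred cannot go away while the flag is sampled.  The same
 * pin/sample/unpin shape would apply to any other ac_flags bit; for
 * instance (hypothetical helper, not from the source):
 */
static bool example_key_notify_pending(struct rpc_cred *cred)
{
        struct auth_cred *acred = &container_of(cred, struct generic_cred,
                                                gc_base)->acred;
        bool ret;

        get_rpccred(cred);      /* pin while we dereference acred */
        ret = test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
        put_rpccred(cred);
        return ret;
}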
void nfs4_renew_state(struct work_struct *work)
{
        struct nfs4_state_maintenance_ops *ops;
        struct nfs_client *clp =
                container_of(work, struct nfs_client, cl_renewd.work);
        struct rpc_cred *cred;
        long lease, timeout;
        unsigned long last, now;

        ops = nfs4_state_renewal_ops[clp->cl_minorversion];
        dprintk("%s: start\n", __func__);
        /* Are there any active superblocks? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
        spin_lock(&clp->cl_lock);
        lease = clp->cl_lease_time;
        last = clp->cl_last_renewal;
        now = jiffies;
        timeout = (2 * lease) / 3 + (long)last - (long)now;
        /* Are we close to a lease timeout? */
        if (time_after(now, last + lease/3)) {
                cred = ops->get_state_renewal_cred_locked(clp);
                spin_unlock(&clp->cl_lock);
                if (cred == NULL) {
                        if (list_empty(&clp->cl_delegations)) {
                                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                                goto out;
                        }
                        nfs_expire_all_delegations(clp);
                } else {
                        /* Queue an asynchronous RENEW. */
                        ops->sched_state_renewal(clp, cred);
                        put_rpccred(cred);
                }
                timeout = (2 * lease) / 3;
                spin_lock(&clp->cl_lock);
        } else
                dprintk("%s: failed to call renewd. Reason: lease not expired\n",
                        __func__);
        if (timeout < 5 * HZ)   /* safeguard */
                timeout = 5 * HZ;
        dprintk("%s: requeueing work. Lease period = %ld\n",
                __func__, (timeout + HZ - 1) / HZ);
        cancel_delayed_work(&clp->cl_renewd);
        schedule_delayed_work(&clp->cl_renewd, timeout);
        spin_unlock(&clp->cl_lock);
        nfs_expire_unreferenced_delegations(clp);
out:
        dprintk("%s: done\n", __func__);
}
/*
 * called with dp->dl_count inc'ed.
 * nfs4_lock_state() may or may not have been called.
 */
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
        struct nfs4_client *clp = dp->dl_client;
        struct rpc_clnt *clnt = clp->cl_callback.cb_client;
        struct nfs4_cb_recall *cbr = &dp->dl_recall;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
                .rpc_argp = cbr,
        };
        int retries = 1;        /* one retry after the initial call */
        int status = 0;

        if ((!atomic_read(&clp->cl_callback.cb_set)) || !clnt)
                return;
        msg.rpc_cred = nfsd4_lookupcred(clp, 0);
        if (IS_ERR(msg.rpc_cred))
                goto out;
        cbr->cbr_trunc = 0;     /* XXX need to implement truncate optimization */
        cbr->cbr_dp = dp;

        status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
        while (retries--) {
                switch (status) {
                case -EIO:
                        /* Network partition? */
                case -EBADHANDLE:
                case -NFS4ERR_BAD_STATEID:
                        /* Race: client probably got cb_recall
                         * before open reply granting delegation */
                        break;
                default:
                        goto out_put_cred;
                }
                ssleep(2);
                status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
        }
out_put_cred:
        put_rpccred(msg.rpc_cred);
out:
        if (status == -EIO)
                atomic_set(&clp->cl_callback.cb_set, 0);
        /* Success or failure, now we're either waiting for lease expiration
         * or deleg_return. */
        dprintk("NFSD: nfs4_cb_recall: dp %p dl_flock %p dl_count %d\n",
                dp, dp->dl_flock, atomic_read(&dp->dl_count));
        nfs4_put_delegation(dp);
}
void nfs4_renew_state(struct work_struct *work)
{
        const struct nfs4_state_maintenance_ops *ops;
        struct nfs_client *clp =
                container_of(work, struct nfs_client, cl_renewd.work);
        struct rpc_cred *cred;
        long lease;
        unsigned long last, now;

        ops = clp->cl_mvops->state_renewal_ops;
        dprintk("%s: start\n", __func__);

        if (test_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state))
                goto out;

        spin_lock(&clp->cl_lock);
        lease = clp->cl_lease_time;
        last = clp->cl_last_renewal;
        now = jiffies;
        /* Are we close to a lease timeout? */
        if (time_after(now, last + lease/3)) {
                cred = ops->get_state_renewal_cred_locked(clp);
                spin_unlock(&clp->cl_lock);
                if (cred == NULL) {
                        if (list_empty(&clp->cl_delegations)) {
                                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                                goto out;
                        }
                        nfs_expire_all_delegations(clp);
                } else {
                        struct ve_struct *ve;

                        /* Queue an asynchronous RENEW. */
                        ve = set_exec_env(clp->cl_rpcclient->cl_xprt->owner_env);
                        ops->sched_state_renewal(clp, cred);
                        (void)set_exec_env(ve);
                        put_rpccred(cred);
                        goto out_exp;
                }
        } else {
                dprintk("%s: failed to call renewd. Reason: lease not expired\n",
                        __func__);
                spin_unlock(&clp->cl_lock);
        }
        nfs4_schedule_state_renewal(clp);
out_exp:
        nfs_expire_unreferenced_delegations(clp);
out:
        dprintk("%s: done\n", __func__);
}
static void pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (!list_empty(&lo->plh_layouts)) {
                struct nfs_client *clp = server->nfs_client;

                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        put_rpccred(lo->plh_lc_cred);
        ld->free_layout_hdr(lo);
}
/**
 * nfs_async_rename_release - Release the sillyrename data.
 * @calldata: the struct nfs_renamedata to be released
 */
static void nfs_async_rename_release(void *calldata)
{
        struct nfs_renamedata *data = calldata;
        struct super_block *sb = data->old_dir->i_sb;

        if (data->old_dentry->d_inode)
                nfs_mark_for_revalidate(data->old_dentry->d_inode);

        dput(data->old_dentry);
        dput(data->new_dentry);
        iput(data->old_dir);
        iput(data->new_dir);
        nfs_sb_deactive(sb);
        put_rpccred(data->cred);
        kfree(data);
}
void nfs4_renew_state(void *data)
{
        struct nfs4_client *clp = (struct nfs4_client *)data;
        struct rpc_cred *cred;
        long lease, timeout;
        unsigned long last, now;

        down_read(&clp->cl_sem);
        dprintk("%s: start\n", __FUNCTION__);
        /* Are there any active superblocks? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
        spin_lock(&clp->cl_lock);
        lease = clp->cl_lease_time;
        last = clp->cl_last_renewal;
        now = jiffies;
        timeout = (2 * lease) / 3 + (long)last - (long)now;
        /* Are we close to a lease timeout? */
        if (time_after(now, last + lease/3)) {
                cred = nfs4_get_renew_cred(clp);
                if (cred == NULL) {
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                        spin_unlock(&clp->cl_lock);
                        nfs_expire_all_delegations(clp);
                        goto out;
                }
                spin_unlock(&clp->cl_lock);
                /* Queue an asynchronous RENEW. */
                nfs4_proc_async_renew(clp, cred);
                put_rpccred(cred);
                timeout = (2 * lease) / 3;
                spin_lock(&clp->cl_lock);
        } else
                dprintk("%s: failed to call renewd. Reason: lease not expired\n",
                        __FUNCTION__);
        if (timeout < 5 * HZ)   /* safeguard */
                timeout = 5 * HZ;
        dprintk("%s: requeueing work. Lease period = %ld\n",
                __FUNCTION__, (timeout + HZ - 1) / HZ);
        cancel_delayed_work(&clp->cl_renewd);
        schedule_delayed_work(&clp->cl_renewd, timeout);
        spin_unlock(&clp->cl_lock);
out:
        up_read(&clp->cl_sem);
        dprintk("%s: done\n", __FUNCTION__);
}
/*
 * Test the current time (now) against the underlying credential key expiry
 * minus a timeout and setup notification.
 *
 * The normal case:
 * If 'now' is before the key expiry minus RPC_KEY_EXPIRE_TIMEO, set
 * the RPC_CRED_NOTIFY_TIMEOUT flag to set up the underlying credential
 * rpc_credops crmatch routine to notify this generic cred when its key
 * expiration is within RPC_KEY_EXPIRE_TIMEO, and return 0.
 *
 * The error case:
 * If the underlying cred lookup fails, return -EACCES.
 *
 * The 'almost' error case:
 * If 'now' is within key expiry minus RPC_KEY_EXPIRE_TIMEO, but not within
 * key expiry minus RPC_KEY_EXPIRE_FAIL, set the RPC_CRED_KEY_EXPIRE_SOON
 * bit on the acred ac_flags and return 0.
 */
static int
generic_key_timeout(struct rpc_auth *auth, struct rpc_cred *cred)
{
        struct auth_cred *acred = &container_of(cred, struct generic_cred,
                                                gc_base)->acred;
        struct rpc_cred *tcred;
        int ret = 0;

        /* Fast track for non crkey_timeout (no key) underlying credentials */
        if (test_bit(RPC_CRED_NO_CRKEY_TIMEOUT, &acred->ac_flags))
                return 0;

        /* Fast track for the normal case */
        if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags))
                return 0;

        /* lookup_cred either returns a valid referenced rpc_cred, or PTR_ERR */
        tcred = auth->au_ops->lookup_cred(auth, acred, 0);
        if (IS_ERR(tcred))
                return -EACCES;

        if (!tcred->cr_ops->crkey_timeout) {
                set_bit(RPC_CRED_NO_CRKEY_TIMEOUT, &acred->ac_flags);
                ret = 0;
                goto out_put;
        }

        /* Test for the almost error case */
        ret = tcred->cr_ops->crkey_timeout(tcred);
        if (ret != 0) {
                set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
                ret = 0;
        } else {
                /* In case underlying cred key has been reset */
                if (test_and_clear_bit(RPC_CRED_KEY_EXPIRE_SOON,
                                        &acred->ac_flags))
                        dprintk("RPC: UID %d Credential key reset\n",
                                from_kuid(&init_user_ns, tcred->cr_uid));
                /* set up fasttrack for the normal case */
                set_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
        }

out_put:
        put_rpccred(tcred);
        return ret;
}
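/*
 * A minimal sketch of the ->crkey_timeout() hook consumed above - not
 * from the source.  It assumes the underlying credential can report its
 * key expiry as a jiffies value through a hypothetical helper, and that
 * RPC_KEY_EXPIRE_TIMEO is expressed in jiffies for this sketch; any
 * non-zero return is what generic_key_timeout() treats as the
 * 'almost error' case.
 */
static int example_crkey_timeout(struct rpc_cred *tcred)
{
        unsigned long expiry = example_cred_key_expiry(tcred);  /* hypothetical */

        if (time_after(jiffies, expiry - RPC_KEY_EXPIRE_TIMEO))
                return -EACCES;         /* key expires soon */
        return 0;
}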
void nfs4_destroy_session(struct nfs4_session *session)
{
        struct rpc_xprt *xprt;
        struct rpc_cred *cred;

        cred = nfs4_get_clid_cred(session->clp);
        nfs4_proc_destroy_session(session, cred);
        if (cred)
                put_rpccred(cred);

        rcu_read_lock();
        xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
        rcu_read_unlock();
        dprintk("%s Destroy backchannel for xprt %p\n", __func__, xprt);
        xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
        nfs4_destroy_session_slot_tables(session);
        kfree(session);
}
/*
 * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
        struct sockaddr_in addr, saddr;
        struct nfs4_callback *cb = &clp->cl_callback;
        struct rpc_timeout timeparms;
        struct rpc_xprt *xprt;
        struct rpc_program *program = &cb->cb_program;
        struct rpc_stat *stat = &cb->cb_stat;
        struct rpc_clnt *clnt;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
                .rpc_argp = clp,
        };
        char hostname[32];
        int status;

        if (atomic_read(&cb->cb_set))
                return;

        /* Initialize address */
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(cb->cb_port);
        addr.sin_addr.s_addr = htonl(cb->cb_addr);

        /* Initialize timeout */
        timeparms.to_initval = (NFSD_LEASE_TIME/4) * HZ;
        timeparms.to_retries = 0;
        timeparms.to_maxval = (NFSD_LEASE_TIME/2) * HZ;
        timeparms.to_exponential = 1;

        /* Create RPC transport */
        xprt = xprt_create_proto(IPPROTO_TCP, &addr, &timeparms);
        if (IS_ERR(xprt)) {
                dprintk("NFSD: couldn't create callback transport!\n");
                goto out_err;
        }

        /* Initialize rpc_program */
        program->name = "nfs4_cb";
        program->number = cb->cb_prog;
        program->nrvers = ARRAY_SIZE(nfs_cb_version);
        program->version = nfs_cb_version;
        program->stats = stat;

        /* Initialize rpc_stat */
        memset(stat, 0, sizeof(struct rpc_stat));
        stat->program = program;

        /* Create RPC client
         *
         * XXX AUTH_UNIX only - need AUTH_GSS....
         */
        sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(addr.sin_addr.s_addr));
        clnt = rpc_new_client(xprt, hostname, program, 1, RPC_AUTH_UNIX);
        if (IS_ERR(clnt)) {
                dprintk("NFSD: couldn't create callback client\n");
                goto out_err;
        }
        clnt->cl_intr = 0;
        clnt->cl_softrtry = 1;

        /* Set source address */
        if (cb->cb_saddr) {
                memset(&saddr, 0, sizeof(saddr));
                saddr.sin_family = AF_INET;
                saddr.sin_addr.s_addr = cb->cb_saddr;
                xprt->srcaddr = saddr;
        }

        /* Kick rpciod, put the call on the wire. */
        if (rpciod_up() != 0) {
                dprintk("nfsd: couldn't start rpciod for callbacks!\n");
                goto out_clnt;
        }
        cb->cb_client = clnt;

        /* the task holds a reference to the nfs4_client struct */
        atomic_inc(&clp->cl_count);

        msg.rpc_cred = nfsd4_lookupcred(clp, 0);
        if (IS_ERR(msg.rpc_cred))
                goto out_rpciod;
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC,
                                &nfs4_cb_null_ops, NULL);
        put_rpccred(msg.rpc_cred);
        if (status != 0) {
                dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n");
                goto out_rpciod;
        }
        return;

out_rpciod:
        atomic_dec(&clp->cl_count);
        rpciod_down();
        cb->cb_client = NULL;
out_clnt:
        rpc_shutdown_client(clnt);
out_err:
        dprintk("NFSD: warning: no callback path to client %.*s\n",
                (int)clp->cl_name.len, clp->cl_name.data);
}
int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
        struct nfs_access_cache *cache = &NFS_I(inode)->cache_access;
        struct rpc_cred *cred;
        int mode = inode->i_mode;
        int res;

        if (mask == 0)
                return 0;
        if (mask & MAY_WRITE) {
                /*
                 * Nobody gets write access to a read-only fs.
                 */
                if (IS_RDONLY(inode) &&
                    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
                        return -EROFS;
                /*
                 * Nobody gets write access to an immutable file.
                 */
                if (IS_IMMUTABLE(inode))
                        return -EACCES;
        }
        /* Are we checking permissions on anything other than lookup/execute? */
        if ((mask & MAY_EXEC) == 0) {
                /* We only need to check permissions on file open() and access() */
                if (!nd || !(nd->flags & (LOOKUP_OPEN|LOOKUP_ACCESS)))
                        return 0;
                /* NFSv4 has atomic_open... */
                if (NFS_PROTO(inode)->version > 3 && (nd->flags & LOOKUP_OPEN))
                        return 0;
        }

        lock_kernel();

        if (!NFS_PROTO(inode)->access)
                goto out_notsup;

        cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
        if (cache->cred == cred
            && time_before(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))
            && !(NFS_FLAGS(inode) & NFS_INO_INVALID_ATTR)) {
                if (!(res = cache->err)) {
                        /* Is the mask a subset of an accepted mask? */
                        if ((cache->mask & mask) == mask)
                                goto out;
                } else {
                        /* ...or is it a superset of a rejected mask? */
                        if ((cache->mask & mask) == cache->mask)
                                goto out;
                }
        }

        res = NFS_PROTO(inode)->access(inode, cred, mask);
        if (!res || res == -EACCES)
                goto add_cache;
out:
        put_rpccred(cred);
        unlock_kernel();
        return res;
out_notsup:
        nfs_revalidate_inode(NFS_SERVER(inode), inode);
        res = vfs_permission(inode, mask);
        unlock_kernel();
        return res;
add_cache:
        cache->jiffies = jiffies;
        if (cache->cred)
                put_rpccred(cache->cred);
        cache->cred = cred;
        cache->mask = mask;
        cache->err = res;
        unlock_kernel();
        return res;
}
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir,
                struct nfs_unlinkdata *data)
{
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_message = &msg,
                .callback_ops = &nfs_unlink_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC,
        };
        struct rpc_task *task;
        struct dentry *alias;

        alias = d_lookup(parent, &data->args.name);
        if (alias != NULL) {
                int ret = 0;

                /*
                 * Hey, we raced with lookup... See if we need to transfer
                 * the sillyrename information to the aliased dentry.
                 */
                nfs_free_dname(data);
                spin_lock(&alias->d_lock);
                if (alias->d_inode != NULL &&
                    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
                        alias->d_fsdata = data;
                        alias->d_flags |= DCACHE_NFSFS_RENAMED;
                        ret = 1;
                }
                spin_unlock(&alias->d_lock);
                nfs_dec_sillycount(dir);
                dput(alias);
                return ret;
        }
        data->dir = igrab(dir);
        if (!data->dir) {
                nfs_dec_sillycount(dir);
                return 0;
        }
        nfs_sb_active(dir->i_sb);
        data->args.fh = NFS_FH(dir);
        nfs_fattr_init(data->res.dir_attr);

        NFS_PROTO(dir)->unlink_setup(&msg, dir);

        task_setup_data.rpc_client = NFS_CLIENT(dir);
        task = rpc_run_task(&task_setup_data);
        if (!IS_ERR(task))
                rpc_put_task(task);
        return 1;
}

static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
{
        struct dentry *parent;
        struct inode *dir;
        int ret = 0;

        parent = dget_parent(dentry);
        if (parent == NULL)
                goto out_free;
        dir = parent->d_inode;
        if (nfs_copy_dname(dentry, data) != 0)
                goto out_dput;
        /* Non-exclusive lock protects against concurrent lookup() calls */
        spin_lock(&dir->i_lock);
        if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
                /* Deferred delete */
                hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
                spin_unlock(&dir->i_lock);
                ret = 1;
                goto out_dput;
        }
        spin_unlock(&dir->i_lock);
        ret = nfs_do_call_unlink(parent, dir, data);
out_dput:
        dput(parent);
out_free:
        return ret;
}

void nfs_block_sillyrename(struct dentry *dentry)
{
        struct nfs_inode *nfsi = NFS_I(dentry->d_inode);

        wait_event(nfsi->waitqueue,
                   atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
}

void nfs_unblock_sillyrename(struct dentry *dentry)
{
        struct inode *dir = dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(dir);
        struct nfs_unlinkdata *data;

        atomic_inc(&nfsi->silly_count);
        spin_lock(&dir->i_lock);
        while (!hlist_empty(&nfsi->silly_list)) {
                if (!atomic_inc_not_zero(&nfsi->silly_count))
                        break;
                data = hlist_entry(nfsi->silly_list.first,
                                struct nfs_unlinkdata, list);
                hlist_del(&data->list);
                spin_unlock(&dir->i_lock);
                if (nfs_do_call_unlink(dentry, dir, data) == 0)
                        nfs_free_unlinkdata(data);
                spin_lock(&dir->i_lock);
        }
        spin_unlock(&dir->i_lock);
}

int nfs_async_unlink(struct inode *dir, struct dentry *dentry)
{
        struct nfs_unlinkdata *data;
        int status = -ENOMEM;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                goto out;

        data->cred = rpc_lookup_cred();
        if (IS_ERR(data->cred)) {
                status = PTR_ERR(data->cred);
                goto out_free;
        }
        data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
        data->res.dir_attr = &data->dir_attr;

        status = -EBUSY;
        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
                goto out_unlock;
        dentry->d_flags |= DCACHE_NFSFS_RENAMED;
        dentry->d_fsdata = data;
        spin_unlock(&dentry->d_lock);
        return 0;
out_unlock:
        spin_unlock(&dentry->d_lock);
        put_rpccred(data->cred);
out_free:
        kfree(data);
out:
        return status;
}

void nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
{
        struct nfs_unlinkdata *data = NULL;

        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
                dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
                data = dentry->d_fsdata;
        }
        spin_unlock(&dentry->d_lock);

        if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
                nfs_free_unlinkdata(data);
}
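/*
 * Taken together, the sillydelete life cycle above is: nfs_async_unlink()
 * allocates the nfs_unlinkdata (taking a cred reference) and parks it on
 * the dentry under DCACHE_NFSFS_RENAMED; dentry teardown calls
 * nfs_complete_unlink(), which detaches the data and hands it to
 * nfs_call_unlink(); that either fires the asynchronous UNLINK through
 * nfs_do_call_unlink() or defers it on silly_list for
 * nfs_unblock_sillyrename(); and nfs_free_unlinkdata() releases the cred
 * and directory references on every completion or failure path.
 */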
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct file *file, struct inode *inode,
                struct page *page, unsigned int offset, unsigned int count)
{
        struct rpc_cred *cred = NULL;
        loff_t base;
        unsigned int wsize = NFS_SERVER(inode)->wsize;
        int result, refresh = 0, written = 0, flags;
        u8 *buffer;
        struct nfs_fattr fattr;
        struct nfs_writeverf verf;

        if (file)
                cred = get_rpccred(nfs_file_cred(file));
        if (!cred)
                cred = get_rpccred(NFS_I(inode)->mm_cred);

        dprintk("NFS: nfs_writepage_sync(%x/%Ld %d@%Ld)\n",
                inode->i_dev, (long long)NFS_FILEID(inode),
                count, (long long)(page_offset(page) + offset));

        buffer = kmap(page) + offset;
        base = page_offset(page) + offset;

        flags = ((IS_SWAPFILE(inode)) ? NFS_RW_SWAP : 0) | NFS_RW_SYNC;

        do {
                if (count < wsize && !IS_SWAPFILE(inode))
                        wsize = count;

                result = NFS_PROTO(inode)->write(inode, cred, &fattr, flags,
                                                 base, wsize, buffer, &verf);
                nfs_write_attributes(inode, &fattr);

                if (result < 0) {
                        /* Must mark the page invalid after I/O error */
                        ClearPageUptodate(page);
                        goto io_error;
                }
                if (result != wsize)
                        printk("NFS: short write, wsize=%u, result=%d\n",
                               wsize, result);
                refresh = 1;
                buffer += wsize;
                base += wsize;
                written += wsize;
                count -= wsize;
                /*
                 * If we've extended the file, update the inode
                 * now so we don't invalidate the cache.
                 */
                if (base > inode->i_size)
                        inode->i_size = base;
        } while (count);

        if (PageError(page))
                ClearPageError(page);

io_error:
        kunmap(page);
        if (cred)
                put_rpccred(cred);

        return written ? written : result;
}
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
        struct nfs4_setclientid_res clid = {
                .clientid = clp->cl_clientid,
                .confirm = clp->cl_confirm,
        };
        unsigned short port;
        int status;

        if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
                goto do_confirm;
        port = nfs_callback_tcpport;
        if (clp->cl_addr.ss_family == AF_INET6)
                port = nfs_callback_tcpport6;

        status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
        if (status != 0)
                goto out;
        clp->cl_clientid = clid.clientid;
        clp->cl_confirm = clid.confirm;
        set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
        status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
        if (status != 0)
                goto out;
        clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
        nfs4_schedule_state_renewal(clp);
out:
        return status;
}

struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
        struct rpc_cred *cred = NULL;

        if (clp->cl_machine_cred != NULL)
                cred = get_rpccred(clp->cl_machine_cred);
        return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = clp->cl_machine_cred;
        clp->cl_machine_cred = NULL;
        spin_unlock(&clp->cl_lock);
        if (cred != NULL)
                put_rpccred(cred);
}

struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred = NULL;

        /* Use machine credentials if available */
        cred = nfs4_get_machine_cred_locked(clp);
        if (cred != NULL)
                goto out;

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL;
             pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (list_empty(&sp->so_states))
                        continue;
                cred = get_rpccred(sp->so_cred);
                break;
        }
out:
        return cred;
}

#if defined(CONFIG_NFS_V4_1)

static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
        int status;
        struct nfs_fsinfo fsinfo;

        if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
                nfs4_schedule_state_renewal(clp);
                return 0;
        }

        status = nfs4_proc_get_lease_time(clp, &fsinfo);
        if (status == 0) {
                /* Update lease time and schedule renewal */
                spin_lock(&clp->cl_lock);
                clp->cl_lease_time = fsinfo.lease_time * HZ;
                clp->cl_last_renewal = jiffies;
                spin_unlock(&clp->cl_lock);

                nfs4_schedule_state_renewal(clp);
        }

        return status;
}
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir,
                struct nfs_unlinkdata *data)
{
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_message = &msg,
                .callback_ops = &nfs_unlink_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC,
        };
        struct rpc_task *task;
        struct dentry *alias;

        alias = d_lookup(parent, &data->args.name);
        if (alias != NULL) {
                int ret;
                void *devname_garbage = NULL;

                nfs_free_dname(data);
                ret = nfs_copy_dname(alias, data);
                spin_lock(&alias->d_lock);
                if (ret == 0 && alias->d_inode != NULL &&
                    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
                        devname_garbage = alias->d_fsdata;
                        alias->d_fsdata = data;
                        alias->d_flags |= DCACHE_NFSFS_RENAMED;
                        ret = 1;
                } else
                        ret = 0;
                spin_unlock(&alias->d_lock);
                nfs_dec_sillycount(dir);
                dput(alias);
                kfree(devname_garbage);
                return ret;
        }
        data->dir = igrab(dir);
        if (!data->dir) {
                nfs_dec_sillycount(dir);
                return 0;
        }
        nfs_sb_active(dir->i_sb);
        data->args.fh = NFS_FH(dir);
        nfs_fattr_init(data->res.dir_attr);

        NFS_PROTO(dir)->unlink_setup(&msg, dir);

        task_setup_data.rpc_client = NFS_CLIENT(dir);
        task = rpc_run_task(&task_setup_data);
        if (!IS_ERR(task))
                rpc_put_task_async(task);
        return 1;
}

static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
{
        struct dentry *parent;
        struct inode *dir;
        int ret = 0;

        parent = dget_parent(dentry);
        if (parent == NULL)
                goto out_free;
        dir = parent->d_inode;
        spin_lock(&dir->i_lock);
        if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
                hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
                spin_unlock(&dir->i_lock);
                ret = 1;
                goto out_dput;
        }
        spin_unlock(&dir->i_lock);
        ret = nfs_do_call_unlink(parent, dir, data);
out_dput:
        dput(parent);
out_free:
        return ret;
}

void nfs_block_sillyrename(struct dentry *dentry)
{
        struct nfs_inode *nfsi = NFS_I(dentry->d_inode);

        wait_event(nfsi->waitqueue,
                   atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
}

void nfs_unblock_sillyrename(struct dentry *dentry)
{
        struct inode *dir = dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(dir);
        struct nfs_unlinkdata *data;

        atomic_inc(&nfsi->silly_count);
        spin_lock(&dir->i_lock);
        while (!hlist_empty(&nfsi->silly_list)) {
                if (!atomic_inc_not_zero(&nfsi->silly_count))
                        break;
                data = hlist_entry(nfsi->silly_list.first,
                                struct nfs_unlinkdata, list);
                hlist_del(&data->list);
                spin_unlock(&dir->i_lock);
                if (nfs_do_call_unlink(dentry, dir, data) == 0)
                        nfs_free_unlinkdata(data);
                spin_lock(&dir->i_lock);
        }
        spin_unlock(&dir->i_lock);
}

static int nfs_async_unlink(struct inode *dir, struct dentry *dentry)
{
        struct nfs_unlinkdata *data;
        int status = -ENOMEM;
        void *devname_garbage = NULL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                goto out;

        data->cred = rpc_lookup_cred();
        if (IS_ERR(data->cred)) {
                status = PTR_ERR(data->cred);
                goto out_free;
        }
        data->res.dir_attr = &data->dir_attr;

        status = -EBUSY;
        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
                goto out_unlock;
        dentry->d_flags |= DCACHE_NFSFS_RENAMED;
        devname_garbage = dentry->d_fsdata;
        dentry->d_fsdata = data;
        spin_unlock(&dentry->d_lock);
        if (devname_garbage)
                kfree(devname_garbage);
        return 0;
out_unlock:
        spin_unlock(&dentry->d_lock);
        put_rpccred(data->cred);
out_free:
        kfree(data);
out:
        return status;
}

void nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
{
        struct nfs_unlinkdata *data = NULL;

        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
                dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
                data = dentry->d_fsdata;
                dentry->d_fsdata = NULL;
        }
        spin_unlock(&dentry->d_lock);

        if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
                nfs_free_unlinkdata(data);
}

static void nfs_cancel_async_unlink(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
                struct nfs_unlinkdata *data = dentry->d_fsdata;

                dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
                dentry->d_fsdata = NULL;
                spin_unlock(&dentry->d_lock);
                nfs_free_unlinkdata(data);
                return;
        }
        spin_unlock(&dentry->d_lock);
}
int nfs_permission(struct inode *inode, int mask)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_access_cache *cache = &NFS_I(inode)->cache_access;
        struct rpc_cred *cred;
        int mode = inode->i_mode;
        int error;

        if (mask & MAY_WRITE) {
                /*
                 * Nobody gets write access to a read-only fs.
                 */
                if (IS_RDONLY(inode) &&
                    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
                        return -EROFS;
                /*
                 * Nobody gets write access to an immutable file.
                 */
                if (IS_IMMUTABLE(inode))
                        return -EACCES;
        }

        if ((server->flags & NFS_MOUNT_NOACL) || !NFS_PROTO(inode)->access)
                goto out_notsup;

        cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
        if (cache->cred == cred
            && time_before(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))) {
                if (!cache->err) {
                        /* Is the mask a subset of an accepted mask? */
                        if ((cache->mask & mask) == mask)
                                goto out_cached;
                } else {
                        /* ...or is it a superset of a rejected mask? */
                        if ((cache->mask & mask) == cache->mask)
                                goto out_cached;
                }
        }

        error = NFS_PROTO(inode)->access(inode, cred, mask);
        if (!error || error == -EACCES) {
                cache->jiffies = jiffies;
                if (cache->cred)
                        put_rpccred(cache->cred);
                cache->cred = cred;
                cache->mask = mask;
                cache->err = error;
                return error;
        }
        put_rpccred(cred);
out_notsup:
        nfs_revalidate_inode(NFS_SERVER(inode), inode);
        return vfs_permission(inode, mask);
out_cached:
        put_rpccred(cred);
        return cache->err;
}
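/*
 * Worked example of the mask tests above: if a previous ACCESS call was
 * accepted with cache->mask = MAY_READ|MAY_EXEC (cache->err == 0), a
 * request for MAY_READ satisfies (cache->mask & mask) == mask and is
 * answered from the cache.  If instead MAY_WRITE was rejected
 * (cache->err == -EACCES), a request for MAY_READ|MAY_WRITE satisfies
 * (cache->mask & mask) == cache->mask - it asks for at least everything
 * that was already refused - so the cached -EACCES is returned without
 * another RPC.
 */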
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir,
                struct nfs_unlinkdata *data)
{
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_message = &msg,
                .callback_ops = &nfs_unlink_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC,
        };
        struct rpc_task *task;
        struct dentry *alias;

        alias = d_lookup(parent, &data->args.name);
        if (alias != NULL) {
                int ret;
                void *devname_garbage = NULL;

                /*
                 * Hey, we raced with lookup... See if we need to transfer
                 * the sillyrename information to the aliased dentry.
                 */
                nfs_free_dname(data);
                ret = nfs_copy_dname(alias, data);
                spin_lock(&alias->d_lock);
                if (ret == 0 && alias->d_inode != NULL &&
                    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
                        devname_garbage = alias->d_fsdata;
                        alias->d_fsdata = data;
                        alias->d_flags |= DCACHE_NFSFS_RENAMED;
                        ret = 1;
                } else
                        ret = 0;
                spin_unlock(&alias->d_lock);
                nfs_dec_sillycount(dir);
                dput(alias);
                /*
                 * If we'd displaced old cached devname, free it.  At that
                 * point dentry is definitely not a root, so we won't need
                 * that anymore.
                 */
                kfree(devname_garbage);
                return ret;
        }
        data->dir = igrab(dir);
        if (!data->dir) {
                nfs_dec_sillycount(dir);
                return 0;
        }
        nfs_sb_active(dir->i_sb);
        data->args.fh = NFS_FH(dir);
        nfs_fattr_init(data->res.dir_attr);

        NFS_PROTO(dir)->unlink_setup(&msg, dir);

        task_setup_data.rpc_client = NFS_CLIENT(dir);
        task = rpc_run_task(&task_setup_data);
        if (!IS_ERR(task))
                rpc_put_task_async(task);
        return 1;
}

static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
{
        struct dentry *parent;
        struct inode *dir;
        int ret = 0;

        parent = dget_parent(dentry);
        if (parent == NULL)
                goto out_free;
        dir = parent->d_inode;
        /* Non-exclusive lock protects against concurrent lookup() calls */
        spin_lock(&dir->i_lock);
        if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
                /* Deferred delete */
                hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
                spin_unlock(&dir->i_lock);
                ret = 1;
                goto out_dput;
        }
        spin_unlock(&dir->i_lock);
        ret = nfs_do_call_unlink(parent, dir, data);
out_dput:
        dput(parent);
out_free:
        return ret;
}

void nfs_block_sillyrename(struct dentry *dentry)
{
        struct nfs_inode *nfsi = NFS_I(dentry->d_inode);

        wait_event(nfsi->waitqueue,
                   atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
}

void nfs_unblock_sillyrename(struct dentry *dentry)
{
        struct inode *dir = dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(dir);
        struct nfs_unlinkdata *data;

        atomic_inc(&nfsi->silly_count);
        spin_lock(&dir->i_lock);
        while (!hlist_empty(&nfsi->silly_list)) {
                if (!atomic_inc_not_zero(&nfsi->silly_count))
                        break;
                data = hlist_entry(nfsi->silly_list.first,
                                struct nfs_unlinkdata, list);
                hlist_del(&data->list);
                spin_unlock(&dir->i_lock);
                if (nfs_do_call_unlink(dentry, dir, data) == 0)
                        nfs_free_unlinkdata(data);
                spin_lock(&dir->i_lock);
        }
        spin_unlock(&dir->i_lock);
}

/**
 * nfs_async_unlink - asynchronous unlinking of a file
 * @dir: parent directory of dentry
 * @dentry: dentry to unlink
 */
static int nfs_async_unlink(struct inode *dir, struct dentry *dentry)
{
        struct nfs_unlinkdata *data;
        int status = -ENOMEM;
        void *devname_garbage = NULL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                goto out;

        data->cred = rpc_lookup_cred();
        if (IS_ERR(data->cred)) {
                status = PTR_ERR(data->cred);
                goto out_free;
        }
        data->res.dir_attr = &data->dir_attr;

        status = -EBUSY;
        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
                goto out_unlock;
        dentry->d_flags |= DCACHE_NFSFS_RENAMED;
        devname_garbage = dentry->d_fsdata;
        dentry->d_fsdata = data;
        spin_unlock(&dentry->d_lock);
        /*
         * If we'd displaced old cached devname, free it.  At that
         * point dentry is definitely not a root, so we won't need
         * that anymore.
         */
        if (devname_garbage)
                kfree(devname_garbage);
        return 0;
out_unlock:
        spin_unlock(&dentry->d_lock);
        put_rpccred(data->cred);
out_free:
        kfree(data);
out:
        return status;
}

/**
 * nfs_complete_unlink - Initialize completion of the sillydelete
 * @dentry: dentry to delete
 * @inode: inode
 *
 * Since we're most likely to be called by dentry_iput(), we
 * only use the dentry to find the sillydelete. We then copy the name
 * into the qstr.
 */
void nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
{
        struct nfs_unlinkdata *data = NULL;

        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
                dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
                data = dentry->d_fsdata;
                dentry->d_fsdata = NULL;
        }
        spin_unlock(&dentry->d_lock);

        if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
                nfs_free_unlinkdata(data);
}

/* Cancel a queued async unlink. Called when a sillyrename run fails. */
static void nfs_cancel_async_unlink(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
                struct nfs_unlinkdata *data = dentry->d_fsdata;

                dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
                dentry->d_fsdata = NULL;
                spin_unlock(&dentry->d_lock);
                nfs_free_unlinkdata(data);
                return;
        }
        spin_unlock(&dentry->d_lock);
}