Example #1
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_reserve\n", task->tk_pid);
	if (!clnt->cl_port) {
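		/* Not yet bound to a server port: hard mounts wait and retry,
		 * soft mounts fail the task with -EIO */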
		printk(KERN_NOTICE "%s: couldn't bind to server %s - %s.\n",
			clnt->cl_protname, clnt->cl_server,
			clnt->cl_softrtry? "giving up" : "retrying");
		if (!clnt->cl_softrtry) {
			rpc_delay(task, 5*HZ);
			return;
		}
		rpc_exit(task, -EIO);
		return;
	}
	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}
	task->tk_action  = call_reserveresult;
	task->tk_timeout = clnt->cl_timeout.to_resrvval;
	task->tk_status  = 0;
	clnt->cl_stats->rpccnt++;
	xprt_reserve(task);
}
Example #2
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n", 
				task->tk_pid, task->tk_status);
	task->tk_action = call_encode;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;

	if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL)
		return;
	printk("RPC: buffer allocation failed for task %p\n", task); 

	if (!signalled()) {
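		/* No signal pending: drop the transport slot and retry the
		 * reservation after a short delay */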
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
Example #3
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegation *dp = calldata;
	struct nfs4_client *clp = dp->dl_client;

	nfsd4_cb_done(task, calldata);

	switch (task->tk_status) {
	case -EIO:
		/* Network partition? */
		atomic_set(&clp->cl_cb_conn.cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
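		/* fall through to retry the recall below */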
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* success, or error we can't handle */
		goto done;
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call(task);
		return;
	} else {
		atomic_set(&clp->cl_cb_conn.cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
done:
	kfree(task->tk_msg.rpc_argp);
}
Example #4
static void
nlmclnt_unlock_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	int		status = req->a_res.status;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);

die:
	nlm_release_host(req->a_host);
	kfree(req);
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}
Example #5
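/*
 * Handle -EKEYEXPIRED: clear the error and schedule a delayed restart
 * of the call; returns 1 when the error has been handled here.
 */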
static int
nfs_async_handle_expired_key(struct rpc_task *task)
{
	if (task->tk_status != -EKEYEXPIRED)
		return 0;
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return 1;
}
Example #6
static int
nfs3_async_handle_jukebox(struct rpc_task *task)
{
	if (task->tk_status != -EJUKEBOX)
		return 0;
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return 1;
}
Example #7
static int
nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
{
	if (task->tk_status != -EJUKEBOX)
		return 0;
	nfs_inc_stats(inode, NFSIOS_DELAY);
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return 1;
}
Example #8
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req;
	int		status = task->tk_status;

	dprintk("RPC: %4d call_status (status %d)\n", 
				task->tk_pid, task->tk_status);

	if (status >= 0) {
		task->tk_action = call_decode;
	} else if (status == -ETIMEDOUT) {
		task->tk_action = call_timeout;
	} else if (status == -EAGAIN) {
		rpc_remove_wait_queue(task); /* remove from xprt_pending */
		if (!(req = task->tk_rqstp))
			task->tk_action = call_reserve;
		else if (!task->tk_buffer)
			task->tk_action = call_allocate;
		else if (req->rq_damaged)
			task->tk_action = call_encode;
		else
			task->tk_action = call_transmit;
	} else if (status == -ENOTCONN) {
		rpc_remove_wait_queue(task); /* remove from xprt_pending */
		task->tk_action = call_reconnect;
	} else if (status == -ECONNREFUSED && clnt->cl_autobind) {
		rpc_remove_wait_queue(task); /* remove from xprt_pending */
		task->tk_action = call_bind;
		clnt->cl_port = 0;
	} else if (status == -ECONNREFUSED && clnt->cl_to_err &&
		   !clnt->cl_softrtry) {
		/* NFS should not return on -ECONNREFUSED
		 * if it is hard mounted.
		 */
		req = task->tk_rqstp;
		rpc_remove_wait_queue(task); /* remove from xprt_pending */
		rpc_delay(task, req->rq_timeout.to_current);
		task->tk_action = call_timeout;
	} else {
		rpc_remove_wait_queue(task);
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
				clnt->cl_protname, -status);
		task->tk_action = NULL;
	}
}
Example #9
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

	nfsd4_cb_done(task, calldata);

	if (current_rpc_client == NULL) {
		/* We're shutting down; give up. */
		/* XXX: err, or is it ok just to fall through
		 * and rpc_restart_call? */
		return;
	}

	switch (task->tk_status) {
	case 0:
		return;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* Network partition? */
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
		if (current_rpc_client != task->tk_client) {
			/* queue a callback on the new connection: */
			atomic_inc(&dp->dl_count);
			nfsd4_cb_recall(dp);
			return;
		}
	}
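	/* Retry the recall after a two second delay, up to dl_retries times */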
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
		return;
	} else {
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
}
Example #10
static void
nlmclnt_cancel_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %d (task %d)\n",
			req->a_res.status, task->tk_pid);

	switch (req->a_res.status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			req->a_res.status);
	}

die:
	rpc_release_task(task);
	nlm_release_host(req->a_host);
	kfree(req);
	return;

retry_cancel:
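	/* Rebind to the server and resend the CANCEL after a 30 second delay */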
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
	return;
}
Example #11
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_falloc_args args = {
		.falloc_fh	= NFS_FH(inode),
		.falloc_offset	= offset,
		.falloc_length	= len,
		.falloc_bitmask	= server->cache_consistency_bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server	= server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status)
		return status;

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(inode, res.falloc_fattr);

	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);
	err = nfs_sync_inode(inode);
	if (err)
		goto out_unlock;

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) -1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
out_unlock:
	inode_unlock(inode);
	return err;
}

static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
				struct nfs_lock_context *src_lock,
				struct file *dst, loff_t pos_dst,
				struct nfs_lock_context *dst_lock,
				size_t count)
{
	struct nfs42_copy_args args = {
		.src_fh		= NFS_FH(file_inode(src)),
		.src_pos	= pos_src,
		.dst_fh		= NFS_FH(file_inode(dst)),
		.dst_pos	= pos_dst,
		.count		= count,
	};
	struct nfs42_copy_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	int status;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
				     src_lock, FMODE_READ);
	if (status)
		return status;

	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status)
		return status;

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_COPY;
	if (status)
		return status;

	if (res.write_res.verifier.committed != NFS_FILE_SYNC) {
		status = nfs_commit_file(dst, &res.write_res.verifier.verifier);
		if (status)
			return status;
	}

	truncate_pagecache_range(dst_inode, pos_dst,
				 pos_dst + res.write_res.count);

	return res.write_res.count;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst,
			size_t count)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	ssize_t err, err2;

	if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst);
	dst_exception.state = dst_lock->open_context->state;

	do {
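		/* Retry the COPY while either the source or the destination
		 * stateid still needs recovery */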
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, pos_src, src_lock,
				       dst, pos_dst, dst_lock, count);
		inode_unlock(file_inode(dst));

		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err  = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh		= NFS_FH(inode),
		.sa_offset	= offset,
		.sa_what	= (whence == SEEK_HOLE) ?
					NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status)
		return status;

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}


static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
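	/* If the layout is no longer valid, complete the task without
	 * sending LAYOUTSTATS */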
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
			     &data->res.seq_res, task);

}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
					     &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					&lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						&data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	dprintk("%s server returns %d\n", __func__, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutstats)
		nfss->pnfs_curr_ld->cleanup_layoutstats(data);

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server	= server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status)
		return status;

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}
Example #12
int setup_callback_client(struct nfs4_client *clp)
{
	struct sockaddr_in	addr;
	struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
	struct rpc_timeout	timeparms = {
		.to_initval	= max_cb_time(),
		.to_retries	= 0,
	};
	struct rpc_create_args args = {
		.protocol	= IPPROTO_TCP,
		.address	= (struct sockaddr *)&addr,
		.addrsize	= sizeof(addr),
		.timeout	= &timeparms,
		.program	= &cb_program,
		.prognumber	= cb->cb_prog,
		.version	= nfs_cb_version[1]->number,
		.authflavor	= clp->cl_flavor,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
		.client_name    = clp->cl_principal,
	};
	struct rpc_clnt *client;

	if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
		return -EINVAL;

	/* Initialize address */
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(cb->cb_port);
	addr.sin_addr.s_addr = htonl(cb->cb_addr);

	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	cb->cb_client = client;
	return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = calldata;

	if (task->tk_status)
		warn_no_callback_path(clp, task->tk_status);
	else
		atomic_set(&clp->cl_cb_conn.cb_set, 1);
	put_nfs4_client(clp);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	.rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *lookup_cb_cred(struct nfs4_cb_conn *cb)
{
	struct auth_cred acred = {
		.machine_cred = 1
	};

	/*
	 * Note in the gss case this doesn't actually have to wait for a
	 * gss upcall (or any calls to the client); this just creates a
	 * non-uptodate cred which the rpc state machine will fill in with
	 * a refresh_upcall later.
	 */
	return rpcauth_lookup_credcache(cb->cb_client->cl_auth, &acred,
							RPCAUTH_LOOKUP_NEW);
}

void do_probe_callback(struct nfs4_client *clp)
{
	struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
	struct rpc_message msg = {
		.rpc_proc       = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
		.rpc_argp       = clp,
	};
	struct rpc_cred *cred;
	int status;

	cred = lookup_cb_cred(cb);
	if (IS_ERR(cred)) {
		status = PTR_ERR(cred);
		goto out;
	}
	cb->cb_cred = cred;
	msg.rpc_cred = cb->cb_cred;
	status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT,
				&nfsd4_cb_probe_ops, (void *)clp);
out:
	if (status) {
		warn_no_callback_path(clp, status);
		put_nfs4_client(clp);
	}
}

/*
 * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
 */
void
nfsd4_probe_callback(struct nfs4_client *clp)
{
	int status;

	BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set));

	status = setup_callback_client(clp);
	if (status) {
		warn_no_callback_path(clp, status);
		return;
	}

	/* the task holds a reference to the nfs4_client struct */
	atomic_inc(&clp->cl_count);

	do_probe_callback(clp);
}

static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegation *dp = calldata;
	struct nfs4_client *clp = dp->dl_client;

	switch (task->tk_status) {
	case -EIO:
		/* Network partition? */
		atomic_set(&clp->cl_cb_conn.cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
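		/* fall through to retry the recall below */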
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* success, or error we can't handle */
		return;
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call(task);
	} else {
		atomic_set(&clp->cl_cb_conn.cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
}

static void nfsd4_cb_recall_release(void *calldata)
{
	struct nfs4_delegation *dp = calldata;
	struct nfs4_client *clp = dp->dl_client;

	nfs4_put_delegation(dp);
	put_nfs4_client(clp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
	.rpc_call_done = nfsd4_cb_recall_done,
	.rpc_release = nfsd4_cb_recall_release,
};

/*
 * called with dp->dl_count inc'ed.
 */
void
nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
		.rpc_argp = dp,
		.rpc_cred = clp->cl_cb_conn.cb_cred
	};
	int status;

	dp->dl_retries = 1;
	status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
				&nfsd4_cb_recall_ops, dp);
	if (status) {
		put_nfs4_client(clp);
		nfs4_put_delegation(dp);
	}
}