/*
 * Send the callback reply
 */
int bc_send(struct rpc_rqst *req)
{
	struct rpc_task *task;
	int ret;

	dprintk("RPC:       bc_send req= %p\n", req);
	task = rpc_run_bc_task(req, &nfs41_callback_ops);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		BUG_ON(atomic_read(&task->tk_count) != 1);
		ret = task->tk_status;
		rpc_put_task(task);
	}
	dprintk("RPC:       bc_send ret= %d\n", ret);
	return ret;
}
Example #2
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct rpc_task *task;
	int proc_error;
	int error;

	dprintk("svc: %s(%p)\n", __func__, req);

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xprt = serv->sv_bc_xprt;
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;
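
	/*
	 * Worked example (illustrative values, not from the original
	 * sources): with head[0].iov_len = 4096 and page_len = 8192, an
	 * rq_private_buf.len of 6000 takes the middle branch and trims
	 * page_len to 6000 - 4096 = 1904, while a length of 20000 would
	 * be clamped to 4096 + 8192 = 12288.
	 */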

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp, argv, resv);

	atomic_inc(&req->rq_xprt->bc_free_slots);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		return 0;
	}

	/* Finally, send the reply synchronously */
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	error = task->tk_status;
	rpc_put_task(task);

out:
	dprintk("svc: %s(), error=%d\n", __func__, error);
	return error;
}
Example #3
/**
 * nfs_sillyrename - Perform a silly-rename of a dentry
 * @dir: inode of directory that contains dentry
 * @dentry: dentry to be sillyrenamed
 *
 * NFSv2/3 is stateless and the server doesn't know when the client is
 * holding a file open. To prevent application problems when a file is
 * unlinked while it's still open, the client performs a "silly-rename".
 * That is, it renames the file to a hidden file in the same directory,
 * and only performs the unlink once the last reference to it is put.
 *
 * The final cleanup is done during dentry_iput.
 *
 * (Note: NFSv4 is stateful, and has opens, so in theory an NFSv4 server
 * could take responsibility for keeping open files referenced.  The server
 * would also need to ensure that opened-but-deleted files were kept over
 * reboots.  However, we may not assume a server does so.  (RFC 5661
 * does provide an OPEN4_RESULT_PRESERVE_UNLINKED flag that a server can
 * use to advertise that it does this; some day we may take advantage of
 * it.))
 */
int
nfs_sillyrename(struct inode *dir, struct dentry *dentry)
{
    static unsigned int sillycounter;
    const int      fileidsize  = sizeof(NFS_FILEID(dentry->d_inode))*2;
    const int      countersize = sizeof(sillycounter)*2;
    const int      slen        = sizeof(".nfs")+fileidsize+countersize-1;
    char           silly[slen+1];
    struct dentry *sdentry;
    struct rpc_task *task;
    int            error = -EIO;

    dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
             dentry->d_parent->d_name.name, dentry->d_name.name,
             dentry->d_count);
    nfs_inc_stats(dir, NFSIOS_SILLYRENAME);

    /*
     * We don't allow a dentry to be silly-renamed twice.
     */
    error = -EBUSY;
    if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
        goto out;

    sprintf(silly, ".nfs%*.*Lx",
            fileidsize, fileidsize,
            (unsigned long long)NFS_FILEID(dentry->d_inode));

    /* Return delegation in anticipation of the rename */
    NFS_PROTO(dentry->d_inode)->return_delegation(dentry->d_inode);

    sdentry = NULL;
    do {
        char *suffix = silly + slen - countersize;

        dput(sdentry);
        sillycounter++;
        sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);

        dfprintk(VFS, "NFS: trying to rename %s to %s\n",
                 dentry->d_name.name, silly);

        sdentry = lookup_one_len(silly, dentry->d_parent, slen);
        /*
         * N.B. Better to return EBUSY here ... it could be
         * dangerous to delete the file while it's in use.
         */
        if (IS_ERR(sdentry))
            goto out;
    } while (sdentry->d_inode != NULL); /* need negative lookup */

    /* queue unlink first. Can't do this from rpc_release as it
     * has to allocate memory
     */
    error = nfs_async_unlink(dir, dentry);
    if (error)
        goto out_dput;

    /* populate unlinkdata with the right dname */
    error = nfs_copy_dname(sdentry,
                           (struct nfs_unlinkdata *)dentry->d_fsdata);
    if (error) {
        nfs_cancel_async_unlink(dentry);
        goto out_dput;
    }

    /* run the rename task, undo unlink if it fails */
    task = nfs_async_rename(dir, dir, dentry, sdentry);
    if (IS_ERR(task)) {
        error = -EBUSY;
        nfs_cancel_async_unlink(dentry);
        goto out_dput;
    }

    /* wait for the RPC task to complete, unless a SIGKILL intervenes */
    error = rpc_wait_for_completion_task(task);
    if (error == 0)
        error = task->tk_status;
    switch (error) {
    case 0:
        /* The rename succeeded */
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        d_move(dentry, sdentry);
        break;
    case -ERESTARTSYS:
        /* The result of the rename is unknown. Play it safe by
         * forcing a new lookup */
        d_drop(dentry);
        d_drop(sdentry);
    }
    rpc_put_task(task);
out_dput:
    dput(sdentry);
out:
    return error;
}
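
/*
 * Naming illustration (hypothetical values, 64-bit fileid): fileid
 * 0x1a2b with sillycounter 0x0007 yields the name
 * ".nfs0000000000001a2b00000007" -- sixteen fileid hex digits followed
 * by eight counter hex digits, matching fileidsize and countersize.
 */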
Example #4
/**
 * rpcb_register - set or unset a port registration with the local rpcbind svc
 * @prog: RPC program number to bind
 * @vers: RPC version number to bind
 * @prot: transport protocol to use to make this request
 * @port: port value to register
 * @okay: result code
 *
 * port == 0 means unregister, port != 0 means register.
 *
 * This routine supports only rpcbind version 2.
 */
int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
	};
	struct rpcbind_args map = {
		.r_prog		= prog,
		.r_vers		= vers,
		.r_prot		= prot,
		.r_port		= port,
	};
	struct rpc_message msg = {
		.rpc_proc	= &rpcb_procedures2[port ?
					RPCBPROC_SET : RPCBPROC_UNSET],
		.rpc_argp	= &map,
		.rpc_resp	= okay,
	};
	struct rpc_clnt *rpcb_clnt;
	int error = 0;

	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
			"rpcbind\n", (port ? "" : "un"),
			prog, vers, prot, port);

	rpcb_clnt = rpcb_create("localhost", (struct sockaddr *) &sin,
				sizeof(sin), XPRT_TRANSPORT_UDP, 2, 1);
	if (IS_ERR(rpcb_clnt))
		return PTR_ERR(rpcb_clnt);

	error = rpc_call_sync(rpcb_clnt, &msg, 0);

	rpc_shutdown_client(rpcb_clnt);
	if (error < 0)
		printk(KERN_WARNING "RPC: failed to contact local rpcbind "
				"server (errno %d).\n", -error);
	dprintk("RPC:       registration status %d/%d\n", error, *okay);

	return error;
}
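
/*
 * Hypothetical usage sketch (not part of the original file): register
 * NFSv3 over TCP on port 2049 with the local rpcbind, then clear the
 * registration by passing port 0.  100003 is the NFS program number;
 * "okay" carries the rpcbind boolean result.
 */
static void example_rpcb_register(void)
{
	int okay;

	if (rpcb_register(100003, 3, IPPROTO_TCP, 2049, &okay) == 0 && okay)
		printk(KERN_DEBUG "NFSv3 registered with local rpcbind\n");

	rpcb_register(100003, 3, IPPROTO_TCP, 0, &okay);	/* unregister */
}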

/**
 * rpcb_getport_sync - obtain the port for an RPC service on a given host
 * @sin: address of remote peer
 * @prog: RPC program number to bind
 * @vers: RPC version number to bind
 * @prot: transport protocol to use to make this request
 *
 * Return value is the requested advertised port number,
 * or a negative errno value.
 *
 * Called from outside the RPC client in a synchronous task context.
 * Uses default timeout parameters specified by underlying transport.
 *
 * XXX: Needs to support IPv6
 */
int rpcb_getport_sync(struct sockaddr_in *sin, u32 prog, u32 vers, int prot)
{
	struct rpcbind_args map = {
		.r_prog		= prog,
		.r_vers		= vers,
		.r_prot		= prot,
		.r_port		= 0,
	};
	struct rpc_message msg = {
		.rpc_proc	= &rpcb_procedures2[RPCBPROC_GETPORT],
		.rpc_argp	= &map,
		.rpc_resp	= &map.r_port,
	};
	struct rpc_clnt	*rpcb_clnt;
	int status;

	dprintk("RPC:       %s(" NIPQUAD_FMT ", %u, %u, %d)\n",
		__FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);

	rpcb_clnt = rpcb_create(NULL, (struct sockaddr *)sin,
				sizeof(*sin), prot, 2, 0);
	if (IS_ERR(rpcb_clnt))
		return PTR_ERR(rpcb_clnt);

	status = rpc_call_sync(rpcb_clnt, &msg, 0);
	rpc_shutdown_client(rpcb_clnt);

	if (status >= 0) {
		if (map.r_port != 0)
			return map.r_port;
		status = -EACCES;
	}
	return status;
}
EXPORT_SYMBOL_GPL(rpcb_getport_sync);
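
/*
 * Hypothetical usage sketch (not part of the original file): ask the
 * portmapper on a remote host (address already in network byte order)
 * where NFSv3 over UDP is listening; returns the advertised port or a
 * negative errno, per rpcb_getport_sync() above.
 */
static int example_rpcb_getport(__be32 host_addr)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= host_addr,
	};

	return rpcb_getport_sync(&sin, 100003, 3, IPPROTO_UDP);
}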

static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbind_args *map, int version)
{
	struct rpc_message msg = {
		.rpc_proc = rpcb_next_version[version].rpc_proc,
		.rpc_argp = map,
		.rpc_resp = &map->r_port,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = rpcb_clnt,
		.rpc_message = &msg,
		.callback_ops = &rpcb_getport_ops,
		.callback_data = map,
		.flags = RPC_TASK_ASYNC,
	};

	return rpc_run_task(&task_setup_data);
}

/**
 * rpcb_getport_async - obtain the port for a given RPC service on a given host
 * @task: task that is waiting for portmapper request
 *
 * This one can be called for an ongoing RPC request, and can be used in
 * an async (rpciod) context.
 */
void rpcb_getport_async(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	u32 bind_version;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_clnt	*rpcb_clnt;
	struct rpcbind_args *map;
	struct rpc_task	*child;
	struct sockaddr_storage addr;
	struct sockaddr *sap = (struct sockaddr *)&addr;
	size_t salen;
	int status;
	struct rpcb_info *info;

	dprintk("RPC: %5u %s(%s, %u, %u, %d)\n",
		task->tk_pid, __FUNCTION__,
		clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot);

	/* Autobind on cloned rpc clients is discouraged */
	BUG_ON(clnt->cl_parent != clnt);

	if (xprt_test_and_set_binding(xprt)) {
		status = -EAGAIN;	/* tell caller to check again */
		dprintk("RPC: %5u %s: waiting for another binder\n",
			task->tk_pid, __FUNCTION__);
		goto bailout_nowake;
	}

	/* Put self on queue before sending rpcbind request, in case
	 * rpcb_getport_done completes before we return from rpc_run_task */
	rpc_sleep_on(&xprt->binding, task, NULL, NULL);

	/* Someone else may have bound if we slept */
	if (xprt_bound(xprt)) {
		status = 0;
		dprintk("RPC: %5u %s: already bound\n",
			task->tk_pid, __FUNCTION__);
		goto bailout_nofree;
	}

	salen = rpc_peeraddr(clnt, sap, sizeof(addr));

	/* Don't ever use rpcbind v2 for AF_INET6 requests */
	switch (sap->sa_family) {
	case AF_INET:
		info = rpcb_next_version;
		break;
	case AF_INET6:
		info = rpcb_next_version6;
		break;
	default:
		status = -EAFNOSUPPORT;
		dprintk("RPC: %5u %s: bad address family\n",
				task->tk_pid, __FUNCTION__);
		goto bailout_nofree;
	}
	if (info[xprt->bind_index].rpc_proc == NULL) {
		xprt->bind_index = 0;
		status = -EPFNOSUPPORT;
		dprintk("RPC: %5u %s: no more getport versions available\n",
			task->tk_pid, __FUNCTION__);
		goto bailout_nofree;
	}
	bind_version = info[xprt->bind_index].rpc_vers;

	dprintk("RPC: %5u %s: trying rpcbind version %u\n",
		task->tk_pid, __FUNCTION__, bind_version);

	rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot,
				bind_version, 0);
	if (IS_ERR(rpcb_clnt)) {
		status = PTR_ERR(rpcb_clnt);
		dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n",
			task->tk_pid, __FUNCTION__, PTR_ERR(rpcb_clnt));
		goto bailout_nofree;
	}

	map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC);
	if (!map) {
		status = -ENOMEM;
		dprintk("RPC: %5u %s: no memory available\n",
			task->tk_pid, __FUNCTION__);
		goto bailout_nofree;
	}
	map->r_prog = clnt->cl_prog;
	map->r_vers = clnt->cl_vers;
	map->r_prot = xprt->prot;
	map->r_port = 0;
	map->r_xprt = xprt_get(xprt);
	map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
	map->r_addr = rpc_peeraddr2str(rpcb_clnt, RPC_DISPLAY_UNIVERSAL_ADDR);
	map->r_owner = RPCB_OWNER_STRING;	/* ignored for GETADDR */

	child = rpcb_call_async(rpcb_clnt, map, xprt->bind_index);
	rpc_release_client(rpcb_clnt);
	if (IS_ERR(child)) {
		status = -EIO;
		dprintk("RPC: %5u %s: rpc_run_task failed\n",
			task->tk_pid, __FUNCTION__);
		goto bailout;
	}
	rpc_put_task(child);

	task->tk_xprt->stat.bind_count++;
	return;

bailout:
	kfree(map);
	xprt_put(xprt);
bailout_nofree:
	rpcb_wake_rpcbind_waiters(xprt, status);
bailout_nowake:
	task->tk_status = status;
}
EXPORT_SYMBOL_GPL(rpcb_getport_async);

/*
 * Rpcbind child task calls this callback via tk_exit.
 */
static void rpcb_getport_done(struct rpc_task *child, void *data)
{
	struct rpcbind_args *map = data;
	struct rpc_xprt *xprt = map->r_xprt;
	int status = child->tk_status;

	/* Garbage reply: retry with a lesser rpcbind version */
	if (status == -EIO)
		status = -EPROTONOSUPPORT;

	/* rpcbind server doesn't support this rpcbind protocol version */
	if (status == -EPROTONOSUPPORT)
		xprt->bind_index++;

	if (status < 0) {
		/* rpcbind server not available on remote host? */
		xprt->ops->set_port(xprt, 0);
	} else if (map->r_port == 0) {
		/* Requested RPC service wasn't registered on remote host */
		xprt->ops->set_port(xprt, 0);
		status = -EACCES;
	} else {
		/* Succeeded */
		xprt->ops->set_port(xprt, map->r_port);
		xprt_set_bound(xprt);
		status = 0;
	}

	dprintk("RPC: %5u rpcb_getport_done(status %d, port %u)\n",
			child->tk_pid, status, map->r_port);

	rpcb_wake_rpcbind_waiters(xprt, status);
}

static int rpcb_encode_mapping(struct rpc_rqst *req, __be32 *p,
			       struct rpcbind_args *rpcb)
{
	dprintk("RPC:       rpcb_encode_mapping(%u, %u, %d, %u)\n",
			rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
	*p++ = htonl(rpcb->r_prog);
	*p++ = htonl(rpcb->r_vers);
	*p++ = htonl(rpcb->r_prot);
	*p++ = htonl(rpcb->r_port);

	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
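
/*
 * Wire-format illustration (values for illustration only): the mapping
 * (prog 100003, vers 3, prot 6, port 2049) goes out as four big-endian
 * XDR words: 0x000186a3 0x00000003 0x00000006 0x00000801.
 */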

static int rpcb_decode_getport(struct rpc_rqst *req, __be32 *p,
			       unsigned short *portp)
{
	*portp = (unsigned short) ntohl(*p++);
	dprintk("RPC:      rpcb_decode_getport result %u\n",
			*portp);
	return 0;
}

static int rpcb_decode_set(struct rpc_rqst *req, __be32 *p,
			   unsigned int *boolp)
{
	*boolp = (unsigned int) ntohl(*p++);
	dprintk("RPC:      rpcb_decode_set result %u\n",
			*boolp);
	return 0;
}

static int rpcb_encode_getaddr(struct rpc_rqst *req, __be32 *p,
			       struct rpcbind_args *rpcb)
{
	dprintk("RPC:       rpcb_encode_getaddr(%u, %u, %s)\n",
			rpcb->r_prog, rpcb->r_vers, rpcb->r_addr);
	*p++ = htonl(rpcb->r_prog);
	*p++ = htonl(rpcb->r_vers);

	p = xdr_encode_string(p, rpcb->r_netid);
	p = xdr_encode_string(p, rpcb->r_addr);
	p = xdr_encode_string(p, rpcb->r_owner);

	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);

	return 0;
}
Example #5
/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = first->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = first->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_commit_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = flags,
		.priority = priority,
	};

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = msg.rpc_cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->args.context = get_nfs_open_context(first->wb_context);
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->commit_setup(data, &msg);

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;
	struct nfs_page         *req;

	data = nfs_commitdata_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	return nfs_commit_rpcsetup(head, data, how);
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;

        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	int status = data->task.tk_status;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		nfs_clear_request_commit(req);

		dprintk("NFS:       commit (%s/%lld %d@%lld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	int res;

	spin_lock(&inode->i_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&inode->i_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	pgoff_t idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			pgoff_t l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(pgoff_t)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&inode->i_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&inode->i_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&inode->i_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&inode->i_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&inode->i_lock);

	} while (ret >= 0);
	spin_unlock(&inode->i_lock);
	return ret;
}
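
/*
 * Note on the loop above: i_lock is dropped around every call that can
 * sleep, and the loop keeps cycling until a scan finds nothing left to
 * commit (pages == 0), FLUSH_NOCOMMIT short-circuits it, or an error
 * makes ret negative and terminates the while.
 */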

static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	int ret;

	ret = nfs_writepages(mapping, wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, wbc, how);
	if (ret < 0)
		goto out;
	return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
Example #6
/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = flags,
		.priority = priority,
	};

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = msg.rpc_cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.stable  = NFS_UNSTABLE;
	if (how & FLUSH_STABLE) {
		data->args.stable = NFS_DATA_SYNC;
		if (!nfs_need_commit(NFS_I(inode)))
			data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
		"(req %s/%lld, %u bytes @ offset %llu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				   wsize, offset, how);
		if (ret == 0)
			ret = ret2;
		offset += wsize;
		nbytes -= wsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	return -ENOMEM;
}
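
/*
 * Worked example (illustrative values): with wsize = 8192 and
 * count = 20000, the loops above allocate three requests and issue
 * writes of 8192 bytes at offset 0, 8192 bytes at offset 8192 and
 * 3616 bytes at offset 16384, with wb_complete set to 3.
 */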

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;

	data = nfs_writedata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
	return -ENOMEM;
}

static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	size_t wsize = NFS_SERVER(inode)->wsize;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
Example #7
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
{
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_message = &msg,
		.callback_ops = &nfs_unlink_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;
	struct dentry *alias;

	alias = d_lookup(parent, &data->args.name);
	if (alias != NULL) {
		int ret = 0;

		/*
		 * Hey, we raced with lookup... See if we need to transfer
		 * the sillyrename information to the aliased dentry.
		 */
		nfs_free_dname(data);
		spin_lock(&alias->d_lock);
		if (alias->d_inode != NULL &&
		    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
			alias->d_fsdata = data;
			alias->d_flags |= DCACHE_NFSFS_RENAMED;
			ret = 1;
		}
		spin_unlock(&alias->d_lock);
		nfs_dec_sillycount(dir);
		dput(alias);
		return ret;
	}
	data->dir = igrab(dir);
	if (!data->dir) {
		nfs_dec_sillycount(dir);
		return 0;
	}
	nfs_sb_active(NFS_SERVER(dir));
	data->args.fh = NFS_FH(dir);
	nfs_fattr_init(&data->res.dir_attr);

	NFS_PROTO(dir)->unlink_setup(&msg, dir);

	task_setup_data.rpc_client = NFS_CLIENT(dir);
	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);
	return 1;
}

static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
{
	struct dentry *parent;
	struct inode *dir;
	int ret = 0;

	parent = dget_parent(dentry);
	if (parent == NULL)
		goto out_free;
	dir = parent->d_inode;
	if (nfs_copy_dname(dentry, data) != 0)
		goto out_dput;
	/* Non-exclusive lock protects against concurrent lookup() calls */
	spin_lock(&dir->i_lock);
	if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
		/* Deferred delete */
		hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
		spin_unlock(&dir->i_lock);
		ret = 1;
		goto out_dput;
	}
	spin_unlock(&dir->i_lock);
	ret = nfs_do_call_unlink(parent, dir, data);
out_dput:
	dput(parent);
out_free:
	return ret;
}

void nfs_block_sillyrename(struct dentry *dentry)
{
	struct nfs_inode *nfsi = NFS_I(dentry->d_inode);

	wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
}
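
/*
 * silly_count doubles as a barrier: nfs_block_sillyrename() waits until
 * no sillydeletes are in flight (count == 1) and claims the directory by
 * swapping in 0.  While it is held, atomic_inc_not_zero() in
 * nfs_call_unlink() fails, so new sillydeletes park on silly_list until
 * nfs_unblock_sillyrename() restores the count and drains the list.
 */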

void nfs_unblock_sillyrename(struct dentry *dentry)
{
	struct inode *dir = dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(dir);
	struct nfs_unlinkdata *data;

	atomic_inc(&nfsi->silly_count);
	spin_lock(&dir->i_lock);
	while (!hlist_empty(&nfsi->silly_list)) {
		if (!atomic_inc_not_zero(&nfsi->silly_count))
			break;
		data = hlist_entry(nfsi->silly_list.first, struct nfs_unlinkdata, list);
		hlist_del(&data->list);
		spin_unlock(&dir->i_lock);
		if (nfs_do_call_unlink(dentry, dir, data) == 0)
			nfs_free_unlinkdata(data);
		spin_lock(&dir->i_lock);
	}
	spin_unlock(&dir->i_lock);
}

/**
 * nfs_async_unlink - asynchronous unlinking of a file
 * @dir: parent directory of dentry
 * @dentry: dentry to unlink
 */
int
nfs_async_unlink(struct inode *dir, struct dentry *dentry)
{
	struct nfs_unlinkdata *data;
	int status = -ENOMEM;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		goto out;

	data->cred = rpc_lookup_cred();
	if (IS_ERR(data->cred)) {
		status = PTR_ERR(data->cred);
		goto out_free;
	}

	status = -EBUSY;
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
		goto out_unlock;
	dentry->d_flags |= DCACHE_NFSFS_RENAMED;
	dentry->d_fsdata = data;
	spin_unlock(&dentry->d_lock);
	return 0;
out_unlock:
	spin_unlock(&dentry->d_lock);
	put_rpccred(data->cred);
out_free:
	kfree(data);
out:
	return status;
}

/**
 * nfs_complete_unlink - Initialize completion of the sillydelete
 * @dentry: dentry to delete
 * @inode: inode
 *
 * Since we're most likely to be called by dentry_iput(), we
 * only use the dentry to find the sillydelete. We then copy the name
 * into the qstr.
 */
void
nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
{
	struct nfs_unlinkdata	*data = NULL;

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
		data = dentry->d_fsdata;
	}
	spin_unlock(&dentry->d_lock);

	if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
		nfs_free_unlinkdata(data);
}
Example #8
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_falloc_args args = {
		.falloc_fh	= NFS_FH(inode),
		.falloc_offset	= offset,
		.falloc_length	= len,
		.falloc_bitmask	= server->cache_consistency_bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server	= server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status)
		return status;

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(inode, res.falloc_fattr);

	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);
	err = nfs_sync_inode(inode);
	if (err)
		goto out_unlock;

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
out_unlock:
	inode_unlock(inode);
	return err;
}
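
/*
 * Note: ALLOCATE and DEALLOCATE are the NFSv4.2 backends for the
 * fallocate(2) paths (plain preallocation and hole punching,
 * respectively).  A server that lacks the operation has its capability
 * bit cleared above, so later attempts fail fast with -EOPNOTSUPP.
 */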

static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
				struct nfs_lock_context *src_lock,
				struct file *dst, loff_t pos_dst,
				struct nfs_lock_context *dst_lock,
				size_t count)
{
	struct nfs42_copy_args args = {
		.src_fh		= NFS_FH(file_inode(src)),
		.src_pos	= pos_src,
		.dst_fh		= NFS_FH(file_inode(dst)),
		.dst_pos	= pos_dst,
		.count		= count,
	};
	struct nfs42_copy_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	int status;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
				     src_lock, FMODE_READ);
	if (status)
		return status;

	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status)
		return status;

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_COPY;
	if (status)
		return status;

	if (res.write_res.verifier.committed != NFS_FILE_SYNC) {
		status = nfs_commit_file(dst, &res.write_res.verifier.verifier);
		if (status)
			return status;
	}

	truncate_pagecache_range(dst_inode, pos_dst,
				 pos_dst + res.write_res.count);

	return res.write_res.count;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst,
			size_t count)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	ssize_t err, err2;

	if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst);
	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, pos_src, src_lock,
				       dst, pos_dst, dst_lock, count);
		inode_unlock(file_inode(dst));

		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err  = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh		= NFS_FH(inode),
		.sa_offset	= offset,
		.sa_what	= (whence == SEEK_HOLE) ?
					NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status)
		return status;

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}
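
/*
 * Note: this implements only the SEEK_HOLE/SEEK_DATA cases of llseek
 * (see sa_what above); ordinary SEEK_SET/SEEK_CUR/SEEK_END never need
 * a round trip to the server and are handled by the generic code.
 */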


static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
			     &data->res.seq_res, task);

}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
					     &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					&lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						&data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	dprintk("%s server returns %d\n", __func__, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutstats)
		nfss->pnfs_curr_ld->cleanup_layoutstats(data);

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server	= server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status)
		return status;

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}
Example #9
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = hdr->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);

	dprintk("NFS: %5u initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->task.tk_pid,
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
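
/*
 * Note: with FLUSH_SYNC the caller sleeps in
 * rpc_wait_for_completion_task() and returns the task's tk_status;
 * without it the task completes on nfsiod, and the only error this
 * function can report comes from rpc_run_task() itself.
 */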

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 */
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
	return -ENOMEM;
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	if (hdr->rw_ops->rw_release)
		hdr->rw_ops->rw_release(hdr);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)

static int
rbio_submit(struct ploop_io * io, struct nfs_read_data * nreq,
	    const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;
	struct rpc_task *task;

	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};

	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = cb,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
		.workqueue = nfsio_workqueue,
#endif
		.flags = RPC_TASK_ASYNC,
	};

	nreq->res.count = nreq->args.count;
	nreq->header->cred = msg.rpc_cred;
	nreq->args.context = ctx;

	task_setup_data.task = &nreq->task;
	task_setup_data.callback_data = nreq;
	msg.rpc_argp = &nreq->args;
	msg.rpc_resp = &nreq->res;
	NFS_PROTO(inode)->read_setup(nreq, &msg);

	task = rpc_run_task(&task_setup_data);
	if (unlikely(IS_ERR(task)))
		return PTR_ERR(task);

	rpc_put_task(task);
	return 0;
}

#else

static int
rbio_submit(struct ploop_io * io, struct nfs_read_data * nreq,
	    const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	nreq->res.count = nreq->args.count;
	nreq->cred = ctx->cred;
	nreq->args.context = ctx;

	rpc_init_task(&nreq->task, NFS_CLIENT(inode), RPC_TASK_ASYNC, cb, nreq);
	NFS_PROTO(inode)->read_setup(nreq);

	nreq->task.tk_cookie = (unsigned long) inode;

	lock_kernel();
	rpc_execute(&nreq->task);
	unlock_kernel();
	return 0;
}
#endif

static void
nfsio_submit_read(struct ploop_io *io, struct ploop_request * preq,
		  struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t rsize = NFS_SERVER(inode)->rsize;
	struct nfs_read_data *nreq = NULL;
	loff_t pos;
	unsigned int prev_end;
	struct bio * b;

	ploop_prepare_io_request(preq);

	pos = sbl->head->bi_sector;
	pos = ((loff_t)iblk << preq->plo->cluster_log) | (pos & ((1<<preq->plo->cluster_log) - 1));
	pos <<= 9;

	prev_end = PAGE_SIZE;

	for (b = sbl->head; b != NULL; b = b->bi_next) {
		int bv_idx;

		for (bv_idx = 0; bv_idx < b->bi_vcnt; bv_idx++) {
			struct bio_vec * bv = &b->bi_io_vec[bv_idx];

			if (nreq && nreq->args.count + bv->bv_len <= rsize) {
				if (nreq->pages.pagevec[nreq->pages.npages-1] == bv->bv_page &&
				    prev_end == bv->bv_offset) {
					nreq->args.count += bv->bv_len;
					pos += bv->bv_len;
					prev_end += bv->bv_len;
					continue;
				}
				if (nreq->pages.npages < MAX_NBIO_PAGES &&
				    bv->bv_offset == 0 && prev_end == PAGE_SIZE) {
					nreq->args.count += bv->bv_len;
					nreq->pages.pagevec[nreq->pages.npages] = bv->bv_page;
					nreq->pages.npages++;
					pos += bv->bv_len;
					prev_end = bv->bv_offset + bv->bv_len;
					continue;
				}
			}

			if (nreq) {
				int err;

				atomic_inc(&preq->io_count);

				err = rbio_submit(io, nreq, &nfsio_read_ops);
				if (err) {
					PLOOP_REQ_SET_ERROR(preq, err);
					ploop_complete_io_request(preq);
					goto out;
				}
			}

			nreq = rbio_init(pos, bv->bv_page, bv->bv_offset,
					 bv->bv_len, preq, inode);

			if (nreq == NULL) {
				PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
				goto out;
			}

			pos += bv->bv_len;
			prev_end = bv->bv_offset + bv->bv_len;
		}
	}

	if (nreq) {
		int err;

		atomic_inc(&preq->io_count);

		err = rbio_submit(io, nreq, &nfsio_read_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			ploop_complete_io_request(preq);
			goto out;
		}
	}

out:
	ploop_complete_io_request(preq);
}
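
/*
 * Coalescing illustration (hypothetical values, PAGE_SIZE = 4096 and
 * rsize >= 8192): bio_vecs (pageA, off 0, len 2048),
 * (pageA, off 2048, len 2048) and (pageB, off 0, len 4096) merge into
 * one read request with args.count = 8192 spanning pages A and B; a
 * vec that is not contiguous with the previous one instead flushes the
 * pending request and starts a new one.
 */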

static void nfsio_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	status = NFS_PROTO(data->header->inode)->write_done(task, data);
	if (status != 0)
		return;

	if (task->tk_status >= 0 && resp->count < argp->count)
		task->tk_status = -EIO;
}

static void nfsio_write_release(void *calldata)
{
	struct nfs_write_data *nreq = calldata;
	struct ploop_request *preq = (struct ploop_request *) nreq->header->req;
	int status = nreq->task.tk_status;

	if (unlikely(status < 0))
		PLOOP_REQ_SET_ERROR(preq, status);

	if (!preq->error &&
	    nreq->res.verf->committed != NFS_FILE_SYNC) {
		if (!test_and_set_bit(PLOOP_REQ_UNSTABLE, &preq->state))
			memcpy(&preq->verf, &nreq->res.verf->verifier, 8);
	}
	nfsio_complete_io_request(preq);

	nfsio_wbio_release(calldata);
}

static const struct rpc_call_ops nfsio_write_ops = {
	.rpc_call_done = nfsio_write_result,
	.rpc_release = nfsio_write_release,
};

static struct nfs_write_data *
wbio_init(loff_t pos, struct page * page, unsigned int off, unsigned int len,
	  void * priv, struct inode * inode)
{
	struct nfs_write_data * nreq;

	nreq = nfsio_wbio_alloc(MAX_NBIO_PAGES);
	if (unlikely(nreq == NULL))
		return NULL;

	nreq->args.offset = pos;
	nreq->args.pgbase = off;
	nreq->args.count = len;
	nreq->pages.pagevec[0] = page;
	nreq->pages.npages = 1;
	nreq->header->req = priv;
	nreq->header->inode = inode;
	nreq->args.fh = NFS_FH(inode);
	nreq->args.pages = nreq->pages.pagevec;
	nreq->args.stable = NFS_UNSTABLE;
	nreq->res.fattr = &nreq->fattr;
	nreq->res.verf = &nreq->verf;
	return nreq;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)

static int wbio_submit(struct ploop_io * io, struct nfs_write_data *nreq,
		       const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};

	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = cb,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
		.workqueue = nfsio_workqueue,
#endif
		.flags = RPC_TASK_ASYNC,
	};

	if (verify_bounce(nreq))
		return -ENOMEM;

	nreq->res.count = nreq->args.count;
	nreq->args.context = ctx;
	nreq->header->cred = msg.rpc_cred;

	task_setup_data.task = &nreq->task;
	task_setup_data.callback_data = nreq;
	msg.rpc_argp = &nreq->args;
	msg.rpc_resp = &nreq->res;
	NFS_PROTO(inode)->write_setup(nreq, &msg);

	task = rpc_run_task(&task_setup_data);
	if (unlikely(IS_ERR(task)))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

#else

static int wbio_submit(struct ploop_io * io, struct nfs_write_data *nreq,
		       const struct rpc_call_ops * cb)
{
	struct nfs_open_context *ctx = nfs_file_open_context(io->files.file);
	struct inode *inode = io->files.inode;

	if (verify_bounce(nreq))
		return -ENOMEM;

	nreq->res.count = nreq->args.count;
	nreq->args.context = ctx;
	nreq->cred = ctx->cred;

	rpc_init_task(&nreq->task, NFS_CLIENT(inode), RPC_TASK_ASYNC, cb, nreq);
	NFS_PROTO(inode)->write_setup(nreq, NFS_UNSTABLE);

	nreq->task.tk_priority = RPC_PRIORITY_NORMAL;
	nreq->task.tk_cookie = (unsigned long) inode;

	lock_kernel();
	rpc_execute(&nreq->task);
	unlock_kernel();
	return 0;
}

#endif


static void
nfsio_submit_write(struct ploop_io *io, struct ploop_request * preq,
		   struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	struct nfs_write_data *nreq = NULL;
	loff_t pos;
	struct bio * b;
	unsigned int prev_end;

	nfsio_prepare_io_request(preq);

	pos = sbl->head->bi_sector;
	pos = ((loff_t)iblk << preq->plo->cluster_log) | (pos & ((1<<preq->plo->cluster_log) - 1));
	ploop_prepare_tracker(preq, pos);
	pos <<= 9;

	prev_end = PAGE_SIZE;

	for (b = sbl->head; b != NULL; b = b->bi_next) {
		int bv_idx;

		for (bv_idx = 0; bv_idx < b->bi_vcnt; bv_idx++) {
			struct bio_vec * bv = &b->bi_io_vec[bv_idx];

			if (nreq && nreq->args.count + bv->bv_len <= wsize) {
				if (nreq->pages.pagevec[nreq->pages.npages-1] == bv->bv_page &&
				    prev_end == bv->bv_offset) {
					nreq->args.count += bv->bv_len;
					pos += bv->bv_len;
					prev_end += bv->bv_len;
					continue;
				}
				if (nreq->pages.npages < MAX_NBIO_PAGES &&
				    bv->bv_offset == 0 && prev_end == PAGE_SIZE) {
					nreq->args.count += bv->bv_len;
					nreq->pages.pagevec[nreq->pages.npages] = bv->bv_page;
					nreq->pages.npages++;
					pos += bv->bv_len;
					prev_end = bv->bv_offset + bv->bv_len;
					continue;
				}
			}

			if (nreq) {
				int err;
				atomic_inc(&preq->io_count);
				err = wbio_submit(io, nreq, &nfsio_write_ops);
				if (err) {
					PLOOP_REQ_SET_ERROR(preq, err);
					nfsio_complete_io_request(preq);
					goto out;
				}
			}

			nreq = wbio_init(pos, bv->bv_page, bv->bv_offset,
					 bv->bv_len, preq, inode);

			if (nreq == NULL) {
				PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
				goto out;
			}

			prev_end = bv->bv_offset + bv->bv_len;
			pos += bv->bv_len;
		}
	}

	if (nreq) {
		int err;
		atomic_inc(&preq->io_count);
		err = wbio_submit(io, nreq, &nfsio_write_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			nfsio_complete_io_request(preq);
		}
	}

out:
	nfsio_complete_io_request(preq);
}

static void
nfsio_submit(struct ploop_io *io, struct ploop_request * preq,
	     unsigned long rw,
	     struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	if (iblk == PLOOP_ZERO_INDEX)
		iblk = 0;

	if (rw & (1<<BIO_RW))
		nfsio_submit_write(io, preq, sbl, iblk, size);
	else
		nfsio_submit_read(io, preq, sbl, iblk, size);
}

struct bio_list_walk
{
	struct bio * cur;
	int idx;
	int bv_off;
};

static void
nfsio_submit_write_pad(struct ploop_io *io, struct ploop_request * preq,
		       struct bio_list *sbl, iblock_t iblk, unsigned int size)
{
	struct inode *inode = io->files.inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	struct nfs_write_data *nreq = NULL;
	struct bio_list_walk bw;
	unsigned prev_end;

	loff_t pos, end_pos, start, end;

	/* pos..end_pos is the range which we are going to write */
	pos = (loff_t)iblk << (preq->plo->cluster_log + 9);
	end_pos = pos + (1 << (preq->plo->cluster_log + 9));

	/* start..end is data that we have. The rest must be zero padded. */
	start = pos + ((sbl->head->bi_sector & ((1<<preq->plo->cluster_log) - 1)) << 9);
	end = start + (size << 9);

	nfsio_prepare_io_request(preq);
	ploop_prepare_tracker(preq, start >> 9);

	prev_end = PAGE_SIZE;

#if 1
	/* GCC, shut up! */
	bw.cur = sbl->head;
	bw.idx = 0;
	bw.bv_off = 0;
	BUG_ON(bw.cur->bi_io_vec[0].bv_len & 511);
#endif

	while (pos < end_pos) {
		struct page * page;
		unsigned int poff, plen;

		if (pos < start) {
			page = ZERO_PAGE(0);
			poff = 0;
			plen = start - pos;
			if (plen > PAGE_SIZE)
				plen = PAGE_SIZE;
		} else if (pos >= end) {
			page = ZERO_PAGE(0);
			poff = 0;
			plen = end_pos - pos;
			if (plen > PAGE_SIZE)
				plen = PAGE_SIZE;
		} else {
			/* pos >= start && pos < end */
			struct bio_vec * bv;

			if (pos == start) {
				bw.cur = sbl->head;
				bw.idx = 0;
				bw.bv_off = 0;
				BUG_ON(bw.cur->bi_io_vec[0].bv_len & 511);
			}
			bv = bw.cur->bi_io_vec + bw.idx;

			if (bw.bv_off >= bv->bv_len) {
				bw.idx++;
				bv++;
				bw.bv_off = 0;
				if (bw.idx >= bw.cur->bi_vcnt) {
					bw.cur = bw.cur->bi_next;
					bw.idx = 0;
					bw.bv_off = 0;
					bv = bw.cur->bi_io_vec;
				}
				BUG_ON(bv->bv_len & 511);
			}

			page = bv->bv_page;
			poff = bv->bv_offset + bw.bv_off;
			plen = bv->bv_len - bw.bv_off;
		}

		if (nreq && nreq->args.count + plen <= wsize) {
			if (nreq->pages.pagevec[nreq->pages.npages-1] == page &&
			    prev_end == poff) {
				nreq->args.count += plen;
				pos += plen;
				bw.bv_off += plen;
				prev_end += plen;
				continue;
			}
			if (nreq->pages.npages < MAX_NBIO_PAGES &&
			    poff == 0 && prev_end == PAGE_SIZE) {
				nreq->args.count += plen;
				nreq->pages.pagevec[nreq->pages.npages] = page;
				nreq->pages.npages++;
				pos += plen;
				bw.bv_off += plen;
				prev_end = poff + plen;
				continue;
			}
		}

		if (nreq) {
			int err;
			atomic_inc(&preq->io_count);
			err = wbio_submit(io, nreq, &nfsio_write_ops);
			if (err) {
				PLOOP_REQ_SET_ERROR(preq, err);
				nfsio_complete_io_request(preq);
				goto out;
			}
		}

		nreq = wbio_init(pos, page, poff, plen, preq, inode);

		if (nreq == NULL) {
			PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
			goto out;
		}

		prev_end = poff + plen;
		pos += plen;
		bw.bv_off += plen;
	}

	if (nreq) {
		int err;
		atomic_inc(&preq->io_count);
		err = wbio_submit(io, nreq, &nfsio_write_ops);
		if (err) {
			PLOOP_REQ_SET_ERROR(preq, err);
			nfsio_complete_io_request(preq);
		}
	}

out:
	nfsio_complete_io_request(preq);
}
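
/*
 * Padding illustration (hypothetical values, cluster_log = 11, i.e.
 * 1 MiB clusters): iblk = 2 covers bytes [2 MiB, 3 MiB).  If the bios
 * supply only [2 MiB + 4 KiB, 2 MiB + 12 KiB), the 4 KiB before the
 * data and everything after it up to 3 MiB are written from
 * ZERO_PAGE(0), so the whole cluster is materialized in one pass.
 */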


static void
nfsio_submit_alloc(struct ploop_io *io, struct ploop_request * preq,
		 struct bio_list * sbl, unsigned int size)
{
	iblock_t iblk = io->alloc_head++;

	if (!(io->files.file->f_mode & FMODE_WRITE)) {
		PLOOP_FAIL_REQUEST(preq, -EBADF);
		return;
	}
	preq->iblock = iblk;
	preq->eng_state = PLOOP_E_DATA_WBI;

	nfsio_submit_write_pad(io, preq, sbl, iblk, size);
}

static void nfsio_destroy(struct ploop_io * io)
{
	if (io->fsync_thread) {
		kthread_stop(io->fsync_thread);
		io->fsync_thread = NULL;
	}

	if (io->files.file) {
		struct file * file = io->files.file;
		mutex_lock(&io->plo->sysfs_mutex);
		io->files.file = NULL;
		if (io->files.mapping)
			(void)invalidate_inode_pages2(io->files.mapping);
		mutex_unlock(&io->plo->sysfs_mutex);
		fput(file);
	}
}

static int nfsio_sync(struct ploop_io * io)
{
	return 0;
}

static int nfsio_stop(struct ploop_io * io)
{
	return 0;
}


static int
nfsio_init(struct ploop_io * io)
{
	INIT_LIST_HEAD(&io->fsync_queue);
	init_waitqueue_head(&io->fsync_waitq);
	return 0;
}
Example #11
int
nfs_sillyrename(struct inode *dir, struct dentry *dentry)
{
	static unsigned int sillycounter;
	const int      fileidsize  = sizeof(NFS_FILEID(dentry->d_inode))*2;
	const int      countersize = sizeof(sillycounter)*2;
	const int      slen        = sizeof(".nfs")+fileidsize+countersize-1;
	char           silly[slen+1];
	struct dentry *sdentry;
	struct rpc_task *task;
	int            error = -EIO;

	dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		dentry->d_count);
	nfs_inc_stats(dir, NFSIOS_SILLYRENAME);

	error = -EBUSY;
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
		goto out;

	sprintf(silly, ".nfs%*.*Lx",
		fileidsize, fileidsize,
		(unsigned long long)NFS_FILEID(dentry->d_inode));

	/* Return delegation in anticipation of the rename */
	nfs_inode_return_delegation(dentry->d_inode);

	sdentry = NULL;
	do {
		char *suffix = silly + slen - countersize;

		dput(sdentry);
		sillycounter++;
		sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);

		dfprintk(VFS, "NFS: trying to rename %s to %s\n",
				dentry->d_name.name, silly);

		sdentry = lookup_one_len(silly, dentry->d_parent, slen);
		if (IS_ERR(sdentry))
			goto out;
	} while (sdentry->d_inode != NULL); /* need negative lookup */

	error = nfs_async_unlink(dir, dentry);
	if (error)
		goto out_dput;

	/* populate unlinkdata with the right dname */
	error = nfs_copy_dname(sdentry,
				(struct nfs_unlinkdata *)dentry->d_fsdata);
	if (error) {
		nfs_cancel_async_unlink(dentry);
		goto out_dput;
	}

	/* run the rename task, undo unlink if it fails */
	task = nfs_async_rename(dir, dir, dentry, sdentry);
	if (IS_ERR(task)) {
		error = -EBUSY;
		nfs_cancel_async_unlink(dentry);
		goto out_dput;
	}

	/* wait for the RPC task to complete, unless a SIGKILL intervenes */
	error = rpc_wait_for_completion_task(task);
	if (error == 0)
		error = task->tk_status;
	switch (error) {
	case 0:
		/* The rename succeeded */
		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
		d_move(dentry, sdentry);
		break;
	case -ERESTARTSYS:
		/* The result of the rename is unknown. Play it safe by
		 * forcing a new lookup */
		d_drop(dentry);
		d_drop(sdentry);
	}
	rpc_put_task(task);
out_dput:
	dput(sdentry);
out:
	return error;
}