Example 1
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n", 
				task->tk_pid, task->tk_status);
	task->tk_action = call_encode;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;

	if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); 

	if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	/* Interruptible sync task caught a signal: give up. */
	rpc_exit(task, -ERESTARTSYS);
}
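
The step above sizes one buffer for both the call and the reply (hence
the doubling of bufsiz) and, when memory is tight, defers the task
instead of failing outright. A minimal userspace sketch of the same
allocate-or-back-off step, where retry_later() and the fixed buffer
size are hypothetical stand-ins for the rpc_delay/call_reserve
machinery:

#include <stdio.h>
#include <stdlib.h>

struct fake_task {
	void	*buffer;
	void	(*action)(struct fake_task *);
};

/* Hypothetical scheduler hook: re-run the given step on the next pass. */
static void retry_later(struct fake_task *t,
			void (*action)(struct fake_task *))
{
	t->action = action;
}

static void encode_step(struct fake_task *t)
{
	(void)t;	/* next FSM step; body elided */
}

static void allocate_step(struct fake_task *t)
{
	size_t bufsiz = 4096;	/* placeholder for rpcproc_bufsiz() */

	t->action = encode_step;
	if (t->buffer)
		return;

	t->buffer = malloc(bufsiz * 2);	/* room for call and reply */
	if (t->buffer)
		return;

	fprintf(stderr, "allocation failed, retrying later\n");
	retry_later(t, allocate_step);
}
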
Example 2
/*
 * User-visible entry point to the scheduler.
 * The recursion protection is for debugging. It should go away once
 * the code has stabilized.
 */
void
rpc_execute(struct rpc_task *task)
{
	static int	executing = 0;
	int		incr = RPC_IS_ASYNC(task) ? 1 : 0;

	if (incr) {
		if (rpc_inhibit) {
			printk(KERN_INFO "RPC: execution inhibited!\n");
			return;
		}
		if (executing)
			printk(KERN_WARNING "RPC: %d tasks executed\n", executing);
	}
	
	executing += incr;
	__rpc_execute(task);
	executing -= incr;
}
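
The static counter is only a debugging aid, but the bracketing pattern
is self-contained. A sketch of the same guard, with do_work() standing
in for __rpc_execute():

#include <stdio.h>

static int executing;	/* debug-only reentrancy counter */

static void do_work(void)
{
	/* body elided */
}

static void guarded_execute(int async)
{
	int incr = async ? 1 : 0;

	if (incr && executing)
		fprintf(stderr, "warning: %d tasks already executing\n",
			executing);

	executing += incr;
	do_work();
	executing -= incr;
}
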
Example 3
/**
 * xprt_rdma_connect - try to establish a transport connection
 * @xprt: transport state
 * @task: RPC scheduler context
 *
 */
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rx_connect_worker,
				      xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
		else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	} else {
		schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rx_connect_worker);
	}
}
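
The reconnect branch is exponential backoff with clamping: each attempt
doubles reestablish_timeout, and the result is bounded to the interval
[RPCRDMA_INIT_REEST_TO, RPCRDMA_MAX_REEST_TO]. A standalone sketch of
that schedule, with made-up one- and thirty-second bounds:

#include <stdio.h>

#define INIT_REEST_TO	 1UL	/* stand-in for RPCRDMA_INIT_REEST_TO */
#define MAX_REEST_TO	30UL	/* stand-in for RPCRDMA_MAX_REEST_TO */

/* Return the delay for this attempt and advance the schedule. */
static unsigned long next_backoff(unsigned long *timeout)
{
	unsigned long delay = *timeout;

	*timeout <<= 1;
	if (*timeout > MAX_REEST_TO)
		*timeout = MAX_REEST_TO;
	else if (*timeout < INIT_REEST_TO)
		*timeout = INIT_REEST_TO;
	return delay;
}

int main(void)
{
	unsigned long to = INIT_REEST_TO;
	int i;

	for (i = 0; i < 8; i++)
		printf("attempt %d: wait %lus\n", i, next_backoff(&to));
	return 0;	/* prints 1, 2, 4, 8, 16, 30, 30, 30 */
}
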
Example 4
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with 
 * interrupts disabled to protect the wait queue operation.
 */
static inline void
rpc_make_runnable(struct rpc_task *task)
{
	if (task->tk_timeout) {
		printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
		return;
	}
	if (RPC_IS_ASYNC(task)) {
		int status;
		status = rpc_add_wait_queue(&schedq, task);
		if (status) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
		}
		wake_up(&rpciod_idle);
	} else {
		wake_up(&task->tk_wait);
	}
	task->tk_flags |= RPC_TASK_RUNNING;
}
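
A userspace analog of the synchronous path, assuming POSIX threads in
place of kernel wait queues: mark the task runnable under a lock, then
signal the sleeper. This sketches the idea only; the kernel version
relies on wait queues and interrupt-off protection instead of a mutex:

#include <pthread.h>
#include <stdbool.h>

struct utask {
	pthread_mutex_t	lock;
	pthread_cond_t	wait;
	bool		running;
};

#define UTASK_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }

/* Analog of rpc_make_runnable() for a sync task. */
static void utask_make_runnable(struct utask *t)
{
	pthread_mutex_lock(&t->lock);
	t->running = true;
	pthread_cond_signal(&t->wait);	/* analogous to wake_up() */
	pthread_mutex_unlock(&t->lock);
}

/* Analog of the sync task sleeping until woken. */
static void utask_wait_until_runnable(struct utask *t)
{
	pthread_mutex_lock(&t->lock);
	while (!t->running)
		pthread_cond_wait(&t->wait, &t->lock);
	pthread_mutex_unlock(&t->lock);
}
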
Example 5
/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt_test_and_set_connecting(xprt))
		return;

	if (xprt->sock != NULL) {
		dprintk("RPC:      xs_connect delayed xprt %p for %lu seconds\n",
				xprt, xprt->reestablish_timeout / HZ);
		schedule_delayed_work(&xprt->connect_worker,
					xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
	} else {
		dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
		schedule_work(&xprt->connect_worker);

		/* flush_scheduled_work can sleep... */
		if (!RPC_IS_ASYNC(task))
			flush_scheduled_work();
	}
}
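
xprt_test_and_set_connecting() is what stops concurrent callers from
queueing duplicate connect work. The same guard can be sketched in
portable C11 with an atomic flag; the worker that would clear the flag
on completion is elided here:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag connecting = ATOMIC_FLAG_INIT;

static void start_connect(void)
{
	if (atomic_flag_test_and_set(&connecting))
		return;		/* a connect is already in flight */

	printf("scheduling connect worker\n");
	/* The worker would release the guard when done:
	 * atomic_flag_clear(&connecting);
	 */
}
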
Example 6
/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_regbuf *rb;

	if (buffer == NULL)
		return;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
	req = rb->rg_owner;
	if (req->rl_backchannel)
		return;

	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);

	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);

	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req,
					    !RPC_IS_ASYNC(req->rl_task));

	rpcrdma_buffer_put(req);
}
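
The container_of() call above recovers the owning rpcrdma_regbuf from
the raw buffer address the upper layer hands back. A self-contained
userspace version of the idiom (the kernel macro adds type checking
that this simple form omits):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct regbuf {
	int	owner_id;
	char	base[64];	/* callers only ever see &rb->base[0] */
};

int main(void)
{
	struct regbuf rb = { .owner_id = 42 };
	char *buffer = rb.base;	/* what a caller would hand back */
	struct regbuf *found = container_of(buffer, struct regbuf, base);

	printf("owner_id = %d\n", found->owner_id);	/* prints 42 */
	return 0;
}
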
Example 7
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int
__rpc_execute(struct rpc_task *task)
{
	unsigned long	oldflags;
	int		status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	if (!RPC_IS_RUNNING(task)) {
		printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
		return 0;
	}

	while (1) {
		/*
		 * Execute any pending callback.
		 */
		if (task->tk_flags & RPC_TASK_CALLBACK) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);
	
			task->tk_flags &= ~RPC_TASK_CALLBACK;
			/*
			 * If a callback exists, save it, clear it, then
			 * invoke it. Saving it first keeps us from
			 * clobbering any new callback the handler itself
			 * may install. - Dave
			 */
			if (task->tk_callback) {
				save_callback = task->tk_callback;
				task->tk_callback = NULL;
				save_callback(task);
			}
		}

		/*
		 * No handler for next step means exit.
		 */
		if (!task->tk_action)
			break;

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (RPC_IS_RUNNING(task) && task->tk_action)
			task->tk_action(task);

		/*
		 * Check whether task is sleeping.
		 * Note that if the task may go to sleep in tk_action,
		 * and the RPC reply arrives before we get here, it will
		 * have state RUNNING, but will still be on schedq.
		 */
		save_flags(oldflags); cli();
		if (RPC_IS_RUNNING(task)) {
			if (task->tk_rpcwait == &schedq)
				rpc_remove_wait_queue(task);
		} else while (!RPC_IS_RUNNING(task)) {
			if (RPC_IS_ASYNC(task)) {
				restore_flags(oldflags);
				return 0;
			}

			/* sync task: sleep here */
			dprintk("RPC: %4d sync task going to sleep\n",
							task->tk_pid);
			if (current->pid == rpciod_pid)
				printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");
			sleep_on(&task->tk_wait);

			/*
		 * If the task received a signal, remove it from
		 * any wait queue and make it runnable again.
			 */
			if (signalled())
				__rpc_wake_up(task);

			dprintk("RPC: %4d sync task resuming\n",
							task->tk_pid);
		}
		restore_flags(oldflags);

		/*
		 * When a sync task receives a signal, it exits with
		 * -ERESTARTSYS. In order to catch any callbacks that
		 * clean up after sleeping on some queue, we don't
		 * break the loop here, but go around once more.
		 */
		if (!RPC_IS_ASYNC(task) && signalled()) {
			dprintk("RPC: %4d got signal\n", task->tk_pid);
			rpc_exit(task, -ERESTARTSYS);
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	if (task->tk_exit) {
		status = task->tk_status;
		task->tk_exit(task);
	}

	return status;
}
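
Stripped of the locking, signal handling, and wait-queue logic, the
core of __rpc_execute is a function-pointer state machine: run
tk_action until some step leaves it NULL. A minimal standalone sketch
of that loop:

#include <stdio.h>

struct fsm_task {
	void	(*action)(struct fsm_task *);
	int	status;
};

static void step_done(struct fsm_task *t)
{
	t->status = 0;
	t->action = NULL;	/* a NULL action terminates the loop */
}

static void step_start(struct fsm_task *t)
{
	printf("start step\n");
	t->action = step_done;	/* install the next state */
}

static int fsm_execute(struct fsm_task *t)
{
	while (t->action)
		t->action(t);
	return t->status;
}

int main(void)
{
	struct fsm_task t = { .action = step_start };
	return fsm_execute(&t);
}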