Example #1
/*
 * Decrement the user count and bring down lockd if we're the last.
 */
void
lockd_down(struct net *net)
{
	mutex_lock(&nlmsvc_mutex);
	lockd_down_net(nlmsvc_rqst->rq_server, net);
	if (nlmsvc_users) {
		if (--nlmsvc_users)
			goto out;
	} else {
		printk(KERN_ERR "lockd_down: no users! task=%p\n",
			nlmsvc_task);
		BUG();
	}

	if (!nlmsvc_task) {
		printk(KERN_ERR "lockd_down: no lockd running.\n");
		BUG();
	}
	kthread_stop(nlmsvc_task);
	dprintk("lockd_down: service stopped\n");
	svc_exit_thread(nlmsvc_rqst);
	dprintk("lockd_down: service destroyed\n");
	nlmsvc_task = NULL;
	nlmsvc_rqst = NULL;
out:
	mutex_unlock(&nlmsvc_mutex);
}
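The same first-user/last-user pattern recurs throughout these examples: a mutex protects a user count, the first user starts the kernel thread and the last user stops it. Below is a minimal, self-contained sketch of that pattern; every example_* name is invented for illustration, and only the mutex and kthread helpers are real kernel APIs.

#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/err.h>

static DEFINE_MUTEX(example_mutex);
static struct task_struct *example_task;
static int example_users;

/* Stand-in worker; a real service would block receiving requests here. */
static int example_worker(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

/* First caller starts the worker; later callers only bump the count. */
static int example_up(void)
{
	int error = 0;

	mutex_lock(&example_mutex);
	if (!example_users) {
		example_task = kthread_run(example_worker, NULL, "example");
		if (IS_ERR(example_task)) {
			error = PTR_ERR(example_task);
			example_task = NULL;
			goto out;
		}
	}
	example_users++;
out:
	mutex_unlock(&example_mutex);
	return error;
}

/* Last caller stops the worker, mirroring lockd_down() above. */
static void example_down(void)
{
	mutex_lock(&example_mutex);
	if (!--example_users && example_task) {
		kthread_stop(example_task);
		example_task = NULL;
	}
	mutex_unlock(&example_mutex);
}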
Example #2
/*
 * Create a server thread
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	int		error = -ENOMEM;

	rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out;

	memset(rqstp, 0, sizeof(*rqstp));
	if (!(rqstp->rq_argp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !(rqstp->rq_resp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !svc_init_buffer(&rqstp->rq_defbuf, serv->sv_bufsz))
		goto out_thread;

	serv->sv_nrthreads++;
	rqstp->rq_server = serv;
	error = kernel_thread((int (*)(void *)) func, rqstp, 0);
	if (error < 0)
		goto out_thread;
	error = 0;
out:
	return error;

out_thread:
	svc_exit_thread(rqstp);
	goto out;
}
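For context, a caller of this era would hand svc_create_thread() a function with the svc_thread_fn signature, much as the lockd() thread function shown in later examples. The sketch below is hypothetical; example_start_service_thread() is not a kernel function.

/*
 * Hypothetical caller sketch: spawn one service thread running
 * an svc_thread_fn-compatible function on an existing svc_serv.
 */
static int example_start_service_thread(struct svc_serv *serv,
					svc_thread_fn fn)
{
	int error;

	error = svc_create_thread(fn, serv);
	if (error < 0)
		printk(KERN_WARNING
		       "example: svc_create_thread failed, error=%d\n",
		       error);
	return error;
}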
Example #3
/*
 * Decrement the user count and bring down lockd if we're the last.
 */
void
lockd_down(void)
{
	mutex_lock(&nlmsvc_mutex);
	if (nlmsvc_users) {
		if (--nlmsvc_users) {
			lockd_down_net(current->nsproxy->net_ns);
			goto out;
		}
	} else {
		printk(KERN_ERR "lockd_down: no users! task=%p\n",
			nlmsvc_task);
		BUG();
	}

	if (!nlmsvc_task) {
		printk(KERN_ERR "lockd_down: no lockd running.\n");
		BUG();
	}
	kthread_stop(nlmsvc_task);
	svc_exit_thread(nlmsvc_rqst);
	nlmsvc_task = NULL;
	nlmsvc_rqst = NULL;
out:
	mutex_unlock(&nlmsvc_mutex);
}
Example #4
/*
 * Create a server thread
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	int		error = -ENOMEM;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out;

	init_waitqueue_head(&rqstp->rq_wait);

	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !svc_init_buffer(rqstp, serv->sv_bufsz))
		goto out_thread;

	serv->sv_nrthreads++;
	rqstp->rq_server = serv;
	error = kernel_thread((int (*)(void *)) func, rqstp, 0);
	if (error < 0)
		goto out_thread;
	svc_sock_update_bufs(serv);
	error = 0;
out:
	return error;

out_thread:
	svc_exit_thread(rqstp);
	goto out;
}
Example #5
/*
 * Bring up the callback thread if it is not already up.
 */
int nfs_callback_up(void)
{
	struct svc_serv *serv = NULL;
	int ret = 0;

	mutex_lock(&nfs_callback_mutex);
	if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
		goto out;
	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE,
				nfs_callback_family, NULL);
	ret = -ENOMEM;
	if (!serv)
		goto out_err;

	ret = svc_create_xprt(serv, "tcp", nfs_callback_set_tcpport,
			      SVC_SOCK_ANONYMOUS);
	if (ret <= 0)
		goto out_err;
	nfs_callback_tcpport = ret;
	dprintk("NFS: Callback listener port = %u (af %u)\n",
			nfs_callback_tcpport, nfs_callback_family);

	nfs_callback_info.rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
	if (IS_ERR(nfs_callback_info.rqst)) {
		ret = PTR_ERR(nfs_callback_info.rqst);
		nfs_callback_info.rqst = NULL;
		goto out_err;
	}

	svc_sock_update_bufs(serv);

	nfs_callback_info.task = kthread_run(nfs_callback_svc,
					     nfs_callback_info.rqst,
					     "nfsv4-svc");
	if (IS_ERR(nfs_callback_info.task)) {
		ret = PTR_ERR(nfs_callback_info.task);
		svc_exit_thread(nfs_callback_info.rqst);
		nfs_callback_info.rqst = NULL;
		nfs_callback_info.task = NULL;
		goto out_err;
	}
out:
	/*
	 * svc_create creates the svc_serv with sv_nrthreads == 1, and then
	 * svc_prepare_thread increments that. So we need to call svc_destroy
	 * on both success and failure so that the refcount is 1 when the
	 * thread exits.
	 */
	if (serv)
		svc_destroy(serv);
	mutex_unlock(&nfs_callback_mutex);
	return ret;
out_err:
	dprintk("NFS: Couldn't create callback socket or server thread; "
		"err = %d\n", ret);
	nfs_callback_info.users--;
	goto out;
}
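The user counting above implies a simple contract for callers: only a successful nfs_callback_up() needs a matching nfs_callback_down(), because the error path already gives back the reference it took. A hypothetical caller sketch, with an invented example_* name:

static int example_with_callbacks(void)
{
	int err;

	err = nfs_callback_up();
	if (err < 0)
		return err;	/* failure path dropped its own count */

	/* ... issue NFSv4 operations that may trigger callbacks ... */

	nfs_callback_down();
	return 0;
}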
Example #6
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  The caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
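The "-1 assumes caller has done a svc_get()" note above suggests the usual calling pattern: take a reference on the svc_serv, resize, then drop the reference. A hedged sketch follows; example_resize_service() is invented, and the release call is written as svc_destroy() because that is what the other examples in this file use (the exact pairing varies between kernel versions).

static int example_resize_service(struct svc_serv *serv, int nrservs)
{
	int error;

	svc_get(serv);			/* accounts for the -1 above */
	error = svc_set_num_threads(serv, NULL, nrservs);
	svc_destroy(serv);		/* drop our reference again */
	return error;
}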
Example #7
/*
 * Kill the callback thread if it's no longer being used.
 */
void nfs_callback_down(void)
{
	mutex_lock(&nfs_callback_mutex);
	nfs_callback_info.users--;
	if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL) {
		kthread_stop(nfs_callback_info.task);
		svc_exit_thread(nfs_callback_info.rqst);
		nfs_callback_info.rqst = NULL;
		nfs_callback_info.task = NULL;
	}
	mutex_unlock(&nfs_callback_mutex);
}
Example #8
/*
 * This is the callback kernel thread.
 */
static void nfs_callback_svc(struct svc_rqst *rqstp)
{
    struct svc_serv *serv = rqstp->rq_server;
    int err;

    __module_get(THIS_MODULE);
    lock_kernel();

    nfs_callback_info.pid = current->pid;
    daemonize("nfsv4-svc");
    /* Process request with signals blocked, but allow SIGKILL.  */
    allow_signal(SIGKILL);

    complete(&nfs_callback_info.started);

    for(;;) {
        if (signalled()) {
            if (nfs_callback_info.users == 0)
                break;
            flush_signals(current);
        }
        /*
         * Listen for a request on the socket
         */
        err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
        if (err == -EAGAIN || err == -EINTR)
            continue;
        if (err < 0) {
            printk(KERN_WARNING
                   "%s: terminating on error %d\n",
                   __FUNCTION__, -err);
            break;
        }
        dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
                NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
        svc_process(serv, rqstp);
    }
    flush_signals(current);
    svc_exit_thread(rqstp);
    nfs_callback_info.pid = 0;
    complete(&nfs_callback_info.stopped);
    unlock_kernel();
    module_put_and_exit(0);
}
Example #9
static int lockd_start_svc(struct svc_serv *serv)
{
	int error;

	if (nlmsvc_rqst)
		return 0;

	/*
	 * Create the kernel thread and wait for it to start.
	 */
	nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
	if (IS_ERR(nlmsvc_rqst)) {
		error = PTR_ERR(nlmsvc_rqst);
		printk(KERN_WARNING
			"lockd_up: svc_rqst allocation failed, error=%d\n",
			error);
		goto out_rqst;
	}

	svc_sock_update_bufs(serv);
	serv->sv_maxconn = nlm_max_connections;

	nlmsvc_task = kthread_create(lockd, nlmsvc_rqst, "%s", serv->sv_name);
	if (IS_ERR(nlmsvc_task)) {
		error = PTR_ERR(nlmsvc_task);
		printk(KERN_WARNING
			"lockd_up: kthread_run failed, error=%d\n", error);
		goto out_task;
	}
	nlmsvc_rqst->rq_task = nlmsvc_task;
	wake_up_process(nlmsvc_task);

	dprintk("lockd_up: service started\n");
	return 0;

out_task:
	svc_exit_thread(nlmsvc_rqst);
	nlmsvc_task = NULL;
out_rqst:
	nlmsvc_rqst = NULL;
	return error;
}
Example #10
/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
	int err;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {

		if (signal_pending(current))
			flush_signals(current);
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		svc_process(rqstp);
	}
	svc_exit_thread(rqstp);
	module_put_and_exit(0);
	return 0;
}
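Stripped of the RPC plumbing, the loop above is the standard freezable-kthread skeleton: call set_freezable() once, then poll kthread_freezable_should_stop() so that both the freezer and kthread_stop() from the creator can end the thread. A minimal sketch with an invented thread function:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int example_svc_loop(void *data)
{
	set_freezable();
	while (!kthread_freezable_should_stop(NULL)) {
		/* a real service thread would block in svc_recv() here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}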
Example #11
/*
 * Bring up the lockd process if it's not already up.
 */
int lockd_up(void)
{
	struct svc_serv *serv;
	int		error = 0;

	mutex_lock(&nlmsvc_mutex);
	/*
	 * Check whether we're already up and running.
	 */
	if (nlmsvc_rqst)
		goto out;

	/*
	 * Sanity check: if there's no pid,
	 * we should be the first user ...
	 */
	if (nlmsvc_users)
		printk(KERN_WARNING
			"lockd_up: no pid, %d users??\n", nlmsvc_users);

	error = -ENOMEM;
	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
	if (!serv) {
		printk(KERN_WARNING "lockd_up: create service failed\n");
		goto out;
	}

	error = make_socks(serv);
	if (error < 0)
		goto destroy_and_out;

	/*
	 * Create the kernel thread and wait for it to start.
	 */
	nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
	if (IS_ERR(nlmsvc_rqst)) {
		error = PTR_ERR(nlmsvc_rqst);
		nlmsvc_rqst = NULL;
		printk(KERN_WARNING
			"lockd_up: svc_rqst allocation failed, error=%d\n",
			error);
		goto destroy_and_out;
	}

	svc_sock_update_bufs(serv);
	serv->sv_maxconn = nlm_max_connections;

	nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
	if (IS_ERR(nlmsvc_task)) {
		error = PTR_ERR(nlmsvc_task);
		svc_exit_thread(nlmsvc_rqst);
		nlmsvc_task = NULL;
		nlmsvc_rqst = NULL;
		printk(KERN_WARNING
			"lockd_up: kthread_run failed, error=%d\n", error);
		goto destroy_and_out;
	}

	/*
	 * Note: svc_serv structures have an initial use count of 1,
	 * so we exit through here on both success and failure.
	 */
destroy_and_out:
	svc_destroy(serv);
out:
	if (!error)
		nlmsvc_users++;
	mutex_unlock(&nlmsvc_mutex);
	return error;
}
Example #12
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	/* lockd_up is waiting for us to start up, so it will
	 * be holding a reference to this module, making it
	 * safe to just claim another reference
	 */
	__module_get(THIS_MODULE);
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	nlmsvc_serv = rqstp->rq_server;
	complete(&lockd_start_done);

	daemonize("lockd");
	set_freezable();

	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		char buf[RPC_MAX_ADDRBUFLEN];

		if (signalled()) {
			flush_signals(current);
			if (nlmsvc_ops) {
				nlmsvc_invalidate_all();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period) {
			timeout = nlmsvc_retry_blocked();
		} else if (time_before(grace_period_expire, jiffies))
			clear_grace_period();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}

	flush_signals(current);

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_invalidate_all();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
		nlmsvc_serv = NULL;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}
Example #13
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	MOD_INC_USE_COUNT;
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	up(&lockd_start);

	daemonize();
	reparent_to_init();
	sprintf(current->comm, "lockd");

	/* Process request with signals blocked.  */
	spin_lock_irq(&current->sighand->siglock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* kick rpciod */
	rpciod_up();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid)
	{
		long timeout = MAX_SCHEDULE_TIMEOUT;
		if (signalled()) {
			spin_lock_irq(&current->sighand->siglock);
			flush_signals(current);
			spin_unlock_irq(&current->sighand->siglock);
			if (nlmsvc_ops) {
				nlmsvc_ops->detach();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period)
			timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(serv, rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %08x\n",
			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));

		/*
		 * Look up the NFS client handle. The handle is needed for
		 * all but the GRANTED callback RPCs.
		 */
		rqstp->rq_client = NULL;
		if (nlmsvc_ops) {
			nlmsvc_ops->exp_readlock();
			rqstp->rq_client =
				nlmsvc_ops->exp_getclient(&rqstp->rq_addr);
		}

		if (nlmsvc_grace_period &&
		    time_before(grace_period_expire, jiffies))
			nlmsvc_grace_period = 0;
		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		if (nlmsvc_ops)
			nlmsvc_ops->exp_unlock();
	}

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_ops->detach();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);
		
	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* release rpciod */
	rpciod_down();

	/* Release module */
	MOD_DEC_USE_COUNT;
}
Example #14
int lockd_up(struct net *net)
{
    struct svc_serv *serv;
    int		error = 0;
    struct lockd_net *ln = net_generic(net, lockd_net_id);

    mutex_lock(&nlmsvc_mutex);
    if (nlmsvc_rqst) {
        error = lockd_up_net(nlmsvc_rqst->rq_server, net);
        goto out;
    }

    if (nlmsvc_users)
        printk(KERN_WARNING
               "lockd_up: no pid, %d users??\n", nlmsvc_users);

    error = -ENOMEM;
    serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
    if (!serv) {
        printk(KERN_WARNING "lockd_up: create service failed\n");
        goto out;
    }

    error = svc_bind(serv, net);
    if (error < 0) {
        printk(KERN_WARNING "lockd_up: bind service failed\n");
        goto destroy_and_out;
    }

    ln->nlmsvc_users++;

    error = make_socks(serv, net);
    if (error < 0)
        goto err_start;

    nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
    if (IS_ERR(nlmsvc_rqst)) {
        error = PTR_ERR(nlmsvc_rqst);
        nlmsvc_rqst = NULL;
        printk(KERN_WARNING
               "lockd_up: svc_rqst allocation failed, error=%d\n",
               error);
        goto err_start;
    }

    svc_sock_update_bufs(serv);
    serv->sv_maxconn = nlm_max_connections;

    nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
    if (IS_ERR(nlmsvc_task)) {
        error = PTR_ERR(nlmsvc_task);
        svc_exit_thread(nlmsvc_rqst);
        nlmsvc_task = NULL;
        nlmsvc_rqst = NULL;
        printk(KERN_WARNING
               "lockd_up: kthread_run failed, error=%d\n", error);
        goto err_start;
    }

destroy_and_out:
    svc_destroy(serv);
out:
    if (!error)
        nlmsvc_users++;
    mutex_unlock(&nlmsvc_mutex);
    return error;

err_start:
    lockd_down_net(serv, net);
    goto destroy_and_out;
}
/*
 * This is the NFS server kernel thread
 */
static void
nfsd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct fs_struct *fsp;
	int		err;
	struct nfsd_list me;
	sigset_t shutdown_mask, allowed_mask;

	/* Lock module and set up kernel thread */
	lock_kernel();
	daemonize("nfsd");
	current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	/* After daemonize() this kernel thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	fsp = copy_fs_struct(current->fs);
	if (!fsp) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}
	exit_fs(current);
	current->fs = fsp;
	current->fs->umask = 0;

	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
	siginitsetinv(&allowed_mask, ALLOWED_SIGS);

	nfsdstats.th_cnt++;

	lockd_up();				/* start lockd */

	me.task = current;
	list_add(&me.list, &nfsd_list);

	unlock_kernel();

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Block all but the shutdown signals */
		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(serv, rqstp,
				       60*60*HZ)) == -EAGAIN)
			;
		if (err < 0)
			break;
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_inc(&nfsd_busy);

		/* Lock the export hash tables for reading. */
		exp_readlock();

		/* Process request with signals blocked.  */
		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		exp_readunlock();
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_dec(&nfsd_busy);
	}

	if (err != -EINTR) {
		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
	} else {
		unsigned int	signo;

		for (signo = 1; signo <= _NSIG; signo++)
			if (sigismember(&current->pending.signal, signo) &&
			    !sigismember(&current->blocked, signo))
				break;
		err = signo;
	}

	lock_kernel();

	/* Release lockd */
	lockd_down();

	/* Check if this is last thread */
	if (serv->sv_nrthreads==1) {
		
		printk(KERN_WARNING "nfsd: last server has exited\n");
		if (err != SIG_NOCLEAN) {
			printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
			nfsd_export_flush();
		}
		nfsd_serv = NULL;
	        nfsd_racache_shutdown();	/* release read-ahead cache */
		nfs4_state_shutdown();
	}
	list_del(&me.list);
	nfsdstats.th_cnt --;

out:
	/* Release the thread */
	svc_exit_thread(rqstp);

	/* Release module */
	module_put_and_exit(0);
}