static int cryptomgr_test_cipher(void *data)
{
	struct crypto_test_param *param = data;
	u32 type = param->type;

	crypto_alg_tested(param->driver,
			  alg_test(param->driver, param->alg, type,
				   CRYPTO_ALG_TESTED));

	kfree(param);
	module_put_and_exit(0);
}
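Every example in this collection ends with module_put_and_exit(0) instead of a plain return. For reference, the helper behind that macro amounts to roughly the following (a paraphrase of kernel/module.c from this era, included only as context, not as part of the example above): it drops the reference the thread holds on its own module and then terminates the task, so the call never returns to the caller.

/* Approximate definition, for reference only; module.h wraps it as
 * #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);	/* drop the reference pinned when the thread was created */
	do_exit(code);		/* terminate this task; never returns */
}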
Example #2
/*
 * This is the callback kernel thread.
 */
static void nfs_callback_svc(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	int err;

	__module_get(THIS_MODULE);
	lock_kernel();

	nfs_callback_info.pid = current->pid;
	daemonize("nfsv4-svc");
	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	complete(&nfs_callback_info.started);

	for (;;) {
		if (signalled()) {
			if (nfs_callback_info.users == 0)
				break;
			flush_signals(current);
		}
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "%s: terminating on error %d\n",
			       __FUNCTION__, -err);
			break;
		}
		dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
			NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
		svc_process(serv, rqstp);
	}
	flush_signals(current);
	svc_exit_thread(rqstp);
	nfs_callback_info.pid = 0;
	complete(&nfs_callback_info.stopped);
	unlock_kernel();
	module_put_and_exit(0);
}
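The loop above only breaks once the thread has been signalled and nfs_callback_info.users has dropped to zero, and the final complete(&nfs_callback_info.stopped) releases whoever initiated the shutdown. A minimal sketch of what that shutdown side might look like in this era of the code, using the then-current kill_proc() helper; the function name and body are illustrative, not the actual nfs_callback_down():

/* Hypothetical shutdown sketch -- not taken from the example above. */
static void nfs_callback_down_sketch(void)
{
	if (--nfs_callback_info.users > 0)
		return;
	if (nfs_callback_info.pid != 0) {
		/* SIGKILL makes signalled() return true inside nfs_callback_svc(). */
		kill_proc(nfs_callback_info.pid, SIGKILL, 1);
		/* nfs_callback_svc() completes this just before exiting. */
		wait_for_completion(&nfs_callback_info.stopped);
	}
}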
Example #3
/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
	int err;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {

		if (signal_pending(current))
			flush_signals(current);
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		svc_process(rqstp);
	}
	svc_exit_thread(rqstp);
	module_put_and_exit(0);
	return 0;
}
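Unlike the two older threads, nfs4_callback_svc() is written against the kthread API: it polls kthread_freezable_should_stop() instead of waiting to be signalled. A minimal sketch of how a thread in this style is typically started and stopped; callback_start(), callback_stop() and callback_task are hypothetical names, not taken from the NFS code, and the module reference grabbed before kthread_run() is the one the thread later releases via module_put_and_exit():

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/sunrpc/svc.h>

static struct task_struct *callback_task;	/* hypothetical */

static int callback_start(struct svc_rqst *rqstp)
{
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* The thread drops this reference in module_put_and_exit(). */
	callback_task = kthread_run(nfs4_callback_svc, rqstp, "nfsv4-svc");
	if (IS_ERR(callback_task)) {
		int err = PTR_ERR(callback_task);

		module_put(THIS_MODULE);
		return err;
	}
	return 0;
}

static void callback_stop(void)
{
	/* Makes kthread_freezable_should_stop() return true in the loop above. */
	kthread_stop(callback_task);
}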
Example #4
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	/* lockd_up is waiting for us to startup, so will
	 * be holding a reference to this module, so it
	 * is safe to just claim another reference
	 */
	__module_get(THIS_MODULE);
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	nlmsvc_serv = rqstp->rq_server;
	complete(&lockd_start_done);

	daemonize("lockd");
	set_freezable();

	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		char buf[RPC_MAX_ADDRBUFLEN];

		if (signalled()) {
			flush_signals(current);
			if (nlmsvc_ops) {
				nlmsvc_invalidate_all();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period) {
			timeout = nlmsvc_retry_blocked();
		} else if (time_before(grace_period_expire, jiffies))
			clear_grace_period();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}

	flush_signals(current);

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_invalidate_all();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
		nlmsvc_serv = NULL;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}
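The request loop above alternates between retrying blocked locks and enforcing the grace period armed by set_grace_period(). A rough sketch of what those two helpers amount to; the bodies are simplified (the real fs/lockd/svc.c versions also honour the nlm_grace_period tunable) and should be read as an approximation, not the actual code:

/* Simplified sketch -- the real helpers also consult nlm_grace_period. */
static unsigned long set_grace_period(void)
{
	nlmsvc_grace_period = 1;
	/* By default the grace period lasts a few lock timeouts. */
	return jiffies + 5 * nlm_timeout * HZ;
}

static void clear_grace_period(void)
{
	nlmsvc_grace_period = 0;
}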
Example #5
/*
 * This is the NFS server kernel thread
 */
static void
nfsd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct fs_struct *fsp;
	int		err;
	struct nfsd_list me;
	sigset_t shutdown_mask, allowed_mask;

	/* Lock module and set up kernel thread */
	lock_kernel();
	daemonize("nfsd");
	current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	/* After daemonize() this kernel thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	fsp = copy_fs_struct(current->fs);
	if (!fsp) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}
	exit_fs(current);
	current->fs = fsp;
	current->fs->umask = 0;

	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
	siginitsetinv(&allowed_mask, ALLOWED_SIGS);

	nfsdstats.th_cnt++;

	lockd_up();				/* start lockd */

	me.task = current;
	list_add(&me.list, &nfsd_list);

	unlock_kernel();

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Block all but the shutdown signals */
		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(serv, rqstp,
				       60*60*HZ)) == -EAGAIN)
			;
		if (err < 0)
			break;
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_inc(&nfsd_busy);

		/* Lock the export hash tables for reading. */
		exp_readlock();

		/* Process request with signals blocked.  */
		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		exp_readunlock();
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_dec(&nfsd_busy);
	}

	if (err != -EINTR) {
		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
	} else {
		unsigned int	signo;

		for (signo = 1; signo <= _NSIG; signo++)
			if (sigismember(&current->pending.signal, signo) &&
			    !sigismember(&current->blocked, signo))
				break;
		err = signo;
	}

	lock_kernel();

	/* Release lockd */
	lockd_down();

	/* Check if this is last thread */
	if (serv->sv_nrthreads == 1) {
		printk(KERN_WARNING "nfsd: last server has exited\n");
		if (err != SIG_NOCLEAN) {
			printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
			nfsd_export_flush();
		}
		nfsd_serv = NULL;
		nfsd_racache_shutdown();	/* release read-ahead cache */
		nfs4_state_shutdown();
	}
	list_del(&me.list);
	nfsdstats.th_cnt--;

out:
	/* Release the thread */
	svc_exit_thread(rqstp);

	/* Release module */
	module_put_and_exit(0);
}
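nfsd() relies on three macros that are not shown above: ALLOWED_SIGS and SHUTDOWN_SIGS are fed to siginitsetinv(), so each resulting mask blocks everything except the listed signals, and SIG_NOCLEAN is the shutdown signal that skips unexporting. Approximately as defined in fs/nfsd/nfssvc.c of this era; treat the exact signal sets as a best-effort reconstruction:

/* Best-effort reconstruction; the exact sets may differ slightly. */
#define SIG_NOCLEAN	SIGHUP
#define ALLOWED_SIGS	(sigmask(SIGKILL))
#define SHUTDOWN_SIGS	(sigmask(SIGKILL) | sigmask(SIGHUP) | \
			 sigmask(SIGINT)  | sigmask(SIGQUIT))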