Example #1
File: svc.c  Project: xiandaicxsj/copyKvm
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	/* lockd_up is waiting for us to start up, so it will
	 * be holding a reference to this module; it is therefore
	 * safe to just claim another reference.
	 */
	__module_get(THIS_MODULE);
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	nlmsvc_serv = rqstp->rq_server;
	complete(&lockd_start_done);

	daemonize("lockd");

	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	/* kick rpciod */
	rpciod_up();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
		long timeout = MAX_SCHEDULE_TIMEOUT;

		if (signalled()) {
			flush_signals(current);
			if (nlmsvc_ops) {
				nlmsvc_invalidate_all();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period) {
			timeout = nlmsvc_retry_blocked();
		} else if (time_before(grace_period_expire, jiffies))
			clear_grace_period();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %08x\n",
			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));

		svc_process(rqstp);

	}

	flush_signals(current);

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_invalidate_all();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
		nlmsvc_serv = NULL;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* release rpciod */
	rpciod_down();

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}
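
For context, here is a minimal caller sketch, not taken from the project above: in this kernel era lockd_up() would typically create the svc_serv, hand the lockd() function above to svc_create_thread(), and block on lockd_start_done until the thread has recorded nlmsvc_pid. The helper name example_lockd_up and the svc_create() arguments are illustrative assumptions.

#include <linux/completion.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

static int example_lockd_up(void)		/* hypothetical helper, name assumed */
{
	struct svc_serv *serv;
	int error;

	/* Create the NLM service; program/buffer arguments are assumed. */
	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE);
	if (!serv)
		return -ENOMEM;

	/* Spawn the lockd() loop shown above as an RPC service thread. */
	error = svc_create_thread(lockd, serv);
	if (!error)
		wait_for_completion(&lockd_start_done);	/* pairs with complete() in lockd() */

	/* svc_serv structures start with a use count of one; drop ours. */
	svc_destroy(serv);
	return error;
}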
Example #2
/*
 * The way this works is that the mount process passes a structure
 * in the data argument which contains the server's IP address
 * and the root file handle obtained from the server's mount
 * daemon. We stash these away in the private superblock fields.
 */
struct super_block *
nfs_read_super(struct super_block *sb, void *raw_data, int silent)
{
    struct nfs_mount_data	*data = (struct nfs_mount_data *) raw_data;
    struct nfs_server	*server;
    struct rpc_xprt		*xprt;
    struct rpc_clnt		*clnt;
    struct nfs_fh		*root_fh;
    struct inode		*root_inode;
    unsigned int		authflavor;
    int			tcp;
    struct sockaddr_in	srvaddr;
    struct rpc_timeout	timeparms;
    struct nfs_fattr	fattr;

    MOD_INC_USE_COUNT;
    if (!data)
        goto out_miss_args;

    /* No NFS V3. */
    if (data->flags & NFS_MOUNT_VER3)
        goto out_fail;

    /* Don't complain if "mount" is newer. */
    if (data->version < NFS_MOUNT_VERSION) {
        printk("nfs warning: mount version %s than kernel\n",
               data->version < NFS_MOUNT_VERSION ? "older" : "newer");
        if (data->version < 2)
            data->namlen = 0;
        if (data->version < 3)
            data->bsize  = 0;
    }

    /* We now require that the mount process passes the remote address */
    memcpy(&srvaddr, &data->addr, sizeof(srvaddr));
    if (srvaddr.sin_addr.s_addr == INADDR_ANY)
        goto out_no_remote;

    lock_super(sb);

    sb->s_flags |= MS_ODD_RENAME; /* This should go away */

    sb->s_magic      = NFS_SUPER_MAGIC;
    sb->s_op         = &nfs_sops;
    sb->s_blocksize  = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
    sb->u.nfs_sb.s_root = data->root;
    server           = &sb->u.nfs_sb.s_server;
    server->rsize    = nfs_block_size(data->rsize, NULL);
    server->wsize    = nfs_block_size(data->wsize, NULL);
    server->flags    = data->flags;

    if (data->flags & NFS_MOUNT_NOAC) {
        data->acregmin = data->acregmax = 0;
        data->acdirmin = data->acdirmax = 0;
    }
    server->acregmin = data->acregmin*HZ;
    server->acregmax = data->acregmax*HZ;
    server->acdirmin = data->acdirmin*HZ;
    server->acdirmax = data->acdirmax*HZ;

    server->hostname = kmalloc(strlen(data->hostname) + 1, GFP_KERNEL);
    if (!server->hostname)
        goto out_unlock;
    strcpy(server->hostname, data->hostname);

    /* Which protocol do we use? */
    tcp   = (data->flags & NFS_MOUNT_TCP);

    /* Initialize timeout values */
    timeparms.to_initval = data->timeo * HZ / 10;
    timeparms.to_retries = data->retrans;
    timeparms.to_maxval  = tcp? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT;
    timeparms.to_exponential = 1;

    /* Now create transport and client */
    xprt = xprt_create_proto(tcp? IPPROTO_TCP : IPPROTO_UDP,
                             &srvaddr, &timeparms);
    if (xprt == NULL)
        goto out_no_xprt;

    /* Choose authentication flavor */
    authflavor = RPC_AUTH_UNIX;
    if (data->flags & NFS_MOUNT_SECURE)
        authflavor = RPC_AUTH_DES;
    else if (data->flags & NFS_MOUNT_KERBEROS)
        authflavor = RPC_AUTH_KRB;

    clnt = rpc_create_client(xprt, server->hostname, &nfs_program,
                             NFS_VERSION, authflavor);
    if (clnt == NULL)
        goto out_no_client;

    clnt->cl_intr     = (data->flags & NFS_MOUNT_INTR)? 1 : 0;
    clnt->cl_softrtry = (data->flags & NFS_MOUNT_SOFT)? 1 : 0;
    clnt->cl_chatty   = 1;
    server->client    = clnt;

    /* Fire up rpciod if not yet running */
    if (rpciod_up() != 0)
        goto out_no_iod;

    /*
     * Keep the super block locked while we try to get
     * the root fh attributes.
     */
    root_fh = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
    if (!root_fh)
        goto out_no_fh;
    *root_fh = data->root;

    if (nfs_proc_getattr(server, root_fh, &fattr) != 0)
        goto out_no_fattr;

    clnt->cl_to_err = 1;

    root_inode = __nfs_fhget(sb, &fattr);
    if (!root_inode)
        goto out_no_root;
    sb->s_root = d_alloc_root(root_inode, NULL);
    if (!sb->s_root)
        goto out_no_root;
    sb->s_root->d_op = &nfs_dentry_operations;
    sb->s_root->d_fsdata = root_fh;

    /* We're airborne */
    unlock_super(sb);

    /* Check whether to start the lockd process */
    if (!(server->flags & NFS_MOUNT_NONLM))
        lockd_up();
    return sb;

    /* Yargs. It didn't work out. */
out_no_root:
    printk("nfs_read_super: get root inode failed\n");
    iput(root_inode);
    goto out_free_fh;

out_no_fattr:
    printk("nfs_read_super: get root fattr failed\n");
out_free_fh:
    kfree(root_fh);
out_no_fh:
    rpciod_down();
    goto out_shutdown;

out_no_iod:
    printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
out_shutdown:
    rpc_shutdown_client(server->client);
    goto out_free_host;

out_no_client:
    printk(KERN_WARNING "NFS: cannot create RPC client.\n");
    xprt_destroy(xprt);
    goto out_free_host;

out_no_xprt:
    printk(KERN_WARNING "NFS: cannot create RPC transport.\n");

out_free_host:
    kfree(server->hostname);
out_unlock:
    unlock_super(sb);
    goto out_fail;

out_no_remote:
    printk("NFS: mount program didn't pass remote address!\n");
    goto out_fail;

out_miss_args:
    printk("nfs_read_super: missing data argument\n");

out_fail:
    sb->s_dev = 0;
    MOD_DEC_USE_COUNT;
    return NULL;
}
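
As a reminder of how a read_super routine like this gets invoked, here is a minimal registration sketch assuming the 2.2-era VFS interface; the struct layout and the init function name are assumptions, not part of the example. mount(2) looks up the file_system_type by name and calls its read_super entry, which is the function above.

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical registration glue; field order follows the old
 * { name, fs_flags, read_super, next } layout. */
static struct file_system_type nfs_fs_type = {
    "nfs",              /* name used by "mount -t nfs" */
    0,                  /* fs_flags */
    nfs_read_super,     /* the routine shown above */
    NULL
};

int init_nfs_fs(void)
{
    return register_filesystem(&nfs_fs_type);
}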
Example #3
/*
 * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
 */
void
nfsd4_probe_callback(struct nfs4_client *clp)
{
	struct sockaddr_in	addr, saddr;
	struct nfs4_callback    *cb = &clp->cl_callback;
	struct rpc_timeout	timeparms;
	struct rpc_xprt *	xprt;
	struct rpc_program *	program = &cb->cb_program;
	struct rpc_stat *	stat = &cb->cb_stat;
	struct rpc_clnt *	clnt;
	struct rpc_message msg = {
		.rpc_proc       = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
		.rpc_argp       = clp,
	};
	char                    hostname[32];
	int status;

	if (atomic_read(&cb->cb_set))
		return;

	/* Initialize address */
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(cb->cb_port);
	addr.sin_addr.s_addr = htonl(cb->cb_addr);

	/* Initialize timeout */
	timeparms.to_initval = (NFSD_LEASE_TIME/4) * HZ;
	timeparms.to_retries = 0;
	timeparms.to_maxval = (NFSD_LEASE_TIME/2) * HZ;
	timeparms.to_exponential = 1;

	/* Create RPC transport */
	xprt = xprt_create_proto(IPPROTO_TCP, &addr, &timeparms);
	if (IS_ERR(xprt)) {
		dprintk("NFSD: couldn't create callback transport!\n");
		goto out_err;
	}

	/* Initialize rpc_program */
	program->name = "nfs4_cb";
	program->number = cb->cb_prog;
	program->nrvers = ARRAY_SIZE(nfs_cb_version);
	program->version = nfs_cb_version;
	program->stats = stat;

	/* Initialize rpc_stat */
	memset(stat, 0, sizeof(struct rpc_stat));
	stat->program = program;

	/* Create RPC client
 	 *
	 * XXX AUTH_UNIX only - need AUTH_GSS....
	 */
	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(addr.sin_addr.s_addr));
	clnt = rpc_new_client(xprt, hostname, program, 1, RPC_AUTH_UNIX);
	if (IS_ERR(clnt)) {
		dprintk("NFSD: couldn't create callback client\n");
		goto out_err;
	}
	clnt->cl_intr = 0;
	clnt->cl_softrtry = 1;

	/* Set source address */
	if (cb->cb_saddr){
		memset(&saddr, 0, sizeof(saddr));
		saddr.sin_family = AF_INET;
		saddr.sin_addr.s_addr = cb->cb_saddr;
		xprt->srcaddr = saddr;
	}

	/* Kick rpciod, put the call on the wire. */

	if (rpciod_up() != 0) {
		dprintk("nfsd: couldn't start rpciod for callbacks!\n");
		goto out_clnt;
	}

	cb->cb_client = clnt;

	/* the task holds a reference to the nfs4_client struct */
	atomic_inc(&clp->cl_count);

	msg.rpc_cred = nfsd4_lookupcred(clp,0);
	if (IS_ERR(msg.rpc_cred))
		goto out_rpciod;
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL);
	put_rpccred(msg.rpc_cred);

	if (status != 0) {
		dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n");
		goto out_rpciod;
	}
	return;

out_rpciod:
	atomic_dec(&clp->cl_count);
	rpciod_down();
	cb->cb_client = NULL;
out_clnt:
	rpc_shutdown_client(clnt);
out_err:
	dprintk("NFSD: warning: no callback path to client %.*s\n",
		(int)clp->cl_name.len, clp->cl_name.data);
}
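
The asynchronous NFSPROC4_CB_NULL issued above needs a completion handler behind nfs4_cb_null_ops. A plausible sketch, assuming the handler name, put_nfs4_client() and the rpc_call_ops layout match this kernel era, would mark the callback channel as usable and drop the reference taken on clp->cl_count:

/* Hypothetical completion callback for the CB_NULL probe above. */
static void nfs4_cb_null_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
	struct nfs4_callback *cb = &clp->cl_callback;

	if (task->tk_status == 0)
		atomic_set(&cb->cb_set, 1);	/* callback path verified */
	put_nfs4_client(clp);			/* balances atomic_inc(&clp->cl_count) */
}

static const struct rpc_call_ops nfs4_cb_null_ops = {
	.rpc_call_done = nfs4_cb_null_done,
};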
Example #4
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	MOD_INC_USE_COUNT;
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	up(&lockd_start);

	daemonize();
	reparent_to_init();
	sprintf(current->comm, "lockd");

	/* Process request with signals blocked.  */
	spin_lock_irq(&current->sighand->siglock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* kick rpciod */
	rpciod_up();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid)
	{
		long timeout = MAX_SCHEDULE_TIMEOUT;
		if (signalled()) {
			spin_lock_irq(&current->sighand->siglock);
			flush_signals(current);
			spin_unlock_irq(&current->sighand->siglock);
			if (nlmsvc_ops) {
				nlmsvc_ops->detach();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period)
			timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(serv, rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %08x\n",
			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));

		/*
		 * Look up the NFS client handle. The handle is needed for
		 * all but the GRANTED callback RPCs.
		 */
		rqstp->rq_client = NULL;
		if (nlmsvc_ops) {
			nlmsvc_ops->exp_readlock();
			rqstp->rq_client =
				nlmsvc_ops->exp_getclient(&rqstp->rq_addr);
		}

		if (nlmsvc_grace_period &&
		    time_before(grace_period_expire, jiffies))
			nlmsvc_grace_period = 0;
		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		if (nlmsvc_ops)
			nlmsvc_ops->exp_unlock();
	}

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_ops->detach();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);
		
	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* release rpciod */
	rpciod_down();

	/* Release module */
	MOD_DEC_USE_COUNT;
}
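
For reference, the nlmsvc_ops table dereferenced in this older variant (detach, exp_readlock, exp_getclient, exp_unlock) is provided by nfsd when it binds to lockd. The sketch below only shows the operations the loop above actually calls, with member types inferred from the call sites; the struct name and exact layout are assumptions.

/* Hypothetical shape of the binding ops used above (inferred from call sites). */
struct example_nlmsvc_binding {
	void			(*detach)(void);	/* drop nfsd's export references */
	void			(*exp_readlock)(void);	/* lock export tables for reading */
	void			(*exp_unlock)(void);	/* unlock export tables */
	struct svc_client *	(*exp_getclient)(struct sockaddr_in *);
						/* return type assumed to match rqstp->rq_client */
};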