Example #1
File: tlx.c  Project: alhazred/onarm
/*
 * Create a tli/xti endpoint, either bound to the address specified in
 * 'instance' for non-RPC services, else a kernel chosen address.
 * Returns -1 on failure, else the bound file descriptor.
 */
int
create_bound_endpoint(const instance_t *inst, tlx_info_t *tlx_info)
{
	int			fd;
	int			qlen;
	const char		*fmri = inst->fmri;
	struct netbuf		*reqaddr;
	struct netbuf		*retaddr;
	struct netbuf		netbuf;
	struct sockaddr_storage	ss;
	rpc_info_t		*rpc = tlx_info->pr_info.ri;

	debug_msg("Entering create_bound_endpoint");

	if ((fd = t_open(tlx_info->dev_name, O_RDWR, NULL)) == -1) {
		error_msg(gettext("Failed to open transport %s for "
		    "instance %s, proto %s: %s"), tlx_info->dev_name,
		    fmri, tlx_info->pr_info.proto, t_strerror(t_errno));
		return (-1);
	}

	if (tlx_info->pr_info.v6only) {
		int	on = 1;

		/* restrict to IPv6 communications only */
		if (tlx_setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on,
		    sizeof (on)) == -1) {
			(void) t_close(fd);
			return (-1);
		}
	}

	/*
	 * Negotiate the return of the remote uid on loopback transports
	 * for RPC services. This must be done before the endpoint is
	 * bound using t_bind(), so that any requests arriving on it
	 * carry the uid.
	 */
	if ((rpc != NULL) && (rpc->is_loopback))
		svc_fd_negotiate_ucred(fd);

	/*
	 * Bind the service's address to the endpoint and set up the connection
	 * backlog. In the case of RPC services, we specify a NULL requested
	 * address and accept what we're given, storing the returned address
	 * for later RPC binding. In the case of non-RPC services we specify
	 * the service's associated address.
	 */
	if (rpc != NULL) {
		reqaddr = NULL;
		retaddr = &(rpc->netbuf);
	} else {
		reqaddr = &(tlx_info->local_addr);
		netbuf.buf = (char *)&ss;
		netbuf.maxlen = sizeof (ss);
		retaddr = &netbuf;
	}

	/* ignored for connectionless services */
	qlen = inst->config->basic->conn_backlog;

	if ((tlx_bind(fd, reqaddr, retaddr, qlen) == -1) ||
	    ((reqaddr != NULL) && !netbufs_equal(reqaddr, retaddr))) {
		error_msg(gettext("Failed to bind to the requested address "
		    "for instance %s, proto %s"), fmri,
		    tlx_info->pr_info.proto);
		(void) t_close(fd);
		return (-1);
	}

	return (fd);
}
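
The endpoint above only negotiates delivery of the caller's credentials; they are read later, once a request arrives on the bound transport. The following is a minimal sketch of that retrieval step, assuming a hypothetical check_caller() helper invoked from a service routine; svc_getcallerucred() and ucred_geteuid() are the Solaris interfaces used.

#include <rpc/rpc.h>
#include <ucred.h>

/*
 * Hypothetical helper: fetch the ucred negotiated on a loopback
 * transport and report the effective uid of the calling process.
 * Returns -1 if no credentials are available on this transport.
 */
static int
check_caller(const SVCXPRT *xprt, uid_t *uidp)
{
	ucred_t *uc = NULL;

	/* only succeeds on endpoints where svc_fd_negotiate_ucred() ran */
	if (svc_getcallerucred(xprt, &uc) != 0)
		return (-1);

	*uidp = ucred_geteuid(uc);
	ucred_free(uc);
	return (*uidp == (uid_t)-1 ? -1 : 0);
}
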
Example #2
File: fmd_rpc.c  Project: andreiw/polaris
/*
 * We use our own private version of svc_create() which registers our services
 * only on loopback transports and enables an option whereby Solaris ucreds
 * are associated with each connection, permitting us to check privilege bits.
 */
static int
fmd_rpc_svc_create_local(void (*disp)(struct svc_req *, SVCXPRT *),
    rpcprog_t prog, rpcvers_t vers, uint_t ssz, uint_t rsz, int force)
{
	struct netconfig *ncp;
	struct netbuf buf;
	SVCXPRT *xprt;
	void *hdl;
	int fd, n = 0;

	char door[PATH_MAX];
	time_t tm;

	if ((hdl = setnetconfig()) == NULL) {
		fmd_error(EFMD_RPC_REG, "failed to iterate over "
		    "netconfig database: %s\n", nc_sperror());
		return (fmd_set_errno(EFMD_RPC_REG));
	}

	if (force)
		svc_unreg(prog, vers); /* clear stale rpcbind registrations */

	buf.buf = alloca(_SS_MAXSIZE);
	buf.maxlen = _SS_MAXSIZE;
	buf.len = 0;

	while ((ncp = getnetconfig(hdl)) != NULL) {
		if (strcmp(ncp->nc_protofmly, NC_LOOPBACK) != 0)
			continue;

		if (!force && rpcb_getaddr(prog, vers, ncp, &buf, HOST_SELF)) {
			(void) endnetconfig(hdl);
			return (fmd_set_errno(EFMD_RPC_BOUND));
		}

		if ((fd = t_open(ncp->nc_device, O_RDWR, NULL)) == -1) {
			fmd_error(EFMD_RPC_REG, "failed to open %s: %s\n",
			    ncp->nc_device, t_strerror(t_errno));
			continue;
		}

		svc_fd_negotiate_ucred(fd); /* enable ucred option on xprt */

		if ((xprt = svc_tli_create(fd, ncp, NULL, ssz, rsz)) == NULL) {
			(void) t_close(fd);
			continue;
		}

		if (svc_reg(xprt, prog, vers, disp, ncp) == FALSE) {
			fmd_error(EFMD_RPC_REG, "failed to register "
			    "rpc service on %s\n", ncp->nc_netid);
			svc_destroy(xprt);
			continue;
		}

		n++;
	}

	(void) endnetconfig(hdl);

	/*
	 * If we failed to register services (n == 0) because rpcbind is down,
	 * then check to see if the RPC door file exists before attempting an
	 * svc_door_create(), which cleverly destroys any existing door file.
	 * The RPC APIs have no stable errnos, so we use rpcb_gettime() as a
	 * hack to determine if rpcbind itself is down.
	 */
	if (!force && n == 0 && rpcb_gettime(HOST_SELF, &tm) == FALSE &&
	    snprintf(door, sizeof (door), RPC_DOOR_RENDEZVOUS,
	    prog, vers) > 0 && access(door, F_OK) == 0)
		return (fmd_set_errno(EFMD_RPC_BOUND));

	/*
	 * Attempt to create a door server for the RPC program as well.  Limit
	 * the maximum request size for the door transport to the receive size.
	 */
	if ((xprt = svc_door_create(disp, prog, vers, ssz)) == NULL) {
		fmd_error(EFMD_RPC_REG, "failed to create door for "
		    "rpc service 0x%lx/0x%lx\n", prog, vers);
	} else {
		(void) svc_control(xprt, SVCSET_CONNMAXREC, &rsz);
		n++;
	}

	return (n);
}
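
The ucred option negotiated on each loopback transport above is what lets the dispatch routine check the caller's privilege bits. The sketch below shows one way such a check could look; the caller_has_sys_config() name is a hypothetical illustration, while svc_getcallerucred(), ucred_getprivset(), and priv_ismember() are the Solaris interfaces involved.

#include <rpc/rpc.h>
#include <ucred.h>
#include <priv.h>

/*
 * Hypothetical check: return non-zero if the calling process holds
 * PRIV_SYS_CONFIG in its effective set.  Relies on the ucred option
 * enabled on the transport with svc_fd_negotiate_ucred().
 */
static int
caller_has_sys_config(const SVCXPRT *xprt)
{
	const priv_set_t *ps;
	ucred_t *uc = NULL;
	int rv = 0;

	if (svc_getcallerucred(xprt, &uc) != 0)
		return (0);	/* no credentials: treat as unprivileged */

	if ((ps = ucred_getprivset(uc, PRIV_EFFECTIVE)) != NULL &&
	    priv_ismember(ps, PRIV_SYS_CONFIG))
		rv = 1;

	ucred_free(uc);
	return (rv);
}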