Example No. 1
static int
ng_ksocket_connect(hook_p hook)
{
	node_p node = NG_HOOK_NODE(hook);
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct socket *const so = priv->so;

	/* Add our hook for incoming data and other events */
	SOCKBUF_LOCK(&priv->so->so_rcv);
	soupcall_set(priv->so, SO_RCV, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&priv->so->so_rcv);
	SOCKBUF_LOCK(&priv->so->so_snd);
	soupcall_set(priv->so, SO_SND, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&priv->so->so_snd);
	SOCK_LOCK(priv->so);
	priv->so->so_state |= SS_NBIO;
	SOCK_UNLOCK(priv->so);
	/*
	 * --Original comment--
	 * On a cloned socket we may have already received one or more
	 * upcalls which we couldn't handle without a hook.  Handle
	 * those now.
	 * We cannot call the upcall function directly
	 * from here, because until this function has returned our
	 * hook isn't connected.
	 *
	 * ---meta comment for -current ---
	 * XXX This is dubious.
	 * Upcalls between the time that the hook was
	 * first created and now (on another processor) will
	 * be earlier on the queue than the request to finalise the hook.
	 * By the time the hook is finalised,
	 * the queued upcalls will have happened and the code
	 * will have discarded them because of the lack of a hook
	 * (socket not open).
	 *
	 * This is a bad byproduct of the complicated way in which hooks
	 * are now created (3 daisy-chained async events).
	 *
	 * Since we are a netgraph operation,
	 * we know that we hold a lock on this node.  This forces the
	 * request we make below to be queued rather than implemented
	 * immediately, which will cause the upcall function to be called
	 * a bit later.
	 * However, as we will run any waiting queued operations immediately
	 * after doing this one, if we have not finalised the other end
	 * of the hook, those queued operations will fail.
	 */
	if (priv->flags & KSF_CLONED) {
		ng_send_fn(node, NULL, &ng_ksocket_incoming2, so, M_NOWAIT);
	}

	return (0);
}
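Each of these examples installs its handler with soupcall_set(); the handler must match FreeBSD's so_upcall_t signature from <sys/socketvar.h>. It is invoked from sowakeup() with the corresponding socket-buffer lock held, so it cannot sleep and normally just schedules deferred work (ng_ksocket_incoming defers to netgraph, which is what the long comment above discusses), and it returns SU_OK to keep the upcall installed. The sketch below only illustrates that shape; the handler name, the softc layout, and the taskqueue-based deferral are assumptions, not code from any of these examples.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>

/*
 * Hypothetical per-instance state; rx_task is assumed to have been set up
 * elsewhere with TASK_INIT().
 */
struct example_softc {
	struct task	rx_task;
};

/*
 * Hypothetical receive upcall.  Runs with the so_rcv SOCKBUF_LOCK held and
 * the M_NOWAIT wait flag, so it must not sleep; it only schedules work.
 */
static int
example_rcv_upcall(struct socket *so, void *arg, int waitflag)
{
	struct example_softc *sc = arg;

	/* 'arg' is whatever was passed as the last soupcall_set() argument. */
	taskqueue_enqueue(taskqueue_swi, &sc->rx_task);
	return (SU_OK);		/* keep the upcall installed */
}

It would be registered and removed with the same locking pattern used throughout these examples: soupcall_set(so, SO_RCV, example_rcv_upcall, sc) under SOCKBUF_LOCK(&so->so_rcv), and soupcall_clear(so, SO_RCV) under the same lock on teardown.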
Example No. 2
/*
 * Usage:
 *	xprt = svc_vc_create(sock, send_buf_size, recv_buf_size);
 *
 * Creates, registers, and returns an (RPC) TCP-based transport.
 * Once *xprt is initialized, it is registered as a transport
 * (see svc.h, xprt_register).  This routine returns
 * NULL if a problem occurred.
 *
 * The socket passed in is expected to be bound but not yet
 * connected.
 *
 * Since streams do buffered I/O similar to stdio, the caller can specify
 * how big the send and receive buffers are via the second and third
 * parameters; 0 => use the system default.
 */
SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt = NULL;
	struct sockaddr* sa;
	int error;

	SOCK_LOCK(so);
	if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
		SOCK_UNLOCK(so);
		CURVNET_SET(so->so_vnet);
		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
		CURVNET_RESTORE();
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}
	SOCK_UNLOCK(so);

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error) {
		goto cleanup_svc_vc_create;
	}

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	solisten(so, -1, curthread);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		sx_destroy(&xprt->xp_lock);
		svc_xprt_free(xprt);
	}
	return (NULL);
}
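svc_vc_create() expects a socket that is bound but not yet connected; an already-connected socket takes the early path through svc_vc_create_conn() (example 5). The sketch below shows how a hypothetical in-kernel caller might hand it a freshly bound TCP socket, assuming an SVCPOOL created elsewhere; everything other than the standard socket and RPC calls (socreate, sobind, soclose, svc_vc_create) is an assumption.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <netinet/in.h>
#include <rpc/rpc.h>

/*
 * Hypothetical caller: create and bind a TCP socket, then register it with
 * svc_vc_create() as the rendezvous (listening) transport for 'pool'.
 */
static int
example_add_tcp_transport(SVCPOOL *pool, struct sockaddr *sa)
{
	struct socket *so;
	SVCXPRT *xprt;
	int error;

	error = socreate(sa->sa_family, &so, SOCK_STREAM, IPPROTO_TCP,
	    curthread->td_ucred, curthread);
	if (error != 0)
		return (error);
	error = sobind(so, sa, curthread);
	if (error != 0) {
		soclose(so);
		return (error);
	}
	/* 0, 0 => use the system default send and receive buffer sizes. */
	xprt = svc_vc_create(pool, so, 0, 0);
	if (xprt == NULL) {
		soclose(so);
		return (EINVAL);
	}
	return (0);
}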
Example No. 3
SVCXPRT *
svc_dg_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct __rpc_sockinfo si;
	struct sockaddr* sa;
	int error;

	if (!__rpc_socket2sockinfo(so, &si)) {
		printf(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		printf(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_dg_ops;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error)
		goto freedata;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, svc_dg_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
freedata:
	(void) printf(svc_dg_str, __no_mem_str);
	if (xprt) {
		svc_xprt_free(xprt);
	}
	return (NULL);
}
Example No. 4
/*
 * XXX Need to implement reconnecting as necessary.  If that were to be
 *     needed, most likely all current vnodes would have to be renegotiated
 *     or otherwise invalidated (a la NFS "stale file handle").
 */
static int
p9fs_connect(struct mount *mp)
{
	struct p9fsmount *p9mp = VFSTOP9(mp);
	struct p9fs_session *p9s = &p9mp->p9_session;
	struct socket *so;
	int error;

	error = socreate(p9s->p9s_sockaddr.sa_family, &p9s->p9s_sock,
	    p9s->p9s_socktype, p9s->p9s_proto, curthread->td_ucred, curthread);
	if (error != 0) {
		vfs_mount_error(mp, "socreate");
		goto out;
	}

	so = p9s->p9s_sock;
	error = soconnect(so, &p9s->p9s_sockaddr, curthread);
	SOCK_LOCK(so);
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
		    "connec", 0);
		if (error)
			break;
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
	SOCK_UNLOCK(so);
	if (error) {
		vfs_mount_error(mp, "soconnect");
		if (error == EINTR)
			so->so_state &= ~SS_ISCONNECTING;
		goto out;
	}

	if (so->so_proto->pr_flags & PR_CONNREQUIRED)
		p9fs_setsockopt(so, SO_KEEPALIVE);
	if (so->so_proto->pr_protocol == IPPROTO_TCP)
		p9fs_setsockopt(so, TCP_NODELAY);

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, p9fs_client_upcall, p9mp);
	SOCKBUF_UNLOCK(&so->so_rcv);

	error = 0;

out:
	return (error);
}
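The upcall installed at the end of p9fs_connect() has to be removed under the same socket-buffer lock before the socket goes away. A minimal teardown sketch follows; the function name is hypothetical, but the p9s_sock field and the lock/clear/close sequence mirror the connect code above (the p9fs session definitions are assumed to come from the module's own headers).

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

/*
 * Hypothetical counterpart to p9fs_connect(): remove the receive upcall and
 * close the session socket.
 */
static void
example_p9fs_disconnect(struct p9fs_session *p9s)
{
	struct socket *so = p9s->p9s_sock;

	if (so == NULL)
		return;
	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_clear(so, SO_RCV);
	SOCKBUF_UNLOCK(&so->so_rcv);
	soclose(so);
	p9s->p9s_sock = NULL;
}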
Example No. 5
/*
 * Create a new transport for a socket obtained via soaccept().
 */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;
	struct sockaddr* sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	CURVNET_SET(so->so_vnet);
	error = sosetopt(so, &opt);
	if (error) {
		CURVNET_RESTORE();
		return (NULL);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			CURVNET_RESTORE();
			return (NULL);
		}
	}
	CURVNET_RESTORE();

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		mem_free(xprt, sizeof(*xprt));
	}
	if (cd)
		mem_free(cd, sizeof(*cd));
	return (NULL);
}
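The two sosetopt() blocks at the top of svc_vc_create_conn() (SO_KEEPALIVE and TCP_NODELAY) repeat the same boilerplate, and p9fs_connect() in example 4 hides the same pattern behind p9fs_setsockopt(). A small helper in that spirit might look like the sketch below; the helper name is an assumption.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

/* Hypothetical helper: set a single integer-valued socket option to 1. */
static int
example_set_opt_one(struct socket *so, int level, int name)
{
	struct sockopt opt;
	int one = 1;

	bzero(&opt, sizeof(opt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = level;
	opt.sopt_name = name;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	return (sosetopt(so, &opt));
}

With such a helper, the two blocks above reduce to example_set_opt_one(so, SOL_SOCKET, SO_KEEPALIVE) and example_set_opt_one(so, IPPROTO_TCP, TCP_NODELAY), still bracketed by CURVNET_SET(so->so_vnet)/CURVNET_RESTORE().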
Example No. 6
/*
 * Handle the first completed incoming connection, assumed to be already
 * on the socket's so_comp queue.
 */
static void
ng_ksocket_finish_accept(priv_p priv)
{
	struct socket *const head = priv->so;
	struct socket *so;
	struct sockaddr *sa = NULL;
	struct ng_mesg *resp;
	struct ng_ksocket_accept *resp_data;
	node_p node;
	priv_p priv2;
	int len;
	int error;

	ACCEPT_LOCK();
	so = TAILQ_FIRST(&head->so_comp);
	if (so == NULL) {	/* Should never happen */
		ACCEPT_UNLOCK();
		return;
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	SOCK_LOCK(so);
	soref(so);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	/* XXX KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0); */

	soaccept(so, &sa);

	len = OFFSETOF(struct ng_ksocket_accept, addr);
	if (sa != NULL)
		len += sa->sa_len;

	NG_MKMESSAGE(resp, NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, len,
	    M_NOWAIT);
	if (resp == NULL) {
		soclose(so);
		goto out;
	}
	resp->header.flags |= NGF_RESP;
	resp->header.token = priv->response_token;

	/* Clone a ksocket node to wrap the new socket */
	error = ng_make_node_common(&ng_ksocket_typestruct, &node);
	if (error) {
		free(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	if (ng_ksocket_constructor(node) != 0) {
		NG_NODE_UNREF(node);
		free(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	priv2 = NG_NODE_PRIVATE(node);
	priv2->so = so;
	priv2->flags |= KSF_CLONED | KSF_EMBRYONIC;

	/*
	 * Insert the cloned node into a list of embryonic children
	 * on the parent node.  When a hook is created on the cloned
	 * node it will be removed from this list.  When the parent
	 * is destroyed it will destroy any embryonic children it has.
	 */
	LIST_INSERT_HEAD(&priv->embryos, priv2, siblings);

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_LOCK(&so->so_snd);
	soupcall_set(so, SO_SND, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Fill in the response data and send it or return it to the caller */
	resp_data = (struct ng_ksocket_accept *)resp->data;
	resp_data->nodeid = NG_NODE_ID(node);
	if (sa != NULL)
		bcopy(sa, &resp_data->addr, sa->sa_len);
	NG_SEND_MSG_ID(error, node, resp, priv->response_addr, 0);

out:
	if (sa != NULL)
		free(sa, M_SONAME);
}