Example 1
LWS_VISIBLE int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	lws_sockfd_type our_fd = 0, tmp_fd;
	struct lws_tokens eff_buf;
	unsigned int pending = 0;
	struct lws *wsi, *wsi1;
	char draining_flow = 0;
	int timed_out = 0;
	time_t now;
	int n, m;
	int more;

	/*
	 * you can call us with pollfd = NULL to just allow the
	 * once-per-second global timeout checks; if less than a second
	 * has passed since the last check, it returns immediately.
	 */

	time(&now);

	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

		lws_plat_service_periodic(context);

		/* global timeout check once per second */

		if (pollfd)
			our_fd = pollfd->fd;

		wsi = context->pt[tsi].timeout_list;
		while (wsi) {
			/* we have to take copies, because the wsi may be deleted */
			wsi1 = wsi->timeout_list;
			tmp_fd = wsi->sock;
			if (lws_service_timeout_check(wsi, (unsigned int)now)) {
				/* it did time out... */
				if (tmp_fd == our_fd)
					/* the socket we came to service */
					timed_out = 1;
				/* the wsi is gone; no need to mark as handled */
			}
			wsi = wsi1;
		}
#if 0
		{
			char s[300], *p = s;

			for (n = 0; n < context->count_threads; n++)
				p += sprintf(p, " %7lu (%5d), ",
					     context->pt[n].count_conns,
					     context->pt[n].fds_count);

			lwsl_notice("load: %s\n", s);
		}
#endif
	}

	/* the socket we came to service timed out, nothing to do */
	if (timed_out)
		return 0;

	/* just here for timeout management? */
	if (!pollfd)
		return 0;

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

#if LWS_POSIX

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
						       (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#endif

	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWSCM_HTTP_SERVING:
	case LWSCM_HTTP_SERVING_ACCEPTED:
	case LWSCM_SERVER_LISTENER:
	case LWSCM_SSL_ACK_PENDING:
		n = lws_server_socket_service(context, wsi, pollfd);
		if (n) /* closed by above */
			return 1;
		pending = lws_ssl_pending(wsi);
		if (pending)
			goto handle_pending;
		goto handled;

	case LWSCM_WS_SERVING:
	case LWSCM_WS_CLIENT:
	case LWSCM_HTTP2_SERVING:

		/* 1: something requested a callback when it was OK to write */

		if ((pollfd->revents & LWS_POLLOUT) &&
		    (wsi->state == LWSS_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		     wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		     wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
				wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			lwsl_info("lws_service_fd: closing\n");
			goto close_and_handled;
		}
#if 1
		if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		    wsi->state == LWSS_AWAITING_CLOSE_ACK) {
			/*
			 * we stopped caring about anything except control
			 * packets.  Force flow control off, defeat tx
			 * draining.
			 */
			lws_rx_flow_control(wsi, 1);
			wsi->u.ws.tx_draining_ext = 0;
		}
#endif
		if (wsi->u.ws.tx_draining_ext) {
			/*
			 * We cannot deal with new RX until the TX ext path
			 * has been drained, because new rx would, eg,
			 * overwrite the wsi rx buffer that may be needed to
			 * retain state.
			 *
			 * The TX ext drain path MUST go through the event
			 * loop to avoid blocking.
			 */
			break;
		}

		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
			/* We cannot deal with any kind of new RX
			 * because we are RX-flowcontrolled.
			 */
			break;

		/* 2: RX Extension needs to be drained */

		if (wsi->state == LWSS_ESTABLISHED &&
		    wsi->u.ws.rx_draining_ext) {

			lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
#ifndef LWS_NO_CLIENT
			if (wsi->mode == LWSCM_WS_CLIENT) {
				n = lws_client_rx_sm(wsi, 0);
				if (n < 0)
					/* we closed wsi */
					n = 0;
			} else
#endif
				n = lws_rx_sm(wsi, 0);

			goto handled;
		}

		if (wsi->u.ws.rx_draining_ext)
			/*
			 * We have RX EXT content to drain, but can't do it
			 * right now.  That means we cannot do anything lower
			 * priority either.
			 */
			break;

		/* 3: RX Flowcontrol buffer needs to be drained */

		if (wsi->rxflow_buffer) {
			lwsl_info("draining rxflow (len %d)\n",
				wsi->rxflow_len - wsi->rxflow_pos
			);
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
						wsi->rxflow_pos;
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
			draining_flow = 1;
			goto drain;
		}

		/* 4: any incoming data ready?
		 * notice if rx flow going off raced poll(), rx flow wins
		 */
		if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
			break;
read:
		eff_buf.token_len = lws_ssl_capable_read(wsi, pt->serv_buf,
					pending ? pending : LWS_MAX_SOCKET_IO_BUF);
		switch (eff_buf.token_len) {
		case 0:
			lwsl_info("service_fd: closing due to 0 length read\n");
			goto close_and_handled;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lwsl_info("SSL Capable more service\n");
			n = 0;
			goto handled;
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_info("Closing when error\n");
			goto close_and_handled;
		}

		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens
		 * struct prepared with the default buffer and the content
		 * length that's in there.  Rather than rewrite the default
		 * buffer, extensions that expect to grow the buffer can
		 * adapt .token to point to their own per-connection buffer
		 * in the extension user allocation.  By default, with no
		 * extensions or no extension callback handling, the normal
		 * input buffer is used, so it is efficient.
		 */

		eff_buf.token = (char *)pt->serv_buf;
drain:

		do {
			more = 0;

			m = lws_ext_cb_active(wsi,
				LWS_EXT_CB_PACKET_RX_PREPARSE, &eff_buf, 0);
			if (m < 0)
				goto close_and_handled;
			if (m)
				more = 1;

			/* service incoming data */

			if (eff_buf.token_len) {
				/*
				 * if draining from the rxflow buffer, it is
				 * not critical to track how much was used,
				 * since lws_read() bumps wsi->rxflow_pos as
				 * it consumes; if we come around again it
				 * will pick up from where it left off.
				 */
				n = lws_read(wsi, (unsigned char *)eff_buf.token,
					     eff_buf.token_len);
				if (n < 0) {
					/* we closed wsi */
					n = 0;
					goto handled;
				}
			}

			eff_buf.token = NULL;
			eff_buf.token_len = 0;
		} while (more);

		pending = lws_ssl_pending(wsi);
		if (pending) {
handle_pending:
			pending = pending > LWS_MAX_SOCKET_IO_BUF ?
					LWS_MAX_SOCKET_IO_BUF : pending;
			goto read;
		}

		if (draining_flow && wsi->rxflow_buffer &&
				 wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free_set_NULL(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
			n =
#endif
			_lws_rx_flow_control(wsi);
			/* n ignored, needed for NO_SERVER case */
		}

		break;

	default:
#ifdef LWS_NO_CLIENT
		break;
#else
		n = lws_client_socket_service(context, wsi, pollfd);
		if (n)
			return 1;
		goto handled;
#endif
	}

	n = 0;
	goto handled;

close_and_handled:
	lwsl_debug("Close and handled\n");
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
	/*
	 * pollfd may point to something else after the close, due to the
	 * pollfd-swapping scheme used on delete on some platforms; we
	 * can't clear revents now because it would be some other
	 * connection's revents
	 */
	return 1;

handled:
	pollfd->revents = 0;
	return n;
}
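
Usage sketch (not from the example above): lws_service_fd() is the public
wrapper that lands here, and it can be driven from an external poll() loop.
A minimal sketch, assuming a POSIX build where lws_pollfd is struct pollfd
and the fds[] array has been kept in sync via the LWS_CALLBACK_ADD_POLL_FD
family of callbacks:

#include <libwebsockets.h>
#include <poll.h>

static void
service_loop(struct lws_context *context, struct lws_pollfd *fds, int nfds)
{
	int n, i;

	for (;;) {
		/* wake at least once a second so timeout checks still run */
		n = poll(fds, nfds, 1000);
		if (n < 0)
			break;

		if (!n) {
			/* nothing hot: NULL pollfd just runs the
			 * once-per-second global timeout checks */
			lws_service_fd(context, NULL);
			continue;
		}

		for (i = 0; i < nfds; i++)
			if (fds[i].revents)
				/* zeroes revents when it handled the fd */
				lws_service_fd(context, &fds[i]);
	}
}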
Example 2
File: client.c Project: 93i/godot
int
lws_client_socket_service(struct lws *wsi, struct lws_pollfd *pollfd,
			  struct lws *wsi_conn)
{
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	char *p = (char *)&pt->serv_buf[0];
	struct lws *w;
#if defined(LWS_WITH_TLS)
	char ebuf[128];
#endif
	const char *cce = NULL;
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	ssize_t len = 0;
	unsigned char c;
#endif
	char *sb = p;
	int n = 0;
#if defined(LWS_WITH_SOCKS5)
	char conn_mode = 0, pending_timeout = 0;
#endif

	if ((pollfd->revents & LWS_POLLOUT) &&
	     wsi->keepalive_active &&
	     wsi->dll_client_transaction_queue_head.next) {
		struct lws *wfound = NULL;

		lwsl_debug("%s: pollout HANDSHAKE2\n", __func__);

		/*
		 * We have a transaction queued that wants to pipeline.
		 *
		 * We have to allow it to send headers strictly in the order
		 * that it was queued, ie, tail-first.
		 */
		lws_vhost_lock(wsi->vhost);
		lws_start_foreach_dll_safe(struct lws_dll_lws *, d, d1,
					   wsi->dll_client_transaction_queue_head.next) {
			struct lws *w = lws_container_of(d, struct lws,
						  dll_client_transaction_queue);

			lwsl_debug("%s: %p states 0x%x\n", __func__, w, w->wsistate);
			if (lwsi_state(w) == LRS_H1C_ISSUE_HANDSHAKE2)
				wfound = w;
		} lws_end_foreach_dll_safe(d, d1);

		if (wfound) {
			/*
			 * pollfd has the master sockfd in it... we
			 * need to use that in HANDSHAKE2 to understand
			 * which wsi to actually write on
			 */
			lws_client_socket_service(wfound, pollfd, wsi);
			lws_callback_on_writable(wsi);
		} else
			lwsl_debug("%s: didn't find anything in txn q in HS2\n",
							   __func__);

		lws_vhost_unlock(wsi->vhost);

		return 0;
	}

	switch (lwsi_state(wsi)) {

	case LRS_WAITING_CONNECT:

		/*
		 * we are under PENDING_TIMEOUT_SENT_CLIENT_HANDSHAKE
		 * timeout protection set in client-handshake.c
		 */

		if (!lws_client_connect_2(wsi)) {
			/* closed */
			lwsl_client("closed\n");
			return -1;
		}

		/* either still pending connection, or changed mode */
		return 0;

#if defined(LWS_WITH_SOCKS5)
	/* SOCKS Greeting Reply */
	case LRS_WAITING_SOCKS_GREETING_REPLY:
	case LRS_WAITING_SOCKS_AUTH_REPLY:
	case LRS_WAITING_SOCKS_CONNECT_REPLY:

		/* handle the proxy hanging up on us */

		if (pollfd->revents & LWS_POLLHUP) {
			lwsl_warn("SOCKS connection %p (fd=%d) dead\n",
				  (void *)wsi, pollfd->fd);
			goto bail3;
		}

		n = recv(wsi->desc.sockfd, sb, context->pt_serv_buf_size, 0);
		if (n < 0) {
			if (LWS_ERRNO == LWS_EAGAIN) {
				lwsl_debug("SOCKS read EAGAIN, retrying\n");
				return 0;
			}
			lwsl_err("ERROR reading from SOCKS socket\n");
			goto bail3;
		}

		switch (lwsi_state(wsi)) {

		case LRS_WAITING_SOCKS_GREETING_REPLY:
			if (pt->serv_buf[0] != SOCKS_VERSION_5)
				goto socks_reply_fail;

			if (pt->serv_buf[1] == SOCKS_AUTH_NO_AUTH) {
				lwsl_client("SOCKS GR: No Auth Method\n");
				socks_generate_msg(wsi, SOCKS_MSG_CONNECT, &len);
				conn_mode = LRS_WAITING_SOCKS_CONNECT_REPLY;
				pending_timeout =
				   PENDING_TIMEOUT_AWAITING_SOCKS_CONNECT_REPLY;
				goto socks_send;
			}

			if (pt->serv_buf[1] == SOCKS_AUTH_USERNAME_PASSWORD) {
				lwsl_client("SOCKS GR: User/Pw Method\n");
				socks_generate_msg(wsi,
						   SOCKS_MSG_USERNAME_PASSWORD,
						   &len);
				conn_mode = LRS_WAITING_SOCKS_AUTH_REPLY;
				pending_timeout =
				      PENDING_TIMEOUT_AWAITING_SOCKS_AUTH_REPLY;
				goto socks_send;
			}
			goto socks_reply_fail;

		case LRS_WAITING_SOCKS_AUTH_REPLY:
			if (pt->serv_buf[0] != SOCKS_SUBNEGOTIATION_VERSION_1 ||
			    pt->serv_buf[1] != SOCKS_SUBNEGOTIATION_STATUS_SUCCESS)
				goto socks_reply_fail;

			lwsl_client("SOCKS password OK, sending connect\n");
			socks_generate_msg(wsi, SOCKS_MSG_CONNECT, &len);
			conn_mode = LRS_WAITING_SOCKS_CONNECT_REPLY;
			pending_timeout =
				   PENDING_TIMEOUT_AWAITING_SOCKS_CONNECT_REPLY;
socks_send:
			n = send(wsi->desc.sockfd, (char *)pt->serv_buf, len,
				 MSG_NOSIGNAL);
			if (n < 0) {
				lwsl_debug("ERROR writing to socks proxy\n");
				goto bail3;
			}

			lws_set_timeout(wsi, pending_timeout, AWAITING_TIMEOUT);
			lwsi_set_state(wsi, conn_mode);
			break;

socks_reply_fail:
			lwsl_notice("socks reply: v%d, err %d\n",
				    pt->serv_buf[0], pt->serv_buf[1]);
			goto bail3;

		case LRS_WAITING_SOCKS_CONNECT_REPLY:
			if (pt->serv_buf[0] != SOCKS_VERSION_5 ||
			    pt->serv_buf[1] != SOCKS_REQUEST_REPLY_SUCCESS)
				goto socks_reply_fail;

			lwsl_client("socks connect OK\n");

			/* free stash since we are done with it */
			lws_client_stash_destroy(wsi);
			if (lws_hdr_simple_create(wsi,
						  _WSI_TOKEN_CLIENT_PEER_ADDRESS,
						  wsi->vhost->socks_proxy_address))
				goto bail3;

			wsi->c_port = wsi->vhost->socks_proxy_port;

			/* clear the proxy connection timeout */
			lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
			goto start_ws_handshake;
		}
		break;
#endif

	case LRS_WAITING_PROXY_REPLY:

		/* handle the proxy hanging up on us */

		if (pollfd->revents & LWS_POLLHUP) {

			lwsl_warn("Proxy connection %p (fd=%d) dead\n",
				  (void *)wsi, pollfd->fd);

			goto bail3;
		}

		n = recv(wsi->desc.sockfd, sb, context->pt_serv_buf_size, 0);
		if (n < 0) {
			if (LWS_ERRNO == LWS_EAGAIN) {
				lwsl_debug("Proxy read EAGAIN... retrying\n");
				return 0;
			}
			lwsl_err("ERROR reading from proxy socket\n");
			goto bail3;
		}

		pt->serv_buf[13] = '\0';
		if (strcmp(sb, "HTTP/1.0 200 ") &&
		    strcmp(sb, "HTTP/1.1 200 ")) {
			lwsl_err("ERROR proxy: %s\n", sb);
			goto bail3;
		}

	/* clear the proxy connection timeout */

		lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);

		/* fallthru */

	case LRS_H1C_ISSUE_HANDSHAKE:

		/*
		 * we are under PENDING_TIMEOUT_SENT_CLIENT_HANDSHAKE
		 * timeout protection set in client-handshake.c
		 *
		 * take care of our lws_callback_on_writable
		 * happening at a time when there's no real connection yet
		 */
#if defined(LWS_WITH_SOCKS5)
start_ws_handshake:
#endif
		if (lws_change_pollfd(wsi, LWS_POLLOUT, 0))
			return -1;

#if defined(LWS_WITH_TLS)
		/* we can retry this... just cook the SSL BIO the first time */

		if ((wsi->tls.use_ssl & LCCSCF_USE_SSL) && !wsi->tls.ssl &&
		    lws_ssl_client_bio_create(wsi) < 0) {
			cce = "bio_create failed";
			goto bail3;
		}

		if (wsi->tls.use_ssl & LCCSCF_USE_SSL) {
			n = lws_ssl_client_connect1(wsi);
			if (!n)
				return 0;
			if (n < 0) {
				cce = "lws_ssl_client_connect1 failed";
				goto bail3;
			}
		} else
			wsi->tls.ssl = NULL;

		/* fallthru */

	case LRS_WAITING_SSL:

		if (wsi->tls.use_ssl & LCCSCF_USE_SSL) {
			n = lws_ssl_client_connect2(wsi, ebuf, sizeof(ebuf));
			if (!n)
				return 0;
			if (n < 0) {
				cce = ebuf;
				goto bail3;
			}
		} else
			wsi->tls.ssl = NULL;
#endif
#if defined (LWS_WITH_HTTP2)
		if (wsi->client_h2_alpn) {
			/*
			 * We connected to the server and set up tls, and
			 * negotiated "h2".
			 *
			 * So this is it, we are an h2 master client connection
			 * now, not an h1 client connection.
			 */
			lws_tls_server_conn_alpn(wsi);

			/* send the H2 preface to legitimize the connection */
			if (lws_h2_issue_preface(wsi)) {
				cce = "error sending h2 preface";
				goto bail3;
			}

			break;
		}
#endif
		lwsi_set_state(wsi, LRS_H1C_ISSUE_HANDSHAKE2);
		lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_CLIENT_HS_SEND,
				context->timeout_secs);

		/* fallthru */

	case LRS_H1C_ISSUE_HANDSHAKE2:
		p = lws_generate_client_handshake(wsi, p);
		if (p == NULL) {
			if (wsi->role_ops == &role_ops_raw_skt ||
			    wsi->role_ops == &role_ops_raw_file)
				return 0;

			lwsl_err("Failed to generate handshake for client\n");
			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "chs");
			return 0;
		}

		/* send our request to the server */
		lws_latency_pre(context, wsi);

		w = _lws_client_wsi_master(wsi);
		lwsl_info("%s: HANDSHAKE2: %p: sending headers on %p (wsistate 0x%x 0x%x)\n",
				__func__, wsi, w, wsi->wsistate, w->wsistate);

		n = lws_ssl_capable_write(w, (unsigned char *)sb, (int)(p - sb));
		lws_latency(context, wsi, "send lws_issue_raw", n,
			    n == p - sb);
		switch (n) {
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_debug("ERROR writing to client socket\n");
			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "cws");
			return 0;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lws_callback_on_writable(wsi);
			break;
		}

		if (wsi->client_http_body_pending) {
			lwsi_set_state(wsi, LRS_ISSUE_HTTP_BODY);
			lws_set_timeout(wsi,
					PENDING_TIMEOUT_CLIENT_ISSUE_PAYLOAD,
					context->timeout_secs);
			/* user code must ask for writable callback */
			break;
		}

		lwsi_set_state(wsi, LRS_WAITING_SERVER_REPLY);
		wsi->hdr_parsing_completed = 0;

		if (lwsi_state(w) == LRS_IDLING) {
			lwsi_set_state(w, LRS_WAITING_SERVER_REPLY);
			w->hdr_parsing_completed = 0;
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
			w->http.ah->parser_state = WSI_TOKEN_NAME_PART;
			w->http.ah->lextable_pos = 0;
			/* If we're (re)starting on headers, need other implied init */
			wsi->http.ah->ues = URIES_IDLE;
#endif
		}

		lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_SERVER_RESPONSE,
				wsi->context->timeout_secs);

		lws_callback_on_writable(w);

		goto client_http_body_sent;

	case LRS_ISSUE_HTTP_BODY:
		if (wsi->client_http_body_pending) {
			//lws_set_timeout(wsi,
			//		PENDING_TIMEOUT_CLIENT_ISSUE_PAYLOAD,
			//		context->timeout_secs);
			/* user code must ask for writable callback */
			break;
		}
client_http_body_sent:
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
		/* prepare ourselves to do the parsing */
		wsi->http.ah->parser_state = WSI_TOKEN_NAME_PART;
		wsi->http.ah->lextable_pos = 0;
#endif
		lwsi_set_state(wsi, LRS_WAITING_SERVER_REPLY);
		lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_SERVER_RESPONSE,
				context->timeout_secs);
		break;

	case LRS_WAITING_SERVER_REPLY:
		/*
		 * handle server hanging up on us...
		 * but if there is POLLIN waiting, handle that first
		 */
		if ((pollfd->revents & (LWS_POLLIN | LWS_POLLHUP)) ==
								LWS_POLLHUP) {

			lwsl_debug("Server connection %p (fd=%d) dead\n",
				(void *)wsi, pollfd->fd);
			cce = "Peer hung up";
			goto bail3;
		}

		if (!(pollfd->revents & LWS_POLLIN))
			break;

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
		/* interpret the server response
		 *
		 *  HTTP/1.1 101 Switching Protocols
		 *  Upgrade: websocket
		 *  Connection: Upgrade
		 *  Sec-WebSocket-Accept: me89jWimTRKTWwrS3aRrL53YZSo=
		 *  Sec-WebSocket-Nonce: AQIDBAUGBwgJCgsMDQ4PEC==
		 *  Sec-WebSocket-Protocol: chat
		 *
		 * we have to take some care here to only take from the
		 * socket bytewise.  The browser may (and has been seen to
		 * in the case that onopen() performs websocket traffic)
		 * coalesce both handshake response and websocket traffic
		 * in one packet, since at that point the connection is
		 * definitively ready from the browser's point of view.
		 */
		len = 1;
		while (wsi->http.ah->parser_state != WSI_PARSING_COMPLETE &&
		       len > 0) {
			int plen = 1;

			n = lws_ssl_capable_read(wsi, &c, 1);
			lws_latency(context, wsi, "send lws_issue_raw", n,
				    n == 1);
			switch (n) {
			case 0:
			case LWS_SSL_CAPABLE_ERROR:
				cce = "read failed";
				goto bail3;
			case LWS_SSL_CAPABLE_MORE_SERVICE:
				return 0;
			}

			if (lws_parse(wsi, &c, &plen)) {
				lwsl_warn("problems parsing header\n");
				goto bail3;
			}
		}

		/*
		 * the handshake may also arrive in multiple packets; a 5s
		 * lws timeout is still active here too, so if parsing did
		 * not complete, just wait for the next packet arriving in
		 * this state
		 */
		if (wsi->http.ah->parser_state != WSI_PARSING_COMPLETE)
			break;

#endif

		/*
		 * otherwise deal with the handshake.  If there's any
		 * packet traffic already arrived we'll trigger poll() again
		 * right away and deal with it that way
		 */
		return lws_client_interpret_server_handshake(wsi);

bail3:
		lwsl_info("closing conn at LWS_CONNMODE...SERVER_REPLY\n");
		if (cce)
			lwsl_info("reason: %s\n", cce);
		wsi->protocol->callback(wsi,
			LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
			wsi->user_space, (void *)cce, cce ? strlen(cce) : 0);
		wsi->already_did_cce = 1;
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "cbail3");
		return -1;

	default:
		break;
	}

	return 0;
}
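
The state machine above services a wsi created by the client connect API; a
minimal sketch of kicking off such a connection, assuming the lws v3-era API
this example uses (the address, port, path and protocol name below are
hypothetical placeholders):

#include <libwebsockets.h>
#include <string.h>

static struct lws *
start_client(struct lws_context *context)
{
	struct lws_client_connect_info i;

	memset(&i, 0, sizeof(i));
	i.context	 = context;
	i.address	 = "example.com";	/* hypothetical peer */
	i.port		 = 443;
	i.path		 = "/";
	i.host		 = i.address;
	i.origin	 = i.address;
	i.protocol	 = "my-protocol";	/* hypothetical subprotocol */
	i.ssl_connection = LCCSCF_USE_SSL;

	/*
	 * on success the new wsi starts in LRS_WAITING_CONNECT and
	 * lws_client_socket_service() above drives it from the event
	 * loop; returns NULL on immediate failure
	 */
	return lws_client_connect_via_info(&i);
}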
Example 3
LWS_VISIBLE int
libwebsocket_service_fd(struct libwebsocket_context *context,
							  struct libwebsocket_pollfd *pollfd)
{
	struct libwebsocket *wsi;
	int n;
	int m;
	int listen_socket_fds_index = 0;
	time_t now;
	int timed_out = 0;
	int our_fd = 0;
	char draining_flow = 0;
	int more;
	struct lws_tokens eff_buf;

	if (context->listen_service_fd)
		listen_socket_fds_index = context->lws_lookup[
			     context->listen_service_fd]->position_in_fds_table;

	/*
	 * you can call us with pollfd = NULL to just allow the
	 * once-per-second global timeout checks; if less than a second
	 * has passed since the last check, it returns immediately.
	 */

	time(&now);

	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

		lws_plat_service_periodic(context);

		/* global timeout check once per second */

		if (pollfd)
			our_fd = pollfd->fd;

		for (n = 0; n < context->fds_count; n++) {
			m = context->fds[n].fd;
			wsi = context->lws_lookup[m];
			if (!wsi)
				continue;

			if (libwebsocket_service_timeout_check(context, wsi, now))
				/* it did time out... */
				if (m == our_fd) {
					/* the socket we came to service */
					timed_out = 1;
					/* mark as handled */
					if (pollfd)
						pollfd->revents = 0;
				}
		}
	}

	/* the socket we came to service timed out, nothing to do */
	if (timed_out)
		return 0;

	/* just here for timeout management? */
	if (pollfd == NULL)
		return 0;

	/* no, here to service a socket descriptor */
	wsi = context->lws_lookup[pollfd->fd];
	if (wsi == NULL)
		/* not lws connection ... leave revents alone and return */
		return 0;

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

	/*
	 * deal with listen service piggybacking
	 * every listen_service_modulo services of other fds, we
	 * sneak one in to service the listen socket if there's anything waiting
	 *
	 * To handle connection storms, as found in ab, if we previously saw a
	 * pending connection here, it causes us to check again next time.
	 */

	if (context->listen_service_fd && pollfd !=
				       &context->fds[listen_socket_fds_index]) {
		context->listen_service_count++;
		if (context->listen_service_extraseen ||
				context->listen_service_count ==
					       context->listen_service_modulo) {
			context->listen_service_count = 0;
			m = 1;
			if (context->listen_service_extraseen > 5)
				m = 2;
			while (m--) {
				/*
				 * even with extpoll, we prepared this
				 * internal fds for listen
				 */
				n = lws_poll_listen_fd(&context->fds[listen_socket_fds_index]);
				if (n > 0) { /* there's a conn waiting for us */
					libwebsocket_service_fd(context,
						&context->
						  fds[listen_socket_fds_index]);
					context->listen_service_extraseen++;
				} else {
					if (context->listen_service_extraseen)
						context->
						     listen_service_extraseen--;
					break;
				}
			}
		}

	}

	/* handle session socket closed */

	if ((!(pollfd->revents & LWS_POLLIN)) &&
			(pollfd->revents & LWS_POLLHUP)) {

		lwsl_debug("Session Socket %p (fd=%d) dead\n",
						       (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWS_CONNMODE_HTTP_SERVING:
	case LWS_CONNMODE_HTTP_SERVING_ACCEPTED:
	case LWS_CONNMODE_SERVER_LISTENER:
	case LWS_CONNMODE_SSL_ACK_PENDING:
		n = lws_server_socket_service(context, wsi, pollfd);
		if (n < 0)
			goto close_and_handled;
		goto handled;

	case LWS_CONNMODE_WS_SERVING:
	case LWS_CONNMODE_WS_CLIENT:
	case LWS_CONNMODE_HTTP2_SERVING:

		/* the connection requested a callback when it was OK to write */

		if ((pollfd->revents & LWS_POLLOUT) &&
		    (wsi->state == WSI_STATE_ESTABLISHED ||
		     wsi->state == WSI_STATE_HTTP2_ESTABLISHED ||
		     wsi->state == WSI_STATE_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		     wsi->state == WSI_STATE_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
		    lws_handle_POLLOUT_event(context, wsi, pollfd)) {
			lwsl_info("libwebsocket_service_fd: closing\n");
			goto close_and_handled;
		}

		if (wsi->rxflow_buffer &&
			      (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
			lwsl_info("draining rxflow\n");
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
						wsi->rxflow_pos;
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
			draining_flow = 1;
			goto drain;
		}

		/* any incoming data ready? */

		if (!(pollfd->revents & LWS_POLLIN))
			break;

		eff_buf.token_len = lws_ssl_capable_read(context, wsi,
				context->service_buffer,
					       sizeof(context->service_buffer));
		switch (eff_buf.token_len) {
		case 0:
			lwsl_info("service_fd: closing due to 0 length read\n");
			goto close_and_handled;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lwsl_info("SSL Capable more service\n");
			n = 0;
			goto handled;
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_info("Closing when error\n");
			goto close_and_handled;
		}

		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens
		 * struct prepared with the default buffer and the content
		 * length that's in there.  Rather than rewrite the default
		 * buffer, extensions that expect to grow the buffer can
		 * adapt .token to point to their own per-connection buffer
		 * in the extension user allocation.  By default, with no
		 * extensions or no extension callback handling, the normal
		 * input buffer is used, so it is efficient.
		 */

		eff_buf.token = (char *)context->service_buffer;
drain:

		do {
			more = 0;

			m = lws_ext_callback_for_each_active(wsi,
				LWS_EXT_CALLBACK_PACKET_RX_PREPARSE, &eff_buf, 0);
			if (m < 0)
				goto close_and_handled;
			if (m)
				more = 1;

			/* service incoming data */

			if (eff_buf.token_len) {
				n = libwebsocket_read(context, wsi,
					(unsigned char *)eff_buf.token,
							    eff_buf.token_len);
				if (n < 0) {
					/* we closed wsi */
					n = 0;
					goto handled;
				}
			}

			eff_buf.token = NULL;
			eff_buf.token_len = 0;
		} while (more);

		if (draining_flow && wsi->rxflow_buffer &&
				 wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free2(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
			n = _libwebsocket_rx_flow_control(wsi);
			/* n ignored; assignment needed for NO_SERVER case */
		}

		break;

	default:
#ifdef LWS_NO_CLIENT
		break;
#else
		n = lws_client_socket_service(context, wsi, pollfd);
		goto handled;
#endif
	}

	n = 0;
	goto handled;

close_and_handled:
	lwsl_debug("Close and handled\n");
	libwebsocket_close_and_free_session(context, wsi,
						LWS_CLOSE_STATUS_NOSTATUS);
	n = 1;

handled:
	pollfd->revents = 0;
	return n;
}
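
Example 3 uses the legacy libwebsocket_-prefixed API (pre-v1.6 naming). A
minimal sketch of the service loop that dispatches hot descriptors into
libwebsocket_service_fd() above, assuming a context already created with
libwebsocket_create_context():

static void
legacy_loop(struct libwebsocket_context *context)
{
	int n = 0;

	/*
	 * libwebsocket_service() polls internally with a 50ms timeout
	 * and hands each hot fd to libwebsocket_service_fd()
	 */
	while (n >= 0)
		n = libwebsocket_service(context, 50);
}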
Example 4
static int
rops_handle_POLLIN_raw_proxy(struct lws_context_per_thread *pt, struct lws *wsi,
			     struct lws_pollfd *pollfd)
{
	struct lws_tokens ebuf;
	int n, buffered;

	/* pending truncated sends have uber priority */

	if (lws_has_buffered_out(wsi)) {
		if (!(pollfd->revents & LWS_POLLOUT))
			return LWS_HPI_RET_HANDLED;

		/* drain the output buflist */
		if (lws_issue_raw(wsi, NULL, 0) < 0)
			goto fail;
		/*
		 * we can't afford to allow input processing to send
		 * something new, so spin around the event loop until
		 * there are no more partials
		 */
		return LWS_HPI_RET_HANDLED;
	}

	if ((pollfd->revents & pollfd->events & LWS_POLLIN) &&
	    /* any tunnel has to have been established... */
	    lwsi_state(wsi) != LRS_SSL_ACK_PENDING &&
	    !(wsi->favoured_pollin &&
	      (pollfd->revents & pollfd->events & LWS_POLLOUT))) {

		buffered = lws_buflist_aware_read(pt, wsi, &ebuf);
		switch (ebuf.len) {
		case 0:
			lwsl_info("%s: read 0 len\n", __func__);
			wsi->seen_zero_length_recv = 1;
			lws_change_pollfd(wsi, LWS_POLLIN, 0);

			/*
			 * we need to go to fail here, since it's the only
			 * chance we get to understand that the socket has
			 * closed
			 */
			// goto try_pollout;
			goto fail;

		case LWS_SSL_CAPABLE_ERROR:
			goto fail;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			goto try_pollout;
		}
		n = user_callback_handle_rxflow(wsi->protocol->callback,
						wsi, lwsi_role_client(wsi) ?
						 LWS_CALLBACK_RAW_PROXY_CLI_RX :
						 LWS_CALLBACK_RAW_PROXY_SRV_RX,
						wsi->user_space, ebuf.token,
						ebuf.len);
		if (n < 0) {
			lwsl_info("LWS_CALLBACK_RAW_PROXY_*_RX fail\n");
			goto fail;
		}

		if (lws_buflist_aware_consume(wsi, &ebuf, ebuf.len, buffered))
			return LWS_HPI_RET_PLEASE_CLOSE_ME;
	} else
		if (wsi->favoured_pollin &&
		    (pollfd->revents & pollfd->events & LWS_POLLOUT))
			/* we balanced the last favouring of pollin */
			wsi->favoured_pollin = 0;

try_pollout:

	if (!(pollfd->revents & LWS_POLLOUT))
		return LWS_HPI_RET_HANDLED;

	if (lws_handle_POLLOUT_event(wsi, pollfd)) {
		lwsl_debug("POLLOUT event closed it\n");
		return LWS_HPI_RET_PLEASE_CLOSE_ME;
	}

#if !defined(LWS_NO_CLIENT)
	if (lws_client_socket_service(wsi, pollfd, NULL))
		return LWS_HPI_RET_WSI_ALREADY_DIED;
#endif

	return LWS_HPI_RET_HANDLED;

fail:
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "raw svc fail");

	return LWS_HPI_RET_WSI_ALREADY_DIED;
}
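
A hedged sketch of how a caller might act on the LWS_HPI_RET_* codes this
handler returns; the real dispatch lives in the lws core service code, so
the helper below is illustrative only:

static int
dispatch_pollin(struct lws_context_per_thread *pt, struct lws *wsi,
		struct lws_pollfd *pollfd)
{
	switch (rops_handle_POLLIN_raw_proxy(pt, wsi, pollfd)) {
	case LWS_HPI_RET_WSI_ALREADY_DIED:
		/* the wsi was freed inside the handler: don't touch it */
		return 1;
	case LWS_HPI_RET_PLEASE_CLOSE_ME:
		/* the handler wants the caller to close the wsi for it */
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "pollin close");
		return 1;
	case LWS_HPI_RET_HANDLED:
	default:
		/* serviced: safe to clear revents */
		pollfd->revents = 0;
		return 0;
	}
}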