Example #1: lws_interpret_incoming_packet()
int
lws_interpret_incoming_packet(struct lws *wsi, unsigned char **buf, size_t len)
{
	int m;

	lwsl_parser("%s: received %d byte packet\n", __func__, (int)len);
#if 0
	lwsl_hexdump(*buf, len);
#endif

	/* let the rx protocol state machine have as much as it needs */

	while (len) {
		/*
		 * we were accepting input but now we stopped doing so
		 */
		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
			lws_rxflow_cache(wsi, *buf, 0, len);
			lwsl_parser("%s: cached %d\n", __func__, len);
			return 1;
		}

		if (wsi->u.ws.rx_draining_ext) {
			m = lws_rx_sm(wsi, 0);
			if (m < 0)
				return -1;
			continue;
		}

		/* account for what we're using in rxflow buffer */
		if (wsi->rxflow_buffer)
			wsi->rxflow_pos++;

		/* consume payload bytes efficiently */
		if (wsi->lws_rx_parse_state ==
		    LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED)
			lws_payload_until_length_exhausted(wsi, buf, &len);

		/* process the byte */
		m = lws_rx_sm(wsi, *(*buf)++);
		if (m < 0)
			return -1;
		len--;
	}

	lwsl_parser("%s: exit with %d unused\n", __func__, (int)len);

	return 0;
}
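
The loop above is easier to see in isolation: a received buffer is fed one byte at a time into a per-connection state machine, and as soon as flow control forbids further input the unconsumed tail is parked for a later service pass. The sketch below reproduces just that pattern; struct conn, rx_state_machine() and interpret_incoming() are illustrative placeholders, not lws API.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct conn {
	int rx_allowed;            /* plays the role of LWS_RXFLOW_ALLOW */
	unsigned char cache[256];  /* plays the role of wsi->rxflow_buffer */
	size_t cache_len;
};

/* stands in for lws_rx_sm(): consume one byte, return < 0 on fatal error */
static int rx_state_machine(struct conn *c, unsigned char byte)
{
	(void)c;
	return putchar(byte) == EOF ? -1 : 0;
}

/* 0: all input consumed, 1: tail cached (flow controlled), -1: fatal */
static int interpret_incoming(struct conn *c, unsigned char **buf, size_t len)
{
	while (len) {
		if (!c->rx_allowed) {
			/* flow-controlled: park whatever is left for later */
			c->cache_len = len < sizeof(c->cache) ?
						len : sizeof(c->cache);
			memcpy(c->cache, *buf, c->cache_len);
			return 1;
		}
		/* hand one byte to the state machine, advancing the
		 * caller's pointer so it can see how far we got */
		if (rx_state_machine(c, *(*buf)++) < 0)
			return -1;
		len--;
	}
	return 0;
}

int main(void)
{
	struct conn c = { .rx_allowed = 1 };
	unsigned char msg[] = "hello\n", *p = msg;

	return interpret_incoming(&c, &p, sizeof(msg) - 1) ? EXIT_FAILURE :
							      EXIT_SUCCESS;
}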
Example #2: lws_interpret_incoming_packet()
int lws_interpret_incoming_packet(struct lws *wsi, unsigned char *buf,
				  size_t len)
{
	size_t n = 0;
	int m;

#if 0
	lwsl_parser("received %d byte packet\n", (int)len);
	lwsl_hexdump(buf, len);
#endif

	/* let the rx protocol state machine have as much as it needs */

	while (n < len) {
		/*
		 * we were accepting input but now we stopped doing so
		 */
		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
			lws_rxflow_cache(wsi, buf, n, len);

			return 1;
		}

		/* account for what we're using in rxflow buffer */
		if (wsi->rxflow_buffer)
			wsi->rxflow_pos++;

		/* process the byte */
		m = lws_rx_sm(wsi, buf[n++]);
		if (m < 0)
			return -1;
	}

	return 0;
}
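
Compared with Example #1, this older variant takes a plain unsigned char *buf and tracks consumption with the local index n, handing n to lws_rxflow_cache() when flow control stops the parse; the pointer-to-pointer form in Example #1 instead advances *buf in place, so the caller can also see exactly where parsing stopped.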
Example #3: lws_handshake_client()
int
lws_handshake_client(struct lws *wsi, unsigned char **buf, size_t len)
{
	int m;

	switch (wsi->mode) {
	case LWSCM_WSCL_WAITING_PROXY_REPLY:
	case LWSCM_WSCL_ISSUE_HANDSHAKE:
	case LWSCM_WSCL_WAITING_SERVER_REPLY:
	case LWSCM_WSCL_WAITING_EXTENSION_CONNECT:
	case LWSCM_WS_CLIENT:
		while (len) {
			/*
			 * we were accepting input but now we stopped doing so
			 */
			if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
				lwsl_debug("%s: caching %ld\n", __func__, (long)len);
				lws_rxflow_cache(wsi, *buf, 0, len);
				return 0;
			}
			if (wsi->u.ws.rx_draining_ext) {
#if !defined(LWS_NO_CLIENT)
				if (wsi->mode == LWSCM_WS_CLIENT)
					m = lws_client_rx_sm(wsi, 0);
				else
#endif
					m = lws_rx_sm(wsi, 0);
				if (m < 0)
					return -1;
				continue;
			}
			/* account for what we're using in rxflow buffer */
			if (wsi->rxflow_buffer)
				wsi->rxflow_pos++;

			if (lws_client_rx_sm(wsi, *(*buf)++)) {
				lwsl_debug("client_rx_sm exited\n");
				return -1;
			}
			len--;
		}
		lwsl_debug("%s: finished with %ld\n", __func__, (long)len);
		return 0;
	default:
		break;
	}

	return 0;
}
Example #4: lws_service_fd_tsi()
LWS_VISIBLE int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	lws_sockfd_type our_fd = 0, tmp_fd;
	struct lws_tokens eff_buf;
	unsigned int pending = 0;
	struct lws *wsi, *wsi1;
	char draining_flow = 0;
	int timed_out = 0;
	time_t now;
	int n, m;
	int more;

	/*
	 * you can call us with pollfd = NULL to just allow the once-per-second
	 * global timeout checks; if less than a second since the last check
	 * it returns immediately then.
	 */

	time(&now);

	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

		lws_plat_service_periodic(context);

		/* global timeout check once per second */

		if (pollfd)
			our_fd = pollfd->fd;

		wsi = context->pt[tsi].timeout_list;
		while (wsi) {
			/* we have to take copies, because he may be deleted */
			wsi1 = wsi->timeout_list;
			tmp_fd = wsi->sock;
			if (lws_service_timeout_check(wsi, (unsigned int)now)) {
				/* he did time out... */
				if (tmp_fd == our_fd)
					/* it was the guy we came to service! */
					timed_out = 1;
					/* he's gone, no need to mark as handled */
			}
			wsi = wsi1;
		}
#if 0
		{
			char s[300], *p = s;

			for (n = 0; n < context->count_threads; n++)
				p += sprintf(p, " %7lu (%5d), ",
					     context->pt[n].count_conns,
					     context->pt[n].fds_count);

			lwsl_notice("load: %s\n", s);
		}
#endif
	}

	/* the socket we came to service timed out, nothing to do */
	if (timed_out)
		return 0;

	/* just here for timeout management? */
	if (!pollfd)
		return 0;

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

#if LWS_POSIX

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
						       (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#endif

	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWSCM_HTTP_SERVING:
	case LWSCM_HTTP_SERVING_ACCEPTED:
	case LWSCM_SERVER_LISTENER:
	case LWSCM_SSL_ACK_PENDING:
		n = lws_server_socket_service(context, wsi, pollfd);
		if (n) /* closed by above */
			return 1;
		pending = lws_ssl_pending(wsi);
		if (pending)
			goto handle_pending;
		goto handled;

	case LWSCM_WS_SERVING:
	case LWSCM_WS_CLIENT:
	case LWSCM_HTTP2_SERVING:

		/* 1: something requested a callback when it was OK to write */

		if ((pollfd->revents & LWS_POLLOUT) &&
		    (wsi->state == LWSS_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		     wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		     wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
				wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			lwsl_info("lws_service_fd: closing\n");
			goto close_and_handled;
		}
#if 1
		if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		    wsi->state == LWSS_AWAITING_CLOSE_ACK) {
			/*
			 * we stopped caring about anything except control
			 * packets.  Force flow control off, defeat tx
			 * draining.
			 */
			lws_rx_flow_control(wsi, 1);
			wsi->u.ws.tx_draining_ext = 0;
		}
#endif
		if (wsi->u.ws.tx_draining_ext) {
			/* we cannot deal with new RX until the TX ext
			 * path has been drained.  It's because new
			 * rx will, eg, crap on the wsi rx buf that
			 * may be needed to retain state.
			 *
			 * TX ext drain path MUST go through event loop
			 * to avoid blocking.
			 */
			break;
		}

		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
			/* We cannot deal with any kind of new RX
			 * because we are RX-flowcontrolled.
			 */
			break;

		/* 2: RX Extension needs to be drained
		 */

		if (wsi->state == LWSS_ESTABLISHED &&
		    wsi->u.ws.rx_draining_ext) {

			lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
#ifndef LWS_NO_CLIENT
			if (wsi->mode == LWSCM_WS_CLIENT) {
				n = lws_client_rx_sm(wsi, 0);
				if (n < 0)
					/* we closed wsi */
					n = 0;
			} else
#endif
				n = lws_rx_sm(wsi, 0);

			goto handled;
		}

		if (wsi->u.ws.rx_draining_ext)
			/*
			 * We have RX EXT content to drain, but can't do it
			 * right now.  That means we cannot do anything lower
			 * priority either.
			 */
			break;

		/* 3: RX Flowcontrol buffer needs to be drained
		 */

		if (wsi->rxflow_buffer) {
			lwsl_info("draining rxflow (len %d)\n",
				wsi->rxflow_len - wsi->rxflow_pos
			);
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
						wsi->rxflow_pos;
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
			draining_flow = 1;
			goto drain;
		}

		/* 4: any incoming data ready?
		 * notice if rx flow going off raced poll(), rx flow wins
		 */
		if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
			break;
read:
		eff_buf.token_len = lws_ssl_capable_read(wsi, pt->serv_buf,
					pending ? pending : LWS_MAX_SOCKET_IO_BUF);
		switch (eff_buf.token_len) {
		case 0:
			lwsl_info("service_fd: closing due to 0 length read\n");
			goto close_and_handled;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lwsl_info("SSL Capable more service\n");
			n = 0;
			goto handled;
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_info("Closing when error\n");
			goto close_and_handled;
		}

		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens struct
		 * prepared with the default buffer and content length that's in
		 * there.  Rather than rewrite the default buffer, extensions
		 * that expect to grow the buffer can adapt .token to
		 * point to their own per-connection buffer in the extension
		 * user allocation.  By default with no extensions or no
		 * extension callback handling, just the normal input buffer is
		 * used then so it is efficient.
		 */

		eff_buf.token = (char *)pt->serv_buf;
drain:

		do {
			more = 0;

			m = lws_ext_cb_active(wsi,
				LWS_EXT_CB_PACKET_RX_PREPARSE, &eff_buf, 0);
			if (m < 0)
				goto close_and_handled;
			if (m)
				more = 1;

			/* service incoming data */

			if (eff_buf.token_len) {
				/*
				 * if draining from rxflow buffer, not
				 * critical to track what was used since at the
				 * use it bumps wsi->rxflow_pos.  If we come
				 * around again it will pick up from where it
				 * left off.
				 */
				n = lws_read(wsi, (unsigned char *)eff_buf.token,
					     eff_buf.token_len);
				if (n < 0) {
					/* we closed wsi */
					n = 0;
					goto handled;
				}
			}

			eff_buf.token = NULL;
			eff_buf.token_len = 0;
		} while (more);

		pending = lws_ssl_pending(wsi);
		if (pending) {
handle_pending:
			pending = pending > LWS_MAX_SOCKET_IO_BUF ?
					LWS_MAX_SOCKET_IO_BUF : pending;
			goto read;
		}

		if (draining_flow && wsi->rxflow_buffer &&
				 wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free_set_NULL(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
			n =
#endif
			_lws_rx_flow_control(wsi);
			/* n ignored, needed for NO_SERVER case */
		}

		break;

	default:
#ifdef LWS_NO_CLIENT
		break;
#else
		n = lws_client_socket_service(context, wsi, pollfd);
		if (n)
			return 1;
		goto handled;
#endif
	}

	n = 0;
	goto handled;

close_and_handled:
	lwsl_debug("Close and handled\n");
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
	/*
	 * pollfd may point to something else after the close
	 * due to pollfd swapping scheme on delete on some platforms
	 * we can't clear revents now because it'd be the wrong guy's revents
	 */
	return 1;

handled:
	pollfd->revents = 0;
	return n;
}
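
As the opening comment says, this function may also be called with pollfd = NULL purely for its once-per-second timeout checks. Below is a minimal sketch of driving it from an application-owned poll() loop (the external-poll pattern); it assumes the public lws_service_fd() wrapper, a context created elsewhere, and a pollfds[] array that the application keeps in sync through the LWS_CALLBACK_ADD_POLL_FD / LWS_CALLBACK_DEL_POLL_FD / LWS_CALLBACK_CHANGE_MODE_POLL_FD callbacks (not shown). On POSIX builds struct lws_pollfd is struct pollfd, so the array can be handed to poll() directly.

#include <poll.h>
#include <libwebsockets.h>

/* sketch only: the context and the pollfd array are owned by the caller */
void
service_once(struct lws_context *context, struct lws_pollfd *pollfds,
	     int count_pollfds)
{
	int n, i;

	n = poll(pollfds, count_pollfds, 1000);
	if (n < 0)
		return;

	if (!n) {
		/* nothing ready: a NULL pollfd still lets the library run
		 * its once-per-second global timeout checks */
		lws_service_fd(context, NULL);
		return;
	}

	for (i = 0; i < count_pollfds; i++)
		if (pollfds[i].revents)
			/* the library zeroes revents once it has handled an
			 * fd it owns; foreign fds are left untouched */
			lws_service_fd(context, &pollfds[i]);
}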