Example #1
void lws_set_protocol_write_pending(struct lws_context *context,
                    struct lws *wsi,
                    enum lws_pending_protocol_send pend)
{
    lwsl_info("setting pps %d\n", pend);
    
    if (wsi->pps)
        lwsl_err("pps overwrite\n");
    wsi->pps = pend;
    lws_rx_flow_control(wsi, 0);
    lws_callback_on_writable(context, wsi);
}
Example #2
LWS_VISIBLE void
lws_rx_flow_allow_all_protocol(const struct lws_protocols *protocol)
{
    struct lws_context *context = protocol->owning_server;
    int n;
    struct lws *wsi;

    for (n = 0; n < context->fds_count; n++) {
        wsi = wsi_from_fd(context, context->fds[n].fd);
        if (!wsi)
            continue;
        if (wsi->protocol == protocol)
            lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
    }
}
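The two functions above are the throttle/unthrottle pair: lws_rx_flow_control(wsi, 0) stops POLLIN on one connection, and lws_rx_flow_allow_all_protocol() re-enables RX for every connection bound to a protocol. Note that this example shows an older, single-argument form (using protocol->owning_server); current releases take (context, protocol), as the later examples here do. Below is a minimal sketch of how a protocol callback might drive the pair; the "backlog" counter is purely illustrative and stands in for a real per-session queue.

#include <libwebsockets.h>

/* Minimal sketch, not library code: throttle RX on one connection when a
 * backlog builds up, resume RX for the whole protocol once it drains. */
static int
callback_throttled(struct lws *wsi, enum lws_callback_reasons reason,
		   void *user, void *in, size_t len)
{
	static int backlog;		/* illustrative only */

	(void)user; (void)in; (void)len;

	switch (reason) {
	case LWS_CALLBACK_RECEIVE:
		backlog++;
		if (backlog > 16)
			/* stop further POLLIN on this connection */
			lws_rx_flow_control(wsi, 0);
		lws_callback_on_writable(wsi);
		break;

	case LWS_CALLBACK_SERVER_WRITEABLE:
		if (backlog)
			backlog--;
		if (backlog < 8)
			/* resume RX for every connection on this protocol */
			lws_rx_flow_allow_all_protocol(lws_get_context(wsi),
						       lws_get_protocol(wsi));
		break;

	default:
		break;
	}

	return 0;
}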
Example #3
LWS_VISIBLE int
lws_callback_http_dummy(struct lws *wsi, enum lws_callback_reasons reason,
			void *user, void *in, size_t len)
{
	struct lws_ssl_info *si;
#ifdef LWS_WITH_CGI
	struct lws_cgi_args *args;
#endif
#if defined(LWS_WITH_CGI) || defined(LWS_WITH_HTTP_PROXY)
	char buf[8192];
	int n;
#endif
#if defined(LWS_WITH_HTTP_PROXY)
	unsigned char **p, *end;
	struct lws *parent;
#endif

	switch (reason) {
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	case LWS_CALLBACK_HTTP:
#ifndef LWS_NO_SERVER
		if (lws_return_http_status(wsi, HTTP_STATUS_NOT_FOUND, NULL))
			return -1;

		if (lws_http_transaction_completed(wsi))
#endif
			return -1;
		break;
#if !defined(LWS_NO_SERVER)
	case LWS_CALLBACK_HTTP_BODY_COMPLETION:
	case LWS_CALLBACK_HTTP_FILE_COMPLETION:
		if (lws_http_transaction_completed(wsi))
			return -1;
		break;
#endif

	case LWS_CALLBACK_HTTP_WRITEABLE:
#ifdef LWS_WITH_CGI
		if (wsi->reason_bf & (LWS_CB_REASON_AUX_BF__CGI_HEADERS |
				      LWS_CB_REASON_AUX_BF__CGI)) {
			n = lws_cgi_write_split_stdout_headers(wsi);
			if (n < 0) {
				lwsl_debug("AUX_BF__CGI forcing close\n");
				return -1;
			}
			if (!n)
				lws_rx_flow_control(
					wsi->http.cgi->stdwsi[LWS_STDOUT], 1);

			if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_HEADERS)
				wsi->reason_bf &=
					~LWS_CB_REASON_AUX_BF__CGI_HEADERS;
			else
				wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__CGI;

			if (wsi->http.cgi && wsi->http.cgi->cgi_transaction_over)
				return -1;
			break;
		}

		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_CHUNK_END) {
			if (!wsi->http2_substream) {
				memcpy(buf + LWS_PRE, "0\x0d\x0a\x0d\x0a", 5);
				lwsl_debug("writing chunk term and exiting\n");
				n = lws_write(wsi, (unsigned char *)buf +
						   LWS_PRE, 5, LWS_WRITE_HTTP);
			} else
				n = lws_write(wsi, (unsigned char *)buf +
						   LWS_PRE, 0,
						   LWS_WRITE_HTTP_FINAL);

			/* always close after sending it */
			return -1;
		}
#endif
#if defined(LWS_WITH_HTTP_PROXY)

		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY_HEADERS) {

			wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__PROXY_HEADERS;

			lwsl_debug("%s: %p: issuing proxy headers\n",
				    __func__, wsi);
			n = lws_write(wsi, wsi->http.pending_return_headers +
					   LWS_PRE,
				      wsi->http.pending_return_headers_len,
				      LWS_WRITE_HTTP_HEADERS);

			lws_free_set_NULL(wsi->http.pending_return_headers);

			if (n < 0) {
				lwsl_err("%s: EST_CLIENT_HTTP: write failed\n",
					 __func__);
				return -1;
			}
			lws_callback_on_writable(wsi);
			break;
		}

		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY) {
			char *px = buf + LWS_PRE;
			int lenx = sizeof(buf) - LWS_PRE - 32;

			/*
			 * our sink is writeable and our source has something
			 * to read.  So read a lump of source material of
			 * suitable size to send or what's available, whichever
			 * is the smaller.
			 */
			wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__PROXY;
			if (!lws_get_child(wsi))
				break;

			/* this causes LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ */
			if (lws_http_client_read(lws_get_child(wsi), &px,
						 &lenx) < 0) {
				lwsl_info("%s: LWS_CB_REASON_AUX_BF__PROXY: "
					   "client closed\n", __func__);

				stream_close(wsi);

				return -1;
			}
			break;
		}

		if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY_TRANS_END) {
			lwsl_info("%s: LWS_CB_REASON_AUX_BF__PROXY_TRANS_END\n",
				   __func__);

			wsi->reason_bf &= ~LWS_CB_REASON_AUX_BF__PROXY_TRANS_END;

			if (stream_close(wsi))
				return -1;

			if (lws_http_transaction_completed(wsi))
				return -1;
		}
#endif
		break;

#if defined(LWS_WITH_HTTP_PROXY)
	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP:
		assert(lws_get_parent(wsi));
		if (!lws_get_parent(wsi))
			break;
		lws_get_parent(wsi)->reason_bf |= LWS_CB_REASON_AUX_BF__PROXY;
		lws_callback_on_writable(lws_get_parent(wsi));
		break;

	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ: {
		char *out = buf + LWS_PRE;

		assert(lws_get_parent(wsi));

		if (wsi->http.proxy_parent_chunked) {

			if (len > sizeof(buf) - LWS_PRE - 16) {
				lwsl_err("oversize buf %d %d\n", (int)len,
						(int)sizeof(buf) - LWS_PRE - 16);
				return -1;
			}

			/*
			 * this only needs dealing with on http/1.1 to allow
			 * pipelining
			 */
			n = lws_snprintf(out, 14, "%X\x0d\x0a", (int)len);
			out += n;
			memcpy(out, in, len);
			out += len;
			*out++ = '\x0d';
			*out++ = '\x0a';

			n = lws_write(lws_get_parent(wsi),
				      (unsigned char *)buf + LWS_PRE,
				      len + n + 2, LWS_WRITE_HTTP);
		} else
			n = lws_write(lws_get_parent(wsi), (unsigned char *)in,
				      len, LWS_WRITE_HTTP);
		if (n < 0)
			return -1;
		break; }


	/* this handles the proxy case... */
	case LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: {
		unsigned char *start, *p, *end;

		/*
		 * We want to proxy these headers, but we are being called
		 * at the point the onward client was established, which is
		 * unrelated to the state or writability of our proxy
		 * connection.
		 *
		 * Therefore produce the headers using the onward client ah
		 * while we have it, and stick them on the output buflist to be
		 * written on the proxy connection as soon as convenient.
		 */

		parent = lws_get_parent(wsi);

		if (!parent)
			return 0;

		start = p = (unsigned char *)buf + LWS_PRE;
		end = p + sizeof(buf) - LWS_PRE - 256;

		if (lws_add_http_header_status(lws_get_parent(wsi),
				lws_http_client_http_response(wsi), &p, end))
			return 1;

		/*
		 * copy these headers from the client connection to the parent
		 */

		proxy_header(parent, wsi, end, 256,
				WSI_TOKEN_HTTP_CONTENT_LENGTH, &p, end);
		proxy_header(parent, wsi, end, 256,
				WSI_TOKEN_HTTP_CONTENT_TYPE, &p, end);
		proxy_header(parent, wsi, end, 256,
				WSI_TOKEN_HTTP_ETAG, &p, end);
		proxy_header(parent, wsi, end, 256,
				WSI_TOKEN_HTTP_ACCEPT_LANGUAGE, &p, end);
		proxy_header(parent, wsi, end, 256,
				WSI_TOKEN_HTTP_CONTENT_ENCODING, &p, end);
		proxy_header(parent, wsi, end, 256,
				WSI_TOKEN_HTTP_CACHE_CONTROL, &p, end);

		if (!parent->http2_substream)
			if (lws_add_http_header_by_token(parent,
					WSI_TOKEN_CONNECTION,
					(unsigned char *)"close", 5, &p, end))
				return -1;

		/*
		 * We proxy using h1 only atm, and strip any chunking so it
		 * can go back out on h2 just fine.
		 *
		 * However if we are actually going out on h1, we need to add
		 * our own chunking since we still don't know the size.
		 */

		if (!parent->http2_substream &&
		    !lws_hdr_total_length(wsi, WSI_TOKEN_HTTP_CONTENT_LENGTH)) {
			lwsl_debug("downstream parent chunked\n");
			if (lws_add_http_header_by_token(parent,
					WSI_TOKEN_HTTP_TRANSFER_ENCODING,
					(unsigned char *)"chunked", 7, &p, end))
				return -1;

			wsi->http.proxy_parent_chunked = 1;
		}

		if (lws_finalize_http_header(parent, &p, end))
			return 1;

		parent->http.pending_return_headers_len =
					lws_ptr_diff(p, start);
		parent->http.pending_return_headers =
			lws_malloc(parent->http.pending_return_headers_len +
				    LWS_PRE, "return proxy headers");
		if (!parent->http.pending_return_headers)
			return -1;

		memcpy(parent->http.pending_return_headers + LWS_PRE, start,
		       parent->http.pending_return_headers_len);

		parent->reason_bf |= LWS_CB_REASON_AUX_BF__PROXY_HEADERS;

		lwsl_debug("%s: LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: "
			   "prepared headers\n", __func__);
		lws_callback_on_writable(parent);

		break; }

	case LWS_CALLBACK_COMPLETED_CLIENT_HTTP:
		lwsl_info("%s: COMPLETED_CLIENT_HTTP: %p (parent %p)\n",
					__func__, wsi, lws_get_parent(wsi));
		if (!lws_get_parent(wsi))
			break;
		lws_get_parent(wsi)->reason_bf |=
				LWS_CB_REASON_AUX_BF__PROXY_TRANS_END;
		lws_callback_on_writable(lws_get_parent(wsi));
		break;

	case LWS_CALLBACK_CLOSED_CLIENT_HTTP:
		if (!lws_get_parent(wsi))
			break;
		lwsl_err("%s: LWS_CALLBACK_CLOSED_CLIENT_HTTP\n", __func__);
		lws_set_timeout(lws_get_parent(wsi), LWS_TO_KILL_ASYNC,
				PENDING_TIMEOUT_KILLED_BY_PROXY_CLIENT_CLOSE);
		break;

	case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER:
		parent = lws_get_parent(wsi);

		if (!parent)
			break;

		p = (unsigned char **)in;
		end = (*p) + len;

		/*
		 * copy these headers from the parent request to the client
		 * connection's request
		 */

		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HOST, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_ETAG, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_IF_MODIFIED_SINCE, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_ACCEPT_LANGUAGE, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_ACCEPT_ENCODING, p, end);
		proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
				WSI_TOKEN_HTTP_CACHE_CONTROL, p, end);

		buf[0] = '\0';
		lws_get_peer_simple(parent, buf, sizeof(buf));
		if (lws_add_http_header_by_token(wsi, WSI_TOKEN_X_FORWARDED_FOR,
				(unsigned char *)buf, (int)strlen(buf), p, end))
			return -1;

		break;

#endif

#ifdef LWS_WITH_CGI
	/* CGI IO events (POLLIN/OUT) appear here, our default policy is:
	 *
	 *  - POST data goes on subprocess stdin
	 *  - subprocess stdout goes on http via writeable callback
	 *  - subprocess stderr goes to the logs
	 */
	case LWS_CALLBACK_CGI:
		args = (struct lws_cgi_args *)in;
		switch (args->ch) { /* which of stdin/out/err ? */
		case LWS_STDIN:
			/* TBD stdin rx flow control */
			break;
		case LWS_STDOUT:
			/* quench POLLIN on STDOUT until MASTER got writeable */
			lws_rx_flow_control(args->stdwsi[LWS_STDOUT], 0);
			wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI;
			/* when writing to MASTER would not block */
			lws_callback_on_writable(wsi);
			break;
		case LWS_STDERR:
			n = lws_get_socket_fd(args->stdwsi[LWS_STDERR]);
			if (n < 0)
				break;
			n = read(n, buf, sizeof(buf) - 2);
			if (n > 0) {
				if (buf[n - 1] != '\n')
					buf[n++] = '\n';
				buf[n] = '\0';
				lwsl_notice("CGI-stderr: %s\n", buf);
			}
			break;
		}
		break;

	case LWS_CALLBACK_CGI_TERMINATED:
		lwsl_debug("LWS_CALLBACK_CGI_TERMINATED: %d %" PRIu64 "\n",
				wsi->http.cgi->explicitly_chunked,
				(uint64_t)wsi->http.cgi->content_length);
		if (!wsi->http.cgi->explicitly_chunked &&
		    !wsi->http.cgi->content_length) {
			/* send terminating chunk */
			lwsl_debug("LWS_CALLBACK_CGI_TERMINATED: ending\n");
			wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI_CHUNK_END;
			lws_callback_on_writable(wsi);
			lws_set_timeout(wsi, PENDING_TIMEOUT_CGI, 3);
			break;
		}
		return -1;

	case LWS_CALLBACK_CGI_STDIN_DATA:  /* POST body for stdin */
		args = (struct lws_cgi_args *)in;
		args->data[args->len] = '\0';
		if (!args->stdwsi[LWS_STDIN])
			return -1;
		n = lws_get_socket_fd(args->stdwsi[LWS_STDIN]);
		if (n < 0)
			return -1;

#if defined(LWS_WITH_ZLIB)
		if (wsi->http.cgi->gzip_inflate) {
			/* gzip handling */

			if (!wsi->http.cgi->gzip_init) {
				lwsl_info("inflating gzip\n");

				memset(&wsi->http.cgi->inflate, 0,
				       sizeof(wsi->http.cgi->inflate));

				if (inflateInit2(&wsi->http.cgi->inflate,
						 16 + 15) != Z_OK) {
					lwsl_err("%s: iniflateInit failed\n",
						 __func__);
					return -1;
				}

				wsi->http.cgi->gzip_init = 1;
			}

			wsi->http.cgi->inflate.next_in = args->data;
			wsi->http.cgi->inflate.avail_in = args->len;

			do {

				wsi->http.cgi->inflate.next_out =
						wsi->http.cgi->inflate_buf;
				wsi->http.cgi->inflate.avail_out =
					sizeof(wsi->http.cgi->inflate_buf);

				n = inflate(&wsi->http.cgi->inflate,
					    Z_SYNC_FLUSH);

				switch (n) {
				case Z_NEED_DICT:
				case Z_STREAM_ERROR:
				case Z_DATA_ERROR:
				case Z_MEM_ERROR:
					inflateEnd(&wsi->http.cgi->inflate);
					wsi->http.cgi->gzip_init = 0;
					lwsl_err("zlib error inflate %d\n", n);
					return -1;
				}

				if (wsi->http.cgi->inflate.avail_out !=
					   sizeof(wsi->http.cgi->inflate_buf)) {
					int written;

					written = write(args->stdwsi[LWS_STDIN]->desc.filefd,
						wsi->http.cgi->inflate_buf,
						sizeof(wsi->http.cgi->inflate_buf) -
						wsi->http.cgi->inflate.avail_out);

					if (written != (int)(
						sizeof(wsi->http.cgi->inflate_buf) -
						wsi->http.cgi->inflate.avail_out)) {
						lwsl_notice("LWS_CALLBACK_CGI_STDIN_DATA: "
							"sent %d only %d went", n, args->len);
					}

					if (n == Z_STREAM_END) {
						lwsl_err("gzip inflate end\n");
						inflateEnd(&wsi->http.cgi->inflate);
						wsi->http.cgi->gzip_init = 0;
						break;
					}

				} else
					break;

				if (wsi->http.cgi->inflate.avail_out)
					break;

			} while (1);

			return args->len;
		}
#endif /* WITH_ZLIB */

		n = write(n, args->data, args->len);
//		lwsl_hexdump_notice(args->data, args->len);
		if (n < args->len)
			lwsl_notice("LWS_CALLBACK_CGI_STDIN_DATA: "
				    "sent %d only %d went", n, args->len);

		if (wsi->http.cgi->post_in_expected && args->stdwsi[LWS_STDIN] &&
		    args->stdwsi[LWS_STDIN]->desc.filefd > 0) {
			wsi->http.cgi->post_in_expected -= n;
			if (!wsi->http.cgi->post_in_expected) {
				struct lws *siwsi = args->stdwsi[LWS_STDIN];

				lwsl_debug("%s: expected POST in end: "
					   "closing stdin wsi %p, fd %d\n",
					   __func__, siwsi, siwsi->desc.sockfd);

				__remove_wsi_socket_from_fds(siwsi);
				lwsi_set_state(siwsi, LRS_DEAD_SOCKET);
				siwsi->socket_is_permanently_unusable = 1;
				lws_remove_child_from_any_parent(siwsi);
				if (wsi->context->event_loop_ops->
							close_handle_manually) {
					wsi->context->event_loop_ops->
						close_handle_manually(siwsi);
					siwsi->told_event_loop_closed = 1;
				} else {
					compatible_close(siwsi->desc.sockfd);
					__lws_free_wsi(siwsi);
				}
				wsi->http.cgi->pipe_fds[LWS_STDIN][1] = -1;

				args->stdwsi[LWS_STDIN] = NULL;
			}
		}

		return n;
#endif /* WITH_CGI */
#endif /* LWS_ROLE_H1 / LWS_ROLE_H2 */
	case LWS_CALLBACK_SSL_INFO:
		si = in;

		(void)si;
		lwsl_notice("LWS_CALLBACK_SSL_INFO: where: 0x%x, ret: 0x%x\n",
			    si->where, si->ret);
		break;

#if LWS_MAX_SMP > 1
	case LWS_CALLBACK_GET_THREAD_ID:
		return (int)(unsigned long long)pthread_self();
#endif

	default:
		break;
	}

	return 0;
}
Example #4
static int
callback_lws_mirror(struct lws *wsi, enum lws_callback_reasons reason,
		    void *user, void *in, size_t len)
{
	struct per_session_data__lws_mirror *pss =
			(struct per_session_data__lws_mirror *)user;
	struct per_vhost_data__lws_mirror *v =
			(struct per_vhost_data__lws_mirror *)
			lws_protocol_vh_priv_get(lws_get_vhost(wsi),
					lws_get_protocol(wsi));
	int n, m;

	switch (reason) {

	case LWS_CALLBACK_ESTABLISHED:
		lwsl_info("%s: LWS_CALLBACK_ESTABLISHED\n", __func__);
		pss->ringbuffer_tail = v->ringbuffer_head;
		pss->wsi = wsi;
		break;

	case LWS_CALLBACK_PROTOCOL_INIT: /* per vhost */
		lws_protocol_vh_priv_zalloc(lws_get_vhost(wsi),
				lws_get_protocol(wsi),
				sizeof(struct per_vhost_data__lws_mirror));
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY: /* per vhost */
		if (!v)
			break;
		lwsl_info("%s: mirror protocol cleaning up %p\n", __func__, v);
		for (n = 0; n < ARRAY_SIZE(v->ringbuffer); n++)
			if (v->ringbuffer[n].payload) {
				free(v->ringbuffer[n].payload);
				v->ringbuffer[n].payload = NULL;
			}
		break;

	case LWS_CALLBACK_SERVER_WRITEABLE:
		while (pss->ringbuffer_tail != v->ringbuffer_head) {
			m = v->ringbuffer[pss->ringbuffer_tail].len;
			n = lws_write(wsi, (unsigned char *)
				   v->ringbuffer[pss->ringbuffer_tail].payload +
				   LWS_PRE, m, LWS_WRITE_TEXT);
			if (n < 0) {
				lwsl_err("ERROR %d writing to mirror socket\n", n);
				return -1;
			}
			if (n < m)
				lwsl_err("mirror partial write %d vs %d\n", n, m);

			if (pss->ringbuffer_tail == (MAX_MESSAGE_QUEUE - 1))
				pss->ringbuffer_tail = 0;
			else
				pss->ringbuffer_tail++;

			if (((v->ringbuffer_head - pss->ringbuffer_tail) &
			    (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 15))
				lws_rx_flow_allow_all_protocol(lws_get_context(wsi),
					       lws_get_protocol(wsi));

			if (lws_send_pipe_choked(wsi)) {
				lws_callback_on_writable(wsi);
				break;
			}
		}
		break;

	case LWS_CALLBACK_RECEIVE:
		if (((v->ringbuffer_head - pss->ringbuffer_tail) &
		    (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 1)) {
			lwsl_err("dropping!\n");
			goto choke;
		}

		if (v->ringbuffer[v->ringbuffer_head].payload)
			free(v->ringbuffer[v->ringbuffer_head].payload);

		v->ringbuffer[v->ringbuffer_head].payload = malloc(LWS_PRE + len);
		v->ringbuffer[v->ringbuffer_head].len = len;
		memcpy((char *)v->ringbuffer[v->ringbuffer_head].payload +
		       LWS_PRE, in, len);
		if (v->ringbuffer_head == (MAX_MESSAGE_QUEUE - 1))
			v->ringbuffer_head = 0;
		else
			v->ringbuffer_head++;

		if (((v->ringbuffer_head - pss->ringbuffer_tail) &
		    (MAX_MESSAGE_QUEUE - 1)) != (MAX_MESSAGE_QUEUE - 2))
			goto done;

choke:
		lwsl_debug("LWS_CALLBACK_RECEIVE: throttling %p\n", wsi);
		lws_rx_flow_control(wsi, 0);

done:
		lws_callback_on_writable_all_protocol(lws_get_context(wsi),
						      lws_get_protocol(wsi));
		break;

	default:
		break;
	}

	return 0;
}
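The mirror callback above is vhost-aware (it allocates its shared ringbuffer state at LWS_CALLBACK_PROTOCOL_INIT); wiring it into a server is the usual lws_protocols / lws_create_context setup. A minimal sketch follows, with the port, protocol name and rx buffer size chosen arbitrarily for illustration, and assuming callback_lws_mirror and its per-session struct are visible in this translation unit.

#include <string.h>
#include <libwebsockets.h>

static const struct lws_protocols protocols[] = {
	{ "http", lws_callback_http_dummy, 0, 0 },	/* default http handler (Example #3) */
	{
		"lws-mirror-protocol",			/* name the client asks for */
		callback_lws_mirror,
		sizeof(struct per_session_data__lws_mirror),
		128,					/* rx buffer size */
	},
	{ NULL, NULL, 0, 0 }				/* terminator */
};

int main(void)
{
	struct lws_context_creation_info info;
	struct lws_context *context;

	memset(&info, 0, sizeof(info));
	info.port = 7681;				/* arbitrary */
	info.protocols = protocols;

	context = lws_create_context(&info);
	if (!context)
		return 1;

	while (lws_service(context, 0) >= 0)
		;					/* run the event loop */

	lws_context_destroy(context);
	return 0;
}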
Example #5
int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	int write_type = LWS_WRITE_PONG;
	struct lws_tokens eff_buf;
#ifdef LWS_USE_HTTP2
	struct lws *wsi2;
#endif
	int ret, m, n;

	/*
	 * user callback is lowest priority to get these notifications
	 * actually, since other pending things cannot be disordered
	 */

	/* Priority 1: pending truncated sends are incomplete ws fragments
	 *	       If anything else sent first the protocol would be
	 *	       corrupted.
	 */
	if (wsi->trunc_len) {
		if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
				  wsi->trunc_len) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			return -1;
		}
		/* leave POLLOUT active either way */
		return 0;
	} else
		if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)
			return -1; /* retry closing now */


#ifdef LWS_USE_HTTP2
	/* Priority 2: protocol packets
	 */
	if (wsi->pps) {
		lwsl_info("servicing pps %d\n", wsi->pps);
		switch (wsi->pps) {
		case LWS_PPS_HTTP2_MY_SETTINGS:
		case LWS_PPS_HTTP2_ACK_SETTINGS:
			lws_http2_do_pps_send(lws_get_context(wsi), wsi);
			break;
		default:
			break;
		}
		wsi->pps = LWS_PPS_NONE;
		lws_rx_flow_control(wsi, 1);

		return 0; /* leave POLLOUT active */
	}
#endif
	/* Priority 3: pending control packets (pong or close)
	 */
	if ((wsi->state == LWSS_ESTABLISHED &&
	     wsi->u.ws.ping_pending_flag) ||
	    (wsi->state == LWSS_RETURNED_CLOSE_ALREADY &&
	     wsi->u.ws.payload_is_close)) {

		if (wsi->u.ws.payload_is_close)
			write_type = LWS_WRITE_CLOSE;

		n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
			      wsi->u.ws.ping_payload_len, write_type);
		if (n < 0)
			return -1;

		/* well he is sent, mark him done */
		wsi->u.ws.ping_pending_flag = 0;
		if (wsi->u.ws.payload_is_close)
			/* oh... a close frame was it... then we are done */
			return -1;

		/* otherwise for PING, leave POLLOUT active either way */
		return 0;
	}

	/* Priority 4: if we are closing, not allowed to send more data frags
	 *	       which means user callback or tx ext flush banned now
	 */
	if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
		goto user_service;

	/* Priority 5: Tx path extension with more to send
	 *
	 *	       These are handled as new fragments each time around
	 *	       So while we must block new writeable callback to enforce
	 *	       payload ordering, but since they are always complete
	 *	       fragments control packets can interleave OK.
	 */
	if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
		lwsl_ext("SERVICING TX EXT DRAINING\n");
		if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
			return -1;
		/* leave POLLOUT active */
		return 0;
	}

	/* Priority 6: user can get the callback
	 */
	m = lws_ext_cb_active(wsi, LWS_EXT_CB_IS_WRITEABLE, NULL, 0);
#ifndef LWS_NO_EXTENSIONS
	if (!wsi->extension_data_pending)
		goto user_service;
#endif
	/*
	 * check in on the active extensions, see if they
	 * had pending stuff to spill... they need to get the
	 * first look-in otherwise sequence will be disordered
	 *
	 * NULL, zero-length eff_buf means just spill pending
	 */

	ret = 1;
	while (ret == 1) {

		/* default to nobody has more to spill */

		ret = 0;
		eff_buf.token = NULL;
		eff_buf.token_len = 0;

		/* give every extension a chance to spill */

		m = lws_ext_cb_active(wsi,
					LWS_EXT_CB_PACKET_TX_PRESEND,
					       &eff_buf, 0);
		if (m < 0) {
			lwsl_err("ext reports fatal error\n");
			return -1;
		}
		if (m)
			/*
			 * at least one extension told us he has more
			 * to spill, so we will go around again after
			 */
			ret = 1;

		/* assuming they gave us something to send, send it */

		if (eff_buf.token_len) {
			n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
					  eff_buf.token_len);
			if (n < 0) {
				lwsl_info("closing from POLLOUT spill\n");
				return -1;
			}
			/*
			 * Keep amount spilled small to minimize chance of this
			 */
			if (n != eff_buf.token_len) {
				lwsl_err("Unable to spill ext %d vs %s\n",
							  eff_buf.token_len, n);
				return -1;
			}
		} else
			continue;

		/* no extension has more to spill */

		if (!ret)
			continue;

		/*
		 * There's more to spill from an extension, but we just sent
		 * something... did that leave the pipe choked?
		 */

		if (!lws_send_pipe_choked(wsi))
			/* no we could add more */
			continue;

		lwsl_info("choked in POLLOUT service\n");

		/*
		 * Yes, he's choked.  Leave the POLLOUT masked on so we will
		 * come back here when he is unchoked.  Don't call the user
		 * callback to enforce ordering of spilling, he'll get called
		 * when we come back here and there's nothing more to spill.
		 */

		return 0;
	}
#ifndef LWS_NO_EXTENSIONS
	wsi->extension_data_pending = 0;
#endif
user_service:
	/* one shot */

	if (pollfd) {
		if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
			lwsl_info("failed at set pollfd\n");
			return 1;
		}

		lws_libev_io(wsi, LWS_EV_STOP | LWS_EV_WRITE);
	}

#ifdef LWS_USE_HTTP2
	/*
	 * we are the 'network wsi' for potentially many muxed child wsi with
	 * no network connection of their own, who have to use us for all their
	 * network actions.  So we use a round-robin scheme to share out the
	 * POLLOUT notifications to our children.
	 *
	 * But because any child could exhaust the socket's ability to take
	 * writes, we can only let one child get notified each time.
	 *
	 * In addition children may be closed / deleted / added between POLLOUT
	 * notifications, so we can't hold pointers
	 */

	if (wsi->mode != LWSCM_HTTP2_SERVING) {
		lwsl_info("%s: non http2\n", __func__);
		goto notify;
	}

	wsi->u.http2.requested_POLLOUT = 0;
	if (!wsi->u.http2.initialized) {
		lwsl_info("pollout on uninitialized http2 conn\n");
		return 0;
	}

	lwsl_info("%s: doing children\n", __func__);

	wsi2 = wsi;
	do {
		wsi2 = wsi2->u.http2.next_child_wsi;
		lwsl_info("%s: child %p\n", __func__, wsi2);
		if (!wsi2)
			continue;
		if (!wsi2->u.http2.requested_POLLOUT)
			continue;
		wsi2->u.http2.requested_POLLOUT = 0;
		if (lws_calllback_as_writeable(wsi2)) {
			lwsl_debug("Closing POLLOUT child\n");
			lws_close_free_wsi(wsi2, LWS_CLOSE_STATUS_NOSTATUS);
		}
		wsi2 = wsi;
	} while (wsi2 != NULL && !lws_send_pipe_choked(wsi));

	lwsl_info("%s: completed\n", __func__);

	return 0;
notify:
#endif
	return lws_calllback_as_writeable(wsi);
}
Example #6
LWS_VISIBLE int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	lws_sockfd_type our_fd = 0, tmp_fd;
	struct lws_tokens eff_buf;
	unsigned int pending = 0;
	struct lws *wsi, *wsi1;
	char draining_flow = 0;
	int timed_out = 0;
	time_t now;
	int n, m;
	int more;

	/*
	 * you can call us with pollfd = NULL to just allow the once-per-second
	 * global timeout checks; if less than a second since the last check
	 * it returns immediately then.
	 */

	time(&now);

	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

		lws_plat_service_periodic(context);

		/* global timeout check once per second */

		if (pollfd)
			our_fd = pollfd->fd;

		wsi = context->pt[tsi].timeout_list;
		while (wsi) {
			/* we have to take copies, because he may be deleted */
			wsi1 = wsi->timeout_list;
			tmp_fd = wsi->sock;
			if (lws_service_timeout_check(wsi, (unsigned int)now)) {
				/* he did time out... */
				if (tmp_fd == our_fd)
					/* it was the guy we came to service! */
					timed_out = 1;
					/* he's gone, no need to mark as handled */
			}
			wsi = wsi1;
		}
#if 0
		{
			char s[300], *p = s;

			for (n = 0; n < context->count_threads; n++)
				p += sprintf(p, " %7lu (%5d), ",
					     context->pt[n].count_conns,
					     context->pt[n].fds_count);

			lwsl_notice("load: %s\n", s);
		}
#endif
	}

	/* the socket we came to service timed out, nothing to do */
	if (timed_out)
		return 0;

	/* just here for timeout management? */
	if (!pollfd)
		return 0;

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

#if LWS_POSIX

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
						       (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#endif

	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWSCM_HTTP_SERVING:
	case LWSCM_HTTP_SERVING_ACCEPTED:
	case LWSCM_SERVER_LISTENER:
	case LWSCM_SSL_ACK_PENDING:
		n = lws_server_socket_service(context, wsi, pollfd);
		if (n) /* closed by above */
			return 1;
		pending = lws_ssl_pending(wsi);
		if (pending)
			goto handle_pending;
		goto handled;

	case LWSCM_WS_SERVING:
	case LWSCM_WS_CLIENT:
	case LWSCM_HTTP2_SERVING:

		/* 1: something requested a callback when it was OK to write */

		if ((pollfd->revents & LWS_POLLOUT) &&
		    (wsi->state == LWSS_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		     wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		     wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
				wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			lwsl_info("lws_service_fd: closing\n");
			goto close_and_handled;
		}
#if 1
		if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		    wsi->state == LWSS_AWAITING_CLOSE_ACK) {
			/*
			 * we stopped caring about anything except control
			 * packets.  Force flow control off, defeat tx
			 * draining.
			 */
			lws_rx_flow_control(wsi, 1);
			wsi->u.ws.tx_draining_ext = 0;
		}
#endif
		if (wsi->u.ws.tx_draining_ext) {
			/* we cannot deal with new RX until the TX ext
			 * path has been drained.  It's because new
			 * rx will, eg, crap on the wsi rx buf that
			 * may be needed to retain state.
			 *
			 * TX ext drain path MUST go through event loop
			 * to avoid blocking.
			 */
			break;
		}

		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
			/* We cannot deal with any kind of new RX
			 * because we are RX-flowcontrolled.
			 */
			break;

		/* 2: RX Extension needs to be drained
		 */

		if (wsi->state == LWSS_ESTABLISHED &&
		    wsi->u.ws.rx_draining_ext) {

			lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
#ifndef LWS_NO_CLIENT
			if (wsi->mode == LWSCM_WS_CLIENT) {
				n = lws_client_rx_sm(wsi, 0);
				if (n < 0)
					/* we closed wsi */
					n = 0;
			} else
#endif
				n = lws_rx_sm(wsi, 0);

			goto handled;
		}

		if (wsi->u.ws.rx_draining_ext)
			/*
			 * We have RX EXT content to drain, but can't do it
			 * right now.  That means we cannot do anything lower
			 * priority either.
			 */
			break;

		/* 3: RX Flowcontrol buffer needs to be drained
		 */

		if (wsi->rxflow_buffer) {
			lwsl_info("draining rxflow (len %d)\n",
				wsi->rxflow_len - wsi->rxflow_pos
			);
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
						wsi->rxflow_pos;
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
			draining_flow = 1;
			goto drain;
		}

		/* 4: any incoming data ready?
		 * notice if rx flow going off raced poll(), rx flow wins
		 */
		if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
			break;
read:
		eff_buf.token_len = lws_ssl_capable_read(wsi, pt->serv_buf,
					pending ? pending : LWS_MAX_SOCKET_IO_BUF);
		switch (eff_buf.token_len) {
		case 0:
			lwsl_info("service_fd: closing due to 0 length read\n");
			goto close_and_handled;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lwsl_info("SSL Capable more service\n");
			n = 0;
			goto handled;
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_info("Closing when error\n");
			goto close_and_handled;
		}

		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens struct
		 * prepared with the default buffer and content length that's in
		 * there.  Rather than rewrite the default buffer, extensions
		 * that expect to grow the buffer can adapt .token to
		 * point to their own per-connection buffer in the extension
		 * user allocation.  By default with no extensions or no
		 * extension callback handling, just the normal input buffer is
		 * used then so it is efficient.
		 */

		eff_buf.token = (char *)pt->serv_buf;
drain:

		do {
			more = 0;

			m = lws_ext_cb_active(wsi,
				LWS_EXT_CB_PACKET_RX_PREPARSE, &eff_buf, 0);
			if (m < 0)
				goto close_and_handled;
			if (m)
				more = 1;

			/* service incoming data */

			if (eff_buf.token_len) {
				/*
				 * if draining from rxflow buffer, not
				 * critical to track what was used since at the
				 * use it bumps wsi->rxflow_pos.  If we come
				 * around again it will pick up from where it
				 * left off.
				 */
				n = lws_read(wsi, (unsigned char *)eff_buf.token,
					     eff_buf.token_len);
				if (n < 0) {
					/* we closed wsi */
					n = 0;
					goto handled;
				}
			}

			eff_buf.token = NULL;
			eff_buf.token_len = 0;
		} while (more);

		pending = lws_ssl_pending(wsi);
		if (pending) {
handle_pending:
			pending = pending > LWS_MAX_SOCKET_IO_BUF ?
					LWS_MAX_SOCKET_IO_BUF : pending;
			goto read;
		}

		if (draining_flow && wsi->rxflow_buffer &&
				 wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free_set_NULL(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
			n =
#endif
			_lws_rx_flow_control(wsi);
			/* n ignored, needed for NO_SERVER case */
		}

		break;

	default:
#ifdef LWS_NO_CLIENT
		break;
#else
		n = lws_client_socket_service(context, wsi, pollfd);
		if (n)
			return 1;
		goto handled;
#endif
	}

	n = 0;
	goto handled;

close_and_handled:
	lwsl_debug("Close and handled\n");
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
	/*
	 * pollfd may point to something else after the close
	 * due to pollfd swapping scheme on delete on some platforms
	 * we can't clear revents now because it'd be the wrong guy's revents
	 */
	return 1;

handled:
	pollfd->revents = 0;
	return n;
}
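lws_service_fd_tsi() is normally reached through lws_service() or, for applications running their own poll loop, lws_service_fd(); as the comment at the top notes, it may also be called with a NULL pollfd purely to run the once-per-second timeout checks. A minimal sketch of that idle-tick use, assuming an application-owned event loop:

#include <libwebsockets.h>

/* Sketch: let libwebsockets run its per-second timeout housekeeping from an
 * external event loop.  Passing NULL means "no fd activity, timeouts only". */
static void
idle_tick(struct lws_context *context)
{
	lws_service_fd(context, NULL);
}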
Example #7
static int
lws_callback_raw_telnet(struct lws *wsi, enum lws_callback_reasons reason,
			void *user, void *in, size_t len)
{
	struct per_session_data__telnet *pss =
			(struct per_session_data__telnet *)user, **p;
	struct per_vhost_data__telnet *vhd =
			(struct per_vhost_data__telnet *)
			lws_protocol_vh_priv_get(lws_get_vhost(wsi),
					lws_get_protocol(wsi));
	const struct lws_protocol_vhost_options *pvo =
			(const struct lws_protocol_vhost_options *)in;
	int n, m;
	uint8_t buf[LWS_PRE + 800], *pu = in;

	switch ((int)reason) {
	case LWS_CALLBACK_PROTOCOL_INIT:
		vhd = lws_protocol_vh_priv_zalloc(lws_get_vhost(wsi),
				lws_get_protocol(wsi),
				sizeof(struct per_vhost_data__telnet));
		vhd->context = lws_get_context(wsi);
		vhd->protocol = lws_get_protocol(wsi);
		vhd->vhost = lws_get_vhost(wsi);

		while (pvo) {
			if (!strcmp(pvo->name, "ops"))
				vhd->ops = (const struct lws_ssh_ops *)pvo->value;

			pvo = pvo->next;
		}

		if (!vhd->ops) {
			lwsl_err("telnet pvo \"ops\" is mandatory\n");
			return -1;
		}
		break;

        case LWS_CALLBACK_RAW_ADOPT:
		pss->next = vhd->live_pss_list;
		vhd->live_pss_list = pss;
		pss->vhd = vhd;
		pss->state = LTST_WAIT_IAC;
		pss->initial = 0;
		if (vhd->ops->channel_create)
			vhd->ops->channel_create(wsi, &pss->priv);
		lws_callback_on_writable(wsi);
                break;

	case LWS_CALLBACK_RAW_CLOSE:
		p = &vhd->live_pss_list;

		while (*p) {
			if ((*p) == pss) {
				if (vhd->ops->channel_destroy)
					vhd->ops->channel_destroy(pss->priv);
				*p = pss->next;
				continue;
			}
			p = &((*p)->next);
		}
		break;

	case LWS_CALLBACK_RAW_RX:
		n = 0;

		/* this stuff is coming in telnet line discipline, we
		 * have to strip IACs and process IAC repeats */

		while (len--) {
			if (telnet_ld(pss, *pu))
				buf[n++] = *pu++;
			else
				pu++;

			if (n > 100 || !len) {
				pss->vhd->ops->rx(pss->priv, wsi, buf, n);
				n = 0;
			}
		}
		break;

        case LWS_CALLBACK_RAW_WRITEABLE:
		n = 0;
		if (!pss->initial) {
			memcpy(buf + LWS_PRE, init, sizeof(init));

			n = sizeof(init);
			pss->initial = 1;
		} else {
			/* bring any waiting tx into second half of buffer
			 * restrict how much we can send to 1/4 of the buffer,
			 * because we have to apply telnet line discipline...
			 * in the worst case of all 0xff, doubling the size
			 */
			pu = buf + LWS_PRE + 400;
			m = (int)pss->vhd->ops->tx(pss->priv, LWS_STDOUT, pu,
					((int)sizeof(buf) - LWS_PRE - n - 401) / 2);

			/*
			 * apply telnet line discipline and copy into place
			 * in output buffer
			 */
			while (m--) {
				if (*pu == 0xff)
					buf[LWS_PRE + n++] = 0xff;
				buf[LWS_PRE + n++] = *pu++;
			}
		}
		if (n > 0) {
			m = lws_write(wsi, (unsigned char *)buf + LWS_PRE, n,
				      LWS_WRITE_HTTP);
	                if (m < 0) {
	                        lwsl_err("ERROR %d writing to di socket\n", m);
	                        return -1;
	                }
		}

		if (vhd->ops->tx_waiting(&pss->priv))
		       lws_callback_on_writable(wsi);
		break;

        case LWS_CALLBACK_SSH_UART_SET_RXFLOW:
        	/*
        	 * this is sent to set rxflow state on any connections that
        	 * sink on a particular uart.  The uart index affected is in len
        	 *
        	 * More than one protocol may sink to the same uart, and the
        	 * protocol may select the uart itself, eg, in the URL used
        	 * to set up the connection.
        	 */
        	lws_rx_flow_control(wsi, len & 1);
        	break;

	default:
		break;
	}

	return 0;
}
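The telnet callback above refuses to start unless a per-vhost option named "ops" delivers a struct lws_ssh_ops pointer at LWS_CALLBACK_PROTOCOL_INIT. A minimal sketch of supplying that pvo is shown below; my_ops and the protocol name "lws-telnet" are illustrative assumptions, not taken from the example.

#include <libwebsockets.h>

/* Sketch only: my_ops is assumed to be a struct lws_ssh_ops populated
 * elsewhere with the rx/tx/tx_waiting/channel_* handlers the callback uses. */
extern const struct lws_ssh_ops my_ops;

static const struct lws_protocol_vhost_options pvo_ops = {
	NULL,					/* next */
	NULL,					/* child options */
	"ops",					/* name the callback looks for */
	(const char *)&my_ops			/* pointer passed through the value field */
};

static const struct lws_protocol_vhost_options pvo = {
	NULL,					/* next */
	&pvo_ops,				/* options attached to this protocol */
	"lws-telnet",				/* illustrative protocol name */
	""					/* value unused here */
};

/* ...then hang the chain on the vhost via info.pvo = &pvo before creating it. */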
Example #8
int
callback_lws_mirror(struct lws *wsi, enum lws_callback_reasons reason,
			void *user, void *in, size_t len)
{
	struct per_session_data__lws_mirror *pss =
			(struct per_session_data__lws_mirror *)user;
	int m,n;

	switch (reason) {

	case LWS_CALLBACK_ESTABLISHED:
		lwsl_info("callback_lws_mirror: LWS_CALLBACK_ESTABLISHED\n", __func__);
		pss->ringbuffer_tail = ringbuffer_head;
		pss->wsi = wsi;
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		lwsl_notice("mirror protocol cleaning up\n");
		for (n = 0; n < sizeof ringbuffer / sizeof ringbuffer[0]; n++)
			if (ringbuffer[n].payload)
				free(ringbuffer[n].payload);
		break;

	case LWS_CALLBACK_SERVER_WRITEABLE:
		if (close_testing)
			break;
		while (pss->ringbuffer_tail != ringbuffer_head) {
			m = ringbuffer[pss->ringbuffer_tail].len;
			n = lws_write(wsi, (unsigned char *)
					ringbuffer[pss->ringbuffer_tail].payload +
					LWS_PRE, m, LWS_WRITE_TEXT);
			if (n < 0) {
				lwsl_err("ERROR %d writing to mirror socket\n", n);
				return -1;
			}
			if (n < m)
				lwsl_err("mirror partial write %d vs %d\n", n, m);

			if (pss->ringbuffer_tail == (MAX_MESSAGE_QUEUE - 1))
				pss->ringbuffer_tail = 0;
			else
				pss->ringbuffer_tail++;

			if (((ringbuffer_head - pss->ringbuffer_tail) &
				  (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 15))
				lws_rx_flow_allow_all_protocol(lws_get_context(wsi),
						lws_get_protocol(wsi));

			if (lws_send_pipe_choked(wsi)) {
				lws_callback_on_writable(wsi);
				break;
			}
			/*
			 * for tests with chrome on same machine as client and
			 * server, this is needed to stop chrome choking
			 */
			usleep(1);
		}
		break;

	case LWS_CALLBACK_RECEIVE:

		if (((ringbuffer_head - pss->ringbuffer_tail) &
				  (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 1)) {
			lwsl_err("dropping!\n");
			goto choke;
		}

		if (ringbuffer[ringbuffer_head].payload)
			free(ringbuffer[ringbuffer_head].payload);

		ringbuffer[ringbuffer_head].payload =
				malloc(LWS_SEND_BUFFER_PRE_PADDING + len +
						  LWS_SEND_BUFFER_POST_PADDING);
		ringbuffer[ringbuffer_head].len = len;
		memcpy((char *)ringbuffer[ringbuffer_head].payload +
					  LWS_SEND_BUFFER_PRE_PADDING, in, len);
		if (ringbuffer_head == (MAX_MESSAGE_QUEUE - 1))
			ringbuffer_head = 0;
		else
			ringbuffer_head++;

		if (((ringbuffer_head - pss->ringbuffer_tail) &
				  (MAX_MESSAGE_QUEUE - 1)) != (MAX_MESSAGE_QUEUE - 2))
			goto done;

choke:
		lwsl_debug("LWS_CALLBACK_RECEIVE: throttling %p\n", wsi);
		lws_rx_flow_control(wsi, 0);

done:
		lws_callback_on_writable_all_protocol(lws_get_context(wsi),
						lws_get_protocol(wsi));

		const char *ubus_socket = NULL;
		struct ubus_context *ctx;
		static struct blob_buf b;
		ctx = ubus_connect(ubus_socket);
		if (!ctx) {
			fprintf(stderr, "Failed to connect to ubus\n");
			return -1;
		}		
		blob_buf_init(&b, 0);
		blobmsg_add_json_from_string(&b, (char *)in);
		ubus_send_event(ctx, ubusxevent, b.head);
		ubus_free(ctx);

		break;
	/*
	 * this just demonstrates how to use the protocol filter. If you won't
	 * study and reject connections based on header content, you don't need
	 * to handle this callback
	 */

	case LWS_CALLBACK_FILTER_PROTOCOL_CONNECTION:
		dump_handshake_info(wsi);
		/* you could return non-zero here and kill the connection */
		break;

	default:
		break;
	}

	return 0;
}
Example #9
LWS_VISIBLE LWS_EXTERN int
libwebsocket_rx_flow_control(struct lws *wsi, int enable)
{
    return lws_rx_flow_control(wsi, enable);
}
Example #10
static int callback_websocket_universal(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len)
{
	int n;
	struct per_session_data__universal *pss = (struct per_session_data__universal *)user;
    
	switch (reason) {

	case LWS_CALLBACK_ESTABLISHED:
        printf("transport_mock_websocket_server_t LWS_CALLBACK_ESTABLISHED\n");
		pss->ringbuffer_tail = ringbuffer_head;
		pss->wsi = wsi;
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
       printf("transport_mock_websocket_server_t LWS_CALLBACK_PROTOCOL_DESTROY\n");
		for (n = 0; n < sizeof ringbuffer / sizeof ringbuffer[0]; n++)
			if (ringbuffer[n].payload)
				free(ringbuffer[n].payload);
		break;
	case LWS_CALLBACK_SERVER_WRITEABLE:
        printf("transport_mock_websocket_server_t LWS_CALLBACK_SERVER_WRITEABLE\n");
		while (pss->ringbuffer_tail != ringbuffer_head) {

			n = lws_write(wsi, (unsigned char *)
				   ringbuffer[pss->ringbuffer_tail].payload +
				   LWS_SEND_BUFFER_PRE_PADDING,
				   ringbuffer[pss->ringbuffer_tail].len,
								LWS_WRITE_BINARY);
			if (n < 0) {
				lwsl_err("ERROR %d writing to mirror socket\n", n);
				return -1;
			}
			if (n < (int)ringbuffer[pss->ringbuffer_tail].len)
				lwsl_err("mirror partial write %d vs %d\n",
				       n, (int)ringbuffer[pss->ringbuffer_tail].len);

			if (pss->ringbuffer_tail == (MAX_MESSAGE_QUEUE - 1))
				pss->ringbuffer_tail = 0;
			else
				pss->ringbuffer_tail++;

			if (((ringbuffer_head - pss->ringbuffer_tail) &
				  (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 15))
				lws_rx_flow_allow_all_protocol(lws_get_context(wsi), lws_get_protocol(wsi));

			// lwsl_debug("tx fifo %d\n", (ringbuffer_head - pss->ringbuffer_tail) & (MAX_MESSAGE_QUEUE - 1));

			if (lws_send_pipe_choked(wsi)) {
				lws_callback_on_writable(wsi);
				break;
			}
			/*
			 * for tests with chrome on same machine as client and
			 * server, this is needed to stop chrome choking
			 */
			usleep(1);
		}
		break;

	case LWS_CALLBACK_RECEIVE:
        printf("transport_mock_websocket_server_t LWS_CALLBACK_RECEIVE (%lu)\n", len);
		if (((ringbuffer_head - pss->ringbuffer_tail) &
				  (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 1)) {
			lwsl_err("dropping!\n");
			goto choke;
		}

		if (ringbuffer[ringbuffer_head].payload)
			free(ringbuffer[ringbuffer_head].payload);

		ringbuffer[ringbuffer_head].payload =
				malloc(LWS_SEND_BUFFER_PRE_PADDING + len +
						  LWS_SEND_BUFFER_POST_PADDING);
		ringbuffer[ringbuffer_head].len = len;
		memcpy((char *)ringbuffer[ringbuffer_head].payload +
					  LWS_SEND_BUFFER_PRE_PADDING, in, len);
		if (ringbuffer_head == (MAX_MESSAGE_QUEUE - 1))
			ringbuffer_head = 0;
		else
			ringbuffer_head++;

		if (((ringbuffer_head - pss->ringbuffer_tail) &
				  (MAX_MESSAGE_QUEUE - 1)) != (MAX_MESSAGE_QUEUE - 2))
			goto done;

choke:
		lwsl_debug("LWS_CALLBACK_RECEIVE: throttling %p\n", wsi);
		lws_rx_flow_control(wsi, 0);

//		lwsl_debug("rx fifo %d\n", (ringbuffer_head - pss->ringbuffer_tail) & (MAX_MESSAGE_QUEUE - 1));
done:
		lws_callback_on_writable_all_protocol(lws_get_context(wsi), lws_get_protocol(wsi));
		break;

	/*
	 * this just demonstrates how to use the protocol filter. If you won't
	 * study and reject connections based on header content, you don't need
	 * to handle this callback
	 */
	case LWS_CALLBACK_FILTER_PROTOCOL_CONNECTION:
        printf("transport_mock_websocket_server_t LWS_CALLBACK_FILTER_PROTOCOL_CONNECTION\n");
		//dump_handshake_info(wsi);
		/* you could return non-zero here and kill the connection */
		break;

	default:
		break;
	}

	return 0;
}
Example #11
int
callback_lws_mirror(struct lws *wsi, enum lws_callback_reasons reason,
		    void *user, void *in, size_t len)
{
	struct per_session_data__lws_mirror *pss =
			(struct per_session_data__lws_mirror *)user;
	int n;

	switch (reason) {

	case LWS_CALLBACK_ESTABLISHED:
		lwsl_info("%s: LWS_CALLBACK_ESTABLISHED\n", __func__);
		pss->ringbuffer_tail = ringbuffer_head;
		pss->wsi = wsi;
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		lwsl_notice("%s: mirror protocol cleaning up\n", __func__);
		for (n = 0; n < sizeof ringbuffer / sizeof ringbuffer[0]; n++)
			if (ringbuffer[n].payload)
				free(ringbuffer[n].payload);
		break;

	case LWS_CALLBACK_SERVER_WRITEABLE:
		if (close_testing)
			break;
		while (pss->ringbuffer_tail != ringbuffer_head) {

			n = lws_write(wsi, (unsigned char *)
				   ringbuffer[pss->ringbuffer_tail].payload +
				   LWS_SEND_BUFFER_PRE_PADDING,
				   ringbuffer[pss->ringbuffer_tail].len,
								LWS_WRITE_TEXT);
			if (n < 0) {
				lwsl_err("ERROR %d writing to mirror socket\n", n);
				return -1;
			}
			if (n < (int)ringbuffer[pss->ringbuffer_tail].len)
				lwsl_err("mirror partial write %d vs %d\n",
				       n, ringbuffer[pss->ringbuffer_tail].len);

			if (pss->ringbuffer_tail == (MAX_MESSAGE_QUEUE - 1))
				pss->ringbuffer_tail = 0;
			else
				pss->ringbuffer_tail++;

			if (((ringbuffer_head - pss->ringbuffer_tail) &
			    (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 15))
				lws_rx_flow_allow_all_protocol(lws_get_context(wsi),
					       lws_get_protocol(wsi));

			if (lws_partial_buffered(wsi) ||
			    lws_send_pipe_choked(wsi)) {
				lws_callback_on_writable(wsi);
				break;
			}
		}
		break;

	case LWS_CALLBACK_RECEIVE:
		if (((ringbuffer_head - pss->ringbuffer_tail) &
		    (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 1)) {
			lwsl_err("dropping!\n");
			goto choke;
		}

		if (ringbuffer[ringbuffer_head].payload)
			free(ringbuffer[ringbuffer_head].payload);

		ringbuffer[ringbuffer_head].payload =
				malloc(LWS_SEND_BUFFER_PRE_PADDING + len);
		ringbuffer[ringbuffer_head].len = len;
		memcpy((char *)ringbuffer[ringbuffer_head].payload +
					  LWS_SEND_BUFFER_PRE_PADDING, in, len);
		if (ringbuffer_head == (MAX_MESSAGE_QUEUE - 1))
			ringbuffer_head = 0;
		else
			ringbuffer_head++;

		if (((ringbuffer_head - pss->ringbuffer_tail) &
		    (MAX_MESSAGE_QUEUE - 1)) != (MAX_MESSAGE_QUEUE - 2))
			goto done;

choke:
		lwsl_debug("LWS_CALLBACK_RECEIVE: throttling %p\n", wsi);
		lws_rx_flow_control(wsi, 0);

done:
		lws_callback_on_writable_all_protocol(lws_get_context(wsi),
						      lws_get_protocol(wsi));
		break;

	/*
	 * this just demonstrates how to use the protocol filter. If you won't
	 * study and reject connections based on header content, you don't need
	 * to handle this callback
	 */

	case LWS_CALLBACK_FILTER_PROTOCOL_CONNECTION:
		dump_handshake_info(wsi);
		/* you could return non-zero here and kill the connection */
		break;

	default:
		break;
	}

	return 0;
}
Example #12
int
callback_lws_mirror(struct lws *wsi, enum lws_callback_reasons reason,
		    void *user, void *in, size_t len)
{
	struct per_session_data__lws_mirror *pss =
			(struct per_session_data__lws_mirror *)user;
	int n, m;
	int client_sockfd = lws_get_socket_fd(wsi);
	switch (reason) {

	case LWS_CALLBACK_ESTABLISHED:
		lwsl_notice("%s: LWS_CALLBACK_ESTABLISHED\n", __func__);
		pss->ringbuffer_tail = ringbuffer_head;
		pss->wsi = wsi;
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		lwsl_notice("%s: mirror protocol cleaning up\n", __func__);
		for (n = 0; n < sizeof ringbuffer / sizeof ringbuffer[0]; n++)
			if (ringbuffer[n].payload)
				free(ringbuffer[n].payload);
		break;

	case LWS_CALLBACK_SERVER_WRITEABLE:
		//if (close_testing)
		//	break;

	printf("服务器开始发送数据\n");
	websocket_write_back(wsi, msg,-1);
	// n = lws_write(wsi, (char*)in + LWS_SEND_BUFFER_PRE_PADDING, len, LWS_WRITE_TEXT);

		// while (pss->ringbuffer_tail != ringbuffer_head) {
		// 	m = ringbuffer[pss->ringbuffer_tail].len;
		// 	n = lws_write(wsi, (unsigned char *)
		// 		   ringbuffer[pss->ringbuffer_tail].payload +
		// 		   LWS_PRE, m, LWS_WRITE_TEXT);
			
			
		// 	printf("the n is %d, the cilent fd is %d\n",n,client_sockfd);
		// 	if (n < 0) {
		// 		lwsl_err("ERROR %d writing to mirror socket\n", n);
		// 		return -1;
		// 	}
		// 	if (n < m)
		// 		lwsl_err("mirror partial write %d vs %d\n", n, m);

		// 	if (pss->ringbuffer_tail == (MAX_MESSAGE_QUEUE - 1))
		// 		pss->ringbuffer_tail = 0;
		// 	else
		// 		pss->ringbuffer_tail++;

		// 	// if (((ringbuffer_head - pss->ringbuffer_tail) &
		// 	//     (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 15))
		// 	// 	lws_rx_flow_allow_all_protocol(lws_get_context(wsi),
		// 	// 		       lws_get_protocol(wsi));

		// 	// if (lws_send_pipe_choked(wsi)) {
		// 	// 	lws_callback_on_writable(wsi);
		// 	// 	break;
		// 	// }
		// }
		break;

	case LWS_CALLBACK_RECEIVE:
		client_sockfd = lws_get_socket_fd(wsi);
		lwsl_notice("received from client %d: [%s]\n",
			    client_sockfd, (const char *)in);
		memcpy(msg, in, strlen(in));
		if (((ringbuffer_head - pss->ringbuffer_tail) &
		    (MAX_MESSAGE_QUEUE - 1)) == (MAX_MESSAGE_QUEUE - 1)) {
			lwsl_err("dropping!\n");
			goto choke;
		}

		if (ringbuffer[ringbuffer_head].payload)
			free(ringbuffer[ringbuffer_head].payload);

		ringbuffer[ringbuffer_head].payload = malloc(LWS_PRE + len);
		ringbuffer[ringbuffer_head].len = len;
		memcpy((char *)ringbuffer[ringbuffer_head].payload +
		       LWS_PRE, in, len);
		if (ringbuffer_head == (MAX_MESSAGE_QUEUE - 1))
			ringbuffer_head = 0;
		else
			ringbuffer_head++;

		if (((ringbuffer_head - pss->ringbuffer_tail) &
		    (MAX_MESSAGE_QUEUE - 1)) != (MAX_MESSAGE_QUEUE - 2))
			goto done;

choke:
		lwsl_notice("LWS_CALLBACK_RECEIVE: throttling %p\n", wsi);
		lws_rx_flow_control(wsi, 0);

done:
		printf("lws_callback_on_writable_all_protocol, the client fd is %d\n",
		       client_sockfd);
		lws_callback_on_writable_all_protocol(lws_get_context(wsi),
						      lws_get_protocol(wsi));
		break;

	/*
	 * this just demonstrates how to use the protocol filter. If you won't
	 * study and reject connections based on header content, you don't need
	 * to handle this callback
	 */

	case LWS_CALLBACK_FILTER_PROTOCOL_CONNECTION:
		//dump_handshake_info(wsi);
		/* you could return non-zero here and kill the connection */
		break;

	default:
		break;
	}

	return 0;
}
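Example #12 sends through a websocket_write_back() helper that is not shown. Below is a minimal sketch of what such a helper typically does; its contract here (-1 meaning "use strlen()", payloads fitting a small stack buffer) is a guess, not the helper's actual code.

#include <string.h>
#include <libwebsockets.h>

/* Sketch of a websocket_write_back()-style helper: reserve LWS_PRE bytes of
 * headroom, copy the payload behind it and hand it to lws_write(). */
static int
websocket_write_back(struct lws *wsi, const char *str, int str_size)
{
	unsigned char buf[LWS_PRE + 512];
	int n;

	if (!wsi || !str)
		return -1;

	if (str_size < 1)
		str_size = (int)strlen(str);
	if (str_size > (int)sizeof(buf) - LWS_PRE)
		str_size = (int)sizeof(buf) - LWS_PRE;	/* truncate in this sketch */

	memcpy(buf + LWS_PRE, str, (size_t)str_size);
	n = lws_write(wsi, buf + LWS_PRE, (size_t)str_size, LWS_WRITE_TEXT);

	return n;
}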