Example #1
LWS_VISIBLE void
lws_libuv_stop(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	if (context->requested_kill)
		return;

	context->requested_kill = 1;

	m = context->count_threads;
	context->being_destroyed = 1;

	while (m--) {
		pt = &context->pt[m];

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi)
				continue;
			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
				/* no protocol close */);
			n--;
		}
	}

	lwsl_info("%s: feels everything closed\n", __func__);
	if (context->count_wsi_allocated == 0)
		lws_libuv_kill(context);
}
Example #2
void
lws_ssl_info_callback(const SSL *ssl, int where, int ret)
{
	struct lws *wsi;
	struct lws_context *context;
	struct lws_ssl_info si;

	context = (struct lws_context *)SSL_CTX_get_ex_data(
					SSL_get_SSL_CTX(ssl),
					openssl_SSL_CTX_private_data_index);
	if (!context)
		return;
	wsi = wsi_from_fd(context, SSL_get_fd(ssl));
	if (!wsi)
		return;

	if (!(where & wsi->vhost->ssl_info_event_mask))
		return;

	si.where = where;
	si.ret = ret;

	if (user_callback_handle_rxflow(wsi->protocol->callback,
						   wsi, LWS_CALLBACK_SSL_INFO,
						   wsi->user_space, &si, 0))
		lws_set_timeout(wsi, PENDING_TIMEOUT_KILLED_BY_SSL_INFO, -1);
}
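
The callback above only fires for events enabled in the vhost's ssl_info_event_mask and is forwarded to the user protocol as LWS_CALLBACK_SSL_INFO. Below is a minimal sketch of the receiving side; the callback name is hypothetical, and setting the mask through lws_context_creation_info.ssl_info_event_mask with OpenSSL's SSL_CB_ALERT is an assumption about the lws version in use.

#include <libwebsockets.h>
#include <openssl/ssl.h>

/* Hypothetical protocol callback: log TLS events delivered as
 * LWS_CALLBACK_SSL_INFO.  At context creation time something like
 *     info.ssl_info_event_mask = SSL_CB_ALERT;  // assumption: lws v2.x+ field
 * selects which "where" bits reach this callback. */
static int
my_protocol_callback(struct lws *wsi, enum lws_callback_reasons reason,
		     void *user, void *in, size_t len)
{
	struct lws_ssl_info *si;

	switch (reason) {
	case LWS_CALLBACK_SSL_INFO:
		si = (struct lws_ssl_info *)in;
		lwsl_notice("SSL info: where 0x%x, ret %d\n", si->where, si->ret);
		break;
	default:
		break;
	}

	return 0;
}
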
Example #3
int
remove_wsi_socket_from_fds(struct libwebsocket_context *context,
						      struct libwebsocket *wsi)
{
	int m;
	struct libwebsocket_pollargs pa = { wsi->sock, 0, 0 };

	lws_libev_io(context, wsi, LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE);

	--context->fds_count;

#if !defined(_WIN32) && !defined(MBED_OPERATORS)
	if (wsi->sock > context->max_fds) {
		lwsl_err("Socket fd %d too high (%d)\n",
						   wsi->sock, context->max_fds);
		return 1;
	}
#endif

	lwsl_info("%s: wsi=%p, sock=%d, fds pos=%d\n", __func__,
				    wsi, wsi->sock, wsi->position_in_fds_table);

	if (context->protocols[0].callback(context, wsi,
	    LWS_CALLBACK_LOCK_POLL, wsi->user_space, (void *)&pa, 0))
		return -1;

	m = wsi->position_in_fds_table; /* replace the contents for this */

	/* have the last guy take up the vacant slot */
	context->fds[m] = context->fds[context->fds_count];

	lws_plat_delete_socket_from_fds(context, wsi, m);

	/*
	 * end guy's fds_lookup entry remains unchanged
	 * (still same fd pointing to same wsi)
	 */
	/* end guy's "position in fds table" changed */
	wsi_from_fd(context, context->fds[context->fds_count].fd)->
					position_in_fds_table = m;
	/* deletion guy's lws_lookup entry needs nuking */
	delete_from_fd(context, wsi->sock);
	/* removed wsi has no position any more */
	wsi->position_in_fds_table = -1;

	/* remove also from external POLL support via protocol 0 */
	if (lws_socket_is_valid(wsi->sock)) {
		if (context->protocols[0].callback(context, wsi,
		    LWS_CALLBACK_DEL_POLL_FD, wsi->user_space,
		    (void *) &pa, 0))
			return -1;
	}
	if (context->protocols[0].callback(context, wsi,
				       LWS_CALLBACK_UNLOCK_POLL,
				       wsi->user_space, (void *) &pa, 0))
		return -1;

	return 0;
}
Example #4
LWS_VISIBLE void
lws_rx_flow_allow_all_protocol(const struct lws_protocols *protocol)
{
    struct lws_context *context = protocol->owning_server;
    int n;
    struct lws *wsi;

    for (n = 0; n < context->fds_count; n++) {
        wsi = wsi_from_fd(context, context->fds[n].fd);
        if (!wsi)
            continue;
        if (wsi->protocol == protocol)
            lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
    }
}
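
A typical usage pattern for the helper above, sketched against the same older API (protocol-only argument): a busy connection is throttled with lws_rx_flow_control(), and later every connection on the protocol is resumed at once. The my_protocols table and the function names are hypothetical application-side pieces.

#include <libwebsockets.h>

/* Hypothetical protocol table defined elsewhere by the application. */
extern struct lws_protocols my_protocols[];

/* Stop lws from servicing POLLIN on one connection while our queue is full. */
static void
my_pause_rx(struct lws *wsi)
{
	lws_rx_flow_control(wsi, 0);
}

/* Once there is room again, re-enable RX on every connection using the
 * protocol in one call (older API: takes only the protocol pointer). */
static void
my_resume_rx(void)
{
	lws_rx_flow_allow_all_protocol(&my_protocols[1]);
}
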
Example #5
/*
 * This does not actually stop the event loop, because libuv handle closures
 * have to pass through its event loop.  Instead, this tries to close every
 * wsi and sets a flag; once all the wsi closures have been finalized, we
 * actually stop the libuv event loops.
 */
static void
lws_libuv_stop(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	lwsl_err("%s\n", __func__);

	if (context->requested_kill) {
		lwsl_err("%s: ignoring\n", __func__);
		return;
	}

	context->requested_kill = 1;

	m = context->count_threads;
	context->being_destroyed = 1;

	/*
	 * Phase 1: start the close of every dynamic uv handle
	 */

	while (m--) {
		pt = &context->pt[m];

		if (pt->pipe_wsi) {
			uv_poll_stop(pt->pipe_wsi->w_read.uv.pwatcher);
			lws_destroy_event_pipe(pt->pipe_wsi);
			pt->pipe_wsi = NULL;
		}

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi)
				continue;
			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
				__func__ /* no protocol close */);
			n--;
		}
	}

	lwsl_info("%s: started closing all wsi\n", __func__);

	/* we cannot have completed... there are at least the cancel pipes */
}
Example #6
LWS_VISIBLE int
lws_callback_on_writable_all_protocol(const struct lws_context *context,
				      const struct lws_protocols *protocol)
{
	struct lws *wsi;
	int n;

	for (n = 0; n < context->fds_count; n++) {
		wsi = wsi_from_fd(context,context->fds[n].fd);
		if (!wsi)
			continue;
		if (wsi->protocol == protocol)
			lws_callback_on_writable(wsi);
	}

	return 0;
}
Example #7
LWS_VISIBLE int
lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws *wsi = wsi_from_fd(context, pt->lserv_fd);
	int status = 0, n;

	if (!loop) {
		loop = lws_malloc(sizeof(*loop));
		if (!loop)
			/* OOM: cannot create our own loop */
			return -1;
		uv_loop_init(loop);
		pt->ev_loop_foreign = 0;
	} else
		pt->ev_loop_foreign = 1;

	pt->io_loop_uv = loop;

	if (pt->context->use_ev_sigint) {
		assert(ARRAY_SIZE(sigs) <= ARRAY_SIZE(pt->signals));
		for (n = 0; n < ARRAY_SIZE(sigs); n++) {
			uv_signal_init(loop, &pt->signals[n]);
			pt->signals[n].data = pt->context;
			uv_signal_start(&pt->signals[n], context->lws_uv_sigint_cb, sigs[n]);
		}
	}

	/*
	 * Initialize the accept wsi read watcher with the listening socket
	 * and register a callback for read operations
	 *
	 * We have to do it here because the uv loop(s) are not
	 * initialized until after context creation.
	 */
	if (wsi) {
		wsi->w_read.context = context;
		uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher,
			     pt->lserv_fd);
		uv_poll_start(&wsi->w_read.uv_watcher, UV_READABLE,
			      lws_io_cb);
	}

	uv_timer_init(pt->io_loop_uv, &pt->uv_timeout_watcher);
	uv_timer_start(&pt->uv_timeout_watcher, lws_uv_timeout_cb, 1000, 1000);

	return status;
}
Example #8
LWS_VISIBLE int
lws_callback_all_protocol(const struct lws_protocols *protocol, int reason)
{
    struct lws_context *context = protocol->owning_server;
    struct lws *wsi;
    int n;

    for (n = 0; n < context->fds_count; n++) {
        wsi = wsi_from_fd(context, context->fds[n].fd);
        if (!wsi)
            continue;
        if (wsi->protocol == protocol)
            protocol->callback(context, wsi,
                       reason, wsi->user_space, NULL, 0);
    }

    return 0;
}
Example #9
LWS_VISIBLE int
libwebsocket_callback_on_writable_all_protocol(
				  const struct libwebsocket_protocols *protocol)
{
	struct libwebsocket_context *context = protocol->owning_server;
	int n;
	struct libwebsocket *wsi;

	for (n = 0; n < context->fds_count; n++) {
		wsi = wsi_from_fd(context,context->fds[n].fd);
		if (!wsi)
			continue;
		if (wsi->protocol == protocol)
			libwebsocket_callback_on_writable(context, wsi);
	}

	return 0;
}
Example #10
LWS_VISIBLE int
lws_callback_on_writable_all_protocol(const struct lws_context *context,
				      const struct lws_protocols *protocol)
{
	const struct lws_context_per_thread *pt = &context->pt[0];
	unsigned int n, m = context->count_threads;
	struct lws *wsi;

	while (m--) {
		for (n = 0; n < pt->fds_count; n++) {
			wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi)
				continue;
			if (wsi->protocol == protocol)
				lws_callback_on_writable(wsi);
		}
		pt++;
	}

	return 0;
}
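
A common use of this helper is broadcasting: mark every connection on a protocol as wanting a writable callback, then do the actual lws_write() from LWS_CALLBACK_SERVER_WRITEABLE. The sketch below assumes the lws v2+ naming (LWS_PRE for the per-message header space); the function names are hypothetical.

#include <libwebsockets.h>
#include <stdio.h>

/* Ask every connection bound to "protocol" for a writable callback. */
static void
my_broadcast_tick(struct lws_context *context,
		  const struct lws_protocols *protocol)
{
	lws_callback_on_writable_all_protocol(context, protocol);
}

/* Hypothetical protocol callback: send a short text frame when writable. */
static int
my_protocol_callback(struct lws *wsi, enum lws_callback_reasons reason,
		     void *user, void *in, size_t len)
{
	unsigned char buf[LWS_PRE + 64];	/* LWS_PRE: lws v2+ pre-padding */
	int n;

	switch (reason) {
	case LWS_CALLBACK_SERVER_WRITEABLE:
		n = snprintf((char *)&buf[LWS_PRE], 64, "tick");
		if (lws_write(wsi, &buf[LWS_PRE], n, LWS_WRITE_TEXT) < n)
			return -1;	/* short write: close this connection */
		break;
	default:
		break;
	}

	return 0;
}
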
Example #11
LWS_VISIBLE int
lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	WSANETWORKEVENTS networkevents;
	struct lws_pollfd *pfd;
	struct lws *wsi;
	unsigned int i;
	DWORD ev;
	int n, m;

	/* stay dead once we are dead */
	if (context == NULL)
		return 1;

	if (!context->service_tid_detected) {
		struct lws _lws;

		memset(&_lws, 0, sizeof(_lws));
		_lws.context = context;

		context->service_tid_detected = context->vhost_list->
			protocols[0].callback(&_lws, LWS_CALLBACK_GET_THREAD_ID,
					      NULL, NULL, 0);
	}
	context->service_tid = context->service_tid_detected;

	if (timeout_ms < 0)
		goto faked_service;

	for (i = 0; i < pt->fds_count; ++i) {
		pfd = &pt->fds[i];

		if (!(pfd->events & LWS_POLLOUT))
			continue;

		wsi = wsi_from_fd(context, pfd->fd);
		/* check for NULL before dereferencing wsi */
		if (!wsi || wsi->listener)
			continue;
		if (wsi->sock_send_blocking)
			continue;
		pfd->revents = LWS_POLLOUT;
		n = lws_service_fd(context, pfd);
		if (n < 0)
			return -1;
		/* if something closed, retry this slot */
		if (n)
			i--;
	}

	/* if we know something needs service already, don't wait in poll */
	timeout_ms = lws_service_adjust_timeout(context, timeout_ms, tsi);

	ev = lws_plat_wait_event(pt, timeout_ms);

	context->service_tid = 0;

	if (ev == WSA_WAIT_TIMEOUT) {
		lws_service_fd(context, NULL);
		return 0;
	}

	if (ev == WSA_WAIT_EVENT_0) {
		WSAResetEvent(pt->events[0]);
		return 0;
	}

	if (ev < WSA_WAIT_EVENT_0 || ev > WSA_WAIT_EVENT_0 + pt->fds_count)
		return -1;

	pfd = &pt->fds[ev - WSA_WAIT_EVENT_0 - 1];

	/* eh... is one event at a time the best windows can do? */

	if (WSAEnumNetworkEvents(pfd->fd, pt->events[ev - WSA_WAIT_EVENT_0],
				 &networkevents) == SOCKET_ERROR) {
		lwsl_err("WSAEnumNetworkEvents() failed with error %d\n",
								     LWS_ERRNO);
		return -1;
	}

	pfd->revents = (short)networkevents.lNetworkEvents;

	if (pfd->revents & LWS_POLLOUT) {
		wsi = wsi_from_fd(context, pfd->fd);
		if (wsi)
			wsi->sock_send_blocking = 0;
	}

faked_service:

	/* if someone faked their LWS_POLLIN, then go through all active fds */

	if (lws_service_flag_pending(context, tsi)) {
		/* any socket with events to service? */
		for (n = 0; n < (int)pt->fds_count; n++) {
			if (!pt->fds[n].revents)
				continue;

			m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
			if (m < 0)
				return -1;
			/* if something closed, retry this slot */
			if (m)
				n--;
		}
		return 0;
	}

	if (timeout_ms < 0)
		return 0;

	/* otherwise just do the one... must be a way to improve that... */

	return lws_service_fd_tsi(context, pfd, tsi);
}
Example #12
int
remove_wsi_socket_from_fds(struct lws *wsi)
{
	struct lws_pollargs pa = { wsi->sock, 0, 0 };
#ifndef LWS_NO_SERVER
	struct lws_pollargs pa1;
#endif
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws *end_wsi;
	int m, ret = 0;

#if !defined(_WIN32) && !defined(MBED_OPERATORS)
	if (wsi->sock > context->max_fds) {
		lwsl_err("fd %d too high (%d)\n", wsi->sock, context->max_fds);
		return 1;
	}
#endif

	if (context->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					   wsi->user_space, (void *)&pa, 1))
		return -1;

	lws_libev_io(wsi, LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE);

	lws_pt_lock(pt);

	lwsl_info("%s: wsi=%p, sock=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
		  __func__, wsi, wsi->sock, wsi->position_in_fds_table,
		  pt->fds_count, pt->fds[pt->fds_count].fd);

	/* the guy who is to be deleted's slot index in pt->fds */
	m = wsi->position_in_fds_table;

	/* have the last guy take up the now vacant slot */
	pt->fds[m] = pt->fds[pt->fds_count - 1];

	lws_plat_delete_socket_from_fds(context, wsi, m);

	/* end guy's "position in fds table" is now the deletion guy's old one */
	end_wsi = wsi_from_fd(context, pt->fds[pt->fds_count].fd);
	assert(end_wsi);
	end_wsi->position_in_fds_table = m;

	/* deletion guy's lws_lookup entry needs nuking */
	delete_from_fd(context, wsi->sock);
	/* removed wsi has no position any more */
	wsi->position_in_fds_table = -1;

	/* remove also from external POLL support via protocol 0 */
	if (lws_socket_is_valid(wsi->sock))
		if (context->protocols[0].callback(wsi, LWS_CALLBACK_DEL_POLL_FD,
						   wsi->user_space, (void *) &pa, 0))
			ret = -1;
#ifndef LWS_NO_SERVER
	/* if this made some room, accept connects on this thread */
	if ((unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
		_lws_change_pollfd(pt->wsi_listening, 0, LWS_POLLIN, &pa1);
#endif
	lws_pt_unlock(pt);

	if (context->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					   wsi->user_space, (void *) &pa, 1))
		ret = -1;

	return ret;
}
Example #13
LWS_VISIBLE void
lws_context_destroy(struct lws_context *context)
{
	const struct lws_protocols *protocol = NULL;
	struct lws_context_per_thread *pt;
	struct lws_vhost *vh = NULL, *vh1;
	struct lws wsi;
	int n, m;

	lwsl_notice("%s\n", __func__);

	if (!context)
		return;

	m = context->count_threads;
	context->being_destroyed = 1;

	memset(&wsi, 0, sizeof(wsi));
	wsi.context = context;

#ifdef LWS_LATENCY
	if (context->worst_latency_info[0])
		lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
#endif

	while (m--) {
		pt = &context->pt[m];

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi)
				continue;

			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
				/* no protocol close */);
			n--;
		}
		lws_pt_mutex_destroy(pt);
	}
	/*
	 * give all extensions a chance to clean up any per-context
	 * allocations they might have made
	 */

	n = lws_ext_cb_all_exts(context, NULL,
				LWS_EXT_CB_SERVER_CONTEXT_DESTRUCT, NULL, 0);

	n = lws_ext_cb_all_exts(context, NULL,
				LWS_EXT_CB_CLIENT_CONTEXT_DESTRUCT, NULL, 0);

	/*
	 * inform all the protocols that they are done and will have no more
	 * callbacks.
	 *
	 * We can't free things until after the event loop shuts down.
	 */
	if (context->protocol_init_done)
		vh = context->vhost_list;
	while (vh) {
		wsi.vhost = vh;
		protocol = vh->protocols;
		if (protocol) {
			n = 0;
			while (n < vh->count_protocols) {
				wsi.protocol = protocol;
				protocol->callback(&wsi, LWS_CALLBACK_PROTOCOL_DESTROY,
						   NULL, NULL, 0);
				protocol++;
				n++;
			}
		}

		vh = vh->vhost_next;
	}

	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];

		lws_libev_destroyloop(context, n);
		lws_libuv_destroyloop(context, n);

		lws_free_set_NULL(context->pt[n].serv_buf);
		if (pt->ah_pool)
			lws_free(pt->ah_pool);
		if (pt->http_header_data)
			lws_free(pt->http_header_data);
	}
	lws_plat_context_early_destroy(context);
	lws_ssl_context_destroy(context);

	if (context->pt[0].fds)
		lws_free_set_NULL(context->pt[0].fds);

	/* free all the vhost allocations */

	vh = context->vhost_list;
	while (vh) {
		protocol = vh->protocols;
		if (protocol) {
			n = 0;
			while (n < vh->count_protocols) {
				if (vh->protocol_vh_privs &&
				    vh->protocol_vh_privs[n]) {
					lws_free(vh->protocol_vh_privs[n]);
					vh->protocol_vh_privs[n] = NULL;
				}
				protocol++;
				n++;
			}
		}
		if (vh->protocol_vh_privs)
			lws_free(vh->protocol_vh_privs);
		lws_ssl_SSL_CTX_destroy(vh);
		lws_free(vh->same_vh_protocol_list);
#ifdef LWS_WITH_PLUGINS
		if (context->plugin_list)
			lws_free((void *)vh->protocols);
#ifndef LWS_NO_EXTENSIONS
		if (context->plugin_extension_count)
			lws_free((void *)vh->extensions);
#endif
#endif
#ifdef LWS_WITH_ACCESS_LOG
		if (vh->log_fd != (int)LWS_INVALID_FILE)
			close(vh->log_fd);
#endif

		vh1 = vh->vhost_next;
		lws_free(vh);
		vh = vh1;
	}

	lws_plat_context_late_destroy(context);

	lws_free(context);
}
Example #14
/**
 * lws_context_destroy() - Destroy the websocket context
 * @context:	Websocket context
 *
 *	This function closes any active connections and then frees the
 *	context.  After calling this, any further use of the context is
 *	undefined.
 */
LWS_VISIBLE void
lws_context_destroy(struct lws_context *context)
{
	const struct lws_protocols *protocol = NULL;
	struct lws_context_per_thread *pt;
	struct lws wsi;
	int n, m;

	lwsl_notice("%s\n", __func__);

	if (!context)
		return;

	m = context->count_threads;
	context->being_destroyed = 1;

	memset(&wsi, 0, sizeof(wsi));
	wsi.context = context;

#ifdef LWS_LATENCY
	if (context->worst_latency_info[0])
		lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
#endif

	while (m--) {
		pt = &context->pt[m];

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
			if (!wsi)
				continue;

			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
				/* no protocol close */);
			n--;
		}
	}
	/*
	 * give all extensions a chance to clean up any per-context
	 * allocations they might have made
	 */

	n = lws_ext_cb_all_exts(context, NULL,
				LWS_EXT_CB_SERVER_CONTEXT_DESTRUCT, NULL, 0);

	n = lws_ext_cb_all_exts(context, NULL,
				LWS_EXT_CB_CLIENT_CONTEXT_DESTRUCT, NULL, 0);

	/*
	 * inform all the protocols that they are done and will have no more
	 * callbacks
	 */
	protocol = context->protocols;
	if (protocol)
		while (protocol->callback) {
			protocol->callback(&wsi, LWS_CALLBACK_PROTOCOL_DESTROY,
					   NULL, NULL, 0);
			protocol++;
		}

	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];

		lws_libev_destroyloop(context, n);
		lws_libuv_destroyloop(context, n);

		lws_free_set_NULL(context->pt[n].serv_buf);
		if (pt->ah_pool)
			lws_free(pt->ah_pool);
		if (pt->http_header_data)
			lws_free(pt->http_header_data);
	}
	lws_plat_context_early_destroy(context);
	lws_ssl_context_destroy(context);
	if (context->pt[0].fds)
		lws_free_set_NULL(context->pt[0].fds);

	lws_plat_context_late_destroy(context);

	lws_free(context);
}
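
As the header comment says, lws_context_destroy() closes any remaining connections and then frees the context, so it is normally the last lws call an application makes. A minimal lifecycle sketch follows; the protocols table, port number, and stop flag are assumptions for illustration.

#include <libwebsockets.h>
#include <string.h>

extern struct lws_protocols my_protocols[];	/* hypothetical protocol table */
static volatile int my_interrupted;		/* set from a signal handler */

int
my_run_server(void)
{
	struct lws_context_creation_info info;
	struct lws_context *context;

	memset(&info, 0, sizeof(info));
	info.port = 7681;			/* arbitrary example port */
	info.protocols = my_protocols;

	context = lws_create_context(&info);
	if (!context)
		return 1;

	/* service the event loop until asked to stop */
	while (!my_interrupted)
		if (lws_service(context, 1000) < 0)
			break;

	/* closes all remaining wsi, then frees the context */
	lws_context_destroy(context);

	return 0;
}
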
Example #15
LWS_VISIBLE int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	lws_sockfd_type our_fd = 0, tmp_fd;
	struct lws_tokens eff_buf;
	unsigned int pending = 0;
	struct lws *wsi, *wsi1;
	char draining_flow = 0;
	int timed_out = 0;
	time_t now;
	int n, m;
	int more;

	/*
	 * you can call us with pollfd = NULL to just allow the once-per-second
	 * global timeout checks; if it has been less than a second since the
	 * last check, it returns immediately.
	 */

	time(&now);

	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

		lws_plat_service_periodic(context);

		/* global timeout check once per second */

		if (pollfd)
			our_fd = pollfd->fd;

		wsi = context->pt[tsi].timeout_list;
		while (wsi) {
			/* we have to take copies, because he may be deleted */
			wsi1 = wsi->timeout_list;
			tmp_fd = wsi->sock;
			if (lws_service_timeout_check(wsi, (unsigned int)now)) {
				/* he did time out... */
				if (tmp_fd == our_fd)
					/* it was the guy we came to service! */
					timed_out = 1;
					/* he's gone, no need to mark as handled */
			}
			wsi = wsi1;
		}
#if 0
		{
			char s[300], *p = s;

			for (n = 0; n < context->count_threads; n++)
				p += sprintf(p, " %7lu (%5d), ",
					     context->pt[n].count_conns,
					     context->pt[n].fds_count);

			lwsl_notice("load: %s\n", s);
		}
#endif
	}

	/* the socket we came to service timed out, nothing to do */
	if (timed_out)
		return 0;

	/* just here for timeout management? */
	if (!pollfd)
		return 0;

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

#if LWS_POSIX

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
						       (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#endif

	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWSCM_HTTP_SERVING:
	case LWSCM_HTTP_SERVING_ACCEPTED:
	case LWSCM_SERVER_LISTENER:
	case LWSCM_SSL_ACK_PENDING:
		n = lws_server_socket_service(context, wsi, pollfd);
		if (n) /* closed by above */
			return 1;
		pending = lws_ssl_pending(wsi);
		if (pending)
			goto handle_pending;
		goto handled;

	case LWSCM_WS_SERVING:
	case LWSCM_WS_CLIENT:
	case LWSCM_HTTP2_SERVING:

		/* 1: something requested a callback when it was OK to write */

		if ((pollfd->revents & LWS_POLLOUT) &&
		    (wsi->state == LWSS_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED ||
		     wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		     wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		     wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
				wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			lwsl_info("lws_service_fd: closing\n");
			goto close_and_handled;
		}
#if 1
		if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		    wsi->state == LWSS_AWAITING_CLOSE_ACK) {
			/*
			 * we stopped caring about anything except control
			 * packets.  Force flow control off, defeat tx
			 * draining.
			 */
			lws_rx_flow_control(wsi, 1);
			wsi->u.ws.tx_draining_ext = 0;
		}
#endif
		if (wsi->u.ws.tx_draining_ext) {
			/* we cannot deal with new RX until the TX ext
			 * path has been drained.  It's because new
			 * rx will, eg, crap on the wsi rx buf that
			 * may be needed to retain state.
			 *
			 * TX ext drain path MUST go through event loop
			 * to avoid blocking.
			 */
			break;
		}

		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
			/* We cannot deal with any kind of new RX
			 * because we are RX-flowcontrolled.
			 */
			break;

		/* 2: RX Extension needs to be drained
		 */

		if (wsi->state == LWSS_ESTABLISHED &&
		    wsi->u.ws.rx_draining_ext) {

			lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
#ifndef LWS_NO_CLIENT
			if (wsi->mode == LWSCM_WS_CLIENT) {
				n = lws_client_rx_sm(wsi, 0);
				if (n < 0)
					/* we closed wsi */
					n = 0;
			} else
#endif
				n = lws_rx_sm(wsi, 0);

			goto handled;
		}

		if (wsi->u.ws.rx_draining_ext)
			/*
			 * We have RX EXT content to drain, but can't do it
			 * right now.  That means we cannot do anything lower
			 * priority either.
			 */
			break;

		/* 3: RX Flowcontrol buffer needs to be drained
		 */

		if (wsi->rxflow_buffer) {
			lwsl_info("draining rxflow (len %d)\n",
				wsi->rxflow_len - wsi->rxflow_pos
			);
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
						wsi->rxflow_pos;
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
			draining_flow = 1;
			goto drain;
		}

		/* 4: any incoming data ready?
		 * notice if rx flow going off raced poll(), rx flow wins
		 */
		if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
			break;
read:
		eff_buf.token_len = lws_ssl_capable_read(wsi, pt->serv_buf,
					pending ? pending : LWS_MAX_SOCKET_IO_BUF);
		switch (eff_buf.token_len) {
		case 0:
			lwsl_info("service_fd: closing due to 0 length read\n");
			goto close_and_handled;
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			lwsl_info("SSL Capable more service\n");
			n = 0;
			goto handled;
		case LWS_SSL_CAPABLE_ERROR:
			lwsl_info("Closing when error\n");
			goto close_and_handled;
		}

		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens struct
		 * prepared with the default buffer and content length that's in
		 * there.  Rather than rewrite the default buffer, extensions
		 * that expect to grow the buffer can adapt .token to
		 * point to their own per-connection buffer in the extension
		 * user allocation.  By default with no extensions or no
		 * extension callback handling, just the normal input buffer is
		 * used then so it is efficient.
		 */

		eff_buf.token = (char *)pt->serv_buf;
drain:

		do {
			more = 0;

			m = lws_ext_cb_active(wsi,
				LWS_EXT_CB_PACKET_RX_PREPARSE, &eff_buf, 0);
			if (m < 0)
				goto close_and_handled;
			if (m)
				more = 1;

			/* service incoming data */

			if (eff_buf.token_len) {
				/*
				 * if draining from rxflow buffer, not
				 * critical to track what was used since at the
				 * use it bumps wsi->rxflow_pos.  If we come
				 * around again it will pick up from where it
				 * left off.
				 */
				n = lws_read(wsi, (unsigned char *)eff_buf.token,
					     eff_buf.token_len);
				if (n < 0) {
					/* we closed wsi */
					n = 0;
					goto handled;
				}
			}

			eff_buf.token = NULL;
			eff_buf.token_len = 0;
		} while (more);

		pending = lws_ssl_pending(wsi);
		if (pending) {
handle_pending:
			pending = pending > LWS_MAX_SOCKET_IO_BUF ?
					LWS_MAX_SOCKET_IO_BUF : pending;
			goto read;
		}

		if (draining_flow && wsi->rxflow_buffer &&
				 wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free_set_NULL(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
			n =
#endif
			_lws_rx_flow_control(wsi);
			/* n ignored, needed for NO_SERVER case */
		}

		break;

	default:
#ifdef LWS_NO_CLIENT
		break;
#else
		n = lws_client_socket_service(context, wsi, pollfd);
		if (n)
			return 1;
		goto handled;
#endif
	}

	n = 0;
	goto handled;

close_and_handled:
	lwsl_debug("Close and handled\n");
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
	/*
	 * pollfd may point to something else after the close
	 * due to pollfd swapping scheme on delete on some platforms
	 * we can't clear revents now because it'd be the wrong guy's revents
	 */
	return 1;

handled:
	pollfd->revents = 0;
	return n;
}
Example #16
/******************************************************************************
 ** Function:    lws_get_wsi_by_fd
 ** Purpose:     look up the wsi object for a given socket fd
 ** Input:
 **     lws: lws context
 **     fd:  socket fd
 ** Output:      none
 ** Returns:     the wsi object
 ** Description:
 ** Notes:
 ** Author:      # Qifeng.zou # 2015.12.10 #
 ******************************************************************************/
LWS_VISIBLE struct libwebsocket *lws_get_wsi_by_fd(struct lws_context *lws, int fd)
{
    return wsi_from_fd(lws, fd);
}
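
The wrapper above simply forwards to wsi_from_fd(). A minimal sketch of how it might be used, for example to check whether a descriptor reported by an external poll loop belongs to lws; it follows the mixed old/new type names of the example, and my_fd_is_lws is a hypothetical helper name.

#include <libwebsockets.h>

/* Map a socket fd reported by an external poll loop back to its wsi.
 * Returns 1 if lws is tracking the fd, 0 otherwise. */
static int
my_fd_is_lws(struct lws_context *context, int fd)
{
	struct libwebsocket *wsi = lws_get_wsi_by_fd(context, fd);

	if (!wsi) {
		lwsl_debug("fd %d is not an lws connection\n", fd);
		return 0;
	}

	lwsl_debug("fd %d -> wsi %p\n", fd, (void *)wsi);
	return 1;
}
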
Example #17
LWS_VISIBLE LWS_EXTERN int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;
	WSANETWORKEVENTS networkevents;
	struct lws_pollfd *pfd;
	struct lws *wsi;
	unsigned int i;
	DWORD ev;
	int n, m;

	/* stay dead once we are dead */
	if (context == NULL || !context->vhost_list)
		return 1;

	pt = &context->pt[tsi];

	if (!context->service_tid_detected) {
		struct lws _lws;

		memset(&_lws, 0, sizeof(_lws));
		_lws.context = context;

		context->service_tid_detected = context->vhost_list->
			protocols[0].callback(&_lws, LWS_CALLBACK_GET_THREAD_ID,
						  NULL, NULL, 0);
		context->service_tid = context->service_tid_detected;
		context->service_tid_detected = 1;
	}

	if (timeout_ms < 0) {
		if (lws_service_flag_pending(context, tsi)) {
			/* any socket with events to service? */
			for (n = 0; n < (int)pt->fds_count; n++) {
				if (!pt->fds[n].revents)
					continue;

				m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
				if (m < 0)
					return -1;
				/* if something closed, retry this slot */
				if (m)
					n--;
			}
		}
		return 0;
	}

	for (i = 0; i < pt->fds_count; ++i) {
		pfd = &pt->fds[i];

		if (!(pfd->events & LWS_POLLOUT))
			continue;

		wsi = wsi_from_fd(context, pfd->fd);
		/* check for NULL before dereferencing wsi */
		if (!wsi || wsi->listener)
			continue;
		if (wsi->sock_send_blocking)
			continue;
		pfd->revents = LWS_POLLOUT;
		n = lws_service_fd(context, pfd);
		if (n < 0)
			return -1;
		/* if something closed, retry this slot */
		if (n)
			i--;

		if (wsi->trunc_len)
			WSASetEvent(pt->events[0]);
	}

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(context, 1, tsi)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_tsi(context, -1, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(context, 1, pt->tid))
			/* yes... come back again quickly */
			timeout_ms = 0;
	}

	ev = WSAWaitForMultipleEvents(1, pt->events, FALSE, timeout_ms, FALSE);
	if (ev == WSA_WAIT_EVENT_0) {
		unsigned int eIdx;

		WSAResetEvent(pt->events[0]);

		for (eIdx = 0; eIdx < pt->fds_count; ++eIdx) {
			if (WSAEnumNetworkEvents(pt->fds[eIdx].fd, 0, &networkevents) == SOCKET_ERROR) {
				lwsl_err("WSAEnumNetworkEvents() failed with error %d\n", LWS_ERRNO);
				return -1;
			}

			pfd = &pt->fds[eIdx];
			pfd->revents = (short)networkevents.lNetworkEvents;

			if ((networkevents.lNetworkEvents & FD_CONNECT) &&
				 networkevents.iErrorCode[FD_CONNECT_BIT] &&
				 networkevents.iErrorCode[FD_CONNECT_BIT] != LWS_EALREADY &&
				 networkevents.iErrorCode[FD_CONNECT_BIT] != LWS_EINPROGRESS &&
				 networkevents.iErrorCode[FD_CONNECT_BIT] != LWS_EWOULDBLOCK &&
				 networkevents.iErrorCode[FD_CONNECT_BIT] != WSAEINVAL) {
				lwsl_debug("Unable to connect errno=%d\n",
					   networkevents.iErrorCode[FD_CONNECT_BIT]);
				pfd->revents |= LWS_POLLHUP;
			}

			if (pfd->revents & LWS_POLLOUT) {
				wsi = wsi_from_fd(context, pfd->fd);
				if (wsi)
					wsi->sock_send_blocking = 0;
			}
			/* if something closed, retry this slot */
			if (pfd->revents & LWS_POLLHUP)
				--eIdx;

			if (pfd->revents)
				lws_service_fd_tsi(context, pfd, tsi);
		}
	}

	context->service_tid = 0;

	if (ev == WSA_WAIT_TIMEOUT) {
		lws_service_fd(context, NULL);
	}
	return 0;
}
Example #18
/**
 * lws_context_destroy() - Destroy the websocket context
 * @context:	Websocket context
 *
 *	This function closes any active connections and then frees the
 *	context.  After calling this, any further use of the context is
 *	undefined.
 */
LWS_VISIBLE void
lws_context_destroy(struct lws_context *context)
{
	const struct lws_protocols *protocol = NULL;
	struct lws wsi;
	int n;

	lwsl_notice("%s\n", __func__);

	if (!context)
		return;

	memset(&wsi, 0, sizeof(wsi));
	wsi.context = context;

#ifdef LWS_LATENCY
	if (context->worst_latency_info[0])
		lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
#endif

	for (n = 0; n < context->fds_count; n++) {
		struct lws *wsi = wsi_from_fd(context, context->fds[n].fd);
		if (!wsi)
			continue;
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
				/* no protocol close */);
		n--;
	}

	/*
	 * give all extensions a chance to clean up any per-context
	 * allocations they might have made
	 */

	n = lws_ext_cb_all_exts(context, NULL,
			LWS_EXT_CALLBACK_SERVER_CONTEXT_DESTRUCT, NULL, 0);

	n = lws_ext_cb_all_exts(context, NULL,
			LWS_EXT_CALLBACK_CLIENT_CONTEXT_DESTRUCT, NULL, 0);

	/*
	 * inform all the protocols that they are done and will have no more
	 * callbacks
	 */
	protocol = context->protocols;
	if (protocol) {
		while (protocol->callback) {
			protocol->callback(&wsi,
					   LWS_CALLBACK_PROTOCOL_DESTROY,
					   NULL, NULL, 0);
			protocol++;
		}
	}
#ifdef LWS_USE_LIBEV
	ev_io_stop(context->io_loop, &context->w_accept.watcher);
	if(context->use_ev_sigint)
		ev_signal_stop(context->io_loop, &context->w_sigint.watcher);
#endif /* LWS_USE_LIBEV */

	lws_plat_context_early_destroy(context);
	lws_ssl_context_destroy(context);
	if (context->fds)
		lws_free(context->fds);
	if (context->ah_pool)
		lws_free(context->ah_pool);
	if (context->http_header_data)
		lws_free(context->http_header_data);

	lws_plat_context_late_destroy(context);

	lws_free(context);
}
Example #19
LWS_VISIBLE int
lws_plat_service(struct libwebsocket_context *context, int timeout_ms)
{
	int n;
	int i;
	DWORD ev;
	WSANETWORKEVENTS networkevents;
	struct libwebsocket_pollfd *pfd;
	struct libwebsocket *wsi;

	/* stay dead once we are dead */

	if (context == NULL)
		return 1;

	context->service_tid = context->protocols[0].callback(context, NULL,
				     LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);

	for (i = 0; i < context->fds_count; ++i) {
		pfd = &context->fds[i];
		if (pfd->fd == context->listen_service_fd)
			continue;

		if (pfd->events & LWS_POLLOUT) {
			wsi = wsi_from_fd(context, pfd->fd);
			if (!wsi || wsi->sock_send_blocking)
				continue;
			pfd->revents = LWS_POLLOUT;
			n = libwebsocket_service_fd(context, pfd);
			if (n < 0)
				return n;
		}
	}

	ev = WSAWaitForMultipleEvents(context->fds_count + 1,
				     context->events, FALSE, timeout_ms, FALSE);
	context->service_tid = 0;

	if (ev == WSA_WAIT_TIMEOUT) {
		libwebsocket_service_fd(context, NULL);
		return 0;
	}

	if (ev == WSA_WAIT_EVENT_0) {
		WSAResetEvent(context->events[0]);
		return 0;
	}

	if (ev < WSA_WAIT_EVENT_0 || ev > WSA_WAIT_EVENT_0 + context->fds_count)
		return -1;

	pfd = &context->fds[ev - WSA_WAIT_EVENT_0 - 1];

	if (WSAEnumNetworkEvents(pfd->fd,
			context->events[ev - WSA_WAIT_EVENT_0],
					      &networkevents) == SOCKET_ERROR) {
		lwsl_err("WSAEnumNetworkEvents() failed with error %d\n",
								     LWS_ERRNO);
		return -1;
	}

	pfd->revents = networkevents.lNetworkEvents;

	if (pfd->revents & LWS_POLLOUT) {
		wsi = wsi_from_fd(context, pfd->fd);
		if (wsi)
			wsi->sock_send_blocking = FALSE;
	}

	return libwebsocket_service_fd(context, pfd);
}
Example #20
LWS_VISIBLE int
libwebsocket_service_fd(struct libwebsocket_context *context,
                        struct libwebsocket_pollfd *pollfd)
{
    struct libwebsocket *wsi;
    int n;
    int m;
    int listen_socket_fds_index = 0;
    time_t now;
    int timed_out = 0;
    int our_fd = 0;
    char draining_flow = 0;
    int more;
    struct lws_tokens eff_buf;
    int pending = 0;

    if (context->listen_service_fd)
        listen_socket_fds_index = wsi_from_fd(context,
                context->listen_service_fd)->position_in_fds_table;

    /*
     * you can call us with pollfd = NULL to just allow the once-per-second
     * global timeout checks; if it has been less than a second since the
     * last check, it returns immediately.
     */

    time(&now);

    /* TODO: if using libev, we should probably use timeout watchers... */
    if (context->last_timeout_check_s != now) {
        context->last_timeout_check_s = now;

        lws_plat_service_periodic(context);

        /* global timeout check once per second */

        if (pollfd)
            our_fd = pollfd->fd;

        for (n = 0; n < context->fds_count; n++) {
            m = context->fds[n].fd;
            wsi = wsi_from_fd(context,m);
            if (!wsi)
                continue;

            if (libwebsocket_service_timeout_check(context, wsi, now))
                /* he did time out... */
                if (m == our_fd) {
                    /* it was the guy we came to service! */
                    timed_out = 1;
                    /* he's gone, no need to mark as handled */
                }
        }
    }

    /* the socket we came to service timed out, nothing to do */
    if (timed_out)
        return 0;

    /* just here for timeout management? */
    if (pollfd == NULL)
        return 0;

    /* no, here to service a socket descriptor */
    wsi = wsi_from_fd(context,pollfd->fd);
    if (wsi == NULL)
        /* not lws connection ... leave revents alone and return */
        return 0;

    /*
     * so that caller can tell we handled, past here we need to
     * zero down pollfd->revents after handling
     */

    /*
     * deal with listen service piggybacking
     * every listen_service_modulo services of other fds, we
     * sneak one in to service the listen socket if there's anything waiting
     *
     * To handle connection storms, as found in ab, if we previously saw a
     * pending connection here, it causes us to check again next time.
     */

    if (context->listen_service_fd && pollfd !=
            &context->fds[listen_socket_fds_index]) {
        context->listen_service_count++;
        if (context->listen_service_extraseen ||
                context->listen_service_count ==
                context->listen_service_modulo) {
            context->listen_service_count = 0;
            m = 1;
            if (context->listen_service_extraseen > 5)
                m = 2;
            while (m--) {
                /*
                 * even with extpoll, we prepared this
                 * internal fds for listen
                 */
                n = lws_poll_listen_fd(&context->fds[listen_socket_fds_index]);
                if (n > 0) { /* there's a conn waiting for us */
                    libwebsocket_service_fd(context,
                                &context->fds[listen_socket_fds_index]);
                    context->listen_service_extraseen++;
                } else {
                    if (context->listen_service_extraseen)
                        context->listen_service_extraseen--;
                    break;
                }
            }
        }

    }

    /* handle session socket closed */

    if ((!(pollfd->revents & LWS_POLLIN)) &&
            (pollfd->revents & LWS_POLLHUP)) {

        lwsl_debug("Session Socket %p (fd=%d) dead\n",
                   (void *)wsi, pollfd->fd);

        goto close_and_handled;
    }

    /* okay, what we came here to do... */

    switch (wsi->mode) {
    case LWS_CONNMODE_HTTP_SERVING:
    case LWS_CONNMODE_HTTP_SERVING_ACCEPTED:
    case LWS_CONNMODE_SERVER_LISTENER:
    case LWS_CONNMODE_SSL_ACK_PENDING:
        n = lws_server_socket_service(context, wsi, pollfd);
        if (n < 0)
            goto close_and_handled;
        goto handled;

    case LWS_CONNMODE_WS_SERVING:
    case LWS_CONNMODE_WS_CLIENT:
    case LWS_CONNMODE_HTTP2_SERVING:

        /* the guy requested a callback when it was OK to write */

        if ((pollfd->revents & LWS_POLLOUT) &&
                (wsi->state == WSI_STATE_ESTABLISHED ||
                 wsi->state == WSI_STATE_HTTP2_ESTABLISHED ||
                 wsi->state == WSI_STATE_HTTP2_ESTABLISHED_PRE_SETTINGS ||
                 wsi->state == WSI_STATE_RETURNED_CLOSE_ALREADY ||
                 wsi->state == WSI_STATE_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
                lws_handle_POLLOUT_event(context, wsi, pollfd)) {
            lwsl_info("libwebsocket_service_fd: closing\n");
            goto close_and_handled;
        }

        if (wsi->rxflow_buffer &&
                (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
            lwsl_info("draining rxflow\n");
            /* well, drain it */
            eff_buf.token = (char *)wsi->rxflow_buffer +
                            wsi->rxflow_pos;
            eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
            draining_flow = 1;
            goto drain;
        }

        /* any incoming data ready? */

        if (!(pollfd->revents & LWS_POLLIN))
            break;
read:

        eff_buf.token_len = lws_ssl_capable_read(context, wsi,
                            context->service_buffer,
                            pending?pending:sizeof(context->service_buffer));
        switch (eff_buf.token_len) {
        case 0:
            lwsl_info("service_fd: closing due to 0 length read\n");
            goto close_and_handled;
        case LWS_SSL_CAPABLE_MORE_SERVICE:
            lwsl_info("SSL Capable more service\n");
            n = 0;
            goto handled;
        case LWS_SSL_CAPABLE_ERROR:
            lwsl_info("Closing when error\n");
            goto close_and_handled;
        }

        /*
         * give any active extensions a chance to munge the buffer
         * before parse.  We pass in a pointer to an lws_tokens struct
         * prepared with the default buffer and content length that's in
         * there.  Rather than rewrite the default buffer, extensions
         * that expect to grow the buffer can adapt .token to
         * point to their own per-connection buffer in the extension
         * user allocation.  By default with no extensions or no
         * extension callback handling, just the normal input buffer is
         * used then so it is efficient.
         */

        eff_buf.token = (char *)context->service_buffer;
drain:

        do {

            more = 0;

            m = lws_ext_callback_for_each_active(wsi,
                                                 LWS_EXT_CALLBACK_PACKET_RX_PREPARSE, &eff_buf, 0);
            if (m < 0)
                goto close_and_handled;
            if (m)
                more = 1;

            /* service incoming data */

            if (eff_buf.token_len) {
                n = libwebsocket_read(context, wsi,
                                      (unsigned char *)eff_buf.token,
                                      eff_buf.token_len);
                if (n < 0) {
                    /* we closed wsi */
                    n = 0;
                    goto handled;
                }
            }

            eff_buf.token = NULL;
            eff_buf.token_len = 0;
        } while (more);

        pending = lws_ssl_pending(wsi);
        if (pending) {
            pending = pending > sizeof(context->service_buffer)?
                      sizeof(context->service_buffer):pending;
            goto read;
        }

        if (draining_flow && wsi->rxflow_buffer &&
                wsi->rxflow_pos == wsi->rxflow_len) {
            lwsl_info("flow buffer: drained\n");
            lws_free2(wsi->rxflow_buffer);
            /* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
            n =
#endif
                _libwebsocket_rx_flow_control(wsi); /* n ignored, needed for NO_SERVER case */
        }

        break;

    default:
#ifdef LWS_NO_CLIENT
        break;
#else
        n = lws_client_socket_service(context, wsi, pollfd);
        goto handled;
#endif
    }

    n = 0;
    goto handled;

close_and_handled:
    lwsl_debug("Close and handled\n");
    libwebsocket_close_and_free_session(context, wsi,
                                        LWS_CLOSE_STATUS_NOSTATUS);
    // pollfd points to something else after the close
    return 1;

handled:
    pollfd->revents = 0;
    return n;
}
Example #21
/**
 * libwebsocket_context_destroy() - Destroy the websocket context
 * @context:	Websocket context
 *
 *	This function closes any active connections and then frees the
 *	context.  After calling this, any further use of the context is
 *	undefined.
 */
LWS_VISIBLE void
libwebsocket_context_destroy(struct libwebsocket_context *context)
{
	/* Note that this is used for freeing partially allocated structs as well
	 * so make sure you don't try to free something uninitialized */
	int n;
	struct libwebsocket_protocols *protocol = NULL;

	lwsl_notice("%s\n", __func__);

	if (!context)
		return;

#ifdef LWS_LATENCY
	if (context->worst_latency_info[0])
		lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
#endif

	for (n = 0; n < context->fds_count; n++) {
		struct libwebsocket *wsi =
					wsi_from_fd(context, context->fds[n].fd);
		if (!wsi)
			continue;
		libwebsocket_close_and_free_session(context,
			wsi, LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY /* no protocol close */);
		n--;
	}

	/*
	 * give all extensions a chance to clean up any per-context
	 * allocations they might have made
	 */
	// TODO: I am not sure, but are we never supposed to be able to run a server
	//       and client at the same time for a given context?
	//       Otherwise both of these callbacks should always be called!
	if (context->listen_port != CONTEXT_PORT_NO_LISTEN) {
		if (lws_ext_callback_for_each_extension_type(context, NULL,
				LWS_EXT_CALLBACK_SERVER_CONTEXT_DESTRUCT, NULL, 0) < 0) {
			lwsl_err("Got error from server extension callback on cleanup");
		}
	} else {
		if (lws_ext_callback_for_each_extension_type(context, NULL,
				LWS_EXT_CALLBACK_CLIENT_CONTEXT_DESTRUCT, NULL, 0) < 0) {
			lwsl_err("Got error from client extension callback on cleanup");
		}
	}

	/*
	 * inform all the protocols that they are done and will have no more
	 * callbacks
	 */
	protocol = context->protocols;
	if (protocol) {
		while (protocol->callback) {
			protocol->callback(context, NULL, LWS_CALLBACK_PROTOCOL_DESTROY,
					NULL, NULL, 0);
			protocol++;
		}
	}

	lws_plat_context_early_destroy(context);

	lws_ssl_context_destroy(context);

	if (context->fds)
		lws_free(context->fds);

	lws_plat_context_late_destroy(context);

	lws_free(context);
}
Example #22
LWS_EXTERN int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;
	WSANETWORKEVENTS networkevents;
	struct lws_pollfd *pfd;
	struct lws *wsi;
	unsigned int i;
	DWORD ev;
	int n;

	/* stay dead once we are dead */
	if (context == NULL || !context->vhost_list)
		return 1;

	pt = &context->pt[tsi];

	if (!pt->service_tid_detected) {
		struct lws _lws;

		memset(&_lws, 0, sizeof(_lws));
		_lws.context = context;

		pt->service_tid = context->vhost_list->
			protocols[0].callback(&_lws, LWS_CALLBACK_GET_THREAD_ID,
						  NULL, NULL, 0);
		pt->service_tid_detected = 1;
	}

	if (timeout_ms < 0) {
		if (lws_service_flag_pending(context, tsi)) {
			/* any socket with events to service? */
			for (n = 0; n < (int)pt->fds_count; n++) {
				int m;
				if (!pt->fds[n].revents)
					continue;

				m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
				if (m < 0)
					return -1;
				/* if something closed, retry this slot */
				if (m)
					n--;
			}
		}
		return 0;
	}

	if (context->event_loop_ops->run_pt)
		context->event_loop_ops->run_pt(context, tsi);

	for (i = 0; i < pt->fds_count; ++i) {
		pfd = &pt->fds[i];

		if (!(pfd->events & LWS_POLLOUT))
			continue;

		wsi = wsi_from_fd(context, pfd->fd);
		if (!wsi || wsi->listener)
			continue;
		if (wsi->sock_send_blocking)
			continue;
		pfd->revents = LWS_POLLOUT;
		n = lws_service_fd(context, pfd);
		if (n < 0)
			return -1;

		/*
		 * Force WSAWaitForMultipleEvents() to check events
		 * and then return immediately.
		 */
		timeout_ms = 0;

		/* if something closed, retry this slot */
		if (n)
			i--;
	}

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(context, 1, tsi)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_tsi(context, -1, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(context, 1, pt->tid))
			/* yes... come back again quickly */
			timeout_ms = 0;
	}

	if (timeout_ms) {
		lws_usec_t t;

		lws_pt_lock(pt, __func__);
		/* don't stay in poll wait longer than next hr timeout */
		t =  __lws_hrtimer_service(pt);

		if ((lws_usec_t)timeout_ms * 1000 > t)
			timeout_ms = (int)(t / 1000);
		lws_pt_unlock(pt);
	}

	for (n = 0; n < (int)pt->fds_count; n++)
		WSAEventSelect(pt->fds[n].fd, pt->events,
		       FD_READ | (!!(pt->fds[n].events & LWS_POLLOUT) * FD_WRITE) |
		       FD_OOB | FD_ACCEPT |
		       FD_CONNECT | FD_CLOSE | FD_QOS |
		       FD_ROUTING_INTERFACE_CHANGE |
		       FD_ADDRESS_LIST_CHANGE);

	ev = WSAWaitForMultipleEvents(1, &pt->events, FALSE, timeout_ms, FALSE);
	if (ev == WSA_WAIT_EVENT_0) {
		unsigned int eIdx;

#if defined(LWS_WITH_TLS)
		if (pt->context->tls_ops &&
		    pt->context->tls_ops->fake_POLLIN_for_buffered)
			pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
#endif

		for (eIdx = 0; eIdx < pt->fds_count; ++eIdx) {
			unsigned int err;

			if (WSAEnumNetworkEvents(pt->fds[eIdx].fd, pt->events,
					&networkevents) == SOCKET_ERROR) {
				lwsl_err("WSAEnumNetworkEvents() failed "
					 "with error %d\n", LWS_ERRNO);
				return -1;
			}

			if (!networkevents.lNetworkEvents)
				networkevents.lNetworkEvents = LWS_POLLOUT;

			pfd = &pt->fds[eIdx];
			pfd->revents = (short)networkevents.lNetworkEvents;

			err = networkevents.iErrorCode[FD_CONNECT_BIT];

			if ((networkevents.lNetworkEvents & FD_CONNECT) &&
			     err && err != LWS_EALREADY &&
			     err != LWS_EINPROGRESS && err != LWS_EWOULDBLOCK &&
			     err != WSAEINVAL) {
				lwsl_debug("Unable to connect errno=%d\n", err);
				pfd->revents |= LWS_POLLHUP;
			}

			if (pfd->revents & LWS_POLLOUT) {
				wsi = wsi_from_fd(context, pfd->fd);
				if (wsi)
					wsi->sock_send_blocking = 0;
			}
			 /* if something closed, retry this slot */
			if (pfd->revents & LWS_POLLHUP)
				--eIdx;

			if (pfd->revents) {
				recv(pfd->fd, NULL, 0, 0);
				lws_service_fd_tsi(context, pfd, tsi);
			}
		}
	} else if (ev == WSA_WAIT_TIMEOUT) {
		lws_service_fd(context, NULL);
	} else if (ev == WSA_WAIT_FAILED)
		return 0;

	return 0;
}
Example #23
LWS_EXTERN int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	volatile struct lws_foreign_thread_pollfd *ftp, *next;
	volatile struct lws_context_per_thread *vpt;
	struct lws_context_per_thread *pt;
	int n = -1, m, c;

	/* stay dead once we are dead */

	if (!context || !context->vhost_list)
		return 1;

	pt = &context->pt[tsi];
	vpt = (volatile struct lws_context_per_thread *)pt;

	lws_stats_atomic_bump(context, pt, LWSSTATS_C_SERVICE_ENTRY, 1);

	if (timeout_ms < 0)
		goto faked_service;

	if (context->event_loop_ops->run_pt)
		context->event_loop_ops->run_pt(context, tsi);

	if (!pt->service_tid_detected) {
		struct lws _lws;

		memset(&_lws, 0, sizeof(_lws));
		_lws.context = context;

		pt->service_tid  =
			context->vhost_list->protocols[0].callback(
			&_lws, LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
		pt->service_tid_detected = 1;
	}

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(context, 1, tsi)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_tsi(context, -1, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(context, 1, pt->tid))
			/* yes... come back again quickly */
			timeout_ms = 0;
	}

	if (timeout_ms) {
		lws_pt_lock(pt, __func__);
		/* don't stay in poll wait longer than next hr timeout */
		lws_usec_t t =  __lws_hrtimer_service(pt);
		if ((lws_usec_t)timeout_ms * 1000 > t)
			timeout_ms = t / 1000;
		lws_pt_unlock(pt);
	}

	vpt->inside_poll = 1;
	lws_memory_barrier();
	n = poll(pt->fds, pt->fds_count, timeout_ms);
	vpt->inside_poll = 0;
	lws_memory_barrier();

	/* Collision will be rare and brief.  Just spin until it completes */
	while (vpt->foreign_spinlock)
		;

	/*
	 * At this point we are not inside a foreign thread pollfd change,
	 * and we have marked ourselves as outside the poll() wait.  So we
	 * are the only guys that can modify the lws_foreign_thread_pollfd
	 * list on the pt.  Drain the list and apply the changes to the
	 * affected pollfds in the correct order.
	 */

	lws_pt_lock(pt, __func__);

	ftp = vpt->foreign_pfd_list;
	//lwsl_notice("cleared list %p\n", ftp);
	while (ftp) {
		struct lws *wsi;
		struct lws_pollfd *pfd;

		next = ftp->next;
		pfd = &vpt->fds[ftp->fd_index];
		if (lws_socket_is_valid(pfd->fd)) {
			wsi = wsi_from_fd(context, pfd->fd);
			if (wsi)
				__lws_change_pollfd(wsi, ftp->_and, ftp->_or);
		}
		lws_free((void *)ftp);
		ftp = next;
	}
	vpt->foreign_pfd_list = NULL;
	lws_memory_barrier();

	/* we have come out of a poll wait... check the hrtimer list */

	__lws_hrtimer_service(pt);

	lws_pt_unlock(pt);

	m = 0;
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	m |= !!pt->ws.rx_draining_ext_list;
#endif

	if (pt->context->tls_ops &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered)
		m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);

	if (!m && !n) { /* nothing to do */
		lws_service_fd_tsi(context, NULL, tsi);
		lws_service_do_ripe_rxflow(pt);

		return 0;
	}

faked_service:
	m = lws_service_flag_pending(context, tsi);
	if (m)
		c = -1; /* unknown limit */
	else
		if (n < 0) {
			if (LWS_ERRNO != LWS_EINTR)
				return -1;
			return 0;
		} else
			c = n;

	/* any socket with events to service? */
	for (n = 0; n < (int)pt->fds_count && c; n++) {
		if (!pt->fds[n].revents)
			continue;

		c--;

		m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
		if (m < 0) {
			lwsl_err("%s: lws_service_fd_tsi returned %d\n",
				 __func__, m);
			return -1;
		}
		/* if something closed, retry this slot */
		if (m)
			n--;
	}

	lws_service_do_ripe_rxflow(pt);

	return 0;
}