/* Aborts an in-flight connection attempt owned by the socket pool and
 * releases the request object. Cancels any pending asynchronous address
 * resolution, closes a half-open socket if one exists, and frees `req`. */
void h2o_socketpool_cancel_connect(h2o_socketpool_connect_request_t *req)
{
    /* stop asynchronous name resolution, if still running */
    if (req->getaddr_req != NULL) {
        h2o_hostinfo_getaddr_cancel(req->getaddr_req);
        req->getaddr_req = NULL;
    }

    /* tear down the partially-established connection, if any */
    h2o_socket_t *pending = req->sock;
    if (pending != NULL)
        h2o_socket_close(pending);

    free(req);
}
/* Callback invoked once the client-side WebSocket upgrade completes.
 * On success, establishes a tunnel between the (now detached) client socket
 * and the upstream socket; on failure, closes the upstream connection.
 * The heap-allocated info structure is released in both cases. */
static void on_websocket_upgrade_complete(void *_info, h2o_socket_t *sock, size_t reqsize)
{
    struct rp_ws_upgrade_info_t *info = _info;

    if (sock != NULL) {
        /* the socket is detached from the connection at this point; discard
         * the `reqsize` bytes of already-handled request data still sitting
         * in its input buffer before tunneling (previously `reqsize` was
         * ignored, leaving stale request bytes in the buffer) */
        h2o_buffer_consume(&sock->input, reqsize);
        h2o_tunnel_establish(info->ctx, sock, info->upstream_sock, info->timeout);
    } else {
        /* upgrade failed; shut down the connection to the upstream server */
        h2o_socket_close(info->upstream_sock);
    }
    free(info);
}
/* Fired when an accepted connection fails to make progress in time.
 * Cancels any outstanding async session-resumption lookup, releases the
 * per-accept bookkeeping, and closes the socket. */
void on_accept_timeout(h2o_timeout_entry_t *entry)
{
    /* TODO log */
    struct st_h2o_accept_data_t *data =
        H2O_STRUCT_FROM_MEMBER(struct st_h2o_accept_data_t, timeout, entry);

    /* abort the memcached GET for async session resumption, if in flight */
    if (data->async_resumption_get_req != NULL) {
        h2o_memcached_cancel_get(async_resumption_context.memc, data->async_resumption_get_req);
        data->async_resumption_get_req = NULL;
    }

    /* `data` is gone after free_accept_data, so save the socket first */
    h2o_socket_t *doomed = data->sock;
    free_accept_data(data);
    h2o_socket_close(doomed);
}
/* Completion callback for the client-side WebSocket upgrade: on success,
 * bridges the detached client socket to the upstream socket via a tunnel;
 * on failure, drops the upstream connection. Frees the info struct either
 * way. */
static void on_websocket_upgrade_complete(void *_info, h2o_socket_t *sock, size_t reqsize)
{
    struct rp_ws_upgrade_info_t *info = _info;

    if (sock == NULL) {
        /* upgrade failed; close the connection to the upstream server */
        h2o_socket_close(info->upstream_sock);
        free(info);
        return;
    }

    /* It is detached from conn. Let's trash unused data. */
    h2o_buffer_consume(&sock->input, reqsize);
    h2o_tunnel_establish(info->ctx, sock, info->upstream_sock, info->timeout);
    free(info);
}
/* Destroys a multithread message queue. The caller must have unregistered
 * every receiver first (asserted below).
 * NOTE(review): on the non-libuv path the queue structure itself is not
 * freed here — confirm the caller owns/frees it, otherwise this leaks. */
void h2o_multithread_destroy_queue(h2o_multithread_queue_t *queue)
{
    assert(h2o_linklist_is_empty(&queue->receivers.active));
    assert(h2o_linklist_is_empty(&queue->receivers.inactive));
#if H2O_USE_LIBUV
    /* uv_close is asynchronous; `free` runs later as the close callback.
     * NOTE(review): casting `free` to uv_close_cb relies on `async` being
     * the first member of the queue and on compatible calling conventions —
     * calling through an incompatible function pointer type is UB per C11
     * 6.3.2.3p8; consider a small trampoline callback instead. */
    uv_close((uv_handle_t *)&queue->async, (void *)free);
#else
    /* stop and release the wakeup channel (read side), then close the
     * write-side descriptor */
    h2o_socket_read_stop(queue->async.read);
    h2o_socket_close(queue->async.read);
    close(queue->async.write);
#endif
    pthread_mutex_destroy(&queue->mutex);
}
/* TCP connect completion callback for a socket-pool connection request.
 * On failure the socket is discarded and the request's callback receives an
 * error string; on success it receives NULL. */
static void on_connect(h2o_socket_t *sock, int status)
{
    h2o_socketpool_connect_request_t *req = sock->data;

    assert(req->sock == sock);

    if (status == 0) {
        /* connected; hand the live socket to the requester */
        call_connect_cb(req, NULL);
        return;
    }

    /* connect failed; dispose of the socket before reporting */
    h2o_socket_close(sock);
    req->sock = NULL;
    call_connect_cb(req, "connection failed");
}
static void process_messages(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages) { IGNORE_FUNCTION_PARAMETER(messages); global_thread_data_t * const global_thread_data = H2O_STRUCT_FROM_MEMBER(global_thread_data_t, h2o_receiver, receiver); // Close the listening sockets immediately, so that if another instance of // the application is started before the current one exits (e.g. when doing // an update), it will accept all incoming connections. if (global_thread_data->ctx->event_loop.h2o_https_socket) { h2o_socket_read_stop(global_thread_data->ctx->event_loop.h2o_https_socket); h2o_socket_close(global_thread_data->ctx->event_loop.h2o_https_socket); global_thread_data->ctx->event_loop.h2o_https_socket = NULL; } if (global_thread_data->ctx->event_loop.h2o_socket) { h2o_socket_read_stop(global_thread_data->ctx->event_loop.h2o_socket); h2o_socket_close(global_thread_data->ctx->event_loop.h2o_socket); global_thread_data->ctx->event_loop.h2o_socket = NULL; } }
/* TLS handshake completion callback for a pooled connection. A certificate
 * name mismatch is tolerated when peer verification is disabled on the
 * pool's SSL context; every other error closes the socket. The original
 * error (even a tolerated mismatch) is always forwarded to the callback. */
static void on_handshake_complete(h2o_socket_t *sock, const char *err)
{
    h2o_socketpool_connect_request_t *req = sock->data;

    assert(req->sock == sock);

    /* ignore CN mismatch if we are not verifying peer */
    int tolerated_mismatch =
        err == h2o_socket_error_ssl_cert_name_mismatch &&
        (SSL_CTX_get_verify_mode(req->pool->_ssl_ctx) & SSL_VERIFY_PEER) == 0;

    if (err != NULL && !tolerated_mismatch) {
        h2o_socket_close(sock);
        req->sock = NULL;
    }
    call_connect_cb(req, err);
}
/* Destroys a multithread message queue after all receivers have been
 * unregistered (asserted below). The mutex is destroyed first, then the
 * wakeup mechanism is released; memory ownership differs by backend. */
void h2o_multithread_destroy_queue(h2o_multithread_queue_t *queue)
{
    assert(h2o_linklist_is_empty(&queue->receivers.active));
    assert(h2o_linklist_is_empty(&queue->receivers.inactive));
    pthread_mutex_destroy(&queue->mutex);
#if H2O_USE_LIBUV
    /* uv_close is asynchronous: the queue itself is released later by the
     * close callback (libuv_destroy_delayed), not here */
    uv_close((uv_handle_t *)&queue->async, libuv_destroy_delayed);
#else
    h2o_socket_read_stop(queue->async.read);
    h2o_socket_close(queue->async.read);
#ifndef __linux__
    /* only one file descriptor is required for eventfd and already closed by h2o_socket_close() */
    close(queue->async.write);
#endif
    free(queue);
#endif
}
static void close_client(struct st_h2o_http1client_t *client) { if (client->sock != NULL) { if (client->super.connpool != NULL && client->_do_keepalive) { /* we do not send pipelined requests, and thus can trash all the received input at the end of the request */ h2o_buffer_consume(&client->sock->input, client->sock->input->size); h2o_socketpool_return(client->super.connpool->socketpool, client->sock); } else { h2o_socket_close(client->sock); } } if (h2o_timer_is_linked(&client->super._timeout)) h2o_timer_unlink(&client->super._timeout); if (client->_body_buf != NULL) h2o_buffer_dispose(&client->_body_buf); if (client->_body_buf_in_flight != NULL) h2o_buffer_dispose(&client->_body_buf_in_flight); free(client); }
/* Detaches the underlying descriptor (and TLS state, if any) from `sock` so
 * the connection can be migrated elsewhere, then closes the h2o-level socket
 * object. The socket must not have a write in progress.
 * Returns 0 on success, or -1 if the descriptor could not be exported. */
int h2o_socket_export(h2o_socket_t *sock, h2o_socket_export_t *info)
{
    /* exported buffers must not return to the per-loop pool; an all-zero
     * prototype keeps them self-contained. `{0}` replaces the former `{}`,
     * which is a GNU extension / C23-only empty initializer and not valid
     * in standard C11. */
    static h2o_buffer_prototype_t nonpooling_prototype = {0};

    assert(!h2o_socket_is_writing(sock));

    if (do_export(sock, info) == -1)
        return -1;

    /* hand the TLS state over to `info`; its encrypted-input buffer outlives
     * this socket, so switch it to the non-pooling prototype */
    if ((info->ssl = sock->ssl) != NULL) {
        sock->ssl = NULL;
        h2o_buffer_set_prototype(&info->ssl->input.encrypted, &nonpooling_prototype);
    }

    /* transfer ownership of the input buffer, then give the (about to be
     * closed) socket a fresh empty one */
    info->input = sock->input;
    h2o_buffer_set_prototype(&info->input, &nonpooling_prototype);
    h2o_buffer_init(&sock->input, &h2o_socket_buffer_prototype);

    h2o_socket_close(sock);
    return 0;
}