/* Removes a cache entry: drops its hash-table slot (unless `reuse` is set),
 * detaches it from the LRU and age lists, subtracts its size from the cache
 * accounting, and releases one reference. */
static void erase_ref(h2o_cache_t *cache, khiter_t iter, int reuse)
{
    h2o_cache_ref_t *entry = kh_key(cache->table, iter);

    if (reuse == 0)
        kh_del(cache, cache->table, iter); /* keep the hash slot when the entry is about to be reused */
    h2o_linklist_unlink(&entry->_age_link);
    h2o_linklist_unlink(&entry->_lru_link);
    cache->size -= entry->value.len;
    h2o_cache_release(cache, entry);
}
/* Sends a message (or just a wake-up, when `message` is NULL) to a receiver
 * running on another thread. When the receiver's queue transitions from empty
 * to non-empty, the receiver is moved to the active list and the target event
 * loop is notified. */
void h2o_multithread_send_message(h2o_multithread_receiver_t *receiver, h2o_multithread_message_t *message)
{
    int do_send = 0;

    pthread_mutex_lock(&receiver->queue->mutex);
    if (message != NULL) {
        assert(!h2o_linklist_is_linked(&message->link));
        if (h2o_linklist_is_empty(&receiver->_messages)) {
            /* first message: relink the receiver from the inactive to the active list, and notify */
            h2o_linklist_unlink(&receiver->_link);
            h2o_linklist_insert(&receiver->queue->receivers.active, &receiver->_link);
            do_send = 1;
        }
        h2o_linklist_insert(&receiver->_messages, &message->link);
    } else {
        /* NULL message is a pure wake-up; notify only if nothing is already queued */
        if (h2o_linklist_is_empty(&receiver->_messages))
            do_send = 1;
    }
    pthread_mutex_unlock(&receiver->queue->mutex);

    /* notification is performed outside the lock */
    if (do_send) {
#if H2O_USE_LIBUV
        uv_async_send(&receiver->queue->async);
#else
        /* write one byte to the wake-up pipe, retrying on EINTR */
        while (write(receiver->queue->async.write, "", 1) == -1 && errno == EINTR)
            ;
#endif
    }
}
/* Cancels a registered timeout entry; a no-op when the entry is not linked. */
void h2o_timeout_unlink(h2o_timeout_entry_t *entry)
{
    if (!h2o_linklist_is_linked(&entry->_link))
        return;
    h2o_linklist_unlink(&entry->_link);
    entry->registered_at = 0; /* zero marks the entry as unregistered */
}
/* Finds and detaches the in-flight GET request whose serial matches, or returns
 * NULL. Takes and releases conn->mutex; on success, the caller receives sole
 * ownership of the returned request. */
static h2o_memcached_req_t *pop_inflight(struct st_h2o_memcached_conn_t *conn, uint32_t serial)
{
    h2o_memcached_req_t *req;

    pthread_mutex_lock(&conn->mutex);

    if (conn->yrmcds.text_mode) {
        /* in text mode, responses are returned in order (and we may receive responses for commands other than GET) */
        if (!h2o_linklist_is_empty(&conn->inflight)) {
            req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, conn->inflight.next);
            assert(req->type == REQ_TYPE_GET);
            if (req->data.get.serial == serial)
                goto Found;
        }
    } else {
        /* in binary mode, responses are received out-of-order (and we would only recieve responses for GET) */
        h2o_linklist_t *node;
        for (node = conn->inflight.next; node != &conn->inflight; node = node->next) {
            req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, node);
            assert(req->type == REQ_TYPE_GET);
            if (req->data.get.serial == serial)
                goto Found;
        }
    }

    /* not found */
    pthread_mutex_unlock(&conn->mutex);
    return NULL;

Found:
    h2o_linklist_unlink(&req->inflight);
    pthread_mutex_unlock(&conn->mutex);
    return req;
}
/* Unregisters a receiver from its queue. The receiver must belong to `queue`
 * and must have no pending messages (the caller is responsible for ensuring no
 * further messages arrive). */
void h2o_multithread_unregister_receiver(h2o_multithread_queue_t *queue, h2o_multithread_receiver_t *receiver)
{
    assert(queue == receiver->queue);
    assert(h2o_linklist_is_empty(&receiver->_messages));
    pthread_mutex_lock(&queue->mutex);
    h2o_linklist_unlink(&receiver->_link);
    pthread_mutex_unlock(&queue->mutex);
}
/* Tears down an HTTP/1 connection: cancels its timeout, disposes the request,
 * optionally closes the socket, unlinks it from the connection list, and frees
 * the connection object. */
static void close_connection(struct st_h2o_http1_conn_t *conn, int close_socket)
{
    h2o_timeout_unlink(&conn->_timeout_entry);
    h2o_dispose_request(&conn->req);
    if (close_socket && conn->sock != NULL)
        h2o_socket_close(conn->sock);
    h2o_linklist_unlink(&conn->_conns);
    free(conn);
}
/* Drains the configurator list, invoking each configurator's optional dispose
 * hook before destroying it. */
void h2o_configurator__dispose_configurators(h2o_globalconf_t *conf)
{
    for (;;) {
        if (h2o_linklist_is_empty(&conf->configurators))
            break;
        h2o_configurator_t *c = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, conf->configurators.next);
        h2o_linklist_unlink(&c->_link);
        if (c->dispose != NULL)
            c->dispose(c);
        destroy_configurator(c);
    }
}
/* Writer thread: drains ctx->pending and issues the corresponding yrmcds
 * commands. Holds ctx->mutex while inspecting the pending list, but releases
 * it around the (potentially blocking) network writes. Exits when
 * writer_exit_requested becomes non-zero, or jumps to Error on a send failure. */
static void *writer_main(void *_conn)
{
    struct st_h2o_memcached_conn_t *conn = _conn;
    yrmcds_error err;

    pthread_mutex_lock(&conn->ctx->mutex);

    /* __sync_add_and_fetch(..., 0) is used as an atomic read of the exit flag */
    while (!__sync_add_and_fetch(&conn->writer_exit_requested, 0)) {
        while (!h2o_linklist_is_empty(&conn->ctx->pending)) {
            h2o_memcached_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, pending, conn->ctx->pending.next);
            h2o_linklist_unlink(&req->pending);
            pthread_mutex_unlock(&conn->ctx->mutex); /* do not hold the lock across network I/O */
            switch (req->type) {
            case REQ_TYPE_GET:
                /* register as in-flight before sending, so the reader thread can match the response */
                pthread_mutex_lock(&conn->mutex);
                h2o_linklist_insert(&conn->inflight, &req->inflight);
                pthread_mutex_unlock(&conn->mutex);
                if ((err = yrmcds_get(&conn->yrmcds, req->key.base, req->key.len, 0, &req->data.get.serial)) != YRMCDS_OK)
                    goto Error;
                break;
            case REQ_TYPE_SET:
                err = yrmcds_set(&conn->yrmcds, req->key.base, req->key.len, req->data.set.value.base, req->data.set.value.len,
                                 0, req->data.set.expiration, 0, !conn->yrmcds.text_mode, NULL);
                discard_req(req); /* fire-and-forget; no response is awaited */
                if (err != YRMCDS_OK)
                    goto Error;
                break;
            case REQ_TYPE_DELETE:
                err = yrmcds_remove(&conn->yrmcds, req->key.base, req->key.len, !conn->yrmcds.text_mode, NULL);
                discard_req(req); /* fire-and-forget; no response is awaited */
                if (err != YRMCDS_OK)
                    goto Error;
                break;
            default:
                fprintf(stderr, "[lib/common/memcached.c] unknown type:%d\n", (int)req->type);
                err = YRMCDS_NOT_IMPLEMENTED;
                goto Error;
            }
            pthread_mutex_lock(&conn->ctx->mutex);
        }
        pthread_cond_wait(&conn->ctx->cond, &conn->ctx->mutex);
    }

    pthread_mutex_unlock(&conn->ctx->mutex);
    return NULL;

Error:
    fprintf(stderr, "[lib/common/memcached.c] failed to send request; %s\n", yrmcds_strerror(err));
    /* doc says the call can be used to interrupt yrmcds_recv */
    yrmcds_shutdown(&conn->yrmcds);
    return NULL;
}
/* Evicts one open-file cache entry: removes it from the hash table and the LRU
 * list, then drops the reference (which closes the file when it was the last). */
static inline void release_from_cache(h2o_filecache_t *cache, khiter_t iter)
{
    const char *stored_path = kh_key(cache->hash, iter);
    h2o_filecache_ref_t *entry = H2O_STRUCT_FROM_MEMBER(h2o_filecache_ref_t, _path, stored_path);

    /* detach from the hash and the LRU list, then release our reference */
    kh_del(opencache_set, cache->hash, iter);
    h2o_linklist_unlink(&entry->_lru);
    h2o_filecache_close_file(entry);
}
/* Starts as many pending HTTP/2 streams as the concurrency budget allows. */
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    while (!h2o_linklist_is_empty(&conn->_pending_reqs)) {
        if (!can_run_requests(conn))
            break;
        /* pop the oldest pending stream off the list */
        h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_pending_reqs.next);
        h2o_linklist_unlink(&stream->_refs.link);
        /* dispatch it */
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        if (!h2o_http2_stream_is_push(stream->stream_id) && conn->pull_stream_ids.max_processed < stream->stream_id)
            conn->pull_stream_ids.max_processed = stream->stream_id;
        h2o_process_request(&stream->req);
    }
}
/* Releases every virtual-host configuration, then the default host, then the
 * global and per-host configurator lists. */
void h2o_config_dispose(h2o_global_configuration_t *config)
{
    while (!h2o_linklist_is_empty(&config->virtual_hosts)) {
        h2o_host_configuration_t *hostconf =
            H2O_STRUCT_FROM_MEMBER(h2o_host_configuration_t, _link, config->virtual_hosts.next);
        h2o_linklist_unlink(&hostconf->_link);
        dispose_host_config(hostconf);
        free(hostconf);
    }
    dispose_host_config(&config->default_host);
    DESTROY_LIST(h2o_configurator_t, config->global_configurators);
    DESTROY_LIST(h2o_configurator_t, config->host_configurators);
}
/* Fetches a pooled socket entry if one is available.
 * NOTE(review): this copy of the function appears truncated in this chunk —
 * the text ends right after the fetch; the remainder of the body is not
 * visible here, so only the visible portion is documented. */
void h2o_socketpool_connect(h2o_socketpool_connect_request_t **_req, h2o_socketpool_t *pool, h2o_loop_t *loop,
                            h2o_multithread_receiver_t *getaddr_receiver, h2o_socketpool_connect_cb cb, void *data)
{
    struct pool_entry_t *entry = NULL;

    if (_req != NULL)
        *_req = NULL;

    /* fetch an entry */
    pthread_mutex_lock(&pool->_shared.mutex);
    destroy_expired(pool); /* drop entries past their keep-alive deadline first */
    if (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
        entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
        h2o_linklist_unlink(&entry->link);
    }
/* Fires every entry registered at or before `now - timeout->timeout`. Entries
 * are kept oldest-first, so iteration stops at the first entry that is still
 * too young. */
void h2o_timeout_run(h2o_loop_t *loop, h2o_timeout_t *timeout, uint64_t now)
{
    uint64_t max_registered_at = now - timeout->timeout;

    for (;;) {
        if (h2o_linklist_is_empty(&timeout->_entries))
            break;
        h2o_timeout_entry_t *entry = H2O_STRUCT_FROM_MEMBER(h2o_timeout_entry_t, _link, timeout->_entries.next);
        if (entry->registered_at > max_registered_at)
            break; /* the rest of the list is even younger */
        h2o_linklist_unlink(&entry->_link);
        entry->registered_at = 0; /* mark as unregistered before invoking the callback */
        entry->cb(entry);
        h2o_timeout__do_post_callback(loop);
    }
}
/* Receiver callback for completed name lookups: delivers each result to its
 * callback (unless the request was cancelled), then frees the addrinfo chain
 * and the request itself. */
void h2o_hostinfo_getaddr_receiver(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
    while (!h2o_linklist_is_empty(messages)) {
        h2o_hostinfo_getaddr_req_t *req =
            H2O_STRUCT_FROM_MEMBER(h2o_hostinfo_getaddr_req_t, _out.message.link, messages->next);
        h2o_linklist_unlink(&req->_out.message.link);
        h2o_hostinfo_getaddr_cb notify = req->_cb;
        if (notify != NULL) {
            req->_cb = NULL; /* a cleared _cb marks the request as handled/cancelled */
            notify(req, req->_out.errstr, req->_out.ai, req->cbdata);
        }
        if (req->_out.ai != NULL)
            freeaddrinfo(req->_out.ai);
        free(req);
    }
}
/* Cancels a pending GET. The callback is cleared so a request already handed to
 * the network threads is silently dropped; the request is freed here only if it
 * was still sitting on the pending list. */
void h2o_memcached_cancel_get(h2o_memcached_context_t *ctx, h2o_memcached_req_t *req)
{
    int should_free = 0;

    pthread_mutex_lock(&ctx->mutex);
    req->data.get.cb = NULL;
    if (h2o_linklist_is_linked(&req->pending)) {
        h2o_linklist_unlink(&req->pending);
        should_free = 1;
    }
    pthread_mutex_unlock(&ctx->mutex);

    /* release outside the lock */
    if (should_free)
        free_req(req);
}
/* Dispatches queued streams while below the configured per-connection
 * concurrency limit; _is_dispatching_pending_reqs guards against reentry. */
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    conn->_is_dispatching_pending_reqs = 1;

    while (!h2o_linklist_is_empty(&conn->_pending_reqs) &&
           conn->num_streams.responding < conn->super.ctx->globalconf->http2.max_concurrent_requests_per_connection) {
        /* pop the oldest pending stream */
        h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_pending_reqs.next);
        h2o_linklist_unlink(&stream->_refs.link);
        /* dispatch */
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        if (!h2o_http2_stream_is_push(stream->stream_id) && conn->pull_stream_ids.max_processed < stream->stream_id)
            conn->pull_stream_ids.max_processed = stream->stream_id;
        h2o_process_request(&stream->req);
    }

    conn->_is_dispatching_pending_reqs = 0;
}
/* Moves an open stream reference under `new_parent` in the dependency tree,
 * keeping the WRR queues and active counters consistent. Statement order is
 * load-bearing: the counter adjustments read ref->node._parent before it is
 * overwritten at the end. */
static void do_rebind(h2o_http2_scheduler_openref_t *ref, h2o_http2_scheduler_node_t *new_parent, int exclusive)
{
    /* rebind _all_link */
    h2o_linklist_unlink(&ref->_all_link);
    h2o_linklist_insert(&new_parent->_all_refs, &ref->_all_link);
    /* rebind to WRR (as well as adjust active_cnt) */
    if (ref->_active_cnt != 0) {
        queue_unset(&ref->_queue_node);
        queue_set(get_queue(new_parent), &ref->_queue_node, ref->weight);
        decr_active_cnt(ref->node._parent); /* old parent loses one active descendant... */
        incr_active_cnt(new_parent);        /* ...and the new parent gains it */
    }
    /* update the backlinks */
    ref->node._parent = new_parent;

    if (exclusive)
        convert_to_exclusive(new_parent, ref);
}
/* Receiver callback for completed memcached GETs: base64url-decodes the value
 * when needed (scrubbing and freeing the encoded copy), invokes the user
 * callback unless the request was cancelled, then frees the request. */
void h2o_memcached_receiver(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
    while (!h2o_linklist_is_empty(messages)) {
        h2o_memcached_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, data.get.message.link, messages->next);
        h2o_linklist_unlink(&req->data.get.message.link);
        assert(req->type == REQ_TYPE_GET);
        if (req->data.get.cb != NULL) {
            if (req->data.get.value_is_encoded && req->data.get.value.len != 0) {
                /* decode, then wipe and release the encoded representation */
                h2o_iovec_t plain = h2o_decode_base64url(NULL, req->data.get.value.base, req->data.get.value.len);
                h2o_mem_set_secure(req->data.get.value.base, 0, req->data.get.value.len);
                free(req->data.get.value.base);
                req->data.get.value = plain;
            }
            req->data.get.cb(req->data.get.value, req->data.get.cb_data);
        }
        free_req(req);
    }
}
/* Cancels an in-progress address lookup. A request still on the pending queue
 * is detached and freed here; one already owned by a worker thread only has its
 * callback cleared, and the worker discards the result. */
void h2o_hostinfo_getaddr_cancel(h2o_hostinfo_getaddr_req_t *req)
{
    int do_free = 0;

    pthread_mutex_lock(&queue.mutex);
    if (!h2o_linklist_is_linked(&req->_pending)) {
        req->_cb = NULL; /* worker owns the request; it will see the cleared callback */
    } else {
        h2o_linklist_unlink(&req->_pending);
        do_free = 1;
    }
    pthread_mutex_unlock(&queue.mutex);

    if (do_free)
        free(req);
}
/* Worker thread: forever pops queued lookup requests and resolves them.
 * queue.mutex is held except around the blocking lookup itself; the idle
 * counter brackets the drain loop so the spawning logic can distinguish busy
 * workers from idle ones. */
static void *lookup_thread_main(void *_unused)
{
    pthread_mutex_lock(&queue.mutex);

    while (1) {
        --queue.num_threads_idle;
        while (!h2o_linklist_is_empty(&queue.pending)) {
            h2o_hostinfo_getaddr_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_hostinfo_getaddr_req_t, _pending, queue.pending.next);
            h2o_linklist_unlink(&req->_pending);
            pthread_mutex_unlock(&queue.mutex); /* resolve without holding the lock */
            lookup_and_respond(req);
            pthread_mutex_lock(&queue.mutex);
        }
        ++queue.num_threads_idle;
        pthread_cond_wait(&queue.cond, &queue.mutex);
    }

    h2o_fatal("unreachable");
    return NULL;
}
/* Closes an open stream reference. Any dependents are re-parented to this
 * node's parent with their weights scaled proportionally (draft-16 5.3.4),
 * after which the reference detaches itself from the tree and, if active, from
 * the WRR queue. */
void h2o_http2_scheduler_close(h2o_http2_scheduler_openref_t *ref)
{
    assert(h2o_http2_scheduler_is_open(ref));

    /* move dependents to parent */
    if (!h2o_linklist_is_empty(&ref->node._all_refs)) {
        /* proportionally distribute the weight to the children (draft-16 5.3.4) */
        uint32_t total_weight = 0, factor;
        h2o_linklist_t *link;
        for (link = ref->node._all_refs.next; link != &ref->node._all_refs; link = link->next) {
            h2o_http2_scheduler_openref_t *child_ref = H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, link);
            total_weight += child_ref->weight;
        }
        assert(total_weight != 0);
        /* factor = ref->weight / total_weight in 16.16 fixed point, rounded to nearest */
        factor = ((uint32_t)ref->weight * 65536 + total_weight / 2) / total_weight;
        do {
            /* rebind unlinks the child from _all_refs, so always take the current head */
            h2o_http2_scheduler_openref_t *child_ref =
                H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, ref->node._all_refs.next);
            uint16_t weight = (child_ref->weight * factor / 32768 + 1) / 2; /* scale and round, then clamp to [1,256] */
            if (weight < 1)
                weight = 1;
            else if (weight > 256)
                weight = 256;
            h2o_http2_scheduler_rebind(child_ref, ref->node._parent, weight, 0);
        } while (!h2o_linklist_is_empty(&ref->node._all_refs));
    }
    free(ref->node._queue);
    ref->node._queue = NULL;

    /* detach self */
    h2o_linklist_unlink(&ref->_all_link);
    if (ref->_self_is_active) {
        assert(ref->_active_cnt == 1);
        queue_unset(&ref->_queue_node);
        decr_active_cnt(ref->node._parent);
    } else {
        assert(ref->_active_cnt == 0);
    }
}
/* Looks up `key` in the cache, computing the hash when `keyhash` is zero.
 * Returns a referenced entry (the caller must release it) or NULL when the key
 * is absent or expired. With H2O_CACHE_FLAG_EARLY_UPDATE, also returns NULL
 * once per entry that is close to expiry, prompting the caller to refresh it. */
h2o_cache_ref_t *h2o_cache_fetch(h2o_cache_t *cache, uint64_t now, h2o_iovec_t key, h2o_cache_hashcode_t keyhash)
{
    h2o_cache_ref_t search_key, *ref;
    khiter_t iter;
    int64_t timeleft;

    if (keyhash == 0)
        keyhash = h2o_cache_calchash(key.base, key.len);
    search_key.key = key;
    search_key.keyhash = keyhash;

    lock_cache(cache);

    purge(cache, now); /* evict expired entries before the lookup */

    if ((iter = kh_get(cache, cache->table, &search_key)) == kh_end(cache->table))
        goto NotFound;

    /* found */
    ref = kh_key(cache->table, iter);
    timeleft = get_timeleft(cache, ref, now);
    if (timeleft < 0)
        goto NotFound;
    if ((cache->flags & H2O_CACHE_FLAG_EARLY_UPDATE) != 0 && timeleft < 10 && !ref->_requested_early_update) {
        /* report a miss once so the caller refreshes the entry before it actually expires */
        ref->_requested_early_update = 1;
        goto NotFound;
    }

    /* move the entry to the top of LRU */
    h2o_linklist_unlink(&ref->_lru_link);
    h2o_linklist_insert(&cache->lru, &ref->_lru_link);
    __sync_fetch_and_add(&ref->_refcnt, 1);

    /* unlock and return the found entry */
    unlock_cache(cache);
    return ref;

NotFound:
    unlock_cache(cache);
    return NULL;
}
static h2o_memcached_req_t *pop_inflight(struct st_h2o_memcached_conn_t *conn, uint32_t serial) { h2o_memcached_req_t *req; h2o_linklist_t *node; pthread_mutex_lock(&conn->mutex); for (node = conn->inflight.next; node != &conn->inflight; node = node->next) { req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, node); assert(req->type == REQ_TYPE_GET); if (req->data.get.serial == serial) { h2o_linklist_unlink(&req->inflight); goto Found; } } req = NULL; Found: pthread_mutex_unlock(&conn->mutex); return req; }
/* Invoked on the owning event-loop thread when the queue is signalled: for each
 * active receiver, detaches its entire message list, moves the receiver back to
 * the inactive list, and runs its callback with the mutex released (so senders
 * are not blocked during dispatch). */
static void queue_cb(h2o_multithread_queue_t *queue)
{
    pthread_mutex_lock(&queue->mutex);

    while (!h2o_linklist_is_empty(&queue->receivers.active)) {
        h2o_multithread_receiver_t *receiver =
            H2O_STRUCT_FROM_MEMBER(h2o_multithread_receiver_t, _link, queue->receivers.active.next);
        /* detach all the messages from the receiver */
        h2o_linklist_t messages;
        h2o_linklist_init_anchor(&messages);
        h2o_linklist_insert_list(&messages, &receiver->_messages);
        /* relink the receiver to the inactive list */
        h2o_linklist_unlink(&receiver->_link);
        h2o_linklist_insert(&queue->receivers.inactive, &receiver->_link);
        /* dispatch the messages */
        pthread_mutex_unlock(&queue->mutex);
        receiver->cb(receiver, &messages);
        assert(h2o_linklist_is_empty(&messages)); /* the callback must consume every message */
        pthread_mutex_lock(&queue->mutex);
    }

    pthread_mutex_unlock(&queue->mutex);
}
/* Removes a node from its scheduler queue; the node must currently be linked. */
static void queue_unset(h2o_http2_scheduler_queue_node_t *node)
{
    assert(h2o_linklist_is_linked(&node->_link));
    h2o_linklist_unlink(&node->_link);
}
/* Unlinks a pooled entry from the shared socket list, releases its exported
 * socket state, and frees the entry. */
static void destroy_attached(struct pool_entry_t *entry)
{
    h2o_linklist_unlink(&entry->link);
    h2o_socket_dispose_export(&entry->sockinfo);
    free(entry);
}
/* Unlinks a pooled entry from the shared socket list, then destroys it as if it
 * had already been detached. */
static void destroy_attached(struct pool_entry_t *entry)
{
    h2o_linklist_unlink(&entry->link);
    destroy_detached(entry);
}
/* Pops the first message off `list` and frees it; used for messages carrying no
 * payload. The list must be non-empty. */
static void pop_empty_message(h2o_linklist_t *list)
{
    h2o_multithread_message_t *msg = H2O_STRUCT_FROM_MEMBER(h2o_multithread_message_t, link, list->next);
    h2o_linklist_unlink(&msg->link);
    free(msg);
}
/* Establishes a connection for the caller: reuses a live pooled socket when one
 * is available (probing liveness with a non-destructive MSG_PEEK recv),
 * otherwise starts a fresh connect — via name resolution or directly by
 * sockaddr — and reports completion through `cb`. */
void h2o_socketpool_connect(h2o_socketpool_connect_request_t **_req, h2o_socketpool_t *pool, h2o_loop_t *loop,
                            h2o_multithread_receiver_t *getaddr_receiver, h2o_socketpool_connect_cb cb, void *data)
{
    struct pool_entry_t *entry = NULL;

    if (_req != NULL)
        *_req = NULL;

    /* fetch an entry and return it */
    pthread_mutex_lock(&pool->_shared.mutex);
    destroy_expired(pool);
    while (1) {
        if (h2o_linklist_is_empty(&pool->_shared.sockets))
            break;
        entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
        h2o_linklist_unlink(&entry->link);
        pthread_mutex_unlock(&pool->_shared.mutex);
        /* test if the connection is still alive */
        char buf[1];
        ssize_t rret = recv(entry->sockinfo.fd, buf, 1, MSG_PEEK);
        if (rret == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
            /* yes! return it */
            h2o_socket_t *sock = h2o_socket_import(loop, &entry->sockinfo);
            free(entry);
            sock->on_close.cb = on_close;
            sock->on_close.data = pool;
            cb(sock, NULL, data);
            return;
        }
        /* connection is dead, report, close, and retry */
        if (rret <= 0) {
            static long counter = 0;
            if (__sync_fetch_and_add(&counter, 1) == 0) /* warn only once per process */
                fprintf(stderr, "[WARN] detected close by upstream before the expected timeout (see issue #679)\n");
        } else {
            static long counter = 0;
            if (__sync_fetch_and_add(&counter, 1) == 0) /* warn only once per process */
                fprintf(stderr, "[WARN] unexpectedly received data to a pooled socket (see issue #679)\n");
        }
        destroy_detached(entry);
        pthread_mutex_lock(&pool->_shared.mutex);
    }
    pthread_mutex_unlock(&pool->_shared.mutex);

    /* FIXME respect `capacity` */
    __sync_add_and_fetch(&pool->_shared.count, 1);

    /* prepare request object */
    h2o_socketpool_connect_request_t *req = h2o_mem_alloc(sizeof(*req));
    *req = (h2o_socketpool_connect_request_t){data, cb, pool, loop};
    if (_req != NULL)
        *_req = req;

    switch (pool->type) {
    case H2O_SOCKETPOOL_TYPE_NAMED:
        /* resolve the name, and connect */
        req->getaddr_req = h2o_hostinfo_getaddr(getaddr_receiver, pool->peer.host, pool->peer.named_serv, AF_UNSPEC,
                                                SOCK_STREAM, IPPROTO_TCP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, req);
        break;
    case H2O_SOCKETPOOL_TYPE_SOCKADDR:
        /* connect (using sockaddr_in) */
        start_connect(req, (void *)&pool->peer.sockaddr.bytes, pool->peer.sockaddr.len);
        break;
    }
}
/* Reader loop for one memcached connection: spawns the writer thread, receives
 * responses until an error occurs, and matches each response to its in-flight
 * request. On shutdown it fails all in-flight requests, stops the writer, and —
 * if this was the last live connection — discards everything still pending. */
static void reader_main(h2o_memcached_context_t *ctx)
{
    /* the inflight anchor is self-referential, hence the {&conn.inflight, &conn.inflight} initializer */
    struct st_h2o_memcached_conn_t conn = {ctx, {}, PTHREAD_MUTEX_INITIALIZER, {&conn.inflight, &conn.inflight}, 0};
    pthread_t writer_thread;
    yrmcds_response resp;
    yrmcds_error err;

    /* connect to server and start the writer thread */
    connect_to_server(conn.ctx, &conn.yrmcds);
    pthread_create(&writer_thread, NULL, writer_main, &conn);

    pthread_mutex_lock(&conn.ctx->mutex);
    ++conn.ctx->num_threads_connected;
    pthread_mutex_unlock(&conn.ctx->mutex);

    /* receive data until an error occurs */
    while (1) {
        if ((err = yrmcds_recv(&conn.yrmcds, &resp)) != YRMCDS_OK) {
            fprintf(stderr, "[lib/common/memcached.c] yrmcds_recv:%s\n", yrmcds_strerror(err));
            break;
        }
        h2o_memcached_req_t *req = pop_inflight(&conn, resp.serial);
        if (req == NULL) {
            fprintf(stderr, "[lib/common/memcached.c] received unexpected serial\n");
            break;
        }
        if (resp.status == YRMCDS_STATUS_OK) {
            /* copy the value out, then scrub the library-owned buffer */
            req->data.get.value = h2o_iovec_init(h2o_mem_alloc(resp.data_len), resp.data_len);
            memcpy(req->data.get.value.base, resp.data, resp.data_len);
            h2o_mem_set_secure((void *)resp.data, 0, resp.data_len);
        }
        h2o_multithread_send_message(req->data.get.receiver, &req->data.get.message);
    }

    /* send error to all the reqs in-flight (their value stays empty) */
    pthread_mutex_lock(&conn.mutex);
    while (!h2o_linklist_is_empty(&conn.inflight)) {
        h2o_memcached_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, conn.inflight.next);
        h2o_linklist_unlink(&req->inflight);
        assert(req->type == REQ_TYPE_GET);
        h2o_multithread_send_message(req->data.get.receiver, &req->data.get.message);
    }
    pthread_mutex_unlock(&conn.mutex);

    /* stop the writer thread */
    __sync_add_and_fetch(&conn.writer_exit_requested, 1);
    pthread_mutex_lock(&conn.ctx->mutex);
    pthread_cond_broadcast(&conn.ctx->cond); /* wake the writer so it observes the exit flag */
    pthread_mutex_unlock(&conn.ctx->mutex);
    pthread_join(writer_thread, NULL);

    /* decrement num_threads_connected, and discard all the pending requests if no connections are alive */
    pthread_mutex_lock(&conn.ctx->mutex);
    if (--conn.ctx->num_threads_connected == 0) {
        while (!h2o_linklist_is_empty(&conn.ctx->pending)) {
            h2o_memcached_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, pending, conn.ctx->pending.next);
            h2o_linklist_unlink(&req->pending);
            discard_req(req);
        }
    }
    pthread_mutex_unlock(&conn.ctx->mutex);

    /* close the connection */
    yrmcds_close(&conn.yrmcds);
}