/* Detaches `stream` from `conn`: removes it from the stream-id hash table and the
 * priority scheduler, unlinks it from whatever pending/output list it may sit on
 * (depending on its state), forces the state to END_STREAM, and — unless the
 * connection is already shutting down — kicks pending requests and refreshes the
 * idle timeout. The stream object itself is NOT freed here. */
void h2o_http2_conn_unregister_stream(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    /* the stream must be registered in the id -> stream map; drop it */
    khiter_t iter = kh_get(h2o_http2_stream_t, conn->streams, stream->stream_id);
    assert(iter != kh_end(conn->streams));
    kh_del(h2o_http2_stream_t, conn->streams, iter);

    /* the scheduler slot is expected to still be open at this point */
    assert(h2o_http2_scheduler_is_open(&stream->_refs.scheduler));
    h2o_http2_scheduler_close(&stream->_refs.scheduler);

    switch (stream->state) {
    case H2O_HTTP2_STREAM_STATE_IDLE:
    case H2O_HTTP2_STREAM_STATE_RECV_HEADERS:
    case H2O_HTTP2_STREAM_STATE_RECV_BODY:
        /* not yet queued anywhere */
        assert(!h2o_linklist_is_linked(&stream->_refs.link));
        break;
    case H2O_HTTP2_STREAM_STATE_REQ_PENDING:
        /* must be on the pending-requests list; remove it */
        assert(h2o_linklist_is_linked(&stream->_refs.link));
        h2o_linklist_unlink(&stream->_refs.link);
        break;
    case H2O_HTTP2_STREAM_STATE_SEND_HEADERS:
    case H2O_HTTP2_STREAM_STATE_SEND_BODY:
    case H2O_HTTP2_STREAM_STATE_END_STREAM:
        /* may or may not be linked on the output queue */
        if (h2o_linklist_is_linked(&stream->_refs.link))
            h2o_linklist_unlink(&stream->_refs.link);
        break;
    }
    /* transition to the terminal state (no-op if already there) */
    if (stream->state != H2O_HTTP2_STREAM_STATE_END_STREAM)
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);

    /* freeing a stream slot may allow queued requests to start */
    if (conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING) {
        run_pending_requests(conn);
        update_idle_timeout(conn);
    }
}
/* Releases one reference to `ref`; the holder that drops the final reference
 * destroys the value (via the cache's destroy callback, if set) and frees the
 * entry. Safe to call concurrently thanks to the atomic decrement. */
void h2o_cache_release(h2o_cache_t *cache, h2o_cache_ref_t *ref)
{
    /* atomically decrement; only the last holder proceeds to teardown */
    if (__sync_fetch_and_sub(&ref->_refcnt, 1) != 1)
        return;

    /* a fully released entry must already be off both the LRU and the age lists */
    assert(!h2o_linklist_is_linked(&ref->_lru_link));
    assert(!h2o_linklist_is_linked(&ref->_age_link));

    if (cache->destroy_cb != NULL)
        cache->destroy_cb(ref->value);
    free(ref->key.base);
    free(ref);
}
/* Posts `message` to `receiver`'s queue and wakes the receiving event loop when
 * necessary. Passing NULL sends no message but still forces a wakeup if the
 * receiver's message list is empty. The queue mutex guards the list state; the
 * actual notification (libuv async or pipe write) happens outside the lock. */
void h2o_multithread_send_message(h2o_multithread_receiver_t *receiver, h2o_multithread_message_t *message)
{
    int needs_notify = 0;

    pthread_mutex_lock(&receiver->queue->mutex);
    if (message == NULL) {
        /* pure wakeup request: notify only if nothing is already queued */
        if (h2o_linklist_is_empty(&receiver->_messages))
            needs_notify = 1;
    } else {
        assert(!h2o_linklist_is_linked(&message->link));
        if (h2o_linklist_is_empty(&receiver->_messages)) {
            /* first message for this receiver: move it to the active list and wake the loop */
            h2o_linklist_unlink(&receiver->_link);
            h2o_linklist_insert(&receiver->queue->receivers.active, &receiver->_link);
            needs_notify = 1;
        }
        h2o_linklist_insert(&receiver->_messages, &message->link);
    }
    pthread_mutex_unlock(&receiver->queue->mutex);

    if (!needs_notify)
        return;
#if H2O_USE_LIBUV
    uv_async_send(&receiver->queue->async);
#else
    /* retry the one-byte pipe write if interrupted by a signal */
    while (write(receiver->queue->async.write, "", 1) == -1 && errno == EINTR)
        ;
#endif
}
/* Deregisters a timeout entry. Calling this on an entry that is not currently
 * linked is a no-op, so callers need not track registration state themselves. */
void h2o_timeout_unlink(h2o_timeout_entry_t *entry)
{
    if (!h2o_linklist_is_linked(&entry->_link))
        return;
    h2o_linklist_unlink(&entry->_link);
    entry->registered_at = 0;
}
/* Resets (aborts) `stream`. Streams that have not yet produced output are closed
 * immediately; streams in a SEND_* state are first transitioned to END_STREAM
 * (note the deliberate switch fallthrough) and, if queued for output, left for
 * the write callback to close. */
void h2o_http2_stream_reset(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    switch (stream->state) {
    case H2O_HTTP2_STREAM_STATE_IDLE:
    case H2O_HTTP2_STREAM_STATE_RECV_HEADERS:
    case H2O_HTTP2_STREAM_STATE_RECV_BODY:
    case H2O_HTTP2_STREAM_STATE_REQ_PENDING:
        /* nothing has been sent; dispose of the stream right away */
        h2o_http2_stream_close(conn, stream);
        break;
    case H2O_HTTP2_STREAM_STATE_SEND_HEADERS:
    case H2O_HTTP2_STREAM_STATE_SEND_BODY:
    case H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL:
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);
    /* continues */
    case H2O_HTTP2_STREAM_STATE_END_STREAM:
        /* clear all the queued bufs, and close the connection in the callback */
        stream->_data.size = 0;
        if (h2o_linklist_is_linked(&stream->_refs.link)) {
            /* still on the output queue: will be closed in the callback */
        } else {
            h2o_http2_stream_close(conn, stream);
        }
        break;
    }
}
/* Drops one reference to a file-cache entry; when the count reaches zero the
 * underlying descriptor is closed (if still open) and the entry is freed. */
void h2o_filecache_close_file(h2o_filecache_ref_t *ref)
{
    if (--ref->_refcnt == 0) {
        /* the entry must already have been removed from the LRU list */
        assert(!h2o_linklist_is_linked(&ref->_lru));
        if (ref->fd != -1) {
            close(ref->fd);
            ref->fd = -1; /* defend against double-close */
        }
        free(ref);
    }
}
/* Destroys a memcached request object. The request must already be off the
 * pending list. Stored values are zeroed before being freed since they may
 * hold sensitive data (e.g. session contents). */
static void free_req(h2o_memcached_req_t *req)
{
    assert(!h2o_linklist_is_linked(&req->pending));
    if (req->type == REQ_TYPE_GET) {
        /* the reply message must not still be queued */
        assert(!h2o_linklist_is_linked(&req->data.get.message.link));
        h2o_mem_set_secure(req->data.get.value.base, 0, req->data.get.value.len);
        free(req->data.get.value.base);
    } else if (req->type == REQ_TYPE_SET) {
        h2o_mem_set_secure(req->data.set.value.base, 0, req->data.set.value.len);
        free(req->data.set.value.base);
    } else if (req->type == REQ_TYPE_DELETE) {
        /* nothing extra to release */
    } else {
        assert(!"FIXME");
    }
    free(req);
}
/* Evicts every entry from the cache under the cache lock. Entries are removed
 * from the head of the LRU list one at a time via erase_ref(), which is expected
 * to unlink them from both the LRU and age lists and update the accounting. */
void h2o_cache_clear(h2o_cache_t *cache)
{
    lock_cache(cache);

    while (!h2o_linklist_is_empty(&cache->lru)) {
        h2o_cache_ref_t *ref = H2O_STRUCT_FROM_MEMBER(h2o_cache_ref_t, _lru_link, cache->lru.next);
        erase_ref(cache, kh_get(cache, cache->table, ref), 0);
    }
    /* BUGFIX: after evicting everything, the age list must be EMPTY; the previous
     * `h2o_linklist_is_linked(&cache->age)` tested the wrong predicate (whether the
     * anchor itself is linked), which does not verify that all entries are gone.
     * This now mirrors the table-size and byte-size assertions below. */
    assert(h2o_linklist_is_empty(&cache->age));
    assert(kh_size(cache->table) == 0);
    assert(cache->size == 0);

    unlock_cache(cache);
}
/* Cancels an in-flight memcached GET. The callback is disarmed under the
 * context mutex so the worker thread will never invoke it; if the request is
 * still sitting on the pending list this thread takes ownership and frees it,
 * otherwise the worker (already processing it) is responsible for the free. */
void h2o_memcached_cancel_get(h2o_memcached_context_t *ctx, h2o_memcached_req_t *req)
{
    int owns_req = 0;

    pthread_mutex_lock(&ctx->mutex);
    req->data.get.cb = NULL; /* disarm the completion callback */
    if (h2o_linklist_is_linked(&req->pending)) {
        h2o_linklist_unlink(&req->pending);
        owns_req = 1;
    }
    pthread_mutex_unlock(&ctx->mutex);

    if (owns_req)
        free_req(req);
}
/* Enqueues `node` into the weight-scheduled run queue. Implements a form of
 * deficit round-robin: each weight maps (via OFFSET_TABLE) to a fixed-point
 * offset (x65536) into the circular array of anchors; the fractional part is
 * carried over in `node->_deficit` so that, over time, a node is dequeued with
 * frequency proportional to its weight. */
static void queue_set(h2o_http2_scheduler_queue_t *queue, h2o_http2_scheduler_queue_node_t *node, uint16_t weight)
{
    /* holds 257 entries of offsets (multiplied by 65536) where nodes with weights between 1..257 should go into
     * each entry (except for weight=256) is calculated as: round(N / weight), where N is chosen so that the
     * value becomes 63*65536 for weight=1 (OFFSET_TABLE[0] == 4128768 == 63 << 16).
     * weight=257 is used internally to send data before any of the streams being pulled, and therefore has the offset set to zero. */
    static const unsigned OFFSET_TABLE[] = {
        4128768, 2064384, 1376256, 1032192, 825754, 688128, 589824, 516096, 458752, 412877, 375343, 344064, 317598,
        294912,  275251,  258048,  242869,  229376, 217304, 206438, 196608, 187671, 179512, 172032, 165151, 158799,
        152917,  147456,  142371,  137626,  133186, 129024, 125114, 121434, 117965, 114688, 111588, 108652, 105866,
        103219,  100702,  98304,   96018,   93836,  91750,  89756,  87846,  86016,  84261,  82575,  80956,  79399,
        77901,   76459,   75069,   73728,   72435,  71186,  69979,  68813,  67685,  66593,  65536,  64512,  63520,
        62557,   61623,   60717,   59837,   58982,  58152,  57344,  56558,  55794,  55050,  54326,  53620,  52933,
        52263,   51610,   50972,   50351,   49744,  49152,  48574,  48009,  47457,  46918,  46391,  45875,  45371,
        44878,   44395,   43923,   43461,   43008,  42565,  42130,  41705,  41288,  40879,  40478,  40085,  39700,
        39322,   38951,   38587,   38229,   37879,  37534,  37196,  36864,  36538,  36217,  35902,  35593,  35289,
        34990,   34696,   34406,   34122,   33842,  33567,  33297,  33030,  32768,  32510,  32256,  32006,  31760,
        31517,   31279,   31043,   30812,   30583,  30359,  30137,  29919,  29703,  29491,  29282,  29076,  28873,
        28672,   28474,   28279,   28087,   27897,  27710,  27525,  27343,  27163,  26985,  26810,  26637,  26466,
        26298,   26131,   25967,   25805,   25645,  25486,  25330,  25175,  25023,  24872,  24723,  24576,  24431,
        24287,   24145,   24004,   23866,   23729,  23593,  23459,  23326,  23195,  23066,  22938,  22811,  22686,
        22562,   22439,   22318,   22198,   22079,  21962,  21845,  21730,  21617,  21504,  21393,  21282,  21173,
        21065,   20958,   20852,   20748,   20644,  20541,  20439,  20339,  20239,  20140,  20043,  19946,  19850,
        19755,   19661,   19568,   19475,   19384,  19293,  19204,  19115,  19027,  18939,  18853,  18767,  18682,
        18598,   18515,   18432,   18350,   18269,  18188,  18109,  18030,  17951,  17873,  17796,  17720,  17644,
        17569,   17495,   17421,   17348,   17275,  17203,  17132,  17061,  16991,  16921,  16852,  16784,  16716,
        16648,   16581,   16515,   16449,   16384,  16319,  16255,  16191,  16128,  0};

    /* the node must not already be queued */
    assert(!h2o_linklist_is_linked(&node->_link));

    if (weight > 256) {
        /* weight 257: highest priority, dedicated anchor served before everything else */
        h2o_linklist_insert(&queue->anchor257, &node->_link);
    } else {
        assert(1 <= weight);
        /* fixed-point offset plus the deficit carried over from the previous round */
        size_t offset = OFFSET_TABLE[weight - 1] + node->_deficit;
        node->_deficit = offset % 65536;  /* keep the fractional part for next time */
        offset = offset / 65536;          /* integer slot distance from the current position */
        /* mark the slot as non-empty in the occupancy bitmask (MSB = current slot) */
        queue->bits |= 1ULL << (sizeof(queue->bits) * 8 - 1 - offset);
        h2o_linklist_insert(queue->anchors + (queue->offset + offset) % (sizeof(queue->anchors) / sizeof(queue->anchors[0])),
                            &node->_link);
    }
}
/* Cancels a pending getaddr request. If the request is still queued (not yet
 * picked up by a resolver thread) this thread detaches and frees it; otherwise
 * the callback is cleared so the worker completes the lookup silently and
 * remains responsible for releasing the request. */
void h2o_hostinfo_getaddr_cancel(h2o_hostinfo_getaddr_req_t *req)
{
    int owns_req = 0;

    pthread_mutex_lock(&queue.mutex);
    if (!h2o_linklist_is_linked(&req->_pending)) {
        /* already taken by a worker: just disarm the callback */
        req->_cb = NULL;
    } else {
        h2o_linklist_unlink(&req->_pending);
        owns_req = 1;
    }
    pthread_mutex_unlock(&queue.mutex);

    if (owns_req)
        free(req);
}
/* Resets (aborts) `stream`. Streams that have not started sending are closed
 * immediately; streams that are sending are switched to the EOS state, their
 * queued output is discarded, and — if still on the output list — the write
 * callback takes over the close. `errnum` is accepted for interface
 * compatibility but is not consulted here. */
void h2o_http2_stream_reset(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream, int errnum)
{
    switch (stream->state) {
    case H2O_HTTP2_STREAM_STATE_RECV_PSUEDO_HEADERS:
    case H2O_HTTP2_STREAM_STATE_RECV_HEADERS:
    case H2O_HTTP2_STREAM_STATE_RECV_BODY:
    case H2O_HTTP2_STREAM_STATE_REQ_PENDING:
        /* nothing has been sent yet; dispose of the stream right away */
        h2o_http2_stream_close(conn, stream);
        break;
    case H2O_HTTP2_STREAM_STATE_SEND_HEADERS:
    case H2O_HTTP2_STREAM_STATE_SEND_BODY:
    case H2O_HTTP2_STREAM_STATE_END_STREAM:
        /* change the state to EOS and drop all queued output; when the stream is
         * linked on the output queue, the pending callback performs the close */
        stream->state = H2O_HTTP2_STREAM_STATE_END_STREAM;
        stream->_data.size = 0;
        if (!h2o_linklist_is_linked(&stream->_link.link))
            h2o_http2_stream_close(conn, stream);
        break;
    }
}
/* Removes `node` from the scheduler run queue; the node must currently be queued. */
static void queue_unset(h2o_http2_scheduler_queue_node_t *node)
{
    assert(h2o_linklist_is_linked(&node->_link));
    h2o_linklist_unlink(&node->_link);
}