Example #1
void h2o_multithread_send_message(h2o_multithread_receiver_t *receiver, h2o_multithread_message_t *message)
{
    int do_send = 0;

    pthread_mutex_lock(&receiver->queue->mutex);
    if (message != NULL) {
        assert(!h2o_linklist_is_linked(&message->link));
        if (h2o_linklist_is_empty(&receiver->_messages)) {
            h2o_linklist_unlink(&receiver->_link);
            h2o_linklist_insert(&receiver->queue->receivers.active, &receiver->_link);
            do_send = 1;
        }
        h2o_linklist_insert(&receiver->_messages, &message->link);
    } else {
        if (h2o_linklist_is_empty(&receiver->_messages))
            do_send = 1;
    }
    pthread_mutex_unlock(&receiver->queue->mutex);

    if (do_send) {
#if H2O_USE_LIBUV
        uv_async_send(&receiver->queue->async);
#else
        while (write(receiver->queue->async.write, "", 1) == -1 && errno == EINTR)
            ;
#endif
    }
}
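For context, a minimal producer-side sketch (hypothetical; assumes a receiver that was registered elsewhere with h2o_multithread_register_receiver()). The zero-initialized link is what satisfies the assert(!h2o_linklist_is_linked(...)) above:

h2o_multithread_message_t *msg = h2o_mem_alloc(sizeof(*msg)); /* bare message, no payload */
*msg = (h2o_multithread_message_t){{NULL}};                   /* link starts out unlinked */
h2o_multithread_send_message(receiver, msg);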
Example #2
static h2o_http2_scheduler_queue_node_t *queue_pop(h2o_http2_scheduler_queue_t *queue)
{
    if (!h2o_linklist_is_empty(&queue->anchor257)) {
        h2o_http2_scheduler_queue_node_t *node =
            H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_queue_node_t, _link, queue->anchor257.next);
        h2o_linklist_unlink(&node->_link);
        return node;
    }

    while (queue->bits != 0) {
        int zeroes = __builtin_clzll(queue->bits);
        queue->bits <<= zeroes;
        queue->offset = (queue->offset + zeroes) % (sizeof(queue->anchors) / sizeof(queue->anchors[0]));
        if (!h2o_linklist_is_empty(queue->anchors + queue->offset)) {
            h2o_http2_scheduler_queue_node_t *node =
                H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_queue_node_t, _link, queue->anchors[queue->offset].next);
            h2o_linklist_unlink(&node->_link);
            if (h2o_linklist_is_empty(queue->anchors + queue->offset))
                queue->bits &= (1ULL << (sizeof(queue->bits) * 8 - 1)) - 1;
            return node;
        }
        queue->bits &= (1ULL << (sizeof(queue->bits) * 8 - 1)) - 1;
    }
    return NULL;
}
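The bitmask scan above is easier to see in isolation. Below is a small, self-contained sketch (hypothetical names, simplified so that each flagged bucket is consumed unconditionally) of the same technique: bit 63 of bits stands for the bucket at offset, and __builtin_clzll() counts how many buckets to skip.

#include <stdint.h>
#include <stdio.h>

#define NUM_BUCKETS 64

static uint64_t bits;  /* bit 63 corresponds to the bucket at `offset` */
static size_t offset;

static int next_bucket(void)
{
    if (bits == 0)
        return -1;
    int zeroes = __builtin_clzll(bits);       /* leading zeros = buckets to skip */
    bits <<= zeroes;                          /* discard the skipped buckets */
    offset = (offset + zeroes) % NUM_BUCKETS; /* rotate the window */
    bits &= ~(1ULL << 63);                    /* consume the candidate bit */
    return (int)offset;
}

int main(void)
{
    bits = (1ULL << 61) | (1ULL << 58); /* flag buckets 2 and 5 */
    printf("%d %d %d\n", next_bucket(), next_bucket(), next_bucket()); /* prints "2 5 -1" */
    return 0;
}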
Example #3
void h2o_multithread_destroy_queue(h2o_multithread_queue_t *queue)
{
    assert(h2o_linklist_is_empty(&queue->receivers.active));
    assert(h2o_linklist_is_empty(&queue->receivers.inactive));
#if H2O_USE_LIBUV
    uv_close((uv_handle_t *)&queue->async, (void *)free);
#else
    h2o_socket_read_stop(queue->async.read);
    h2o_socket_close(queue->async.read);
    close(queue->async.write);
#endif
    pthread_mutex_destroy(&queue->mutex);
}
Example #4
static void on_pong(h2o_multithread_receiver_t *receiver, h2o_linklist_t *list)
{
    while (!h2o_linklist_is_empty(list)) {
        pop_empty_message(list);
        send_empty_message(&worker_thread.ping_receiver);
    }
}
Example #5
static h2o_memcached_req_t *pop_inflight(struct st_h2o_memcached_conn_t *conn, uint32_t serial)
{
    h2o_memcached_req_t *req;

    pthread_mutex_lock(&conn->mutex);

    if (conn->yrmcds.text_mode) {
        /* in text mode, responses are returned in order (and we may receive responses for commands other than GET) */
        if (!h2o_linklist_is_empty(&conn->inflight)) {
            req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, conn->inflight.next);
            assert(req->type == REQ_TYPE_GET);
            if (req->data.get.serial == serial)
                goto Found;
        }
    } else {
        /* in binary mode, responses are received out-of-order (and we would only receive responses for GET) */
        h2o_linklist_t *node;
        for (node = conn->inflight.next; node != &conn->inflight; node = node->next) {
            req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, node);
            assert(req->type == REQ_TYPE_GET);
            if (req->data.get.serial == serial)
                goto Found;
        }
    }

    /* not found */
    pthread_mutex_unlock(&conn->mutex);
    return NULL;

Found:
    h2o_linklist_unlink(&req->inflight);
    pthread_mutex_unlock(&conn->mutex);
    return req;
}
Example #6
void h2o_socketpool_dispose(h2o_socketpool_t *pool)
{
    size_t i;

    pthread_mutex_lock(&pool->_shared.mutex);
    while (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
        struct pool_entry_t *entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, all_link, pool->_shared.sockets.next);
        destroy_attached(entry);
        __sync_sub_and_fetch(&pool->_shared.count, 1);
    }
    pthread_mutex_unlock(&pool->_shared.mutex);
    pthread_mutex_destroy(&pool->_shared.mutex);

    if (pool->balancer != NULL) {
        pool->balancer->callbacks->destroy(pool->balancer);
    }

    if (pool->_ssl_ctx != NULL)
        SSL_CTX_free(pool->_ssl_ctx);

    if (pool->_interval_cb.loop != NULL)
        h2o_socketpool_unregister_loop(pool, pool->_interval_cb.loop);

    for (i = 0; i < pool->targets.size; i++) {
        h2o_socketpool_destroy_target(pool->targets.entries[i]);
    }
    free(pool->targets.entries);
}
Example #7
static void purge(h2o_cache_t *cache, uint64_t now)
{
    /* by cache size */
    while (cache->capacity < cache->size) {
        h2o_cache_ref_t *last;
        assert(!h2o_linklist_is_empty(&cache->lru));
        last = H2O_STRUCT_FROM_MEMBER(h2o_cache_ref_t, _lru_link, cache->lru.next);
        erase_ref(cache, kh_get(cache, cache->table, last), 0);
    }
    /* by TTL */
    while (!h2o_linklist_is_empty(&cache->age)) {
        h2o_cache_ref_t *oldest = H2O_STRUCT_FROM_MEMBER(h2o_cache_ref_t, _age_link, cache->age.next);
        if (get_timeleft(cache, oldest, now) >= 0)
            break;
        erase_ref(cache, kh_get(cache, cache->table, oldest), 0);
    }
}
Example #8
void h2o_filecache_destroy(h2o_filecache_t *cache)
{
    h2o_filecache_clear(cache);
    assert(kh_size(cache->hash) == 0);
    assert(h2o_linklist_is_empty(&cache->lru));
    kh_destroy(opencache_set, cache->hash);
    free(cache);
}
Example #9
void h2o_multithread_unregister_receiver(h2o_multithread_queue_t *queue, h2o_multithread_receiver_t *receiver)
{
    assert(queue == receiver->queue);
    assert(h2o_linklist_is_empty(&receiver->_messages));
    pthread_mutex_lock(&queue->mutex);
    h2o_linklist_unlink(&receiver->_link);
    pthread_mutex_unlock(&queue->mutex);
}
Example #10
void h2o_multithread_destroy_queue(h2o_multithread_queue_t *queue)
{
    assert(h2o_linklist_is_empty(&queue->receivers.active));
    assert(h2o_linklist_is_empty(&queue->receivers.inactive));
    pthread_mutex_destroy(&queue->mutex);

#if H2O_USE_LIBUV
    uv_close((uv_handle_t *)&queue->async, libuv_destroy_delayed);
#else
    h2o_socket_read_stop(queue->async.read);
    h2o_socket_close(queue->async.read);
#ifndef __linux__
    /* eventfd (used on Linux) needs only one file descriptor, and it has already been closed by h2o_socket_close() */
    close(queue->async.write);
#endif
    free(queue);
#endif
}
Example #11
static void update_idle_timeout(h2o_http2_conn_t *conn)
{
    h2o_timeout_unlink(&conn->_timeout_entry);

    if (conn->num_streams.responding == 0) {
        assert(h2o_linklist_is_empty(&conn->_pending_reqs));
        conn->_timeout_entry.cb = on_idle_timeout;
        h2o_timeout_link(conn->super.ctx->loop, &conn->super.ctx->http2.idle_timeout, &conn->_timeout_entry);
    }
}
Example #12
void h2o_configurator__dispose_configurators(h2o_globalconf_t *conf)
{
    while (!h2o_linklist_is_empty(&conf->configurators)) {
        h2o_configurator_t *c = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, conf->configurators.next);
        h2o_linklist_unlink(&c->_link);
        if (c->dispose != NULL)
            c->dispose(c);
        destroy_configurator(c);
    }
}
Example #13
static void *writer_main(void *_conn)
{
    struct st_h2o_memcached_conn_t *conn = _conn;
    yrmcds_error err;

    pthread_mutex_lock(&conn->ctx->mutex);

    while (!__sync_add_and_fetch(&conn->writer_exit_requested, 0)) {
        while (!h2o_linklist_is_empty(&conn->ctx->pending)) {
            h2o_memcached_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, pending, conn->ctx->pending.next);
            h2o_linklist_unlink(&req->pending);
            pthread_mutex_unlock(&conn->ctx->mutex);

            switch (req->type) {
            case REQ_TYPE_GET:
                pthread_mutex_lock(&conn->mutex);
                h2o_linklist_insert(&conn->inflight, &req->inflight);
                pthread_mutex_unlock(&conn->mutex);
                if ((err = yrmcds_get(&conn->yrmcds, req->key.base, req->key.len, 0, &req->data.get.serial)) != YRMCDS_OK)
                    goto Error;
                break;
            case REQ_TYPE_SET:
                err = yrmcds_set(&conn->yrmcds, req->key.base, req->key.len, req->data.set.value.base, req->data.set.value.len, 0,
                                 req->data.set.expiration, 0, !conn->yrmcds.text_mode, NULL);
                discard_req(req);
                if (err != YRMCDS_OK)
                    goto Error;
                break;
            case REQ_TYPE_DELETE:
                err = yrmcds_remove(&conn->yrmcds, req->key.base, req->key.len, !conn->yrmcds.text_mode, NULL);
                discard_req(req);
                if (err != YRMCDS_OK)
                    goto Error;
                break;
            default:
                fprintf(stderr, "[lib/common/memcached.c] unknown type:%d\n", (int)req->type);
                err = YRMCDS_NOT_IMPLEMENTED;
                goto Error;
            }

            pthread_mutex_lock(&conn->ctx->mutex);
        }
        pthread_cond_wait(&conn->ctx->cond, &conn->ctx->mutex);
    }

    pthread_mutex_unlock(&conn->ctx->mutex);
    return NULL;

Error:
    fprintf(stderr, "[lib/common/memcached.c] failed to send request; %s\n", yrmcds_strerror(err));
    /* doc says the call can be used to interrupt yrmcds_recv */
    yrmcds_shutdown(&conn->yrmcds);

    return NULL;
}
Example #14
static void destroy_expired(h2o_socketpool_t *pool)
{
    /* caller should lock the mutex */
    uint64_t expire_before = h2o_now(pool->_interval_cb.loop) - pool->timeout;
    while (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
        struct pool_entry_t *entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
        if (entry->added_at > expire_before)
            break;
        destroy_attached(entry);
        __sync_sub_and_fetch(&pool->_shared.count, 1);
    }
}
Example #15
static void on_ping(h2o_multithread_receiver_t *receiver, h2o_linklist_t *list)
{
    while (!h2o_linklist_is_empty(list)) {
        pop_empty_message(list);
        if (++worker_thread.num_ping_received < 100) {
            send_empty_message(&main_thread.pong_receiver);
        } else {
            send_empty_message(&main_thread.shutdown_receiver);
            worker_thread.should_exit = 1;
        }
    }
}
Example #16
void h2o_http2_scheduler_close(h2o_http2_scheduler_openref_t *ref)
{
    assert(h2o_http2_scheduler_is_open(ref));

    /* move dependents to parent */
    if (!h2o_linklist_is_empty(&ref->node._all_refs)) {
        /* proportionally distribute the weight to the children (draft-16 5.3.4) */
        uint32_t total_weight = 0, factor;
        h2o_linklist_t *link;
        for (link = ref->node._all_refs.next; link != &ref->node._all_refs; link = link->next) {
            h2o_http2_scheduler_openref_t *child_ref = H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, link);
            total_weight += child_ref->weight;
        }
        assert(total_weight != 0);
        factor = ((uint32_t)ref->weight * 65536 + total_weight / 2) / total_weight;
        do {
            h2o_http2_scheduler_openref_t *child_ref =
                H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, ref->node._all_refs.next);
            uint16_t weight = (child_ref->weight * factor / 32768 + 1) / 2;
            if (weight < 1)
                weight = 1;
            else if (weight > 256)
                weight = 256;
            h2o_http2_scheduler_rebind(child_ref, ref->node._parent, weight, 0);
        } while (!h2o_linklist_is_empty(&ref->node._all_refs));
    }

    free(ref->node._queue);
    ref->node._queue = NULL;

    /* detach self */
    h2o_linklist_unlink(&ref->_all_link);
    if (ref->_self_is_active) {
        assert(ref->_active_cnt == 1);
        queue_unset(&ref->_queue_node);
        decr_active_cnt(ref->node._parent);
    } else {
        assert(ref->_active_cnt == 0);
    }
}
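The fixed-point arithmetic in the redistribution above is compact; a worked example with hypothetical numbers (parent weight 16, two children weighted 8 and 24) shows that each child ends up with roughly child_weight * parent_weight / total_weight:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t parent_weight = 16;        /* hypothetical parent weight */
    uint16_t child_weights[] = {8, 24}; /* hypothetical child weights */
    uint32_t total_weight = 8 + 24;
    uint32_t factor = ((uint32_t)parent_weight * 65536 + total_weight / 2) / total_weight; /* 32768 */
    for (int i = 0; i < 2; ++i) {
        uint16_t weight = (child_weights[i] * factor / 32768 + 1) / 2;
        if (weight < 1)
            weight = 1;
        else if (weight > 256)
            weight = 256;
        printf("child %d: %d\n", i, (int)weight); /* prints 4 and 12, i.e. 8:24 scaled to the parent's weight of 16 */
    }
    return 0;
}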
Example #17
static void convert_to_exclusive(h2o_http2_scheduler_node_t *parent, h2o_http2_scheduler_openref_t *added)
{
    while (!h2o_linklist_is_empty(&parent->_all_refs)) {
        h2o_http2_scheduler_openref_t *child_ref =
            H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, parent->_all_refs.next);
        if (child_ref == added) {
            /* precond: the added node should exist as the last item within parent */
            assert(parent->_all_refs.prev == &added->_all_link);
            break;
        }
        h2o_http2_scheduler_rebind(child_ref, &added->node, h2o_http2_scheduler_get_weight(child_ref), 0);
    }
}
Example #18
File: config.c Project: Gwill/h2o
void h2o_config_dispose(h2o_global_configuration_t *config)
{
    while (! h2o_linklist_is_empty(&config->virtual_hosts)) {
        h2o_host_configuration_t *host_config = H2O_STRUCT_FROM_MEMBER(h2o_host_configuration_t, _link, config->virtual_hosts.next);
        h2o_linklist_unlink(&host_config->_link);
        dispose_host_config(host_config);
        free(host_config);
    }
    dispose_host_config(&config->default_host);
    DESTROY_LIST(h2o_configurator_t, config->global_configurators);
    DESTROY_LIST(h2o_configurator_t, config->host_configurators);

}
Example #19
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    while (!h2o_linklist_is_empty(&conn->_pending_reqs) && can_run_requests(conn)) {
        /* fetch and detach a pending stream */
        h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_pending_reqs.next);
        h2o_linklist_unlink(&stream->_refs.link);
        /* handle it */
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        if (!h2o_http2_stream_is_push(stream->stream_id) && conn->pull_stream_ids.max_processed < stream->stream_id)
            conn->pull_stream_ids.max_processed = stream->stream_id;
        h2o_process_request(&stream->req);
    }
}
Example #20
void h2o_cache_clear(h2o_cache_t *cache)
{
    lock_cache(cache);

    while (!h2o_linklist_is_empty(&cache->lru)) {
        h2o_cache_ref_t *ref = H2O_STRUCT_FROM_MEMBER(h2o_cache_ref_t, _lru_link, cache->lru.next);
        erase_ref(cache, kh_get(cache, cache->table, ref), 0);
    }
    assert(h2o_linklist_is_empty(&cache->age));
    assert(kh_size(cache->table) == 0);
    assert(cache->size == 0);

    unlock_cache(cache);
}
Example #21
void h2o_timeout_run(h2o_loop_t *loop, h2o_timeout_t *timeout, uint64_t now)
{
    uint64_t max_registered_at = now - timeout->timeout;

    while (!h2o_linklist_is_empty(&timeout->_entries)) {
        h2o_timeout_entry_t *entry = H2O_STRUCT_FROM_MEMBER(h2o_timeout_entry_t, _link, timeout->_entries.next);
        if (entry->registered_at > max_registered_at) {
            break;
        }
        h2o_linklist_unlink(&entry->_link);
        entry->registered_at = 0;
        entry->cb(entry);
        h2o_timeout__do_post_callback(loop);
    }
}
Example #22
void h2o_hostinfo_getaddr_receiver(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
    while (!h2o_linklist_is_empty(messages)) {
        h2o_hostinfo_getaddr_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_hostinfo_getaddr_req_t, _out.message.link, messages->next);
        h2o_linklist_unlink(&req->_out.message.link);
        h2o_hostinfo_getaddr_cb cb = req->_cb;
        if (cb != NULL) {
            req->_cb = NULL;
            cb(req, req->_out.errstr, req->_out.ai, req->cbdata);
        }
        if (req->_out.ai != NULL)
            freeaddrinfo(req->_out.ai);
        free(req);
    }
}
Example #23
static void queue_cb(h2o_multithread_queue_t *queue)
{
    pthread_mutex_lock(&queue->mutex);

    while (!h2o_linklist_is_empty(&queue->receivers.active)) {
        h2o_multithread_receiver_t *receiver =
            H2O_STRUCT_FROM_MEMBER(h2o_multithread_receiver_t, _link, queue->receivers.active.next);
        /* detach all the messages from the receiver */
        h2o_linklist_t messages;
        h2o_linklist_init_anchor(&messages);
        h2o_linklist_insert_list(&messages, &receiver->_messages);
        /* relink the receiver to the inactive list */
        h2o_linklist_unlink(&receiver->_link);
        h2o_linklist_insert(&queue->receivers.inactive, &receiver->_link);

        /* dispatch the messages */
        pthread_mutex_unlock(&queue->mutex);
        receiver->cb(receiver, &messages);
        assert(h2o_linklist_is_empty(&messages));
        pthread_mutex_lock(&queue->mutex);
    }

    pthread_mutex_unlock(&queue->mutex);
}
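Note that queue_cb expects the callback to leave the detached list empty (see the assert right after the call), so a receiver callback typically drains it completely. A minimal sketch of such a callback (hypothetical handler; assumes the h2o headers and plain malloc'ed messages):

#include <stdlib.h>
#include "h2o/memory.h" /* H2O_STRUCT_FROM_MEMBER */
#include "h2o/multithread.h"

static void my_receiver_cb(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
    while (!h2o_linklist_is_empty(messages)) {
        h2o_multithread_message_t *msg = H2O_STRUCT_FROM_MEMBER(h2o_multithread_message_t, link, messages->next);
        h2o_linklist_unlink(&msg->link);
        /* ... process the message ... */
        free(msg);
    }
}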
Example #24
void h2o_socketpool_connect(h2o_socketpool_connect_request_t **_req, h2o_socketpool_t *pool, h2o_loop_t *loop,
                            h2o_multithread_receiver_t *getaddr_receiver, h2o_socketpool_connect_cb cb, void *data)
{
    struct pool_entry_t *entry = NULL;

    if (_req != NULL)
        *_req = NULL;

    /* fetch an entry */
    pthread_mutex_lock(&pool->_shared.mutex);
    destroy_expired(pool);
    if (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
        entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
        h2o_linklist_unlink(&entry->link);
    }
Example #25
void h2o_socketpool_dispose(h2o_socketpool_t *pool)
{
    pthread_mutex_lock(&pool->_shared.mutex);
    while (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
        struct pool_entry_t *entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
        destroy_attached(entry);
        __sync_sub_and_fetch(&pool->_shared.count, 1);
    }
    pthread_mutex_unlock(&pool->_shared.mutex);
    pthread_mutex_destroy(&pool->_shared.mutex);

    if (pool->_interval_cb.loop != NULL) {
        h2o_timeout_unlink(&pool->_interval_cb.entry);
        h2o_timeout_dispose(pool->_interval_cb.loop, &pool->_interval_cb.timeout);
    }
    if (pool->peer.is_named)
        free(pool->peer.named.host.base);
}
Example #26
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    conn->_is_dispatching_pending_reqs = 1;

    while (!h2o_linklist_is_empty(&conn->_pending_reqs) &&
           conn->num_streams.responding < conn->super.ctx->globalconf->http2.max_concurrent_requests_per_connection) {
        /* fetch and detach a pending stream */
        h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_pending_reqs.next);
        h2o_linklist_unlink(&stream->_refs.link);
        /* handle it */
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        if (!h2o_http2_stream_is_push(stream->stream_id) && conn->pull_stream_ids.max_processed < stream->stream_id)
            conn->pull_stream_ids.max_processed = stream->stream_id;
        h2o_process_request(&stream->req);
    }

    conn->_is_dispatching_pending_reqs = 0;
}
Example #27
void h2o_memcached_receiver(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
    while (!h2o_linklist_is_empty(messages)) {
        h2o_memcached_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, data.get.message.link, messages->next);
        h2o_linklist_unlink(&req->data.get.message.link);
        assert(req->type == REQ_TYPE_GET);
        if (req->data.get.cb != NULL) {
            if (req->data.get.value_is_encoded && req->data.get.value.len != 0) {
                h2o_iovec_t decoded = h2o_decode_base64url(NULL, req->data.get.value.base, req->data.get.value.len);
                h2o_mem_set_secure(req->data.get.value.base, 0, req->data.get.value.len);
                free(req->data.get.value.base);
                req->data.get.value = decoded;
            }
            req->data.get.cb(req->data.get.value, req->data.get.cb_data);
        }
        free_req(req);
    }
}
Example #28
uint64_t h2o_timeout_get_wake_at(h2o_linklist_t *timeouts)
{
    h2o_linklist_t *node;
    uint64_t wake_at = UINT64_MAX;

    /* change wake_at to the minimum value of the timeouts */
    for (node = timeouts->next; node != timeouts; node = node->next) {
        h2o_timeout_t *timeout = H2O_STRUCT_FROM_MEMBER(h2o_timeout_t, _link, node);
        if (!h2o_linklist_is_empty(&timeout->_entries)) {
            h2o_timeout_entry_t *entry = H2O_STRUCT_FROM_MEMBER(h2o_timeout_entry_t, _link, timeout->_entries.next);
            uint64_t entry_wake_at = entry->registered_at + timeout->timeout;
            if (entry_wake_at < wake_at)
                wake_at = entry_wake_at;
        }
    }

    return wake_at;
}
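A hedged sketch of how the returned value might be turned into a poll(2)-style timeout (hypothetical helper; both values are in milliseconds, and -1 means block indefinitely):

#include <stdint.h>

static int32_t wake_at_to_poll_timeout(uint64_t wake_at, uint64_t now)
{
    if (wake_at == UINT64_MAX)
        return -1; /* nothing pending: block indefinitely */
    if (wake_at <= now)
        return 0;  /* already due: do not block */
    return wake_at - now > INT32_MAX ? INT32_MAX : (int32_t)(wake_at - now);
}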
Example #29
static void *lookup_thread_main(void *_unused)
{
    pthread_mutex_lock(&queue.mutex);

    while (1) {
        --queue.num_threads_idle;
        while (!h2o_linklist_is_empty(&queue.pending)) {
            h2o_hostinfo_getaddr_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_hostinfo_getaddr_req_t, _pending, queue.pending.next);
            h2o_linklist_unlink(&req->_pending);
            pthread_mutex_unlock(&queue.mutex);
            lookup_and_respond(req);
            pthread_mutex_lock(&queue.mutex);
        }
        ++queue.num_threads_idle;
        pthread_cond_wait(&queue.cond, &queue.mutex);
    }

    h2o_fatal("unreachable");
    return NULL;
}
Example #30
void h2o_socketpool_dispose(h2o_socketpool_t *pool)
{
    pthread_mutex_lock(&pool->_shared.mutex);
    while (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
        struct pool_entry_t *entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
        destroy_attached(entry);
        __sync_sub_and_fetch(&pool->_shared.count, 1);
    }
    pthread_mutex_unlock(&pool->_shared.mutex);
    pthread_mutex_destroy(&pool->_shared.mutex);

    if (pool->_interval_cb.loop != NULL) {
        h2o_timeout_unlink(&pool->_interval_cb.entry);
        h2o_timeout_dispose(pool->_interval_cb.loop, &pool->_interval_cb.timeout);
    }
    free(pool->peer.host.base);
    switch (pool->type) {
    case H2O_SOCKETPOOL_TYPE_NAMED:
        free(pool->peer.named_serv.base);
        break;
    case H2O_SOCKETPOOL_TYPE_SOCKADDR:
        break;
    }
}