Example 1
static h2o_memcached_req_t *pop_inflight(struct st_h2o_memcached_conn_t *conn, uint32_t serial)
{
    h2o_memcached_req_t *req;

    pthread_mutex_lock(&conn->mutex);

    if (conn->yrmcds.text_mode) {
        /* in text mode, responses are returned in order (and we may receive responses for commands other than GET) */
        if (!h2o_linklist_is_empty(&conn->inflight)) {
            req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, conn->inflight.next);
            assert(req->type == REQ_TYPE_GET);
            if (req->data.get.serial == serial)
                goto Found;
        }
    } else {
        /* in binary mode, responses are received out-of-order (and we would only receive responses for GET) */
        h2o_linklist_t *node;
        for (node = conn->inflight.next; node != &conn->inflight; node = node->next) {
            req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, inflight, node);
            assert(req->type == REQ_TYPE_GET);
            if (req->data.get.serial == serial)
                goto Found;
        }
    }

    /* not found */
    pthread_mutex_unlock(&conn->mutex);
    return NULL;

Found:
    h2o_linklist_unlink(&req->inflight);
    pthread_mutex_unlock(&conn->mutex);
    return req;
}
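All of the examples in this listing hinge on H2O_STRUCT_FROM_MEMBER, h2o's container_of-style macro that maps a pointer to an embedded member (here, the linked-list node) back to the structure that contains it. The following standalone sketch illustrates the idiom under the assumption that the macro is the usual offsetof-based pointer adjustment; the struct and macro names below are simplified stand-ins, not the real h2o definitions.

#include <stddef.h>
#include <stdio.h>

/* assumed to behave like H2O_STRUCT_FROM_MEMBER: subtract the member's offset
 * from the member's address to obtain the enclosing struct */
#define STRUCT_FROM_MEMBER(type, member, ptr) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct link {
    struct link *next, *prev;
};

struct request {
    int serial;
    struct link inflight; /* embedded list node, as in h2o_memcached_req_t */
};

int main(void)
{
    struct request req = {.serial = 42};
    struct link *node = &req.inflight; /* what iterating the list would yield */
    struct request *back = STRUCT_FROM_MEMBER(struct request, inflight, node);

    printf("serial=%d same_object=%d\n", back->serial, back == &req);
    return 0;
}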
Example 2
static h2o_http2_scheduler_queue_node_t *queue_pop(h2o_http2_scheduler_queue_t *queue)
{
    if (!h2o_linklist_is_empty(&queue->anchor257)) {
        h2o_http2_scheduler_queue_node_t *node =
            H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_queue_node_t, _link, queue->anchor257.next);
        h2o_linklist_unlink(&node->_link);
        return node;
    }

    while (queue->bits != 0) {
        int zeroes = __builtin_clzll(queue->bits);
        queue->bits <<= zeroes;
        queue->offset = (queue->offset + zeroes) % (sizeof(queue->anchors) / sizeof(queue->anchors[0]));
        if (!h2o_linklist_is_empty(queue->anchors + queue->offset)) {
            h2o_http2_scheduler_queue_node_t *node =
                H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_queue_node_t, _link, queue->anchors[queue->offset].next);
            h2o_linklist_unlink(&node->_link);
            if (h2o_linklist_is_empty(queue->anchors + queue->offset))
                queue->bits &= (1ULL << (sizeof(queue->bits) * 8 - 1)) - 1;
            return node;
        }
        queue->bits &= (1ULL << (sizeof(queue->bits) * 8 - 1)) - 1;
    }
    return NULL;
}
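The while loop above scans a 64-bit occupancy mask with __builtin_clzll and then clears the most-significant bit to step past the slot it just examined. The minimal program below demonstrates that bit-walk in isolation; the slot numbers and the flat 64-entry layout are illustrative assumptions, not the actual h2o_http2_scheduler_queue_t fields.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t bits = 0;
    size_t offset = 0;

    /* pretend slots 3, 10 and 63 (relative to the current offset) are non-empty */
    bits |= 1ULL << (63 - 3);
    bits |= 1ULL << (63 - 10);
    bits |= 1ULL << (63 - 63);

    while (bits != 0) {
        int zeroes = __builtin_clzll(bits); /* distance to the next set bit */
        bits <<= zeroes;                    /* bring that bit to the MSB */
        offset = (offset + zeroes) % 64;    /* same modulo walk as queue_pop */
        printf("visit slot %zu\n", offset);
        bits &= (1ULL << 63) - 1;           /* clear the MSB: slot consumed */
    }
    return 0;                               /* prints: 3, 10, 63 */
}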
Example 3
h2o_filecache_ref_t *h2o_filecache_open_file(h2o_filecache_t *cache, const char *path, int oflag)
{
    khiter_t iter = kh_get(opencache_set, cache->hash, path);
    h2o_filecache_ref_t *ref;
    int dummy;

    /* look up the cache and return the existing entry if found */
    if (iter != kh_end(cache->hash)) {
        ref = H2O_STRUCT_FROM_MEMBER(h2o_filecache_ref_t, _path, kh_key(cache->hash, iter));
        ++ref->_refcnt;
        goto Exit;
    }

    /* create a new cache entry */
    ref = h2o_mem_alloc(offsetof(h2o_filecache_ref_t, _path) + strlen(path) + 1);
    ref->_refcnt = 1;
    ref->_lru = (h2o_linklist_t){NULL};
    strcpy(ref->_path, path);

    /* if the cache is enabled, register the new entry (evicting the LRU entry if the cache is full) */
    if (cache->capacity != 0) {
        /* purge one entry from LRU if cache is full */
        if (kh_size(cache->hash) == cache->capacity) {
            h2o_filecache_ref_t *purge_ref = H2O_STRUCT_FROM_MEMBER(h2o_filecache_ref_t, _lru, cache->lru.prev);
            khiter_t purge_iter = kh_get(opencache_set, cache->hash, purge_ref->_path);
            assert(purge_iter != kh_end(cache->hash));
            release_from_cache(cache, purge_iter);
        }
        /* assign the new entry */
        ++ref->_refcnt;
        kh_put(opencache_set, cache->hash, ref->_path, &dummy);
        h2o_linklist_insert(cache->lru.next, &ref->_lru);
    }

    /* open the file, or memoize the error */
    if ((ref->fd = open(path, oflag)) != -1 && fstat(ref->fd, &ref->st) == 0) {
        ref->_last_modified.str[0] = '\0';
        ref->_etag.len = 0;
    } else {
        ref->open_err = errno;
        if (ref->fd != -1) {
            close(ref->fd);
            ref->fd = -1;
        }
    }

Exit:
    /* if the entry memoizes an open error, set errno accordingly and return NULL instead of the reference */
    if (ref->fd == -1) {
        errno = ref->open_err;
        h2o_filecache_close_file(ref);
        ref = NULL;
    }
    return ref;
}
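The allocation at the top of this example uses the classic trailing-buffer pattern: offsetof(h2o_filecache_ref_t, _path) + strlen(path) + 1 reserves the fixed header fields plus room for the NUL-terminated path in a single block, which is also why H2O_STRUCT_FROM_MEMBER can later recover the ref from the stored key. Below is a hedged, self-contained sketch of the same pattern, with invented stand-in types and a C99 flexible array member rather than h2o's exact declaration.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache_ref {
    int refcnt;
    char path[]; /* trailing buffer, analogous to h2o_filecache_ref_t::_path */
};

static struct cache_ref *cache_ref_create(const char *path)
{
    /* one allocation covers the header fields and the NUL-terminated string */
    struct cache_ref *ref = malloc(offsetof(struct cache_ref, path) + strlen(path) + 1);
    if (ref == NULL)
        return NULL;
    ref->refcnt = 1;
    strcpy(ref->path, path); /* copies into the tail of the same allocation */
    return ref;
}

int main(void)
{
    struct cache_ref *ref = cache_ref_create("/var/www/index.html");
    if (ref != NULL) {
        printf("%s (refcnt=%d)\n", ref->path, ref->refcnt);
        free(ref);
    }
    return 0;
}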
Example 4
static void graceful_shutdown_resend_goaway(h2o_timeout_entry_t *entry)
{
    h2o_context_t *ctx = H2O_STRUCT_FROM_MEMBER(h2o_context_t, http2._graceful_shutdown_timeout, entry);
    h2o_linklist_t *node;

    for (node = ctx->http2._conns.next; node != &ctx->http2._conns; node = node->next) {
        h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _conns, node);
        if (conn->state < H2O_HTTP2_CONN_STATE_HALF_CLOSED)
            enqueue_goaway(conn, H2O_HTTP2_ERROR_NONE, (h2o_iovec_t){NULL});
    }
}
Example 5
static int compare_fortunes(const list_t *x, const list_t *y)
{
	const fortune_t * const f1 = H2O_STRUCT_FROM_MEMBER(fortune_t, l, x);
	const fortune_t * const f2 = H2O_STRUCT_FROM_MEMBER(fortune_t, l, y);
	const size_t sz = MIN(f1->message.len, f2->message.len);
	int ret = memcmp(f1->message.base, f2->message.base, sz);

	if (!ret)
		ret = f1->message.len < f2->message.len ? -1 : f1->message.len > f2->message.len;

	return ret;
}
Example 6
static void complete_fortunes(struct st_h2o_generator_t *self, h2o_req_t *req)
{
	fortune_ctx_t * const fortune_ctx = H2O_STRUCT_FROM_MEMBER(fortune_ctx_t, generator, self);
	iovec_list_t * const iovec_list = H2O_STRUCT_FROM_MEMBER(iovec_list_t,
	                                                         l,
	                                                         fortune_ctx->iovec_list);

	fortune_ctx->iovec_list = iovec_list->l.next;

	const h2o_send_state_t state = fortune_ctx->iovec_list ?
	                               H2O_SEND_STATE_IN_PROGRESS :
	                               H2O_SEND_STATE_FINAL;

	h2o_send(req, iovec_list->iov, iovec_list->iovcnt, state);
}
Example 7
static void on_idle_timeout(h2o_timeout_entry_t *entry)
{
    h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _timeout_entry, entry);

    enqueue_goaway(conn, H2O_HTTP2_ERROR_NONE, h2o_iovec_init(H2O_STRLIT("idle timeout")));
    close_connection(conn);
}
Example 8
static int proceed(h2o_http2_scheduler_node_t *node, h2o_http2_scheduler_run_cb cb, void *cb_arg)
{
Redo:
    if (node->_queue == NULL)
        return 0;

    h2o_http2_scheduler_queue_node_t *drr_node = queue_pop(node->_queue);
    if (drr_node == NULL)
        return 0;

    h2o_http2_scheduler_openref_t *ref = H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _queue_node, drr_node);

    if (!ref->_self_is_active) {
        /* run the children (manually-unrolled tail recursion) */
        queue_set(node->_queue, &ref->_queue_node, ref->weight);
        node = &ref->node;
        goto Redo;
    }
    assert(ref->_active_cnt != 0);

    /* call the callbacks */
    int still_is_active, bail_out = cb(ref, &still_is_active, cb_arg);
    if (still_is_active) {
        queue_set(node->_queue, &ref->_queue_node, ref->weight);
    } else {
        ref->_self_is_active = 0;
        if (--ref->_active_cnt != 0) {
            queue_set(node->_queue, &ref->_queue_node, ref->weight);
        } else if (ref->node._parent != NULL) {
            decr_active_cnt(ref->node._parent);
        }
    }

    return bail_out;
}
Example 9
static void accept_connection(h2o_socket_t *listener, const char *err)
{
	if (err)
		ERROR(err);
	else {
		thread_context_t * const ctx = H2O_STRUCT_FROM_MEMBER(thread_context_t,
		                                                      event_loop,
		                                                      listener->data);

		if (!ctx->global_data->shutdown) {
			size_t accepted = ctx->config->max_accept;

			do {
				h2o_socket_t * const sock = h2o_evloop_socket_accept(listener);

				if (!sock)
					break;

				ctx->event_loop.conn_num++;
				sock->on_close.cb = on_close_connection;
				sock->on_close.data = &ctx->event_loop.conn_num;
				h2o_accept(&ctx->event_loop.h2o_accept_ctx, sock);
			} while (--accepted > 0);
		}
	}
}
Example 10
static void shutdown_server(h2o_socket_t *listener, const char *err)
{
	if (err)
		ERROR(err);
	else {
		thread_context_t * const ctx = H2O_STRUCT_FROM_MEMBER(thread_context_t,
		                                                      event_loop,
		                                                      listener->data);

		ctx->global_data->shutdown = true;

		// Close the listening sockets immediately, so that if another instance
		// of the application is started before the current one exits (e.g. when
		// doing an update), it will accept all incoming connections.
		if (ctx->event_loop.h2o_https_socket) {
			h2o_socket_read_stop(ctx->event_loop.h2o_https_socket);
			h2o_socket_close(ctx->event_loop.h2o_https_socket);
			ctx->event_loop.h2o_https_socket = NULL;
		}

		if (ctx->event_loop.h2o_socket) {
			h2o_socket_read_stop(ctx->event_loop.h2o_socket);
			h2o_socket_close(ctx->event_loop.h2o_socket);
			ctx->event_loop.h2o_socket = NULL;
		}

		for (size_t i = ctx->config->thread_num - 1; i > 0; i--)
			h2o_multithread_send_message(&ctx->global_thread_data[i].h2o_receiver, NULL);
	}
}
Example 11
static uintmax_t on_fortune_section(mustache_api_t *api,
                                    void *userdata,
                                    mustache_token_section_t *token)
{
	fortune_ctx_t * const fortune_ctx = userdata;
	uintmax_t ret = 1;

	if (fortune_ctx->num_result) {
		assert(fortune_ctx->result);

		const list_t *iter = fortune_ctx->result;

		do {
			fortune_ctx->fortune_iter = H2O_STRUCT_FROM_MEMBER(fortune_t, l, iter);
			iter = iter->next;

			if (!mustache_render(api, fortune_ctx, token->section)) {
				ret = 0;
				break;
			}
		} while (iter);
	}

	return ret;
}
Example 12
void finalostream_send(h2o_ostream_t *self, h2o_req_t *req, h2o_iovec_t *bufs, size_t bufcnt, int is_final)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _ostr_final, self);
    h2o_http2_conn_t *conn = (h2o_http2_conn_t*)req->conn;

    assert(stream->_data.size == 0);

    /* send headers */
    switch (stream->state) {
    case H2O_HTTP2_STREAM_STATE_SEND_HEADERS:
        send_headers(conn, stream);
        /* fallthru */
    case H2O_HTTP2_STREAM_STATE_SEND_BODY:
        if (is_final)
            stream->state = H2O_HTTP2_STREAM_STATE_END_STREAM;
        break;
    case H2O_HTTP2_STREAM_STATE_END_STREAM:
        /* might get set by h2o_http2_stream_reset */
        return;
    default:
        assert(!"cannot be in a receiving state");
    }

    /* save the contents in queue */
    if (bufcnt != 0) {
        h2o_vector_reserve(&req->pool, (h2o_vector_t*)&stream->_data, sizeof(h2o_iovec_t), bufcnt);
        memcpy(stream->_data.entries, bufs, sizeof(h2o_iovec_t) * bufcnt);
        stream->_data.size = bufcnt;
    }

    h2o_http2_conn_register_for_proceed_callback(conn, stream);
}
Example 13
static void initiate_graceful_shutdown(h2o_context_t *ctx)
{
    /* draft-16 6.8
     * A server that is attempting to gracefully shut down a connection SHOULD send an initial GOAWAY frame with the last stream
     * identifier set to 2^31-1 and a NO_ERROR code. This signals to the client that a shutdown is imminent and that no further
     * requests can be initiated. After waiting at least one round trip time, the server can send another GOAWAY frame with an
     * updated last stream identifier. This ensures that a connection can be cleanly shut down without losing requests.
     */
    h2o_linklist_t *node;

    /* only do it once */
    if (ctx->http2._graceful_shutdown_timeout.cb != NULL)
        return;
    ctx->http2._graceful_shutdown_timeout.cb = graceful_shutdown_resend_goaway;

    for (node = ctx->http2._conns.next; node != &ctx->http2._conns; node = node->next) {
        h2o_http2_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http2_conn_t, _conns, node);
        if (conn->state < H2O_HTTP2_CONN_STATE_HALF_CLOSED) {
            h2o_http2_encode_goaway_frame(&conn->_write.buf, INT32_MAX, H2O_HTTP2_ERROR_NONE,
                                          (h2o_iovec_t){H2O_STRLIT("graceful shutdown")});
            h2o_http2_conn_request_write(conn);
        }
    }
    h2o_timeout_link(ctx->loop, &ctx->one_sec_timeout, &ctx->http2._graceful_shutdown_timeout);
}
Example 14
static void finalostream_start_pull(h2o_ostream_t *_self, h2o_ostream_pull_cb cb)
{
    h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http1_conn_t, _ostr_final.super, _self);
    const char *connection = conn->req.http1_is_persistent ? "keep-alive" : "close";
    size_t bufsz, headers_len;

    assert(conn->req._ostr_top == &conn->_ostr_final.super);
    assert(! conn->_ostr_final.sent_headers);

    /* register the pull callback */
    conn->_ostr_final.pull.cb = cb;

    /* setup the buffer */
    bufsz = flatten_headers_estimate_size(&conn->req, conn->super.ctx->globalconf->server_name.len + strlen(connection));
    if (bufsz < MAX_PULL_BUF_SZ) {
        if (MAX_PULL_BUF_SZ - bufsz < conn->req.res.content_length) {
            bufsz = MAX_PULL_BUF_SZ;
        } else {
            bufsz += conn->req.res.content_length;
        }
    }
    conn->_ostr_final.pull.buf = h2o_mem_alloc_pool(&conn->req.pool, bufsz);

    /* fill-in the header */
    headers_len = flatten_headers(conn->_ostr_final.pull.buf, &conn->req, connection);
    conn->_ostr_final.sent_headers = 1;

    proceed_pull(conn, headers_len);
}
Example 15
static void purge(h2o_cache_t *cache, uint64_t now)
{
    /* by cache size */
    while (cache->capacity < cache->size) {
        h2o_cache_ref_t *last;
        assert(!h2o_linklist_is_empty(&cache->lru));
        last = H2O_STRUCT_FROM_MEMBER(h2o_cache_ref_t, _lru_link, cache->lru.next);
        erase_ref(cache, kh_get(cache, cache->table, last), 0);
    }
    /* by TTL */
    while (!h2o_linklist_is_empty(&cache->age)) {
        h2o_cache_ref_t *oldest = H2O_STRUCT_FROM_MEMBER(h2o_cache_ref_t, _age_link, cache->age.next);
        if (get_timeleft(cache, oldest, now) >= 0)
            break;
        erase_ref(cache, kh_get(cache, cache->table, oldest), 0);
    }
}
Example 16
static void reqread_on_timeout(h2o_timeout_entry_t *entry)
{
    h2o_http1_conn_t *conn = H2O_STRUCT_FROM_MEMBER(h2o_http1_conn_t, _timeout_entry, entry);

    /* TODO log */
    conn->req.http1_is_persistent = 0;
    close_connection(conn);
}
Example 17
uint64_t h2o_timeout_get_wake_at(h2o_linklist_t *timeouts)
{
    h2o_linklist_t *node;
    uint64_t wake_at = UINT64_MAX;

    /* change wake_at to the minimum value of the timeouts */
    for (node = timeouts->next; node != timeouts; node = node->next) {
        h2o_timeout_t *timeout = H2O_STRUCT_FROM_MEMBER(h2o_timeout_t, _link, node);
        if (!h2o_linklist_is_empty(&timeout->_entries)) {
            h2o_timeout_entry_t *entry = H2O_STRUCT_FROM_MEMBER(h2o_timeout_entry_t, _link, timeout->_entries.next);
            uint64_t entry_wake_at = entry->registered_at + timeout->timeout;
            if (entry_wake_at < wake_at)
                wake_at = entry_wake_at;
        }
    }

    return wake_at;
}
Example 18
static void process_messages(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
	IGNORE_FUNCTION_PARAMETER(messages);

	thread_context_t * const ctx = H2O_STRUCT_FROM_MEMBER(thread_context_t,
	                                                      event_loop.h2o_receiver,
	                                                      receiver);

	h2o_socket_read_stop(ctx->event_loop.h2o_socket);
}
Example 19
void h2o_configurator__dispose_configurators(h2o_globalconf_t *conf)
{
    while (!h2o_linklist_is_empty(&conf->configurators)) {
        h2o_configurator_t *c = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, conf->configurators.next);
        h2o_linklist_unlink(&c->_link);
        if (c->dispose != NULL)
            c->dispose(c);
        destroy_configurator(c);
    }
}
Example 20
static void *writer_main(void *_conn)
{
    struct st_h2o_memcached_conn_t *conn = _conn;
    yrmcds_error err;

    pthread_mutex_lock(&conn->ctx->mutex);

    while (!__sync_add_and_fetch(&conn->writer_exit_requested, 0)) {
        while (!h2o_linklist_is_empty(&conn->ctx->pending)) {
            h2o_memcached_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_memcached_req_t, pending, conn->ctx->pending.next);
            h2o_linklist_unlink(&req->pending);
            pthread_mutex_unlock(&conn->ctx->mutex);

            switch (req->type) {
            case REQ_TYPE_GET:
                pthread_mutex_lock(&conn->mutex);
                h2o_linklist_insert(&conn->inflight, &req->inflight);
                pthread_mutex_unlock(&conn->mutex);
                if ((err = yrmcds_get(&conn->yrmcds, req->key.base, req->key.len, 0, &req->data.get.serial)) != YRMCDS_OK)
                    goto Error;
                break;
            case REQ_TYPE_SET:
                err = yrmcds_set(&conn->yrmcds, req->key.base, req->key.len, req->data.set.value.base, req->data.set.value.len, 0,
                                 req->data.set.expiration, 0, !conn->yrmcds.text_mode, NULL);
                discard_req(req);
                if (err != YRMCDS_OK)
                    goto Error;
                break;
            case REQ_TYPE_DELETE:
                err = yrmcds_remove(&conn->yrmcds, req->key.base, req->key.len, !conn->yrmcds.text_mode, NULL);
                discard_req(req);
                if (err != YRMCDS_OK)
                    goto Error;
                break;
            default:
                fprintf(stderr, "[lib/common/memcached.c] unknown type:%d\n", (int)req->type);
                err = YRMCDS_NOT_IMPLEMENTED;
                goto Error;
            }

            pthread_mutex_lock(&conn->ctx->mutex);
        }
        pthread_cond_wait(&conn->ctx->cond, &conn->ctx->mutex);
    }

    pthread_mutex_unlock(&conn->ctx->mutex);
    return NULL;

Error:
    fprintf(stderr, "[lib/common/memcached.c] failed to send request; %s\n", yrmcds_strerror(err));
    /* doc says the call can be used to interrupt yrmcds_recv */
    yrmcds_shutdown(&conn->yrmcds);

    return NULL;
}
Example 21
File: config.c Project: Gwill/h2o
static int complete_configurators(h2o_linklist_t *configurators, void *config)
{
    h2o_linklist_t *node;
    for (node = configurators->next; node != configurators; node = node->next) {
        h2o_configurator_t *configurator = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, node);
        if (configurator->on_complete != NULL)
            if (configurator->on_complete(configurator, config) != 0)
                return -1;
    }
    return 0;
}
Example 22
static void accept_http_connection(h2o_socket_t *listener, const char *err)
{
	// Assume that err is most often NULL.
	thread_context_t * const ctx = H2O_STRUCT_FROM_MEMBER(thread_context_t,
	                                                      event_loop,
	                                                      listener->data);
	SSL_CTX * const ssl_ctx = ctx->event_loop.h2o_accept_ctx.ssl_ctx;

	ctx->event_loop.h2o_accept_ctx.ssl_ctx = NULL;
	accept_connection(listener, err);
	ctx->event_loop.h2o_accept_ctx.ssl_ctx = ssl_ctx;
}
Example 23
File: config.c Project: Gwill/h2o
h2o_configurator_t *h2o_config_get_configurator(h2o_linklist_t *anchor, const char *cmd)
{
    h2o_linklist_t *node;

    for (node = anchor->next; node != anchor; node = node->next) {
        h2o_configurator_t *configurator = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, node);
        if (strcmp(configurator->cmd, cmd) == 0)
            return configurator;
    }

    return NULL;
}
Example 24
static inline void release_from_cache(h2o_filecache_t *cache, khiter_t iter)
{
    const char *path = kh_key(cache->hash, iter);
    h2o_filecache_ref_t *ref = H2O_STRUCT_FROM_MEMBER(h2o_filecache_ref_t, _path, path);

    /* detach from list */
    kh_del(opencache_set, cache->hash, iter);
    h2o_linklist_unlink(&ref->_lru);

    /* and close */
    h2o_filecache_close_file(ref);
}
Example 25
void h2o_http2_scheduler_close(h2o_http2_scheduler_openref_t *ref)
{
    assert(h2o_http2_scheduler_is_open(ref));

    /* move dependents to parent */
    if (!h2o_linklist_is_empty(&ref->node._all_refs)) {
        /* proportionally distribute the weight to the children (draft-16 5.3.4) */
        uint32_t total_weight = 0, factor;
        h2o_linklist_t *link;
        for (link = ref->node._all_refs.next; link != &ref->node._all_refs; link = link->next) {
            h2o_http2_scheduler_openref_t *child_ref = H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, link);
            total_weight += child_ref->weight;
        }
        assert(total_weight != 0);
        factor = ((uint32_t)ref->weight * 65536 + total_weight / 2) / total_weight;
        do {
            h2o_http2_scheduler_openref_t *child_ref =
                H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, ref->node._all_refs.next);
            uint16_t weight = (child_ref->weight * factor / 32768 + 1) / 2;
            if (weight < 1)
                weight = 1;
            else if (weight > 256)
                weight = 256;
            h2o_http2_scheduler_rebind(child_ref, ref->node._parent, weight, 0);
        } while (!h2o_linklist_is_empty(&ref->node._all_refs));
    }

    free(ref->node._queue);
    ref->node._queue = NULL;

    /* detach self */
    h2o_linklist_unlink(&ref->_all_link);
    if (ref->_self_is_active) {
        assert(ref->_active_cnt == 1);
        queue_unset(&ref->_queue_node);
        decr_active_cnt(ref->node._parent);
    } else {
        assert(ref->_active_cnt == 0);
    }
}
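The redistribution loop above implements draft-16 section 5.3.4 with fixed-point arithmetic: factor scales each child's weight by the closed node's weight over the children's total, and the (x * factor / 32768 + 1) / 2 step is a rounded divide by 65536, clamped to the HTTP/2 weight range [1, 256]. The standalone worked example below uses made-up weights (parent 16, children 10 and 30) and is kept separate from the real scheduler types.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t parent_weight = 16;
    uint16_t child_weights[] = {10, 30};
    uint32_t total = 0, factor;
    size_t i;

    for (i = 0; i < 2; ++i)
        total += child_weights[i];                                   /* total = 40 */

    /* rounded fixed-point ratio: parent_weight / total in 1/65536 units */
    factor = ((uint32_t)parent_weight * 65536 + total / 2) / total;  /* 26214 */

    for (i = 0; i < 2; ++i) {
        /* rounded divide by 65536, then clamp to [1, 256] as in the h2o code */
        uint16_t w = (child_weights[i] * factor / 32768 + 1) / 2;    /* 4 and 12 */
        if (w < 1)
            w = 1;
        else if (w > 256)
            w = 256;
        printf("child %zu: %u -> %u\n", i, (unsigned)child_weights[i], (unsigned)w);
    }
    return 0; /* the new weights 4 + 12 sum to the parent's weight of 16 */
}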
Example 26
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    while (!h2o_linklist_is_empty(&conn->_pending_reqs) && can_run_requests(conn)) {
        /* fetch and detach a pending stream */
        h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_pending_reqs.next);
        h2o_linklist_unlink(&stream->_refs.link);
        /* handle it */
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        if (!h2o_http2_stream_is_push(stream->stream_id) && conn->pull_stream_ids.max_processed < stream->stream_id)
            conn->pull_stream_ids.max_processed = stream->stream_id;
        h2o_process_request(&stream->req);
    }
}
Example 27
File: config.c Project: Gwill/h2o
void h2o_config_dispose(h2o_global_configuration_t *config)
{
    while (! h2o_linklist_is_empty(&config->virtual_hosts)) {
        h2o_host_configuration_t *host_config = H2O_STRUCT_FROM_MEMBER(h2o_host_configuration_t, _link, config->virtual_hosts.next);
        h2o_linklist_unlink(&host_config->_link);
        dispose_host_config(host_config);
        free(host_config);
    }
    dispose_host_config(&config->default_host);
    DESTROY_LIST(h2o_configurator_t, config->global_configurators);
    DESTROY_LIST(h2o_configurator_t, config->host_configurators);
}
Example 28
File: config.c Project: Gwill/h2o
int h2o_config_on_context_create(h2o_global_configuration_t *config, h2o_context_t *ctx)
{
    h2o_linklist_t *node;

    for (node = config->global_configurators.next; node != &config->global_configurators; node = node->next) {
        h2o_configurator_t *configurator = H2O_STRUCT_FROM_MEMBER(h2o_configurator_t, _link, node);
        if (configurator->on_context_create != NULL)
            if (configurator->on_context_create(configurator, ctx) != 0)
                return -1;
    }

    return 0;
}
Example 29
static void convert_to_exclusive(h2o_http2_scheduler_node_t *parent, h2o_http2_scheduler_openref_t *added)
{
    while (!h2o_linklist_is_empty(&parent->_all_refs)) {
        h2o_http2_scheduler_openref_t *child_ref =
            H2O_STRUCT_FROM_MEMBER(h2o_http2_scheduler_openref_t, _all_link, parent->_all_refs.next);
        if (child_ref == added) {
            /* precond: the added node should exist as the last item within parent */
            assert(parent->_all_refs.prev == &added->_all_link);
            break;
        }
        h2o_http2_scheduler_rebind(child_ref, &added->node, h2o_http2_scheduler_get_weight(child_ref), 0);
    }
}
Example 30
void h2o_cache_clear(h2o_cache_t *cache)
{
    lock_cache(cache);

    while (!h2o_linklist_is_empty(&cache->lru)) {
        h2o_cache_ref_t *ref = H2O_STRUCT_FROM_MEMBER(h2o_cache_ref_t, _lru_link, cache->lru.next);
        erase_ref(cache, kh_get(cache, cache->table, ref), 0);
    }
    assert(h2o_linklist_is_linked(&cache->age));
    assert(kh_size(cache->table) == 0);
    assert(cache->size == 0);

    unlock_cache(cache);
}