/* Flushes as much of the stream's queued response data as its HTTP/2 flow-control
 * window permits.  Works in one of two modes depending on how the generator feeds
 * data: "pull" (stream->_pull_cb is set; data is fetched on demand) or "push"
 * (data has been queued into stream->_data by finalostream_send).  Transitions the
 * stream to END_STREAM once the last byte has been handed to the framing layer. */
void h2o_http2_stream_send_pending_data(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    /* flow control: nothing may be sent while the per-stream send window is exhausted */
    if (h2o_http2_window_get_window(&stream->output_window) <= 0)
        return;

    if (stream->_pull_cb != NULL) {
        h2o_send_state_t send_state;
        /* pull mode */
        assert(stream->state != H2O_HTTP2_STREAM_STATE_END_STREAM);
        send_state = send_data_pull(conn, stream);
        if (send_state != H2O_SEND_STATE_IN_PROGRESS) {
            /* sent all data (or the generator reported an error; either way the
             * response body is finished from this stream's point of view) */
            stream->_data.size = 0;
            h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);
        }
    } else {
        /* push mode: send_data_push consumes queued iovecs and returns a pointer to
         * the first vector it could NOT (fully) send */
        h2o_iovec_t *nextbuf = send_data_push(conn, stream, stream->_data.entries, stream->_data.size, stream->send_state);
        if (nextbuf == stream->_data.entries + stream->_data.size) {
            /* sent all data */
            stream->_data.size = 0;
            /* only close if the generator has declared this was the final chunk */
            if (stream->state == H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL)
                h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);
        } else if (nextbuf != stream->_data.entries) {
            /* adjust the buffer: shift the unsent tail to the front of the vector
             * (regions may overlap, hence memmove) */
            size_t newsize = stream->_data.size - (nextbuf - stream->_data.entries);
            memmove(stream->_data.entries, nextbuf, sizeof(h2o_iovec_t) * newsize);
            stream->_data.size = newsize;
        }
    }
}
/* Detaches `stream` from all per-connection bookkeeping: the stream-id hash map,
 * the priority scheduler, and whichever linked list (pending-requests or
 * streams-to-proceed) the stream's current state implies it may be on.  Finally
 * forces the stream into END_STREAM and, unless the connection is shutting down,
 * gives queued requests a chance to run in the slot that just freed up. */
void h2o_http2_conn_unregister_stream(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    /* the stream must still be registered in the id -> stream map */
    khiter_t iter = kh_get(h2o_http2_stream_t, conn->streams, stream->stream_id);
    assert(iter != kh_end(conn->streams));
    kh_del(h2o_http2_stream_t, conn->streams, iter);

    assert(h2o_http2_scheduler_is_open(&stream->_refs.scheduler));
    h2o_http2_scheduler_close(&stream->_refs.scheduler);

    /* unlink from the list implied by the state; the asserts encode which states
     * guarantee (non-)membership */
    switch (stream->state) {
    case H2O_HTTP2_STREAM_STATE_IDLE:
    case H2O_HTTP2_STREAM_STATE_RECV_HEADERS:
    case H2O_HTTP2_STREAM_STATE_RECV_BODY:
        /* not yet queued anywhere */
        assert(!h2o_linklist_is_linked(&stream->_refs.link));
        break;
    case H2O_HTTP2_STREAM_STATE_REQ_PENDING:
        /* always sitting on conn->_pending_reqs */
        assert(h2o_linklist_is_linked(&stream->_refs.link));
        h2o_linklist_unlink(&stream->_refs.link);
        break;
    case H2O_HTTP2_STREAM_STATE_SEND_HEADERS:
    case H2O_HTTP2_STREAM_STATE_SEND_BODY:
    case H2O_HTTP2_STREAM_STATE_END_STREAM:
        /* may or may not be queued for the proceed callback */
        /* NOTE(review): SEND_BODY_IS_FINAL is not handled here although other
         * functions in this file use it — confirm it cannot reach this point */
        if (h2o_linklist_is_linked(&stream->_refs.link))
            h2o_linklist_unlink(&stream->_refs.link);
        break;
    }
    if (stream->state != H2O_HTTP2_STREAM_STATE_END_STREAM)
        h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);

    /* a responding slot was freed: dispatch queued requests and refresh the idle
     * timer, but only while the connection is still serving */
    if (conn->state < H2O_HTTP2_CONN_STATE_IS_CLOSING) {
        run_pending_requests(conn);
        update_idle_timeout(conn);
    }
}
void h2o_http2_stream_reset(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream) { switch (stream->state) { case H2O_HTTP2_STREAM_STATE_IDLE: case H2O_HTTP2_STREAM_STATE_RECV_HEADERS: case H2O_HTTP2_STREAM_STATE_RECV_BODY: case H2O_HTTP2_STREAM_STATE_REQ_PENDING: h2o_http2_stream_close(conn, stream); break; case H2O_HTTP2_STREAM_STATE_SEND_HEADERS: case H2O_HTTP2_STREAM_STATE_SEND_BODY: case H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL: h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM); /* continues */ case H2O_HTTP2_STREAM_STATE_END_STREAM: /* clear all the queued bufs, and close the connection in the callback */ stream->_data.size = 0; if (h2o_linklist_is_linked(&stream->_refs.link)) { /* will be closed in the callback */ } else { h2o_http2_stream_close(conn, stream); } break; } }
/* Final output-stream callback: receives response body vectors from the core.
 * Sends the response headers on first invocation (via the SEND_HEADERS case),
 * queues the supplied buffers into stream->_data, and registers the stream so
 * the data is flattened into frames on the next write/proceed cycle.
 *
 * Preconditions: the previously queued data must already have been flushed
 * (stream->_data.size == 0); `bufs` must stay valid until sent (they are copied
 * as iovecs, not deep-copied). */
void finalostream_send(h2o_ostream_t *self, h2o_req_t *req, h2o_iovec_t *bufs, size_t bufcnt, int is_final)
{
    h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _ostr_final, self);
    h2o_http2_conn_t *conn = (h2o_http2_conn_t *)req->conn;

    assert(stream->_data.size == 0);

    /* send headers */
    switch (stream->state) {
    case H2O_HTTP2_STREAM_STATE_SEND_HEADERS:
        /* a non-zero return means the push was cancelled and the stream ended */
        if (send_headers(conn, stream) != 0)
            return;
    /* fallthru */
    case H2O_HTTP2_STREAM_STATE_SEND_BODY:
        /* remember that this batch terminates the body */
        if (is_final)
            h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_BODY_IS_FINAL);
        break;
    case H2O_HTTP2_STREAM_STATE_END_STREAM:
        /* might get set by h2o_http2_stream_reset */
        return;
    default:
        assert(!"cannot be in a receiving state");
    }

    /* save the contents in queue (shallow copy of the iovec descriptors) */
    if (bufcnt != 0) {
        h2o_vector_reserve(&req->pool, (h2o_vector_t *)&stream->_data, sizeof(h2o_iovec_t), bufcnt);
        memcpy(stream->_data.entries, bufs, sizeof(h2o_iovec_t) * bufcnt);
        stream->_data.size = bufcnt;
    }

    h2o_http2_conn_register_for_proceed_callback(conn, stream);
}
/* Dispatches queued requests for as long as the connection's concurrency
 * budget (can_run_requests) allows.  Each dispatched stream is moved to the
 * SEND_HEADERS state and handed to the request processor. */
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    for (;;) {
        /* stop once the queue is drained or the concurrency limit is hit */
        if (h2o_linklist_is_empty(&conn->_pending_reqs))
            break;
        if (!can_run_requests(conn))
            break;
        /* fetch and detach the oldest pending stream */
        h2o_http2_stream_t *pending = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_pending_reqs.next);
        h2o_linklist_unlink(&pending->_refs.link);
        /* handle it */
        h2o_http2_stream_set_state(conn, pending, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        if (!h2o_http2_stream_is_push(pending->stream_id)) {
            /* track the highest pull stream-id that has entered processing */
            if (conn->pull_stream_ids.max_processed < pending->stream_id)
                conn->pull_stream_ids.max_processed = pending->stream_id;
        }
        h2o_process_request(&pending->req);
    }
}
/* Dispatches queued requests up to the configured per-connection concurrency
 * limit.  The _is_dispatching_pending_reqs flag stays set for the duration of
 * the loop so that re-entrant callers can detect dispatch is in progress. */
static void run_pending_requests(h2o_http2_conn_t *conn)
{
    size_t concurrency_limit = conn->super.ctx->globalconf->http2.max_concurrent_requests_per_connection;

    conn->_is_dispatching_pending_reqs = 1;

    while (!h2o_linklist_is_empty(&conn->_pending_reqs)) {
        /* respect the concurrency budget */
        if (conn->num_streams.responding >= concurrency_limit)
            break;
        /* fetch and detach the oldest pending stream */
        h2o_http2_stream_t *pending = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, _refs.link, conn->_pending_reqs.next);
        h2o_linklist_unlink(&pending->_refs.link);
        /* handle it */
        h2o_http2_stream_set_state(conn, pending, H2O_HTTP2_STREAM_STATE_SEND_HEADERS);
        if (!h2o_http2_stream_is_push(pending->stream_id)) {
            /* track the highest pull stream-id that has entered processing */
            if (conn->pull_stream_ids.max_processed < pending->stream_id)
                conn->pull_stream_ids.max_processed = pending->stream_id;
        }
        h2o_process_request(&pending->req);
    }

    conn->_is_dispatching_pending_reqs = 0;
}
/* Validates a fully-received request and places it on the pending-requests
 * queue, then kicks the dispatcher.  A request whose received body length
 * contradicts its declared content-length is rejected with PROTOCOL_ERROR. */
static void execute_or_enqueue_request(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    assert(stream->state < H2O_HTTP2_STREAM_STATE_REQ_PENDING);

    /* reject requests whose body size disagrees with the declared content-length */
    int length_mismatch = 0;
    if (stream->_req_body != NULL && stream->_expected_content_length != SIZE_MAX)
        length_mismatch = stream->_req_body->size != stream->_expected_content_length;
    if (length_mismatch) {
        send_stream_error(conn, stream->stream_id, H2O_HTTP2_ERROR_PROTOCOL);
        h2o_http2_stream_reset(conn, stream);
        return;
    }

    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_REQ_PENDING);

    /* TODO schedule the pending reqs using the scheduler */
    h2o_linklist_insert(&conn->_pending_reqs, &stream->_refs.link);

    run_pending_requests(conn);
    update_idle_timeout(conn);
}
/* Emits the HEADERS frame for `stream` (and the PUSH_PROMISE beforehand, for
 * pushed streams) and moves the stream to SEND_BODY.
 *
 * Returns 0 on success.  Returns -1 when a push is cancelled (error response,
 * fresh cache-digest entry, or CASPER says the client already has the asset);
 * in that case the stream is moved to END_STREAM and, if the promise was
 * already sent, an RST_STREAM carrying INTERNAL_ERROR is queued.
 *
 * Fix: the previous code wrapped the RST_STREAM error argument in
 * `#ifdef _MSC_VER`, passing the raw negative constant on MSVC instead of the
 * negated (positive wire-format) value used on every other compiler, which
 * would encode 0xFFFFFFFE instead of INTERNAL_ERROR (0x2) on the wire.  The
 * negation is required on all compilers, so the ifdef is removed. */
static int send_headers(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    h2o_timestamp_t ts;

    h2o_get_timestamp(conn->super.ctx, &stream->req.pool, &ts);

    /* cancel push with an error response */
    if (h2o_http2_stream_is_push(stream->stream_id)) {
        if (400 <= stream->req.res.status)
            goto CancelPush;
        /* cancel the push if the client's cache digest proves it already holds a fresh copy */
        if (stream->cache_digests != NULL) {
            ssize_t etag_index = h2o_find_header(&stream->req.headers, H2O_TOKEN_ETAG, -1);
            if (etag_index != -1) {
                h2o_iovec_t url = h2o_concat(&stream->req.pool, stream->req.input.scheme->name, h2o_iovec_init(H2O_STRLIT("://")),
                                             stream->req.input.authority, stream->req.input.path);
                h2o_iovec_t *etag = &stream->req.headers.entries[etag_index].value;
                if (h2o_cache_digests_lookup_by_url_and_etag(stream->cache_digests, url.base, url.len, etag->base, etag->len) ==
                    H2O_CACHE_DIGESTS_STATE_FRESH)
                    goto CancelPush;
            }
        }
    }

    /* reset casper cookie in case cache-digests exist */
    if (stream->cache_digests != NULL && stream->req.hostconf->http2.casper.capacity_bits != 0) {
        h2o_add_header(&stream->req.pool, &stream->req.res.headers, H2O_TOKEN_SET_COOKIE,
                       H2O_STRLIT("h2o_casper=; Path=/; Expires=Sat, 01 Jan 2000 00:00:00 GMT"));
    }

    /* CASPER */
    if (conn->casper != NULL) {
        /* update casper if necessary */
        if (stream->req.hostconf->http2.casper.track_all_types || is_blocking_asset(&stream->req)) {
            if (h2o_http2_casper_lookup(conn->casper, stream->req.path.base, stream->req.path.len, 1)) {
                /* cancel if the pushed resource is already marked as cached */
                if (h2o_http2_stream_is_push(stream->stream_id))
                    goto CancelPush;
            }
        }
        if (stream->cache_digests != NULL)
            goto SkipCookie;
        /* browsers might ignore push responses, or they may process the responses in a different order than they were pushed.
         * Therefore H2O tries to include casper cookie only in the last stream that may be received by the client, or when the
         * value become stable; see also: https://github.com/h2o/h2o/issues/421 */
        if (h2o_http2_stream_is_push(stream->stream_id)) {
            if (!(conn->num_streams.pull.open == 0 && (conn->num_streams.push.half_closed - conn->num_streams.push.send_body) == 1))
                goto SkipCookie;
        } else {
            if (conn->num_streams.push.half_closed - conn->num_streams.push.send_body != 0)
                goto SkipCookie;
        }
        h2o_iovec_t cookie = h2o_http2_casper_get_cookie(conn->casper);
        h2o_add_header(&stream->req.pool, &stream->req.res.headers, H2O_TOKEN_SET_COOKIE, cookie.base, cookie.len);
    SkipCookie:;
    }

    if (h2o_http2_stream_is_push(stream->stream_id)) {
        /* for push, send the push promise */
        if (!stream->push.promise_sent)
            h2o_http2_stream_send_push_promise(conn, stream);
        /* send ASAP if it is a blocking asset (even in case of Firefox we can't wait 1RTT for it to reprioritize the asset) */
        if (is_blocking_asset(&stream->req))
            h2o_http2_scheduler_rebind(&stream->_refs.scheduler, &conn->scheduler, 257, 0);
    } else {
        /* raise the priority of asset files that block rendering to highest if the user-agent is _not_ using dependency-based
         * prioritization (e.g. that of Firefox) */
        if (conn->num_streams.priority.open == 0 && stream->req.hostconf->http2.reprioritize_blocking_assets &&
            h2o_http2_scheduler_get_parent(&stream->_refs.scheduler) == &conn->scheduler && is_blocking_asset(&stream->req))
            h2o_http2_scheduler_rebind(&stream->_refs.scheduler, &conn->scheduler, 257, 0);
    }

    /* send HEADERS, as well as start sending body */
    if (h2o_http2_stream_is_push(stream->stream_id))
        h2o_add_header_by_str(&stream->req.pool, &stream->req.res.headers, H2O_STRLIT("x-http2-push"), 0, H2O_STRLIT("pushed"));
    h2o_hpack_flatten_response(&conn->_write.buf, &conn->_output_header_table, stream->stream_id, conn->peer_settings.max_frame_size,
                               &stream->req.res, &ts, &conn->super.ctx->globalconf->server_name, stream->req.res.content_length);
    h2o_http2_conn_request_write(conn);
    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_BODY);

    return 0;

CancelPush:
    h2o_add_header_by_str(&stream->req.pool, &stream->req.res.headers, H2O_STRLIT("x-http2-push"), 0, H2O_STRLIT("cancelled"));
    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);
    h2o_linklist_insert(&conn->_write.streams_to_proceed, &stream->_refs.link);
    if (stream->push.promise_sent) {
        /* library error constants are negative; negate to obtain the positive
         * HTTP/2 wire-format code (INTERNAL_ERROR) on every compiler */
        h2o_http2_encode_rst_stream_frame(&conn->_write.buf, stream->stream_id, -H2O_HTTP2_ERROR_INTERNAL);
        h2o_http2_conn_request_write(conn);
    }
    return -1;
}
/* Emits the HEADERS frame for `stream` and moves it to SEND_BODY.  For pushed
 * streams it also sends the PUSH_PROMISE; for pull streams it initiates any
 * server pushes the handler requested and attaches an updated CASPER cookie
 * when the push set changed the fingerprint.
 *
 * Returns 0 on success, or -1 when a push is cancelled (error response, or
 * CASPER indicates the client already caches the asset); in that case the
 * stream is moved to END_STREAM and, if the promise was already sent, an
 * RST_STREAM is queued.
 *
 * Fixes: `header_index` was declared `size_t` although h2o_find_header deals
 * in `ssize_t` with -1 as the sentinel (implementation-defined conversion and
 * sign-compare warnings; the sibling `etag_index` already uses `ssize_t`);
 * also `(h2o_iovec_t){}` used an empty-brace initializer that is not valid
 * standard C before C23 — replaced with an explicit `{NULL, 0}`. */
static int send_headers(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream)
{
    h2o_timestamp_t ts;
    size_t num_casper_entries_before_push = 0;

    h2o_get_timestamp(conn->super.ctx, &stream->req.pool, &ts);

    /* cancel push with an error response */
    if (h2o_http2_stream_is_push(stream->stream_id)) {
        if (400 <= stream->req.res.status)
            goto CancelPush;
        h2o_add_header_by_str(&stream->req.pool, &stream->req.res.headers, H2O_STRLIT("x-http2-pushed"), 0, H2O_STRLIT("1"));
    }

    if (stream->req.hostconf->http2.casper.capacity_bits != 0) {
        /* extract the client-side cache fingerprint */
        if (conn->casper == NULL)
            h2o_http2_conn_init_casper(conn, stream->req.hostconf->http2.casper.capacity_bits);
        /* iterate every Cookie header; h2o_find_header uses -1 as "not found" /
         * "start from the beginning", hence the signed index type */
        ssize_t header_index = -1;
        while ((header_index = h2o_find_header(&stream->req.headers, H2O_TOKEN_COOKIE, header_index)) != -1) {
            h2o_header_t *header = stream->req.headers.entries + header_index;
            h2o_http2_casper_consume_cookie(conn->casper, header->value.base, header->value.len);
        }
        num_casper_entries_before_push = h2o_http2_casper_num_entries(conn->casper);
        /* update casper if necessary */
        if (stream->req.hostconf->http2.casper.track_all_types || is_blocking_asset(&stream->req)) {
            ssize_t etag_index = h2o_find_header(&stream->req.headers, H2O_TOKEN_ETAG, -1);
            h2o_iovec_t etag = etag_index != -1 ? stream->req.headers.entries[etag_index].value : (h2o_iovec_t){NULL, 0};
            if (h2o_http2_casper_lookup(conn->casper, stream->req.path.base, stream->req.path.len, etag.base, etag.len, 1)) {
                /* cancel if the pushed resource is already marked as cached */
                if (h2o_http2_stream_is_push(stream->stream_id))
                    goto CancelPush;
            }
        }
    }

    if (h2o_http2_stream_is_push(stream->stream_id)) {
        /* for push, send the push promise */
        if (!stream->push.promise_sent)
            h2o_http2_stream_send_push_promise(conn, stream);
        /* send ASAP if it is a blocking asset (even in case of Firefox we can't wait 1RTT for it to reprioritize the asset) */
        if (is_blocking_asset(&stream->req))
            h2o_http2_scheduler_rebind(&stream->_refs.scheduler, &conn->scheduler, 257, 0);
    } else {
        /* for pull, push things requested, as well as send the casper cookie if modified */
        if (conn->peer_settings.enable_push) {
            size_t i;
            for (i = 0; i != stream->req.http2_push_paths.size; ++i)
                h2o_http2_conn_push_path(conn, stream->req.http2_push_paths.entries[i], stream);
            /* send casper cookie if it has been altered (due to the __stream itself__ or by some of the pushes) */
            if (conn->casper != NULL && num_casper_entries_before_push != h2o_http2_casper_num_entries(conn->casper)) {
                h2o_iovec_t cookie = h2o_http2_casper_build_cookie(conn->casper, &stream->req.pool);
                h2o_add_header(&stream->req.pool, &stream->req.res.headers, H2O_TOKEN_SET_COOKIE, cookie.base, cookie.len);
            }
        }
        /* raise the priority of asset files that block rendering to highest if the user-agent is _not_ using dependency-based
         * prioritization (e.g. that of Firefox) */
        if (conn->num_streams.open_priority == 0 && stream->req.hostconf->http2.reprioritize_blocking_assets &&
            h2o_http2_scheduler_get_parent(&stream->_refs.scheduler) == &conn->scheduler && is_blocking_asset(&stream->req))
            h2o_http2_scheduler_rebind(&stream->_refs.scheduler, &conn->scheduler, 257, 0);
    }

    /* send HEADERS, as well as start sending body */
    h2o_hpack_flatten_response(&conn->_write.buf, &conn->_output_header_table, stream->stream_id, conn->peer_settings.max_frame_size,
                               &stream->req.res, &ts, &conn->super.ctx->globalconf->server_name);
    h2o_http2_conn_request_write(conn);
    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_BODY);

    return 0;

CancelPush:
    h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_END_STREAM);
    h2o_linklist_insert(&conn->_write.streams_to_proceed, &stream->_refs.link);
    if (stream->push.promise_sent) {
        h2o_http2_encode_rst_stream_frame(&conn->_write.buf, stream->stream_id, H2O_HTTP2_ERROR_INTERNAL);
        h2o_http2_conn_request_write(conn);
    }
    return -1;
}