/* Writes the contents of `bufs` (an array of `bufcnt` iovecs) to `sock`, calling `cb`
 * once the write completes. For cleartext sockets the buffers are handed to `do_write`
 * as-is; for TLS sockets the payload is encrypted into records of a size chosen by the
 * latency-optimization state machine, then flushed.
 */
void h2o_socket_write(h2o_socket_t *sock, h2o_iovec_t *bufs, size_t bufcnt, h2o_socket_cb cb)
{
    size_t i, prev_bytes_written = sock->bytes_written;

    for (i = 0; i != bufcnt; ++i) {
        /* FIX: accumulate rather than overwrite. The original plain assignment discarded
         * the running total, so `bytes_written` (and therefore the `prev_bytes_written`
         * threshold below) never grew beyond the size of the last buffer, keeping the
         * connection stuck in small-record mode forever. */
        sock->bytes_written += bufs[i].len;
#if H2O_SOCKET_DUMP_WRITE
        fprintf(stderr, "writing %zu bytes to fd:%d\n", bufs[i].len, h2o_socket_get_fd(sock));
        h2o_dump_memory(stderr, bufs[i].base, bufs[i].len);
#endif
    }

    if (sock->ssl == NULL) {
        do_write(sock, bufs, bufcnt, cb);
    } else {
        assert(sock->ssl->output.bufs.size == 0);
        /* fill in the data */
        size_t ssl_record_size;
        switch (sock->_latency_optimization.state) {
        case H2O_SOCKET_LATENCY_OPTIMIZATION_STATE_TBD:
        case H2O_SOCKET_LATENCY_OPTIMIZATION_STATE_DISABLED:
            /* before ~200KB has been sent use small records (lower latency-to-first-byte);
             * afterwards switch to full-size 16KB TLS records (lower framing overhead) */
            ssl_record_size = prev_bytes_written < 200 * 1024 ? calc_suggested_tls_payload_size(sock, 1400) : 16384;
            break;
        case H2O_SOCKET_LATENCY_OPTIMIZATION_STATE_DETERMINED:
            sock->_latency_optimization.state = H2O_SOCKET_LATENCY_OPTIMIZATION_STATE_NEEDS_UPDATE;
        /* fallthru */
        default:
            ssl_record_size = sock->_latency_optimization.suggested_tls_payload_size;
            break;
        }
        for (; bufcnt != 0; ++bufs, --bufcnt) {
            size_t off = 0;
            while (off != bufs[0].len) {
                int ret;
                size_t sz = bufs[0].len - off;
                if (sz > ssl_record_size)
                    sz = ssl_record_size;
                ret = SSL_write(sock->ssl->ssl, bufs[0].base + off, (int)sz);
                /* `sz` is bounded by 16384 so the cast to int is exact; the explicit cast
                 * avoids the signed/unsigned comparison (a negative `ret` still mismatches) */
                if (ret != (int)sz) {
                    /* The error happens if SSL_write is called after SSL_read returns a fatal error (e.g. due to corrupt TCP packet
                     * being received). We need to take care of this since some protocol implementations send data after the read-
                     * side of the connection gets closed (note that protocol implementations are (yet) incapable of distinguishing
                     * a normal shutdown and close due to an error using the `status` value of the read callback). */
                    clear_output_buffer(sock->ssl);
                    flush_pending_ssl(sock, cb);
#ifndef H2O_USE_LIBUV
                    ((struct st_h2o_evloop_socket_t *)sock)->_flags |= H2O_SOCKET_FLAG_IS_WRITE_ERROR;
#endif
                    return;
                }
                off += sz;
            }
        }
        flush_pending_ssl(sock, cb);
    }
}
/* Writes `bufcnt` buffers to `sock`; `cb` is invoked when the write has completed.
 * Cleartext sockets delegate directly to do_write; TLS sockets encrypt the payload
 * in chunks of at most 1400 bytes before flushing the pending records.
 */
void h2o_socket_write(h2o_socket_t *sock, h2o_iovec_t *bufs, size_t bufcnt, h2o_socket_cb cb)
{
#if H2O_SOCKET_DUMP_WRITE
    {
        size_t i;
        for (i = 0; i != bufcnt; ++i) {
            fprintf(stderr, "writing %zu bytes to fd:%d\n", bufs[i].len,
#if H2O_USE_LIBUV
                    ((struct st_h2o_uv_socket_t *)sock)->uv.stream->io_watcher.fd
#else
                    ((struct st_h2o_evloop_socket_t *)sock)->fd
#endif
                    );
            h2o_dump_memory(stderr, bufs[i].base, bufs[i].len);
        }
    }
#endif

    if (sock->ssl == NULL) {
        do_write(sock, bufs, bufcnt, cb);
        return;
    }

    assert(sock->ssl->output.bufs.size == 0);

    /* encrypt the payload, one record of at most 1400 bytes at a time */
    while (bufcnt != 0) {
        size_t pos = 0;
        while (pos != bufs->len) {
            size_t chunk = bufs->len - pos;
            int wret;
            if (chunk > 1400)
                chunk = 1400;
            wret = SSL_write(sock->ssl->ssl, bufs->base + pos, (int)chunk);
            if (wret != chunk) {
                /* SSL_write fails here when it is invoked after SSL_read has reported a fatal error
                 * (e.g. a corrupt TCP segment). Protocol handlers may still attempt to send data after
                 * the read side has been torn down, since the read callback's status value does not let
                 * them distinguish a clean shutdown from an error close — so discard the output and
                 * report the failure through the write path. */
                clear_output_buffer(sock->ssl);
                flush_pending_ssl(sock, cb);
#ifndef H2O_USE_LIBUV
                ((struct st_h2o_evloop_socket_t *)sock)->_flags |= H2O_SOCKET_FLAG_IS_WRITE_ERROR;
#endif
                return;
            }
            pos += chunk;
        }
        ++bufs;
        --bufcnt;
    }
    flush_pending_ssl(sock, cb);
}
/* Drives the TLS close-notify exchange before disposing of the socket.
 * A non-zero incoming `status` (an error from a prior step) skips straight to disposal.
 */
static void shutdown_ssl(h2o_socket_t *sock, int status)
{
    int ret;

    if (status != 0)
        goto Close;

    ret = SSL_shutdown(sock->ssl->ssl);
    if (ret == -1)
        goto Close;

    if (sock->ssl->output.bufs.size != 0) {
        /* our close-notify is still buffered; flush it, then either dispose (handshake
         * done, ret == 1) or come back here to finish the bidirectional shutdown */
        h2o_socket_read_stop(sock);
        flush_pending_ssl(sock, ret == 1 ? dispose_socket : shutdown_ssl);
        return;
    }
    if (ret == 2 && SSL_get_error(sock->ssl->ssl, ret) == SSL_ERROR_WANT_READ) {
        /* waiting for the peer's close-notify to arrive */
        h2o_socket_read_start(sock, shutdown_ssl);
        return;
    }
    status = ret == 1;
Close:
    dispose_socket(sock, status);
}
/* Advances the server-side TLS handshake; re-registered as the read/write callback
 * until SSL_accept completes. A non-zero `status` aborts the handshake.
 */
static void proceed_handshake(h2o_socket_t *sock, int status)
{
    int accept_ret;

    sock->_cb.write = NULL;

    if (status != 0)
        goto Complete;

    accept_ret = SSL_accept(sock->ssl->ssl);
    if (accept_ret == 2 || (accept_ret < 0 && SSL_get_error(sock->ssl->ssl, accept_ret) != SSL_ERROR_WANT_READ)) {
        /* fatal handshake failure (anything other than "need more input") */
        status = -1;
        goto Complete;
    }

    if (sock->ssl->output.bufs.size != 0) {
        /* handshake bytes are queued; send them, then either finish or loop back here */
        h2o_socket_read_stop(sock);
        flush_pending_ssl(sock, accept_ret == 1 ? on_handshake_complete : proceed_handshake);
    } else {
        /* nothing to send; wait for more handshake data from the peer */
        h2o_socket_read_start(sock, proceed_handshake);
    }
    return;

Complete:
    h2o_socket_read_stop(sock);
    on_handshake_complete(sock, status);
}
/* Performs the TLS close-notify exchange before disposing of the socket.
 * A non-NULL `err` from a prior step skips the exchange and disposes immediately.
 */
static void shutdown_ssl(h2o_socket_t *sock, const char *err)
{
    int ret;

    if (err != NULL)
        goto Close;

    if (sock->_cb.write != NULL) {
        /* note: libuv calls the write callback after the socket is closed by uv_close
         * (with status set to 0 if the write succeeded) */
        sock->_cb.write = NULL;
        goto Close;
    }

    ret = SSL_shutdown(sock->ssl->ssl);
    if (ret == -1)
        goto Close;

    if (sock->ssl->output.bufs.size != 0) {
        /* our close-notify is still buffered: flush, then dispose (ret == 1 means the
         * bidirectional shutdown is done) or return here to finish it */
        h2o_socket_read_stop(sock);
        flush_pending_ssl(sock, ret == 1 ? dispose_socket : shutdown_ssl);
        return;
    }
    if (ret == 2 && SSL_get_error(sock->ssl->ssl, ret) == SSL_ERROR_WANT_READ) {
        /* waiting for the peer's close-notify */
        h2o_socket_read_start(sock, shutdown_ssl);
        return;
    }

Close:
    dispose_socket(sock, err);
}
/* Writes `bufcnt` buffers to `sock`, invoking `cb` on completion. Cleartext sockets
 * delegate to do_write; TLS sockets encrypt each buffer whole via SSL_write and flush.
 */
void h2o_socket_write(h2o_socket_t *sock, h2o_buf_t *bufs, size_t bufcnt, h2o_socket_cb cb)
{
    size_t idx;

    if (sock->ssl == NULL) {
        do_write(sock, bufs, bufcnt, cb);
        return;
    }

    assert(sock->ssl->output.bufs.size == 0);
    /* fill in the data */
    for (idx = 0; idx != bufcnt; ++idx) {
        int written = SSL_write(sock->ssl->ssl, bufs[idx].base, (int)bufs[idx].len);
        /* FIXME handle error (by deferred-calling cb(sock, -1)) */
        assert(written == bufs[idx].len);
    }
    flush_pending_ssl(sock, cb);
}
/* Writes `bufcnt` buffers to `sock`, invoking `cb` on completion. TLS payloads are
 * split into chunks of at most 1400 bytes per SSL_write call before being flushed.
 */
void h2o_socket_write(h2o_socket_t *sock, h2o_iovec_t *bufs, size_t bufcnt, h2o_socket_cb cb)
{
    size_t idx;

    if (sock->ssl == NULL) {
        do_write(sock, bufs, bufcnt, cb);
        return;
    }

    assert(sock->ssl->output.bufs.size == 0);
    /* fill in the data */
    for (idx = 0; idx != bufcnt; ++idx) {
        size_t done = 0;
        while (done != bufs[idx].len) {
            size_t chunk = bufs[idx].len - done;
            int written;
            if (chunk > 1400)
                chunk = 1400;
            written = SSL_write(sock->ssl->ssl, bufs[idx].base + done, (int)chunk);
            assert(written == chunk);
            done += chunk;
        }
    }
    flush_pending_ssl(sock, cb);
}
/* Writes `bufcnt` buffers to `sock`, invoking `cb` on completion. TLS payloads are
 * encrypted in chunks of at most 1400 bytes; optional write-dump tracing is compiled
 * in when H2O_SOCKET_DUMP_WRITE is set.
 */
void h2o_socket_write(h2o_socket_t *sock, h2o_iovec_t *bufs, size_t bufcnt, h2o_socket_cb cb)
{
#if H2O_SOCKET_DUMP_WRITE
    {
        size_t i;
        for (i = 0; i != bufcnt; ++i) {
            fprintf(stderr, "writing %zu bytes to fd:%d\n", bufs[i].len,
#if H2O_USE_LIBUV
                    ((struct st_h2o_uv_socket_t *)sock)->uv.stream->io_watcher.fd
#else
                    ((struct st_h2o_evloop_socket_t *)sock)->fd
#endif
                    );
            h2o_dump_memory(stderr, bufs[i].base, bufs[i].len);
        }
    }
#endif

    if (sock->ssl == NULL) {
        do_write(sock, bufs, bufcnt, cb);
        return;
    }

    assert(sock->ssl->output.bufs.size == 0);
    /* encrypt the payload, at most 1400 bytes per record */
    while (bufcnt != 0) {
        size_t done = 0;
        while (done != bufs->len) {
            size_t chunk = bufs->len - done;
            int written;
            if (chunk > 1400)
                chunk = 1400;
            written = SSL_write(sock->ssl->ssl, bufs->base + done, (int)chunk);
            assert(written == chunk);
            done += chunk;
        }
        ++bufs;
        --bufcnt;
    }
    flush_pending_ssl(sock, cb);
}
/* Drives the TLS handshake (server or client side) to completion, re-registering
 * itself as the read/write callback until SSL_accept/SSL_connect succeeds. On the
 * server side it also cooperates with the async session-resumption machinery: if the
 * resumption callback defers (REQUEST_SENT), the SSL object is torn down and rebuilt
 * so the handshake can be replayed from the retained first input once the async
 * response arrives. A non-NULL `err` aborts the handshake immediately.
 */
static void proceed_handshake(h2o_socket_t *sock, const char *err)
{
    h2o_iovec_t first_input = {NULL};
    int ret;

    sock->_cb.write = NULL;

    if (err != NULL) {
        goto Complete;
    }

    if (sock->ssl->handshake.server.async_resumption.state == ASYNC_RESUMPTION_STATE_RECORD) {
        if (sock->ssl->input.encrypted->size <= 1024) {
            /* retain a copy of input if performing async resumption */
            /* NOTE(review): copy lives on the stack via alloca — valid only within this
             * call, which is why it is replayed into input.encrypted before returning */
            first_input = h2o_iovec_init(alloca(sock->ssl->input.encrypted->size), sock->ssl->input.encrypted->size);
            memcpy(first_input.base, sock->ssl->input.encrypted->bytes, first_input.len);
        } else {
            /* input too large to snapshot; skip async resumption for this connection */
            sock->ssl->handshake.server.async_resumption.state = ASYNC_RESUMPTION_STATE_COMPLETE;
        }
    }

Redo:
    if (SSL_is_server(sock->ssl->ssl)) {
        ret = SSL_accept(sock->ssl->ssl);
    } else {
        ret = SSL_connect(sock->ssl->ssl);
    }

    switch (sock->ssl->handshake.server.async_resumption.state) {
    case ASYNC_RESUMPTION_STATE_RECORD:
        /* async resumption has not been triggered; proceed the state to complete */
        sock->ssl->handshake.server.async_resumption.state = ASYNC_RESUMPTION_STATE_COMPLETE;
        break;
    case ASYNC_RESUMPTION_STATE_REQUEST_SENT: {
        /* sent async request, reset the ssl state, and wait for async response */
        assert(ret < 0);
        /* rebuild the SSL object from scratch and replay the retained first input so the
         * handshake can restart once the async resumption response is delivered */
        SSL_CTX *ssl_ctx = SSL_get_SSL_CTX(sock->ssl->ssl);
        SSL_free(sock->ssl->ssl);
        create_ssl(sock, ssl_ctx);
        clear_output_buffer(sock->ssl);
        h2o_buffer_consume(&sock->ssl->input.encrypted, sock->ssl->input.encrypted->size);
        h2o_buffer_reserve(&sock->ssl->input.encrypted, first_input.len);
        memcpy(sock->ssl->input.encrypted->bytes, first_input.base, first_input.len);
        sock->ssl->input.encrypted->size = first_input.len;
        h2o_socket_read_stop(sock);
        return;
    }
    default:
        break;
    }

    if (ret == 0 || (ret < 0 && SSL_get_error(sock->ssl->ssl, ret) != SSL_ERROR_WANT_READ)) {
        /* failed */
        /* prefer the certificate-verification error string when verification failed,
         * otherwise report a generic handshake failure */
        long verify_result = SSL_get_verify_result(sock->ssl->ssl);
        if (verify_result != X509_V_OK) {
            err = X509_verify_cert_error_string(verify_result);
        } else {
            err = "ssl handshake failure";
        }
        goto Complete;
    }

    if (sock->ssl->output.bufs.size != 0) {
        /* handshake bytes queued for the peer; flush, then finish or loop back here */
        h2o_socket_read_stop(sock);
        flush_pending_ssl(sock, ret == 1 ? on_handshake_complete : proceed_handshake);
    } else {
        if (ret == 1) {
            /* handshake complete; on the client side, validate the server certificate's
             * hostname before reporting success */
            if (!SSL_is_server(sock->ssl->ssl)) {
                X509 *cert = SSL_get_peer_certificate(sock->ssl->ssl);
                if (cert != NULL) {
                    switch (validate_hostname(sock->ssl->handshake.client.server_name, cert)) {
                    case MatchFound:
                        /* ok */
                        break;
                    case MatchNotFound:
                        err = h2o_socket_error_ssl_cert_name_mismatch;
                        break;
                    default:
                        err = h2o_socket_error_ssl_cert_invalid;
                        break;
                    }
                    X509_free(cert);
                } else {
                    err = h2o_socket_error_ssl_no_cert;
                }
            }
            goto Complete;
        }
        /* more encrypted input is already buffered — run the handshake again */
        if (sock->ssl->input.encrypted->size != 0)
            goto Redo;
        h2o_socket_read_start(sock, proceed_handshake);
    }
    return;

Complete:
    h2o_socket_read_stop(sock);
    on_handshake_complete(sock, err);
}