static int connection_handle_write(server *srv, connection *con) { switch(network_write_chunkqueue(srv, con, con->write_queue, MAX_WRITE_LIMIT)) { case 0: con->write_request_ts = srv->cur_ts; if (con->file_finished) { connection_set_state(srv, con, CON_STATE_RESPONSE_END); joblist_append(srv, con); } break; case -1: /* error on our side */ log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: write failed on fd", con->fd); connection_set_state(srv, con, CON_STATE_ERROR); joblist_append(srv, con); break; case -2: /* remote close */ connection_set_state(srv, con, CON_STATE_ERROR); joblist_append(srv, con); break; case 1: con->write_request_ts = srv->cur_ts; con->is_writable = 0; /* not finished yet -> WRITE */ break; } return 0; }
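/* Sketch, not part of the original source: the switch above depends on the
 * return-value contract of network_write_chunkqueue(). A hypothetical helper
 * like the one below only restates that contract, e.g. for debug logging. */
static const char *connection_write_result_str(int rc) {
	switch (rc) {
	case  0: return "chunkqueue drained";                    /* response done once file_finished is set */
	case  1: return "partial write, wait for next POLLOUT";  /* is_writable is cleared until then */
	case -1: return "local write error";                     /* connection goes to CON_STATE_ERROR */
	case -2: return "remote side closed the connection";     /* also CON_STATE_ERROR, not logged */
	default: return "unexpected return value";
	}
}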
static handler_t cgi_handle_fdevent(server *srv, void *ctx, int revents) { handler_ctx *hctx = ctx; connection *con = hctx->remote_conn; joblist_append(srv, con); if (revents & FDEVENT_IN) { handler_t rc = cgi_recv_response(srv, hctx);/*(might invalidate hctx)*/ if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/ } /* perhaps this issue is already handled */ if (revents & FDEVENT_HUP) { if (con->file_started) { /* drain any remaining data from kernel pipe buffers * even if (con->conf.stream_response_body * & FDEVENT_STREAM_RESPONSE_BUFMIN) * since event loop will spin on fd FDEVENT_HUP event * until unregistered. */ handler_t rc; do { rc = cgi_recv_response(srv,hctx);/*(might invalidate hctx)*/ } while (rc == HANDLER_GO_ON); /*(unless HANDLER_GO_ON)*/ return rc; /* HANDLER_FINISHED or HANDLER_COMEBACK or HANDLER_ERROR */ } else if (!buffer_string_is_empty(hctx->response_header)) { /* unfinished header package which is a body in reality */ con->file_started = 1; if (0 != http_chunk_append_buffer(srv, con, hctx->response_header)) { cgi_connection_close(srv, hctx); return HANDLER_ERROR; } } else { # if 0 log_error_write(srv, __FILE__, __LINE__, "sddd", "got HUP from cgi", con->fd, hctx->fd, revents); # endif } cgi_connection_close(srv, hctx); } else if (revents & FDEVENT_ERR) { /* kill all connections to the cgi process */ cgi_connection_close(srv, hctx); #if 1 log_error_write(srv, __FILE__, __LINE__, "s", "cgi-FDEVENT_ERR"); #endif return HANDLER_ERROR; } return HANDLER_FINISHED; }
static handler_t cgi_handle_fdevent(server *srv, void *ctx, int revents) { handler_ctx *hctx = ctx; connection *con = hctx->remote_conn; joblist_append(srv, con); if (revents & FDEVENT_IN) { handler_t rc = cgi_recv_response(srv, hctx);/*(might invalidate hctx)*/ if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/ } /* perhaps this issue is already handled */ if (revents & (FDEVENT_HUP|FDEVENT_RDHUP)) { if (con->file_started) { /* drain any remaining data from kernel pipe buffers * even if (con->conf.stream_response_body * & FDEVENT_STREAM_RESPONSE_BUFMIN) * since event loop will spin on fd FDEVENT_HUP event * until unregistered. */ handler_t rc; const unsigned short flags = con->conf.stream_response_body; con->conf.stream_response_body &= ~FDEVENT_STREAM_RESPONSE_BUFMIN; con->conf.stream_response_body |= FDEVENT_STREAM_RESPONSE_POLLRDHUP; do { rc = cgi_recv_response(srv,hctx);/*(might invalidate hctx)*/ } while (rc == HANDLER_GO_ON); /*(unless HANDLER_GO_ON)*/ con->conf.stream_response_body = flags; return rc; /* HANDLER_FINISHED or HANDLER_COMEBACK or HANDLER_ERROR */ } else if (!buffer_string_is_empty(hctx->response)) { /* unfinished header package which is a body in reality */ con->file_started = 1; if (0 != http_chunk_append_buffer(srv, con, hctx->response)) { cgi_connection_close(srv, hctx); return HANDLER_ERROR; } if (0 == con->http_status) con->http_status = 200; /* OK */ } cgi_connection_close(srv, hctx); } else if (revents & FDEVENT_ERR) { /* kill all connections to the cgi process */ cgi_connection_close(srv, hctx); return HANDLER_ERROR; } return HANDLER_FINISHED; }
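/* Sketch, illustrative only: the HUP branch above drains the CGI pipe by
 * calling the receive function until it stops returning HANDLER_GO_ON. A
 * generic version of that loop, with recv_fn standing in for
 * cgi_recv_response(), could look like this. */
typedef handler_t (*recv_fn_t)(server *srv, handler_ctx *hctx);
static handler_t drain_backend(server *srv, handler_ctx *hctx, recv_fn_t recv_fn) {
	handler_t rc;
	do {
		rc = recv_fn(srv, hctx); /* may invalidate hctx on FINISHED/ERROR */
	} while (rc == HANDLER_GO_ON);
	return rc; /* HANDLER_FINISHED, HANDLER_COMEBACK or HANDLER_ERROR */
}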
static handler_t cgi_handle_fdevent_send (server *srv, void *ctx, int revents) { handler_ctx *hctx = ctx; connection *con = hctx->remote_conn; /*(joblist only actually necessary here in mod_cgi fdevent send if returning HANDLER_ERROR)*/ joblist_append(srv, con); if (revents & FDEVENT_OUT) { if (0 != cgi_write_request(srv, hctx, hctx->fdtocgi)) { cgi_connection_close(srv, hctx); return HANDLER_ERROR; } /* more request body to be sent to CGI */ } if (revents & FDEVENT_HUP) { /* skip sending remaining data to CGI */ if (con->request.content_length) { chunkqueue *cq = con->request_content_queue; chunkqueue_mark_written(cq, chunkqueue_length(cq)); if (cq->bytes_in != (off_t)con->request.content_length) { con->keep_alive = 0; } } cgi_connection_close_fdtocgi(srv, hctx); /*(closes only hctx->fdtocgi)*/ } else if (revents & FDEVENT_ERR) { /* kill all connections to the cgi process */ #if 1 log_error_write(srv, __FILE__, __LINE__, "s", "cgi-FDEVENT_ERR"); #endif cgi_connection_close(srv, hctx); return HANDLER_ERROR; } return HANDLER_FINISHED; }
static handler_t cgi_handle_fdevent(void *s, void *ctx, int revents) { server *srv = (server *)s; handler_ctx *hctx = ctx; connection *con = hctx->remote_conn; joblist_append(srv, con); if (hctx->fd == -1) { log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), "invalid cgi-fd"); return HANDLER_ERROR; } if (revents & FDEVENT_IN) { switch (cgi_demux_response(srv, hctx)) { case FDEVENT_HANDLED_NOT_FINISHED: break; case FDEVENT_HANDLED_FINISHED: /* we are done */ #if 0 log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), "finished"); #endif cgi_connection_close(srv, hctx); /* if we get a IN|HUP and have read everything don't exec the close twice */ return HANDLER_FINISHED; case FDEVENT_HANDLED_ERROR: connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); con->http_status = 500; con->mode = DIRECT; log_error_write(srv, __FILE__, __LINE__, "s", "demuxer failed: "); break; } } if (revents & FDEVENT_OUT) { /* nothing to do */ } /* perhaps this issue is already handled */ if (revents & FDEVENT_HUP) { /* check if we still have a unfinished header package which is a body in reality */ if (con->file_started == 0 && hctx->response_header->used) { con->file_started = 1; http_chunk_append_mem(srv, con, hctx->response_header->ptr, hctx->response_header->used); joblist_append(srv, con); } if (con->file_finished == 0) { http_chunk_append_mem(srv, con, NULL, 0); joblist_append(srv, con); } con->file_finished = 1; if (chunkqueue_is_empty(con->write_queue)) { /* there is nothing left to write */ connection_set_state(srv, con, CON_STATE_RESPONSE_END); } else { /* used the write-handler to finish the request on demand */ } # if 0 log_error_write(srv, __FILE__, __LINE__, "sddd", "got HUP from cgi", con->fd, hctx->fd, revents); # endif /* rtsigs didn't liked the close */ cgi_connection_close(srv, hctx); } else if (revents & FDEVENT_ERR) { con->file_finished = 1; /* kill all connections to the cgi process */ cgi_connection_close(srv, hctx); #if 1 log_error_write(srv, __FILE__, __LINE__, "s", "cgi-FDEVENT_ERR"); #endif return HANDLER_ERROR; } return HANDLER_FINISHED; }
static int cgi_demux_response(server *srv, handler_ctx *hctx) { plugin_data *p = hctx->plugin_data; connection *con = hctx->remote_conn; while(1) { int n; buffer_prepare_copy(hctx->response, 1024); if (-1 == (n = read(hctx->fd, hctx->response->ptr, hctx->response->size - 1))) { if (errno == EAGAIN || errno == EINTR) { /* would block, wait for signal */ return FDEVENT_HANDLED_NOT_FINISHED; } /* error */ log_error_write(srv, __FILE__, __LINE__, "sdd", strerror(errno), con->fd, hctx->fd); return FDEVENT_HANDLED_ERROR; } if (n == 0) { /* read finished */ con->file_finished = 1; /* send final chunk */ http_chunk_append_mem(srv, con, NULL, 0); joblist_append(srv, con); return FDEVENT_HANDLED_FINISHED; } hctx->response->ptr[n] = '\0'; hctx->response->used = n+1; /* split header from body */ if (con->file_started == 0) { char *c; int in_header = 0; int header_end = 0; int cp, eol = EOL_UNSET; size_t used = 0; buffer_append_string_buffer(hctx->response_header, hctx->response); /* nph (non-parsed headers) */ if (0 == strncmp(hctx->response_header->ptr, "HTTP/1.", 7)) in_header = 1; /* search for the \r\n\r\n or \n\n in the string */ for (c = hctx->response_header->ptr, cp = 0, used = hctx->response_header->used - 1; used; c++, cp++, used--) { if (*c == ':') in_header = 1; else if (*c == '\n') { if (in_header == 0) { /* got a response without a response header */ c = NULL; header_end = 1; break; } if (eol == EOL_UNSET) eol = EOL_N; if (*(c+1) == '\n') { header_end = 1; break; } } else if (used > 1 && *c == '\r' && *(c+1) == '\n') { if (in_header == 0) { /* got a response without a response header */ c = NULL; header_end = 1; break; } if (eol == EOL_UNSET) eol = EOL_RN; if (used > 3 && *(c+2) == '\r' && *(c+3) == '\n') { header_end = 1; break; } /* skip the \n */ c++; cp++; used--; } } if (header_end) { if (c == NULL) { /* no header, but a body */ if (con->request.http_version == HTTP_VERSION_1_1) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } http_chunk_append_mem(srv, con, hctx->response_header->ptr, hctx->response_header->used); joblist_append(srv, con); } else { size_t hlen = c - hctx->response_header->ptr + (eol == EOL_RN ? 4 : 2); size_t blen = hctx->response_header->used - hlen - 1; /* a small hack: terminate after at the second \r */ hctx->response_header->used = hlen + 1 - (eol == EOL_RN ? 2 : 1); hctx->response_header->ptr[hlen - (eol == EOL_RN ? 2 : 1)] = '\0'; /* parse the response header */ cgi_response_parse(srv, con, p, hctx->response_header, eol); /* enable chunked-transfer-encoding */ if (con->request.http_version == HTTP_VERSION_1_1 && !(con->parsed_response & HTTP_CONTENT_LENGTH)) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } if ((hctx->response->used != hlen) && blen > 0) { http_chunk_append_mem(srv, con, c + (eol == EOL_RN ? 4: 2), blen + 1); joblist_append(srv, con); } } con->file_started = 1; } } else { http_chunk_append_mem(srv, con, hctx->response->ptr, hctx->response->used); joblist_append(srv, con); } #if 0 log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), b->ptr); #endif } return FDEVENT_HANDLED_NOT_FINISHED; }
static int cgi_demux_response(server *srv, handler_ctx *hctx) { plugin_data *p = hctx->plugin_data; connection *con = hctx->remote_conn; while(1) { int n; int toread; #if defined(__WIN32) buffer_prepare_copy(hctx->response, 4 * 1024); #else if (ioctl(con->fd, FIONREAD, &toread) || toread == 0 || toread <= 4*1024) { buffer_prepare_copy(hctx->response, 4 * 1024); } else { if (toread > MAX_READ_LIMIT) toread = MAX_READ_LIMIT; buffer_prepare_copy(hctx->response, toread + 1); } #endif if (-1 == (n = read(hctx->fd, hctx->response->ptr, hctx->response->size - 1))) { if (errno == EAGAIN || errno == EINTR) { /* would block, wait for signal */ return FDEVENT_HANDLED_NOT_FINISHED; } /* error */ log_error_write(srv, __FILE__, __LINE__, "sdd", strerror(errno), con->fd, hctx->fd); return FDEVENT_HANDLED_ERROR; } if (n == 0) { /* read finished */ con->file_finished = 1; /* send final chunk */ http_chunk_append_mem(srv, con, NULL, 0); joblist_append(srv, con); return FDEVENT_HANDLED_FINISHED; } hctx->response->ptr[n] = '\0'; hctx->response->used = n+1; /* split header from body */ if (con->file_started == 0) { int is_header = 0; int is_header_end = 0; size_t last_eol = 0; size_t i; buffer_append_string_buffer(hctx->response_header, hctx->response); /** * we have to handle a few cases: * * nph: * * HTTP/1.0 200 Ok\n * Header: Value\n * \n * * CGI: * Header: Value\n * Status: 200\n * \n * * and different mixes of \n and \r\n combinations * * Some users also forget about CGI and just send a response and hope * we handle it. No headers, no header-content seperator * */ /* nph (non-parsed headers) */ if (0 == strncmp(hctx->response_header->ptr, "HTTP/1.", 7)) is_header = 1; for (i = 0; !is_header_end && i < hctx->response_header->used - 1; i++) { char c = hctx->response_header->ptr[i]; switch (c) { case ':': /* we found a colon * * looks like we have a normal header */ is_header = 1; break; case '\n': /* EOL */ if (is_header == 0) { /* we got a EOL but we don't seem to got a HTTP header */ is_header_end = 1; break; } /** * check if we saw a \n(\r)?\n sequence */ if (last_eol > 0 && ((i - last_eol == 1) || (i - last_eol == 2 && hctx->response_header->ptr[i - 1] == '\r'))) { is_header_end = 1; break; } last_eol = i; break; } } if (is_header_end) { if (!is_header) { /* no header, but a body */ if (con->request.http_version == HTTP_VERSION_1_1) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } http_chunk_append_mem(srv, con, hctx->response_header->ptr, hctx->response_header->used); joblist_append(srv, con); } else { const char *bstart; size_t blen; /** * i still points to the char after the terminating EOL EOL * * put it on the last \n again */ i--; /* the body starts after the EOL */ bstart = hctx->response_header->ptr + (i + 1); blen = (hctx->response_header->used - 1) - (i + 1); /* string the last \r?\n */ if (i > 0 && (hctx->response_header->ptr[i - 1] == '\r')) { i--; } hctx->response_header->ptr[i] = '\0'; hctx->response_header->used = i + 1; /* the string + \0 */ /* parse the response header */ cgi_response_parse(srv, con, p, hctx->response_header); /* enable chunked-transfer-encoding */ if (con->request.http_version == HTTP_VERSION_1_1 && !(con->parsed_response & HTTP_CONTENT_LENGTH)) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } if (blen > 0) { http_chunk_append_mem(srv, con, bstart, blen + 1); joblist_append(srv, con); } } con->file_started = 1; } } else { http_chunk_append_mem(srv, con, hctx->response->ptr, hctx->response->used); joblist_append(srv, con); } 
#if 0 log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), b->ptr); #endif } return FDEVENT_HANDLED_NOT_FINISHED; }
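/* Sketch (assumption, not the function above): the header/body split in
 * cgi_demux_response() amounts to finding the first blank line, accepting both
 * "\n\n" and "\n\r\n" as terminators. A stand-alone scanner could look like
 * this; it returns the offset where the body starts, or -1 if the header is
 * still incomplete. Requires <sys/types.h> for ssize_t. */
static ssize_t cgi_find_header_end(const char *p, size_t len) {
	size_t i;
	for (i = 0; i + 1 < len; ++i) {
		if (p[i] != '\n') continue;
		if (p[i + 1] == '\n') return (ssize_t)(i + 2);                                    /* "\n\n"   */
		if (i + 2 < len && p[i + 1] == '\r' && p[i + 2] == '\n') return (ssize_t)(i + 3); /* "\n\r\n" */
	}
	return -1; /* terminator not seen yet, keep buffering */
}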
int network_write_chunkqueue(server *srv, connection *con, chunkqueue *cq, off_t max_bytes) { int ret = -1; off_t written = 0; #ifdef TCP_CORK int corked = 0; #endif server_socket *srv_socket = con->srv_socket; if (con->conf.global_kbytes_per_second) { off_t limit = con->conf.global_kbytes_per_second * 1024 - *(con->conf.global_bytes_per_second_cnt_ptr); if (limit <= 0) { /* we reached the global traffic limit */ con->traffic_limit_reached = 1; joblist_append(srv, con); return 1; } else { if (max_bytes > limit) max_bytes = limit; } } if (con->conf.kbytes_per_second) { off_t limit = con->conf.kbytes_per_second * 1024 - con->bytes_written_cur_second; if (limit <= 0) { /* we reached the traffic limit */ con->traffic_limit_reached = 1; joblist_append(srv, con); return 1; } else { if (max_bytes > limit) max_bytes = limit; } } written = cq->bytes_out; #ifdef TCP_CORK /* Linux: put a cork into the socket as we want to combine the write() calls * but only if we really have multiple chunks */ if (cq->first && cq->first->next) { corked = 1; setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked)); } #endif if (srv_socket->is_ssl) { #ifdef USE_OPENSSL ret = srv->network_ssl_backend_write(srv, con, con->ssl, cq, max_bytes); #endif } else { ret = srv->network_backend_write(srv, con, con->fd, cq, max_bytes); } if (ret >= 0) { chunkqueue_remove_finished_chunks(cq); ret = chunkqueue_is_empty(cq) ? 0 : 1; } #ifdef TCP_CORK if (corked) { corked = 0; setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked)); } #endif written = cq->bytes_out - written; con->bytes_written += written; con->bytes_written_cur_second += written; *(con->conf.global_bytes_per_second_cnt_ptr) += written; return ret; }
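/* Sketch, not lighttpd API: the two throttle checks above reduce to this
 * arithmetic; cfg_kbytes and already_written stand in for the configured
 * *kbytes_per_second value and the byte counter of the current second. */
static off_t write_budget(off_t cfg_kbytes, off_t already_written, off_t max_bytes) {
	off_t limit;
	if (0 == cfg_kbytes) return max_bytes;        /* throttling not configured */
	limit = cfg_kbytes * 1024 - already_written;
	if (limit <= 0) return 0;                     /* limit reached; caller sets traffic_limit_reached */
	return (max_bytes > limit) ? limit : max_bytes;
}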
static handler_t cgi_handle_fdevent(void *s, void *ctx, int revents) { server *srv = (server *)s; cgi_session *sess = ctx; connection *con = sess->remote_con; if (revents & FDEVENT_IN) { switch (sess->state) { case CGI_STATE_READ_RESPONSE_HEADER: /* parse the header and set file-started, the demuxer will care about it */ joblist_append(srv, con); break; case CGI_STATE_READ_RESPONSE_CONTENT: /* just forward the content to the out-going queue */ chunkqueue_remove_finished_chunks(sess->rb); switch (srv->network_backend_read(srv, con, sess->sock, sess->rb)) { case NETWORK_STATUS_CONNECTION_CLOSE: fdevent_event_del(srv->ev, sess->sock); /* connection closed. close the read chunkqueue. */ sess->rb->is_closed = 1; case NETWORK_STATUS_SUCCESS: /* read even more, do we have all the content */ /* how much do we want to read ? */ /* copy the resopnse content */ cgi_copy_response(srv, con, sess); break; default: ERROR("%s", "oops, we failed to read"); break; } joblist_append(srv, con); break; default: TRACE("unexpected state for a FDEVENT_IN: %d", sess->state); break; } } if (revents & FDEVENT_OUT) { /* nothing to do */ } /* perhaps this issue is already handled */ if (revents & FDEVENT_HUP) { con->send->is_closed = 1; fdevent_event_del(srv->ev, sess->sock); joblist_append(srv, con); } else if (revents & FDEVENT_ERR) { con->send->is_closed = 1; /* kill all connections to the cgi process */ fdevent_event_del(srv->ev, sess->sock); joblist_append(srv, con); } return HANDLER_FINISHED; }
static int cgi_demux_response(server *srv, connection *con, plugin_data *p) { cgi_session *sess = con->plugin_ctx[p->id]; switch(srv->network_backend_read(srv, con, sess->sock, sess->rb)) { case NETWORK_STATUS_CONNECTION_CLOSE: fdevent_event_del(srv->ev, sess->sock); /* connection closed. close the read chunkqueue. */ sess->rb->is_closed = 1; case NETWORK_STATUS_SUCCESS: /* we got content */ break; case NETWORK_STATUS_WAIT_FOR_EVENT: return 0; default: /* oops */ ERROR("%s", "oops, read-pipe-read failed and I don't know why"); return -1; } /* looks like we got some content * * split off the header from the incoming stream */ if (con->file_started == 0) { size_t i; int have_content_length = 0; http_response_reset(p->resp); /* the response header is not fully received yet, * * extract the http-response header from the rb-cq */ switch (http_response_parse_cq(sess->rb, p->resp)) { case PARSE_UNSET: case PARSE_ERROR: /* parsing failed */ TRACE("%s", "response parser failed"); con->http_status = 502; /* Bad Gateway */ return -1; case PARSE_NEED_MORE: if (sess->rb->is_closed) { /* backend died before sending a header */ con->http_status = 502; /* Bad Gateway */ return -1; } return 0; case PARSE_SUCCESS: con->http_status = p->resp->status; chunkqueue_remove_finished_chunks(sess->rb); /* copy the http-headers */ for (i = 0; i < p->resp->headers->used; i++) { const char *ign[] = { "Status", "Connection", NULL }; size_t j; data_string *ds; data_string *header = (data_string *)p->resp->headers->data[i]; /* some headers are ignored by default */ for (j = 0; ign[j]; j++) { if (0 == strcasecmp(ign[j], header->key->ptr)) break; } if (ign[j]) continue; if (0 == buffer_caseless_compare(CONST_BUF_LEN(header->key), CONST_STR_LEN("Location"))) { /* CGI/1.1 rev 03 - 7.2.1.2 */ if (con->http_status == 0) con->http_status = 302; } else if (0 == buffer_caseless_compare(CONST_BUF_LEN(header->key), CONST_STR_LEN("Content-Length"))) { have_content_length = 1; } if (NULL == (ds = (data_string *)array_get_unused_element(con->response.headers, TYPE_STRING))) { ds = data_response_init(); } buffer_copy_string_buffer(ds->key, header->key); buffer_copy_string_buffer(ds->value, header->value); array_insert_unique(con->response.headers, (data_unset *)ds); } con->file_started = 1; /* if Status: ... is not set, 200 is our default status-code */ if (con->http_status == 0) con->http_status = 200; sess->state = CGI_STATE_READ_RESPONSE_CONTENT; if (con->request.http_version == HTTP_VERSION_1_1 && !have_content_length) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } break; } } /* FIXME: pass the response-header to the other plugins to * setup the filter-queue * * - use next-queue instead of con->write_queue */ /* copy the resopnse content */ cgi_copy_response(srv, con, sess); joblist_append(srv, con); return 0; }
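/* Sketch, illustrative: the header-copy loop above skips a small set of
 * CGI-control/hop-by-hop headers; the same check factored into a predicate.
 * Requires <strings.h> for strcasecmp(). */
static int cgi_header_is_ignored(const char *key) {
	static const char *ign[] = { "Status", "Connection", NULL };
	size_t j;
	for (j = 0; ign[j]; j++) {
		if (0 == strcasecmp(ign[j], key)) return 1;
	}
	return 0;
}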
handler_t connection_handle_fdevent(void *s, void *context, int revents) { server *srv = (server *)s; connection *con = context; joblist_append(srv, con); if (revents & FDEVENT_IN) { con->is_readable = 1; #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "read-wait - done", con->fd); #endif } if (revents & FDEVENT_OUT) { con->is_writable = 1; /* we don't need the event twice */ } if (revents & ~(FDEVENT_IN | FDEVENT_OUT)) { /* looks like an error */ /* FIXME: revents = 0x19 still means that we should read from the queue */ if (revents & FDEVENT_HUP) { if (con->state == CON_STATE_CLOSE) { con->close_timeout_ts = 0; } else { /* sigio reports the wrong event here * * there was no HUP at all */ #ifdef USE_LINUX_SIGIO if (srv->ev->in_sigio == 1) { log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> HUP", con->fd); } else { connection_set_state(srv, con, CON_STATE_ERROR); } #else connection_set_state(srv, con, CON_STATE_ERROR); #endif } } else if (revents & FDEVENT_ERR) { #ifndef USE_LINUX_SIGIO log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> ERR", con->fd); #endif connection_set_state(srv, con, CON_STATE_ERROR); } else { log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> ???", revents); } } if (con->state == CON_STATE_READ || con->state == CON_STATE_READ_POST) { connection_handle_read_state(srv, con); } if (con->state == CON_STATE_WRITE && !chunkqueue_is_empty(con->write_queue) && con->is_writable) { if (-1 == connection_handle_write(srv, con)) { connection_set_state(srv, con, CON_STATE_ERROR); log_error_write(srv, __FILE__, __LINE__, "ds", con->fd, "handle write failed."); } else if (con->state == CON_STATE_WRITE) { con->write_request_ts = srv->cur_ts; } } if (con->state == CON_STATE_CLOSE) { /* flush the read buffers */ int b; if (ioctl(con->fd, FIONREAD, &b)) { log_error_write(srv, __FILE__, __LINE__, "ss", "ioctl() failed", strerror(errno)); } if (b > 0) { char buf[1024]; log_error_write(srv, __FILE__, __LINE__, "sdd", "CLOSE-read()", con->fd, b); /* */ read(con->fd, buf, sizeof(buf)); } else { /* nothing to read */ con->close_timeout_ts = 0; } } return HANDLER_FINISHED; }
/* 0: everything ok, -1: error, -2: con closed */ static int connection_handle_read(server *srv, connection *con) { int len; buffer *b; int toread, read_offset; if (con->conf.is_ssl) { return connection_handle_read_ssl(srv, con); } b = (NULL != con->read_queue->last) ? con->read_queue->last->mem : NULL; /* default size for chunks is 4kb; only use bigger chunks if FIONREAD tells * us more than 4kb is available * if FIONREAD doesn't signal a big chunk we fill the previous buffer * if it has >= 1kb free */ #if defined(__WIN32) if (NULL == b || b->size - b->used < 1024) { b = chunkqueue_get_append_buffer(con->read_queue); buffer_prepare_copy(b, 4 * 1024); } read_offset = (b->used == 0) ? 0 : b->used - 1; len = recv(con->fd, b->ptr + read_offset, b->size - 1 - read_offset, 0); #else #ifdef HAVE_LIBMTCP /* toread = MAX_READ_LIMIT; */ if (mtcp_socket_ioctl(srv->mctx, con->fd, FIONREAD, &toread) || toread == 0 || toread <= 4*1024) { #else if (ioctl(con->fd, FIONREAD, &toread) || toread == 0 || toread <= 4*1024) { #endif if (NULL == b || b->size - b->used < 1024) { b = chunkqueue_get_append_buffer(con->read_queue); buffer_prepare_copy(b, 4 * 1024); } } else { if (toread > MAX_READ_LIMIT) toread = MAX_READ_LIMIT; b = chunkqueue_get_append_buffer(con->read_queue); buffer_prepare_copy(b, toread + 1); } read_offset = (b->used == 0) ? 0 : b->used - 1; #ifdef HAVE_LIBMTCP len = mtcp_read(srv->mctx, con->fd, b->ptr + read_offset, b->size - 1 - read_offset); #else len = read(con->fd, b->ptr + read_offset, b->size - 1 - read_offset); #endif #endif if (len < 0) { con->is_readable = 0; if (errno == EAGAIN) return 0; if (errno == EINTR) { /* we have been interrupted before we could read */ con->is_readable = 1; return 0; } if (errno != ECONNRESET) { /* expected for keep-alive */ log_error_write(srv, __FILE__, __LINE__, "ssd", "connection closed - read failed: ", strerror(errno), errno); } connection_set_state(srv, con, CON_STATE_ERROR); return -1; } else if (len == 0) { con->is_readable = 0; /* the other end close the connection -> KEEP-ALIVE */ /* pipelining */ return -2; } else if ((size_t)len < b->size - 1) { /* we got less then expected, wait for the next fd-event */ con->is_readable = 0; } if (b->used > 0) b->used--; b->used += len; b->ptr[b->used++] = '\0'; con->bytes_read += len; #if 0 dump_packet(b->ptr, len); #endif return 0; } static int connection_handle_write_prepare(server *srv, connection *con) { if (con->mode == DIRECT) { /* static files */ switch(con->request.http_method) { case HTTP_METHOD_GET: case HTTP_METHOD_POST: case HTTP_METHOD_HEAD: case HTTP_METHOD_PUT: case HTTP_METHOD_PATCH: case HTTP_METHOD_MKCOL: case HTTP_METHOD_DELETE: case HTTP_METHOD_COPY: case HTTP_METHOD_MOVE: case HTTP_METHOD_PROPFIND: case HTTP_METHOD_PROPPATCH: case HTTP_METHOD_LOCK: case HTTP_METHOD_UNLOCK: break; case HTTP_METHOD_OPTIONS: /* * 400 is coming from the request-parser BEFORE uri.path is set * 403 is from the response handler when noone else catched it * * */ if ((!con->http_status || con->http_status == 200) && con->uri.path->used && con->uri.path->ptr[0] != '*') { response_header_insert(srv, con, CONST_STR_LEN("Allow"), CONST_STR_LEN("OPTIONS, GET, HEAD, POST")); con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED; con->parsed_response &= ~HTTP_CONTENT_LENGTH; con->http_status = 200; con->file_finished = 1; chunkqueue_reset(con->write_queue); } break; default: switch(con->http_status) { case 400: /* bad request */ case 401: /* authorization required */ case 414: /* overload request header */ 
case 505: /* unknown protocol */ case 207: /* this was webdav */ break; default: con->http_status = 501; break; } break; } } if (con->http_status == 0) { con->http_status = 403; } switch(con->http_status) { case 204: /* class: header only */ case 205: case 304: /* disable chunked encoding again as we have no body */ con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED; con->parsed_response &= ~HTTP_CONTENT_LENGTH; chunkqueue_reset(con->write_queue); con->file_finished = 1; break; default: /* class: header + body */ if (con->mode != DIRECT) break; /* only custom body for 4xx and 5xx */ if (con->http_status < 400 || con->http_status >= 600) break; con->file_finished = 0; buffer_reset(con->physical.path); /* try to send static errorfile */ if (!buffer_is_empty(con->conf.errorfile_prefix)) { stat_cache_entry *sce = NULL; buffer_copy_string_buffer(con->physical.path, con->conf.errorfile_prefix); buffer_append_long(con->physical.path, con->http_status); buffer_append_string_len(con->physical.path, CONST_STR_LEN(".html")); if (HANDLER_ERROR != stat_cache_get_entry(srv, con, con->physical.path, &sce)) { con->file_finished = 1; http_chunk_append_file(srv, con, con->physical.path, 0, sce->st.st_size); response_header_overwrite(srv, con, CONST_STR_LEN("Content-Type"), CONST_BUF_LEN(sce->content_type)); } } if (!con->file_finished) { buffer *b; buffer_reset(con->physical.path); con->file_finished = 1; b = chunkqueue_get_append_buffer(con->write_queue); /* build default error-page */ buffer_copy_string_len(b, CONST_STR_LEN( "<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n" "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n" " \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n" "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n" " <head>\n" " <title>")); buffer_append_long(b, con->http_status); buffer_append_string_len(b, CONST_STR_LEN(" - ")); buffer_append_string(b, get_http_status_name(con->http_status)); buffer_append_string_len(b, CONST_STR_LEN( "</title>\n" " </head>\n" " <body>\n" " <h1>")); buffer_append_long(b, con->http_status); buffer_append_string_len(b, CONST_STR_LEN(" - ")); buffer_append_string(b, get_http_status_name(con->http_status)); buffer_append_string_len(b, CONST_STR_LEN("</h1>\n" " </body>\n" "</html>\n" )); response_header_overwrite(srv, con, CONST_STR_LEN("Content-Type"), CONST_STR_LEN("text/html")); } break; } if (con->file_finished) { /* we have all the content and chunked encoding is not used, set a content-length */ if ((!(con->parsed_response & HTTP_CONTENT_LENGTH)) && (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) == 0) { off_t qlen = chunkqueue_length(con->write_queue); /** * The Content-Length header only can be sent if we have content: * - HEAD doesn't have a content-body (but have a content-length) * - 1xx, 204 and 304 don't have a content-body (RFC 2616 Section 4.3) * * Otherwise generate a Content-Length header as chunked encoding is not * available */ if ((con->http_status >= 100 && con->http_status < 200) || con->http_status == 204 || con->http_status == 304) { data_string *ds; /* no Content-Body, no Content-Length */ if (NULL != (ds = (data_string*) array_get_element(con->response.headers, "Content-Length"))) { buffer_reset(ds->value); /* Headers with empty values are ignored for output */ } } else if (qlen > 0 || con->request.http_method != HTTP_METHOD_HEAD) { /* qlen = 0 is important for Redirects (301, ...) as they MAY have * a content. 
Browsers are waiting for a Content otherwise */ buffer_copy_off_t(srv->tmp_buf, qlen); response_header_overwrite(srv, con, CONST_STR_LEN("Content-Length"), CONST_BUF_LEN(srv->tmp_buf)); } } } else { /** * the file isn't finished yet, but we have all headers * * to get keep-alive we either need: * - Content-Length: ... (HTTP/1.0 and HTTP/1.0) or * - Transfer-Encoding: chunked (HTTP/1.1) */ if (((con->parsed_response & HTTP_CONTENT_LENGTH) == 0) && ((con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) == 0)) { con->keep_alive = 0; } /** * if the backend sent a Connection: close, follow the wish * * NOTE: if the backend sent Connection: Keep-Alive, but no Content-Length, we * will close the connection. That's fine. We can always decide the close * the connection * * FIXME: to be nice we should remove the Connection: ... */ if (con->parsed_response & HTTP_CONNECTION) { /* a subrequest disable keep-alive although the client wanted it */ if (con->keep_alive && !con->response.keep_alive) { con->keep_alive = 0; } } } if (con->request.http_method == HTTP_METHOD_HEAD) { /** * a HEAD request has the same as a GET * without the content */ con->file_finished = 1; chunkqueue_reset(con->write_queue); con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED; } http_response_write_header(srv, con); return 0; } static int connection_handle_write(server *srv, connection *con) { switch(network_write_chunkqueue(srv, con, con->write_queue, MAX_WRITE_LIMIT)) { case 0: con->write_request_ts = srv->cur_ts; if (con->file_finished) { connection_set_state(srv, con, CON_STATE_RESPONSE_END); joblist_append(srv, con); } break; case -1: /* error on our side */ log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: write failed on fd", con->fd); connection_set_state(srv, con, CON_STATE_ERROR); joblist_append(srv, con); break; case -2: /* remote close */ connection_set_state(srv, con, CON_STATE_ERROR); joblist_append(srv, con); break; case 1: con->write_request_ts = srv->cur_ts; con->is_writable = 0; /* not finished yet -> WRITE */ break; } return 0; } connection *connection_init(server *srv) { connection *con; UNUSED(srv); con = calloc(1, sizeof(*con)); con->fd = 0; con->ndx = -1; con->fde_ndx = -1; con->bytes_written = 0; con->bytes_read = 0; con->bytes_header = 0; con->loops_per_request = 0; #define CLEAN(x) \ con->x = buffer_init(); CLEAN(request.uri); CLEAN(request.request_line); CLEAN(request.request); CLEAN(request.pathinfo); CLEAN(request.orig_uri); CLEAN(uri.scheme); CLEAN(uri.authority); CLEAN(uri.path); CLEAN(uri.path_raw); CLEAN(uri.query); CLEAN(physical.doc_root); CLEAN(physical.path); CLEAN(physical.basedir); CLEAN(physical.rel_path); CLEAN(physical.etag); CLEAN(parse_request); CLEAN(authed_user); CLEAN(server_name); CLEAN(error_handler); CLEAN(dst_addr_buf); #if defined USE_OPENSSL && ! defined OPENSSL_NO_TLSEXT CLEAN(tlsext_server_name); #endif #undef CLEAN con->write_queue = chunkqueue_init(); con->read_queue = chunkqueue_init(); con->request_content_queue = chunkqueue_init(); chunkqueue_set_tempdirs(con->request_content_queue, srv->srvconf.upload_tempdirs); con->request.headers = array_init(); con->response.headers = array_init(); con->environment = array_init(); /* init plugin specific connection structures */ con->plugin_ctx = calloc(1, (srv->plugins.used + 1) * sizeof(void *)); con->cond_cache = calloc(srv->config_context->used, sizeof(cond_cache_t)); config_setup_connection(srv, con); return con; }
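/* Sketch, not lighttpd code: the keep-alive decision for a not-yet-finished
 * response in connection_handle_write_prepare() above can be read as this
 * predicate; the parameter names are illustrative only. */
static int response_allows_keep_alive(int client_keep_alive, int backend_sent_close,
                                      int have_content_length, int chunked_encoding) {
	if (!client_keep_alive) return 0;                        /* client did not ask for it */
	if (backend_sent_close) return 0;                        /* Connection: close from the backend wins */
	if (!have_content_length && !chunked_encoding) return 0; /* body end could not be delimited */
	return 1;
}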
static int proxy_demux_response(server *srv, handler_ctx *hctx) { int fin = 0; int b; ssize_t r; plugin_data *p = hctx->plugin_data; connection *con = hctx->remote_conn; int proxy_fd = hctx->fd; /* check how much we have to read */ if (ioctl(hctx->fd, FIONREAD, &b)) { log_error_write(srv, __FILE__, __LINE__, "sd", "ioctl failed: ", proxy_fd); return -1; } if (p->conf.debug) { log_error_write(srv, __FILE__, __LINE__, "sd", "proxy - have to read:", b); } if (b > 0) { if (hctx->response->used == 0) { /* avoid too small buffer */ buffer_prepare_append(hctx->response, b + 1); hctx->response->used = 1; } else { buffer_prepare_append(hctx->response, b); } if (-1 == (r = read(hctx->fd, hctx->response->ptr + hctx->response->used - 1, b))) { if (errno == EAGAIN) return 0; log_error_write(srv, __FILE__, __LINE__, "sds", "unexpected end-of-file (perhaps the proxy process died):", proxy_fd, strerror(errno)); return -1; } /* this should be catched by the b > 0 above */ assert(r); hctx->response->used += r; hctx->response->ptr[hctx->response->used - 1] = '\0'; #if 0 log_error_write(srv, __FILE__, __LINE__, "sdsbs", "demux: Response buffer len", hctx->response->used, ":", hctx->response, ":"); #endif if (0 == con->got_response) { con->got_response = 1; buffer_prepare_copy(hctx->response_header, 128); } if (0 == con->file_started) { char *c; /* search for the \r\n\r\n in the string */ if (NULL != (c = buffer_search_string_len(hctx->response, "\r\n\r\n", 4))) { size_t hlen = c - hctx->response->ptr + 4; size_t blen = hctx->response->used - hlen - 1; /* found */ buffer_append_string_len(hctx->response_header, hctx->response->ptr, c - hctx->response->ptr + 4); #if 0 log_error_write(srv, __FILE__, __LINE__, "sb", "Header:", hctx->response_header); #endif /* parse the response header */ proxy_response_parse(srv, con, p, hctx->response_header); /* enable chunked-transfer-encoding */ if (con->request.http_version == HTTP_VERSION_1_1 && !(con->parsed_response & HTTP_CONTENT_LENGTH)) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } con->file_started = 1; if (blen) { http_chunk_append_mem(srv, con, c + 4, blen + 1); } hctx->response->used = 0; joblist_append(srv, con); } } else { http_chunk_append_mem(srv, con, hctx->response->ptr, hctx->response->used); joblist_append(srv, con); hctx->response->used = 0; } } else { /* reading from upstream done */ con->file_finished = 1; http_chunk_append_mem(srv, con, NULL, 0); joblist_append(srv, con); fin = 1; } return fin; }
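/* Sketch (assumes <sys/ioctl.h>): the FIONREAD pattern used in
 * proxy_demux_response() above and, with a 4k fallback and MAX_READ_LIMIT cap,
 * in connection_handle_read() and cgi_demux_response() — ask the kernel how
 * many bytes are pending and size the read buffer accordingly. */
static int pending_read_size(int fd, int read_limit) {
	int toread = 0;
	if (0 != ioctl(fd, FIONREAD, &toread) || toread <= 4 * 1024) return 4 * 1024;
	return (toread > read_limit) ? read_limit : toread;
}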
static handler_t connection_handle_fdevent(server *srv, void *context, int revents) { connection *con = context; joblist_append(srv, con); if (con->srv_socket->is_ssl) { /* ssl may read and write for both reads and writes */ if (revents & (FDEVENT_IN | FDEVENT_OUT)) { con->is_readable = 1; con->is_writable = 1; } } else { if (revents & FDEVENT_IN) { con->is_readable = 1; } if (revents & FDEVENT_OUT) { con->is_writable = 1; /* we don't need the event twice */ } } if (revents & ~(FDEVENT_IN | FDEVENT_OUT)) { /* looks like an error */ /* FIXME: revents = 0x19 still means that we should read from the queue */ if (revents & FDEVENT_HUP) { if (con->state == CON_STATE_CLOSE) { con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1); } else { /* sigio reports the wrong event here * * there was no HUP at all */ #ifdef USE_LINUX_SIGIO if (srv->ev->in_sigio == 1) { log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> HUP", con->fd); } else { connection_set_state(srv, con, CON_STATE_ERROR); } #else connection_set_state(srv, con, CON_STATE_ERROR); #endif } } else if (revents & FDEVENT_ERR) { /* error, connection reset, whatever... we don't want to spam the logfile */ #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> ERR", con->fd); #endif connection_set_state(srv, con, CON_STATE_ERROR); } else { log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> ???", revents); } } if (con->state == CON_STATE_READ || con->state == CON_STATE_READ_POST) { connection_handle_read_state(srv, con); } if (con->state == CON_STATE_WRITE && !chunkqueue_is_empty(con->write_queue) && con->is_writable) { if (-1 == connection_handle_write(srv, con)) { connection_set_state(srv, con, CON_STATE_ERROR); log_error_write(srv, __FILE__, __LINE__, "ds", con->fd, "handle write failed."); } } if (con->state == CON_STATE_CLOSE) { /* flush the read buffers */ int len; char buf[1024]; len = read(con->fd, buf, sizeof(buf)); if (len == 0 || (len < 0 && errno != EAGAIN && errno != EINTR) ) { con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1); } } return HANDLER_FINISHED; }
/* Open and return the socket. Cf. proxy_establish_connection() in mod_proxy.c. */ static handler_t lisp_connection_open(server *srv, handler_ctx *hctx) { struct sockaddr_in addr; int ret, sock; plugin_data *p = hctx->plugin; connection *con = hctx->connection; if (hctx->socket_data) { sock = hctx->socket_data->fd; } else { if (! (hctx->socket_data = mod_lisp_allocate_socket(p))) { LOG_ERROR_MAYBE(srv, p, LOGLEVEL_ERR, "Cannot allocate from Lisp socket pool: no free slots"); return HANDLER_WAIT_FOR_FD; } LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG, "Lisp process at %s:%d for %s: allocated fd=%d," " fde_ndx=%d, state=%d, total socket slot(s) allocated: %d", SPLICE_HOSTPORT(hctx->socket_data), hctx->socket_data->fd, hctx->socket_data->fde_ndx, hctx->socket_data->state, p->LispSocketPoolUsed); if ((sock = hctx->socket_data->fd) >= 0) { if (FD_STATE_UNSAFE_EV_COUNT(hctx->socket_data->state) > 0) { fdevent_event_del(srv->ev, SPLICE_FDE(hctx->socket_data)); fdevent_unregister(srv->ev, sock); close(sock); srv->cur_fds--; LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG, "Lisp process at %s:%d for %s: close unsafe socket (fd=%d)", SPLICE_HOSTPORT(hctx->socket_data), sock); mod_lisp_reset_socket(hctx->socket_data); sock = -1; } else { return HANDLER_GO_ON; } } } if (sock <= 0) { if (-1 == (sock = socket(AF_INET, SOCK_STREAM, 0))) { LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR, "socket() failed (%s)", strerror(errno)); return HANDLER_ERROR; } mod_lisp_reset_socket(hctx->socket_data); hctx->socket_data->fd = sock; srv->cur_fds++; fdevent_register(srv->ev, sock, lisp_handle_fdevent, hctx); if (-1 == fdevent_fcntl_set(srv->ev, sock)) { LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR, "fcntl() failed (%s)", strerror(errno)); return HANDLER_ERROR; } addr.sin_addr.s_addr = inet_addr(hctx->socket_data->ip->ptr); addr.sin_port = htons(hctx->socket_data->port); addr.sin_family = AF_INET; /* Try to connect to Lisp. */ ret = connect(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_in)); #ifdef WIN32 if (ret == SOCKET_ERROR) { ret = -1; errno = WSAGetLastError()-WSABASEERR; } #endif /* WIN32 */ if (ret == -1 && (errno == EINTR || errno == EINPROGRESS)) { /* As soon as something happens on the socket, this function shall be re-entered and follow the getsockopt branch below. */ fdevent_event_set(srv->ev, SPLICE_FDE(hctx->socket_data), FDEVENT_OUT); LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG, "connection to Lisp process at %s:%d for %s delayed (%s)", SPLICE_HOSTPORT(hctx->socket_data), strerror(errno)); return HANDLER_WAIT_FOR_EVENT; } } else { int sockerr; socklen_t sockerr_len = sizeof(sockerr); fdevent_event_del(srv->ev, SPLICE_FDE(hctx->socket_data)); /* try to finish the connect() */ if (0 != getsockopt(sock, SOL_SOCKET, SO_ERROR, &sockerr, &sockerr_len)) { LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR, "getsockopt() failed (%s)", strerror(errno)); return HANDLER_ERROR; } ret = sockerr ? 
-1 : 0; } /* Check if we connected */ if (ret == -1) { LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR, "cannot connect socket to Lisp process at %s:%d for %s (%s)", SPLICE_HOSTPORT(hctx->socket_data), strerror(errno)); hctx->socket_data->fde_ndx = -1; mod_lisp_connection_close(srv, con, p); /* reset the environment and restart the sub-request */ connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); con->http_status = 503; con->mode = DIRECT; joblist_append(srv, con); return HANDLER_FINISHED; } hctx->socket_data->state |= (FD_STATE_READ | FD_STATE_WRITE); LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG, "opened socket fd=%d to Lisp process at %s:%d for %s", sock, SPLICE_HOSTPORT(hctx->socket_data)); return HANDLER_GO_ON; }
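/* Sketch of the generic non-blocking connect() pattern used above (plain POSIX,
 * not mod_lisp specific): start connect(), wait for the socket to become
 * writable, then read SO_ERROR to learn whether the connect actually succeeded. */
static int finish_nonblocking_connect(int fd) {
	int err = 0;
	socklen_t len = sizeof(err);
	if (0 != getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len)) return -1; /* getsockopt itself failed */
	return err; /* 0 on success, otherwise the errno of the deferred connect() */
}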
static int connection_handle_read_ssl(server *srv, connection *con) { #ifdef USE_OPENSSL int r, ssl_err, len, count = 0; char *mem = NULL; size_t mem_len = 0; if (!con->srv_socket->is_ssl) return -1; ERR_clear_error(); do { chunkqueue_get_memory(con->read_queue, &mem, &mem_len, 0, SSL_pending(con->ssl)); #if 0 /* overwrite everything with 0 */ memset(mem, 0, mem_len); #endif len = SSL_read(con->ssl, mem, mem_len); chunkqueue_use_memory(con->read_queue, len > 0 ? len : 0); if (con->renegotiations > 1 && con->conf.ssl_disable_client_renegotiation) { log_error_write(srv, __FILE__, __LINE__, "s", "SSL: renegotiation initiated by client, killing connection"); connection_set_state(srv, con, CON_STATE_ERROR); return -1; } if (len > 0) { con->bytes_read += len; count += len; } } while (len == (ssize_t) mem_len && count < MAX_READ_LIMIT); if (len < 0) { int oerrno = errno; switch ((r = SSL_get_error(con->ssl, len))) { case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: con->is_readable = 0; /* the manual says we have to call SSL_read with the same arguments next time. * we ignore this restriction; no one has complained about it in 1.5 yet, so it probably works anyway. */ return 0; case SSL_ERROR_SYSCALL: /** * man SSL_get_error() * * SSL_ERROR_SYSCALL * Some I/O error occurred. The OpenSSL error queue may contain more * information on the error. If the error queue is empty (i.e. * ERR_get_error() returns 0), ret can be used to find out more about * the error: If ret == 0, an EOF was observed that violates the * protocol. If ret == -1, the underlying BIO reported an I/O error * (for socket I/O on Unix systems, consult errno for details). * */ while((ssl_err = ERR_get_error())) { /* get all errors from the error-queue */ log_error_write(srv, __FILE__, __LINE__, "sds", "SSL:", r, ERR_error_string(ssl_err, NULL)); } switch(oerrno) { default: log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL:", len, r, oerrno, strerror(oerrno)); break; } break; case SSL_ERROR_ZERO_RETURN: /* clean shutdown on the remote side */ if (r == 0) { /* FIXME: later */ } /* fall thourgh */ default: while((ssl_err = ERR_get_error())) { switch (ERR_GET_REASON(ssl_err)) { case SSL_R_SSL_HANDSHAKE_FAILURE: case SSL_R_TLSV1_ALERT_UNKNOWN_CA: case SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN: case SSL_R_SSLV3_ALERT_BAD_CERTIFICATE: if (!con->conf.log_ssl_noise) continue; break; default: break; } /* get all errors from the error-queue */ log_error_write(srv, __FILE__, __LINE__, "sds", "SSL:", r, ERR_error_string(ssl_err, NULL)); } break; } connection_set_state(srv, con, CON_STATE_ERROR); return -1; } else if (len == 0) { con->is_readable = 0; /* the other end close the connection -> KEEP-ALIVE */ return -2; } else { joblist_append(srv, con); } return 0; #else UNUSED(srv); UNUSED(con); return -1; #endif }
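/* Sketch, not the lighttpd API: the loop above keeps calling SSL_read() as long
 * as it completely fills the buffer it was handed, presumably so that data
 * OpenSSL has already decrypted and buffered internally (see SSL_pending()) is
 * not left waiting for another poll event. The chunkqueue bookkeeping is
 * omitted here; a single reused buffer stands in for it. */
static int ssl_drain_pending(SSL *ssl, char *buf, size_t bufsz, size_t limit, size_t *total) {
	int n;
	*total = 0;
	do {
		n = SSL_read(ssl, buf, (int)bufsz);
		if (n > 0) *total += (size_t)n;
	} while (n == (int)bufsz && *total < limit);
	return n; /* <= 0 means the caller must consult SSL_get_error() */
}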
static handler_t proxy_handle_fdevent(void *s, void *ctx, int revents) { server *srv = (server *)s; handler_ctx *hctx = ctx; connection *con = hctx->remote_conn; plugin_data *p = hctx->plugin_data; if ((revents & FDEVENT_IN) && hctx->state == PROXY_STATE_READ) { if (p->conf.debug) { log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-in", hctx->state); } switch (proxy_demux_response(srv, hctx)) { case 0: break; case 1: hctx->host->usage--; /* we are done */ proxy_connection_close(srv, hctx); joblist_append(srv, con); return HANDLER_FINISHED; case -1: if (con->file_started == 0) { /* nothing has been send out yet, send a 500 */ connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); con->http_status = 500; con->mode = DIRECT; } else { /* response might have been already started, kill the connection */ connection_set_state(srv, con, CON_STATE_ERROR); } joblist_append(srv, con); return HANDLER_FINISHED; } } if (revents & FDEVENT_OUT) { if (p->conf.debug) { log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-out", hctx->state); } if (hctx->state == PROXY_STATE_CONNECT || hctx->state == PROXY_STATE_WRITE) { /* we are allowed to send something out * * 1. in a unfinished connect() call * 2. in a unfinished write() call (long POST request) */ return mod_proxy_handle_subrequest(srv, con, p); } else { log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: out", hctx->state); } } /* perhaps this issue is already handled */ if (revents & FDEVENT_HUP) { if (p->conf.debug) { log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-hup", hctx->state); } if (hctx->state == PROXY_STATE_CONNECT) { /* connect() -> EINPROGRESS -> HUP */ /** * what is proxy is doing if it can't reach the next hop ? * */ proxy_connection_close(srv, hctx); joblist_append(srv, con); con->http_status = 503; con->mode = DIRECT; return HANDLER_FINISHED; } con->file_finished = 1; proxy_connection_close(srv, hctx); joblist_append(srv, con); } else if (revents & FDEVENT_ERR) { /* kill all connections to the proxy process */ log_error_write(srv, __FILE__, __LINE__, "sd", "proxy-FDEVENT_ERR, but no HUP", revents); joblist_append(srv, con); proxy_connection_close(srv, hctx); } return HANDLER_FINISHED; }
static handler_t connection_handle_fdevent(server *srv, void *context, int revents) { connection *con = context; joblist_append(srv, con); if (con->srv_socket->is_ssl) { /* ssl may read and write for both reads and writes */ if (revents & (FDEVENT_IN | FDEVENT_OUT)) { con->is_readable = 1; con->is_writable = 1; } } else { if (revents & FDEVENT_IN) { con->is_readable = 1; } if (revents & FDEVENT_OUT) { con->is_writable = 1; /* we don't need the event twice */ } } if (con->state == CON_STATE_READ) { connection_handle_read_state(srv, con); } if (con->state == CON_STATE_WRITE && !chunkqueue_is_empty(con->write_queue) && con->is_writable) { if (-1 == connection_handle_write(srv, con)) { connection_set_state(srv, con, CON_STATE_ERROR); log_error_write(srv, __FILE__, __LINE__, "ds", con->fd, "handle write failed."); } } if (con->state == CON_STATE_CLOSE) { /* flush the read buffers */ connection_read_for_eos(srv, con); } /* attempt (above) to read data in kernel socket buffers * prior to handling FDEVENT_HUP and FDEVENT_ERR */ if ((revents & ~(FDEVENT_IN | FDEVENT_OUT)) && con->state != CON_STATE_ERROR) { if (con->state == CON_STATE_CLOSE) { con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1); } else if (revents & FDEVENT_HUP) { if (fdevent_is_tcp_half_closed(con->fd)) { con->keep_alive = 0; } else { connection_set_state(srv, con, CON_STATE_ERROR); } } else if (revents & FDEVENT_ERR) { /* error, connection reset */ connection_set_state(srv, con, CON_STATE_ERROR); } else { log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> ???", revents); } } return HANDLER_FINISHED; }
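/* Sketch (assumption, not necessarily how fdevent_is_tcp_half_closed() is
 * implemented): on Linux, one way to tell a half-close (peer sent FIN but the
 * connection is still usable for writing) from a dead socket is to inspect the
 * TCP state via TCP_INFO; TCP_CLOSE_WAIT means the peer shut down its sending
 * side. Needs <netinet/tcp.h>. */
static int tcp_peer_half_closed(int fd) {
#if defined(__linux__) && defined(TCP_INFO)
	struct tcp_info ti;
	socklen_t len = sizeof(ti);
	if (0 == getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len))
		return ti.tcpi_state == TCP_CLOSE_WAIT;
#endif
	return 0; /* no portable answer; treat as not half-closed */
}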
static handler_t proxy_handle_fdevent(server *srv, void *ctx, int revents) {
	handler_ctx *hctx = ctx;
	connection *con = hctx->remote_conn;
	plugin_data *p = hctx->plugin_data;

	if ((revents & FDEVENT_IN) && hctx->state == PROXY_STATE_READ) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-in", hctx->state);
		}

		switch (proxy_demux_response(srv, hctx)) {
		case 0:
			break;
		case 1:
			/* we are done */
			proxy_connection_close(srv, hctx);
			joblist_append(srv, con);
			return HANDLER_FINISHED;
		case -1:
			if (con->file_started == 0) {
				/* nothing has been sent out yet, send a 500 */
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
				con->http_status = 500;
				con->mode = DIRECT;
			} else {
				/* response might have been already started, kill the connection */
				connection_set_state(srv, con, CON_STATE_ERROR);
			}
			joblist_append(srv, con);
			return HANDLER_FINISHED;
		}
	}

	if (revents & FDEVENT_OUT) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-out", hctx->state);
		}

		if (hctx->state == PROXY_STATE_CONNECT) {
			int socket_error;
			socklen_t socket_error_len = sizeof(socket_error);

			/* we don't need it anymore */
			fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
			hctx->fde_ndx = -1;

			/* try to finish the connect() */
			if (0 != getsockopt(hctx->fd, SOL_SOCKET, SO_ERROR, &socket_error, &socket_error_len)) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "getsockopt failed:", strerror(errno));

				joblist_append(srv, con);
				return HANDLER_FINISHED;
			}
			if (socket_error != 0) {
				log_error_write(srv, __FILE__, __LINE__, "sssd", "establishing connection failed:", strerror(socket_error), "port:", hctx->host->port);

				joblist_append(srv, con);
				return HANDLER_FINISHED;
			}
			if (p->conf.debug) {
				log_error_write(srv, __FILE__, __LINE__, "s", "proxy - connect - delayed success");
			}

			proxy_set_state(srv, hctx, PROXY_STATE_PREPARE_WRITE);
		}

		if (hctx->state == PROXY_STATE_PREPARE_WRITE || hctx->state == PROXY_STATE_WRITE) {
			/* we are allowed to send something out
			 *
			 * 1. after a just finished connect() call
			 * 2. in an unfinished write() call (long POST request)
			 */
			return mod_proxy_handle_subrequest(srv, con, p);
		} else {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: out", hctx->state);
		}
	}

	/* perhaps this issue is already handled */
	if (revents & FDEVENT_HUP) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-hup", hctx->state);
		}

		if (hctx->state == PROXY_STATE_CONNECT) {
			/* connect() -> EINPROGRESS -> HUP */

			/**
			 * what should the proxy do if it can't reach the next hop?
			 */
			if (hctx->host) {
				/* disable this server */
				hctx->host->is_disabled = 1;
				hctx->host->disable_ts = srv->cur_ts;

				log_error_write(srv, __FILE__, __LINE__, "sbdd", "proxy-server disabled:", hctx->host->host, hctx->host->port, hctx->fd);

				proxy_connection_close(srv, hctx);

				/* reset the environment and restart the sub-request */
				buffer_reset(con->physical.path);
				con->mode = DIRECT;

				joblist_append(srv, con);
			} else {
				proxy_connection_close(srv, hctx);
				joblist_append(srv, con);

				con->mode = DIRECT;
				con->http_status = 503;
			}

			return HANDLER_FINISHED;
		}

		if (!con->file_finished) {
			http_chunk_append_mem(srv, con, NULL, 0);
		}

		con->file_finished = 1;

		proxy_connection_close(srv, hctx);
		joblist_append(srv, con);
	} else if (revents & FDEVENT_ERR) {
		/* kill all connections to the proxy process */
		log_error_write(srv, __FILE__, __LINE__, "sd", "proxy-FDEVENT_ERR, but no HUP", revents);

		con->file_finished = 1;

		joblist_append(srv, con);
		proxy_connection_close(srv, hctx);
	}

	return HANDLER_FINISHED;
}
/* send data to the client; called from connection_handle_write() */
int network_write_chunkqueue(server *srv, connection *con, chunkqueue *cq) {
	int ret = -1;
	off_t written = 0;
#ifdef TCP_CORK
	int corked = 0;
#endif
	server_socket *srv_socket = con->srv_socket;

	/* the configured maximum transfer rate has been reached */
	if (con->conf.global_kbytes_per_second &&
	    *(con->conf.global_bytes_per_second_cnt_ptr) > con->conf.global_kbytes_per_second * 1024) {
		con->traffic_limit_reached = 1;
		joblist_append(srv, con);

		return 1;
	}

	written = cq->bytes_out;

	/* set the TCP_CORK socket option */
#ifdef TCP_CORK
	/* Linux: put a cork into the socket as we want to combine the write() calls
	 * but only if we really have multiple chunks
	 */
	if (cq->first && cq->first->next) {
		corked = 1;
		setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked));
	}
#endif

	/* call the actual send function */
	if (srv_socket->is_ssl) {
#ifdef USE_OPENSSL
		ret = srv->network_ssl_backend_write(srv, con, con->ssl, cq);
#endif
	} else {
		/* returns how many chunks of the cq chain were fully sent in this call */
		ret = srv->network_backend_write(srv, con, con->fd, cq);
	}

	/* remove the chunks of the cq chain that were fully sent in this call,
	 * and set ret depending on whether all chunks have been sent */
	if (ret >= 0) {
		chunkqueue_remove_finished_chunks(cq);
		ret = chunkqueue_is_empty(cq) ? 0 : 1;
	}

	/* clear the TCP_CORK socket option */
#ifdef TCP_CORK
	if (corked) {
		corked = 0;
		setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked));
	}
#endif

	written = cq->bytes_out - written;
	con->bytes_written += written;
	con->bytes_written_cur_second += written;

	*(con->conf.global_bytes_per_second_cnt_ptr) += written;

	if (con->conf.kbytes_per_second &&
	    (con->bytes_written_cur_second > con->conf.kbytes_per_second * 1024)) {
		/* we reached the traffic limit */
		con->traffic_limit_reached = 1;
		joblist_append(srv, con);
	}

	return ret;
}
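/* Sketch: a helper factoring out the TCP_CORK toggling used in both
 * network_write_chunkqueue() variants above; Linux-only, best effort. */
static void sock_set_cork(int fd, int on) {
#ifdef TCP_CORK
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on)); /* errors deliberately ignored */
#else
	(void)fd; (void)on; /* not available on this platform */
#endif
}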