/**
 * Flush as much of con->write_queue to the client as the write limit allows.
 *
 * Interprets the result of network_write_chunkqueue():
 *   0  -> everything queued so far was sent; finish the response if the
 *         backend marked it complete
 *   1  -> partial write; wait for the next writable fd-event
 *  -1  -> local write error -> CON_STATE_ERROR
 *  -2  -> peer closed the connection -> CON_STATE_ERROR
 *
 * Always returns 0.
 */
static int connection_handle_write(server *srv, connection *con) {
	int rc = network_write_chunkqueue(srv, con, con->write_queue, MAX_WRITE_LIMIT);

	if (0 == rc) {
		con->write_request_ts = srv->cur_ts;
		if (con->file_finished) {
			connection_set_state(srv, con, CON_STATE_RESPONSE_END);
		}
	} else if (1 == rc) {
		con->write_request_ts = srv->cur_ts;
		con->is_writable = 0;
		/* not finished yet -> WRITE */
	} else if (-1 == rc) {
		/* error on our side */
		log_error_write(srv, __FILE__, __LINE__, "sd",
				"connection closed: write failed on fd", con->fd);
		connection_set_state(srv, con, CON_STATE_ERROR);
	} else if (-2 == rc) {
		/* remote close */
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	return 0;
}
/**
 * Finish a response: log the request, update counters and either recycle the
 * connection for keep-alive or start the shutdown sequence.
 */
static void connection_handle_response_end_state(server *srv, connection *con) {
	/* log the request; even if the connection errored/dropped we still write
	 * to the access log as long as an http_status was set */
	if (con->http_status) {
		plugins_call_handle_request_done(srv, con);
	}

	if (CON_STATE_ERROR != con->state) {
		srv->con_written++;
	}

	/* a request body that was not consumed completely (or an error state)
	 * makes the connection unusable for another request */
	if (CON_STATE_ERROR == con->state
	    || con->request_content_queue->bytes_in != con->request.content_length) {
		con->keep_alive = 0;
	}

	if (!con->keep_alive) {
		connection_handle_shutdown(srv, con);
	} else {
		/* reuse the connection: reset per-request state and wait for the
		 * next request on the same socket */
		connection_reset(srv, con);
		connection_set_state(srv, con, CON_STATE_REQUEST_START);
	}
}
/**
 * Take ownership of a freshly accept()ed socket.
 *
 * Allocates/recycles a connection object, registers the fd with the fdevent
 * layer, records the peer address, switches the fd to non-blocking +
 * close-on-exec and (when the listening socket is SSL) attaches an SSL
 * object in accept state.
 *
 * @param srv        server context
 * @param srv_socket listening socket the client arrived on
 * @param cnt_addr   peer address as returned by accept()
 * @param cnt        the accepted file descriptor
 * @return the new connection, or NULL on setup failure (fd already closed
 *         via connection_close() in that case)
 */
connection *connection_accepted(server *srv, server_socket *srv_socket, sock_addr *cnt_addr, int cnt) {
	connection *con;

	srv->cur_fds++;

	/* ok, we have the connection, register it */
#if 0
	log_error_write(srv, __FILE__, __LINE__, "sd", "appected()", cnt);
#endif
	srv->con_opened++;

	con = connections_get_new_connection(srv);

	con->fd = cnt;
	con->fde_ndx = -1; /* not yet registered with an event backend slot */
	fdevent_register(srv->ev, con->fd, connection_handle_fdevent, con);
	connection_set_state(srv, con, CON_STATE_REQUEST_START);

	con->connection_start = srv->cur_ts;
	con->dst_addr = *cnt_addr;
	/* keep a printable copy of the peer IP for logging/conditions */
	buffer_copy_string(con->dst_addr_buf, inet_ntop_cache_get_ip(srv, &(con->dst_addr)));
	con->srv_socket = srv_socket;

	/* non-blocking + FD_CLOEXEC; without this the state machine would block */
	if (-1 == fdevent_fcntl_set_nb_cloexec_sock(srv->ev, con->fd)) {
		log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));
		connection_close(srv, con);
		return NULL;
	}
#ifdef USE_OPENSSL
	/* connect FD to SSL */
	if (srv_socket->is_ssl) {
		if (NULL == (con->ssl = SSL_new(srv_socket->ssl_ctx))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
					ERR_error_string(ERR_get_error(), NULL));
			connection_close(srv, con);
			return NULL;
		}
		con->renegotiations = 0;
		SSL_set_app_data(con->ssl, con); /* so SSL callbacks can find the connection */
		SSL_set_accept_state(con->ssl);  /* we are the server side of the handshake */

		if (1 != (SSL_set_fd(con->ssl, cnt))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
					ERR_error_string(ERR_get_error(), NULL));
			connection_close(srv, con);
			return NULL;
		}
	}
#endif
	return con;
}
/**
 * Hard-close a connection: free SSL state, deregister from the event layer,
 * close the socket and park the connection object in CON_STATE_CONNECT.
 *
 * Always returns 0.
 */
int connection_close(server *srv, connection *con) {
#ifdef USE_OPENSSL
	server_socket *srv_sock = con->srv_socket;

	if (srv_sock->is_ssl) {
		if (con->ssl) SSL_free(con->ssl);
		con->ssl = NULL;
	}
#endif

	/* detach from the fdevent backend before closing the fd */
	fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
	fdevent_unregister(srv->ev, con->fd);

#ifdef __WIN32
	if (closesocket(con->fd)) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"(warning) close:", con->fd, strerror(errno));
	}
#else
	if (close(con->fd)) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"(warning) close:", con->fd, strerror(errno));
	}
#endif

	srv->cur_fds--;

	connection_del(srv, con);
	connection_set_state(srv, con, CON_STATE_CONNECT);

	return 0;
}
/**
 * fdevent callback for the proxy backend socket.
 *
 * Handles, in order:
 *  - FDEVENT_IN while reading: demux the backend response; on EOF close the
 *    proxy connection, on error either send a 500 (nothing sent yet) or kill
 *    the client connection.
 *  - FDEVENT_OUT: finish a pending non-blocking connect() (via SO_ERROR),
 *    then continue/start writing the request.
 *  - FDEVENT_HUP/FDEVENT_ERR: backend went away; disable the host on a
 *    failed connect and retry the request locally.
 *
 * Always returns HANDLER_FINISHED.
 */
static handler_t proxy_handle_fdevent(server *srv, void *ctx, int revents) {
	handler_ctx *hctx = ctx;
	connection *con = hctx->remote_conn;
	plugin_data *p = hctx->plugin_data;

	if ((revents & FDEVENT_IN) && hctx->state == PROXY_STATE_READ) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-in", hctx->state);
		}

		switch (proxy_demux_response(srv, hctx)) {
		case 0:
			break;
		case 1:
			/* we are done */
			proxy_connection_close(srv, hctx);
			joblist_append(srv, con);
			return HANDLER_FINISHED;
		case -1:
			if (con->file_started == 0) {
				/* nothing has been send out yet, send a 500 */
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
				con->http_status = 500;
				con->mode = DIRECT;
			} else {
				/* response might have been already started, kill the connection */
				connection_set_state(srv, con, CON_STATE_ERROR);
			}
			joblist_append(srv, con);
			return HANDLER_FINISHED;
		}
	}

	if (revents & FDEVENT_OUT) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-out", hctx->state);
		}

		if (hctx->state == PROXY_STATE_CONNECT) {
			int socket_error;
			socklen_t socket_error_len = sizeof(socket_error);

			/* we don't need it anymore */
			fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
			hctx->fde_ndx = -1;

			/* try to finish the connect() */
			if (0 != getsockopt(hctx->fd, SOL_SOCKET, SO_ERROR, &socket_error, &socket_error_len)) {
				/* NOTE(review): hctx is left open here and below; the retried
				 * request may leak the backend fd — verify against
				 * proxy_connection_close() usage elsewhere */
				log_error_write(srv, __FILE__, __LINE__, "ss",
						"getsockopt failed:", strerror(errno));
				joblist_append(srv, con);
				return HANDLER_FINISHED;
			}
			if (socket_error != 0) {
				/* fixed: format was "ss" but four value arguments were
				 * passed, so "port:" and the port were never printed */
				log_error_write(srv, __FILE__, __LINE__, "sssd",
						"establishing connection failed:", strerror(socket_error),
						"port:", hctx->host->port);
				joblist_append(srv, con);
				return HANDLER_FINISHED;
			}
			if (p->conf.debug) {
				log_error_write(srv, __FILE__, __LINE__, "s", "proxy - connect - delayed success");
			}

			proxy_set_state(srv, hctx, PROXY_STATE_PREPARE_WRITE);
		}

		if (hctx->state == PROXY_STATE_PREPARE_WRITE || hctx->state == PROXY_STATE_WRITE) {
			/* we are allowed to send something out
			 *
			 * 1. after a just finished connect() call
			 * 2. in a unfinished write() call (long POST request)
			 */
			return mod_proxy_handle_subrequest(srv, con, p);
		} else {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: out", hctx->state);
		}
	}

	/* perhaps this issue is already handled */
	if (revents & FDEVENT_HUP) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-hup", hctx->state);
		}

		if (hctx->state == PROXY_STATE_CONNECT) {
			/* connect() -> EINPROGRESS -> HUP */

			/**
			 * what is proxy is doing if it can't reach the next hop ?
			 */
			if (hctx->host) {
				/* disable this server (was assigned twice before) */
				hctx->host->is_disabled = 1;
				hctx->host->disable_ts = srv->cur_ts;
				log_error_write(srv, __FILE__, __LINE__, "sbdd", "proxy-server disabled:",
						hctx->host->host, hctx->host->port, hctx->fd);

				proxy_connection_close(srv, hctx);

				/* reset the enviroment and restart the sub-request */
				buffer_reset(con->physical.path);
				con->mode = DIRECT;
				joblist_append(srv, con);
			} else {
				proxy_connection_close(srv, hctx);
				joblist_append(srv, con);
				con->mode = DIRECT;
				con->http_status = 503;
			}
			return HANDLER_FINISHED;
		}

		/* backend closed; terminate the response body cleanly */
		if (!con->file_finished) {
			http_chunk_append_mem(srv, con, NULL, 0);
		}
		con->file_finished = 1;
		proxy_connection_close(srv, hctx);
		joblist_append(srv, con);
	} else if (revents & FDEVENT_ERR) {
		/* kill all connections to the proxy process */
		log_error_write(srv, __FILE__, __LINE__, "sd", "proxy-FDEVENT_ERR, but no HUP", revents);
		con->file_finished = 1;
		joblist_append(srv, con);
		proxy_connection_close(srv, hctx);
	}

	return HANDLER_FINISHED;
}
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * In CON_STATE_READ: scan the read-queue for the end-of-header marker
 * "\r\n\r\n" (which may be split across chunks) and copy the header into
 * con->request.request. In CON_STATE_READ_POST: move request-body bytes into
 * the request-content queue (spilling to tempfiles).
 *
 * Returns 0, or -1 on a read error.
 */
static int connection_handle_read_state(server *srv, connection *con) {
	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	chunkqueue_remove_finished_chunks(cq);

	/* we might have got several packets at once */
	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 */
		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; c; c = c->next) {
			size_t i;
			size_t len = buffer_string_length(c->mem) - c->offset;
			const char *b = c->mem->ptr + c->offset;

			for (i = 0; i < len; ++i) {
				char ch = b[i];

				if ('\r' == ch) {
					/* check if \n\r\n follows */
					size_t j = i+1;
					chunk *cc = c;
					const char header_end[] = "\r\n\r\n";
					int header_end_match_pos = 1;

					for ( ; cc; cc = cc->next, j = 0 ) {
						size_t bblen = buffer_string_length(cc->mem) - cc->offset;
						/* fixed: was c->mem->ptr + cc->offset, which scanned
						 * the FIRST chunk's memory with the follow-up chunk's
						 * offset once the match crossed a chunk boundary
						 * (wrong data, possible out-of-bounds read) */
						const char *bb = cc->mem->ptr + cc->offset;

						for ( ; j < bblen; j++) {
							ch = bb[j];

							if (ch == header_end[header_end_match_pos]) {
								header_end_match_pos++;
								if (4 == header_end_match_pos) {
									last_chunk = cc;
									last_offset = j+1;
									goto found_header_end;
								}
							} else {
								goto reset_search;
							}
						}
					}
				}
				reset_search: ;
			}
		}
		found_header_end:

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				size_t len = buffer_string_length(c->mem) - c->offset;

				if (c == last_chunk) {
					len = last_offset;
				}

				buffer_append_string_len(con->request.request, c->mem->ptr + c->offset, len);
				c->offset += len;

				if (c == last_chunk) break;
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 414");

			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		if (0 != chunkqueue_steal_with_tempfiles(srv, dst_cq, cq,
				con->request.content_length - dst_cq->bytes_in)) {
			con->http_status = 413; /* Request-Entity too large */
			con->keep_alive = 0;
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't got enough data to leave one of
	 * the READ states; the only way is to leave here */
	if (is_closed && ostate == con->state) {
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
/**
 * fdevent callback for client connection sockets.
 *
 * Translates poll events into is_readable/is_writable flags, maps
 * HUP/ERR/unknown events onto connection states, then drives the read/write
 * state machine once. Always schedules the connection on the joblist so the
 * main loop re-runs the state engine.
 */
static handler_t connection_handle_fdevent(server *srv, void *context, int revents) {
	connection *con = context;

	joblist_append(srv, con);

	if (con->srv_socket->is_ssl) {
		/* ssl may read and write for both reads and writes */
		if (revents & (FDEVENT_IN | FDEVENT_OUT)) {
			con->is_readable = 1;
			con->is_writable = 1;
		}
	} else {
		if (revents & FDEVENT_IN) {
			con->is_readable = 1;
		}
		if (revents & FDEVENT_OUT) {
			con->is_writable = 1;
			/* we don't need the event twice */
		}
	}

	if (revents & ~(FDEVENT_IN | FDEVENT_OUT)) {
		/* looks like an error */

		/* FIXME: revents = 0x19 still means that we should read from the queue */
		if (revents & FDEVENT_HUP) {
			if (con->state == CON_STATE_CLOSE) {
				/* peer hung up while we linger: stop waiting early */
				con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
			} else {
				/* sigio reports the wrong event here
				 *
				 * there was no HUP at all
				 */
#ifdef USE_LINUX_SIGIO
				if (srv->ev->in_sigio == 1) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
						"connection closed: poll() -> HUP", con->fd);
				} else {
					connection_set_state(srv, con, CON_STATE_ERROR);
				}
#else
				connection_set_state(srv, con, CON_STATE_ERROR);
#endif
			}
		} else if (revents & FDEVENT_ERR) {
			/* error, connection reset, whatever... we don't want to spam the logfile */
#if 0
			log_error_write(srv, __FILE__, __LINE__, "sd",
					"connection closed: poll() -> ERR", con->fd);
#endif
			connection_set_state(srv, con, CON_STATE_ERROR);
		} else {
			log_error_write(srv, __FILE__, __LINE__, "sd",
					"connection closed: poll() -> ???", revents);
		}
	}

	if (con->state == CON_STATE_READ ||
	    con->state == CON_STATE_READ_POST) {
		connection_handle_read_state(srv, con);
	}

	if (con->state == CON_STATE_WRITE &&
	    !chunkqueue_is_empty(con->write_queue) &&
	    con->is_writable) {

		if (-1 == connection_handle_write(srv, con)) {
			connection_set_state(srv, con, CON_STATE_ERROR);

			log_error_write(srv, __FILE__, __LINE__, "ds",
					con->fd,
					"handle write failed.");
		}
	}

	if (con->state == CON_STATE_CLOSE) {
		/* flush the read buffers: drain whatever the client still sends so
		 * the close handshake can finish */
		int len;
		char buf[1024];

		len = read(con->fd, buf, sizeof(buf));
		if (len == 0 || (len < 0 && errno != EAGAIN && errno != EINTR) ) {
			con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
		}
	}

	return HANDLER_FINISHED;
}
/* 0: everything ok, -1: error, -2: con closed */
static int connection_handle_read(server *srv, connection *con) {
	int len;
	buffer *b;
	int toread, read_offset;

	if (con->conf.is_ssl) {
		return connection_handle_read_ssl(srv, con);
	}

	/* reuse the last chunk's buffer if there is one */
	b = (NULL != con->read_queue->last) ? con->read_queue->last->mem : NULL;

	/* default size for chunks is 4kb; only use bigger chunks if FIONREAD tells
	 * us more than 4kb is available
	 * if FIONREAD doesn't signal a big chunk we fill the previous buffer
	 * if it has >= 1kb free
	 */
#if defined(__WIN32)
	if (NULL == b || b->size - b->used < 1024) {
		b = chunkqueue_get_append_buffer(con->read_queue);
		buffer_prepare_copy(b, 4 * 1024);
	}

	/* b->used includes the terminating '\0'; overwrite it on append */
	read_offset = (b->used == 0) ? 0 : b->used - 1;
	len = recv(con->fd, b->ptr + read_offset, b->size - 1 - read_offset, 0);
#else
#ifdef HAVE_LIBMTCP
	/* toread = MAX_READ_LIMIT; */
	if (mtcp_socket_ioctl(srv->mctx, con->fd, FIONREAD, &toread) || toread == 0 || toread <= 4*1024) {
#else
	if (ioctl(con->fd, FIONREAD, &toread) || toread == 0 || toread <= 4*1024) {
#endif
		/* small or unknown amount pending: top up the previous buffer or
		 * start a fresh 4kb one */
		if (NULL == b || b->size - b->used < 1024) {
			b = chunkqueue_get_append_buffer(con->read_queue);
			buffer_prepare_copy(b, 4 * 1024);
		}
	} else {
		/* a lot is pending: allocate one chunk big enough, capped */
		if (toread > MAX_READ_LIMIT) toread = MAX_READ_LIMIT;
		b = chunkqueue_get_append_buffer(con->read_queue);
		buffer_prepare_copy(b, toread + 1);
	}

	/* b->used includes the terminating '\0'; overwrite it on append */
	read_offset = (b->used == 0) ? 0 : b->used - 1;
#ifdef HAVE_LIBMTCP
	len = mtcp_read(srv->mctx, con->fd, b->ptr + read_offset, b->size - 1 - read_offset);
#else
	len = read(con->fd, b->ptr + read_offset, b->size - 1 - read_offset);
#endif
#endif

	if (len < 0) {
		con->is_readable = 0;

		if (errno == EAGAIN) return 0;
		if (errno == EINTR) {
			/* we have been interrupted before we could read */
			con->is_readable = 1;
			return 0;
		}

		if (errno != ECONNRESET) {
			/* expected for keep-alive */
			log_error_write(srv, __FILE__, __LINE__, "ssd", "connection closed - read failed: ", strerror(errno), errno);
		}

		connection_set_state(srv, con, CON_STATE_ERROR);

		return -1;
	} else if (len == 0) {
		con->is_readable = 0;
		/* the other end close the connection -> KEEP-ALIVE */

		/* pipelining */

		return -2;
	} else if ((size_t)len < b->size - 1) {
		/* we got less then expected, wait for the next fd-event */

		con->is_readable = 0;
	}

	/* re-append the terminating '\0' after the newly read bytes */
	if (b->used > 0) b->used--;
	b->used += len;
	b->ptr[b->used++] = '\0';

	con->bytes_read += len;
#if 0
	dump_packet(b->ptr, len);
#endif

	return 0;
}

/**
 * Fix up status/headers before the response is written:
 * method/status sanity checks, error-page generation (static errorfile or
 * built-in XHTML page), Content-Length vs. chunked decisions, keep-alive
 * bookkeeping and HEAD handling. Finishes by serializing the header.
 *
 * Always returns 0.
 */
static int connection_handle_write_prepare(server *srv, connection *con) {
	if (con->mode == DIRECT) {
		/* static files */
		switch(con->request.http_method) {
		case HTTP_METHOD_GET:
		case HTTP_METHOD_POST:
		case HTTP_METHOD_HEAD:
		case HTTP_METHOD_PUT:
		case HTTP_METHOD_PATCH:
		case HTTP_METHOD_MKCOL:
		case HTTP_METHOD_DELETE:
		case HTTP_METHOD_COPY:
		case HTTP_METHOD_MOVE:
		case HTTP_METHOD_PROPFIND:
		case HTTP_METHOD_PROPPATCH:
		case HTTP_METHOD_LOCK:
		case HTTP_METHOD_UNLOCK:
			break;
		case HTTP_METHOD_OPTIONS:
			/*
			 * 400 is coming from the request-parser BEFORE uri.path is set
			 * 403 is from the response handler when noone else catched it
			 */
			if ((!con->http_status || con->http_status == 200) && con->uri.path->used &&
			    con->uri.path->ptr[0] != '*') {
				response_header_insert(srv, con, CONST_STR_LEN("Allow"), CONST_STR_LEN("OPTIONS, GET, HEAD, POST"));

				con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED;
				con->parsed_response &= ~HTTP_CONTENT_LENGTH;

				con->http_status = 200;
				con->file_finished = 1;

				chunkqueue_reset(con->write_queue);
			}
			break;
		default:
			/* any other method is not implemented for static files */
			switch(con->http_status) {
			case 400: /* bad request */
			case 401: /* authorization required */
			case 414: /* overload request header */
			case 505: /* unknown protocol */
			case 207: /* this was webdav */
				break;
			default:
				con->http_status = 501;
				break;
			}
			break;
		}
	}

	if (con->http_status == 0) {
		con->http_status = 403;
	}

	switch(con->http_status) {
	case 204: /* class: header only */
	case 205:
	case 304:
		/* disable chunked encoding again as we have no body */
		con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED;
		con->parsed_response &= ~HTTP_CONTENT_LENGTH;
		chunkqueue_reset(con->write_queue);

		con->file_finished = 1;
		break;
	default: /* class: header + body */
		if (con->mode != DIRECT) break;

		/* only custom body for 4xx and 5xx */
		if (con->http_status < 400 || con->http_status >= 600) break;

		con->file_finished = 0;

		buffer_reset(con->physical.path);

		/* try to send static errorfile */
		if (!buffer_is_empty(con->conf.errorfile_prefix)) {
			stat_cache_entry *sce = NULL;

			buffer_copy_string_buffer(con->physical.path, con->conf.errorfile_prefix);
			buffer_append_long(con->physical.path, con->http_status);
			buffer_append_string_len(con->physical.path, CONST_STR_LEN(".html"));

			if (HANDLER_ERROR != stat_cache_get_entry(srv, con, con->physical.path, &sce)) {
				con->file_finished = 1;

				http_chunk_append_file(srv, con, con->physical.path, 0, sce->st.st_size);
				response_header_overwrite(srv, con, CONST_STR_LEN("Content-Type"), CONST_BUF_LEN(sce->content_type));
			}
		}

		if (!con->file_finished) {
			buffer *b;

			buffer_reset(con->physical.path);

			con->file_finished = 1;
			b = chunkqueue_get_append_buffer(con->write_queue);

			/* build default error-page */
			buffer_copy_string_len(b, CONST_STR_LEN(
				"<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n"
				"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n"
				" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n"
				"<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n"
				" <head>\n"
				" <title>"));
			buffer_append_long(b, con->http_status);
			buffer_append_string_len(b, CONST_STR_LEN(" - "));
			buffer_append_string(b, get_http_status_name(con->http_status));

			buffer_append_string_len(b, CONST_STR_LEN(
				"</title>\n"
				" </head>\n"
				" <body>\n"
				" <h1>"));
			buffer_append_long(b, con->http_status);
			buffer_append_string_len(b, CONST_STR_LEN(" - "));
			buffer_append_string(b, get_http_status_name(con->http_status));

			buffer_append_string_len(b, CONST_STR_LEN("</h1>\n"
				" </body>\n"
				"</html>\n"
				));

			response_header_overwrite(srv, con, CONST_STR_LEN("Content-Type"), CONST_STR_LEN("text/html"));
		}
		break;
	}

	if (con->file_finished) {
		/* we have all the content and chunked encoding is not used, set a content-length */

		if ((!(con->parsed_response & HTTP_CONTENT_LENGTH)) &&
		    (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) == 0) {
			off_t qlen = chunkqueue_length(con->write_queue);

			/**
			 * The Content-Length header only can be sent if we have content:
			 * - HEAD doesn't have a content-body (but have a content-length)
			 * - 1xx, 204 and 304 don't have a content-body (RFC 2616 Section 4.3)
			 *
			 * Otherwise generate a Content-Length header as chunked encoding is not
			 * available
			 */
			if ((con->http_status >= 100 && con->http_status < 200) ||
			    con->http_status == 204 ||
			    con->http_status == 304) {
				data_string *ds;
				/* no Content-Body, no Content-Length */
				if (NULL != (ds = (data_string*) array_get_element(con->response.headers, "Content-Length"))) {
					buffer_reset(ds->value); /* Headers with empty values are ignored for output */
				}
			} else if (qlen > 0 || con->request.http_method != HTTP_METHOD_HEAD) {
				/* qlen = 0 is important for Redirects (301, ...) as they MAY have
				 * a content. Browsers are waiting for a Content otherwise
				 */
				buffer_copy_off_t(srv->tmp_buf, qlen);

				response_header_overwrite(srv, con, CONST_STR_LEN("Content-Length"), CONST_BUF_LEN(srv->tmp_buf));
			}
		}
	} else {
		/**
		 * the file isn't finished yet, but we have all headers
		 *
		 * to get keep-alive we either need:
		 * - Content-Length: ... (HTTP/1.0 and HTTP/1.0) or
		 * - Transfer-Encoding: chunked (HTTP/1.1)
		 */

		if (((con->parsed_response & HTTP_CONTENT_LENGTH) == 0) &&
		    ((con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) == 0)) {
			con->keep_alive = 0;
		}

		/**
		 * if the backend sent a Connection: close, follow the wish
		 *
		 * NOTE: if the backend sent Connection: Keep-Alive, but no Content-Length, we
		 * will close the connection. That's fine. We can always decide the close
		 * the connection
		 *
		 * FIXME: to be nice we should remove the Connection: ...
		 */
		if (con->parsed_response & HTTP_CONNECTION) {
			/* a subrequest disable keep-alive although the client wanted it */
			if (con->keep_alive && !con->response.keep_alive) {
				con->keep_alive = 0;
			}
		}
	}

	if (con->request.http_method == HTTP_METHOD_HEAD) {
		/**
		 * a HEAD request has the same as a GET
		 * without the content
		 */
		con->file_finished = 1;

		chunkqueue_reset(con->write_queue);
		con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED;
	}

	http_response_write_header(srv, con);

	return 0;
}

/**
 * Flush pending response data; like the other variant of this handler but
 * schedules the connection on the joblist on completion/error so the main
 * loop runs the state engine again.
 *
 * Always returns 0.
 */
static int connection_handle_write(server *srv, connection *con) {
	switch(network_write_chunkqueue(srv, con, con->write_queue, MAX_WRITE_LIMIT)) {
	case 0:
		con->write_request_ts = srv->cur_ts;
		if (con->file_finished) {
			connection_set_state(srv, con, CON_STATE_RESPONSE_END);
			joblist_append(srv, con);
		}
		break;
	case -1: /* error on our side */
		log_error_write(srv, __FILE__, __LINE__, "sd",
				"connection closed: write failed on fd", con->fd);
		connection_set_state(srv, con, CON_STATE_ERROR);
		joblist_append(srv, con);
		break;
	case -2: /* remote close */
		connection_set_state(srv, con, CON_STATE_ERROR);
		joblist_append(srv, con);
		break;
	case 1:
		/* partial write; wait for the next writable event */
		con->write_request_ts = srv->cur_ts;
		con->is_writable = 0;

		/* not finished yet -> WRITE */
		break;
	}

	return 0;
}

/**
 * Allocate and zero-initialize a connection object: all request/uri/physical
 * buffers, the three chunkqueues, header/environment arrays and per-plugin
 * context slots.
 *
 * @return the newly allocated connection (never checked for calloc failure
 *         here — NOTE(review): calloc results are unchecked)
 */
connection *connection_init(server *srv) {
	connection *con;

	UNUSED(srv);

	con = calloc(1, sizeof(*con));

	con->fd = 0;
	con->ndx = -1;
	con->fde_ndx = -1;
	con->bytes_written = 0;
	con->bytes_read = 0;
	con->bytes_header = 0;
	con->loops_per_request = 0;

#define CLEAN(x) \
	con->x = buffer_init();

	CLEAN(request.uri);
	CLEAN(request.request_line);
	CLEAN(request.request);
	CLEAN(request.pathinfo);

	CLEAN(request.orig_uri);

	CLEAN(uri.scheme);
	CLEAN(uri.authority);
	CLEAN(uri.path);
	CLEAN(uri.path_raw);
	CLEAN(uri.query);

	CLEAN(physical.doc_root);
	CLEAN(physical.path);
	CLEAN(physical.basedir);
	CLEAN(physical.rel_path);
	CLEAN(physical.etag);
	CLEAN(parse_request);

	CLEAN(authed_user);
	CLEAN(server_name);
	CLEAN(error_handler);
	CLEAN(dst_addr_buf);
#if defined USE_OPENSSL && ! defined OPENSSL_NO_TLSEXT
	CLEAN(tlsext_server_name);
#endif

#undef CLEAN
	con->write_queue = chunkqueue_init();
	con->read_queue = chunkqueue_init();
	con->request_content_queue = chunkqueue_init();
	chunkqueue_set_tempdirs(con->request_content_queue, srv->srvconf.upload_tempdirs);

	con->request.headers = array_init();
	con->response.headers = array_init();
	con->environment = array_init();

	/* init plugin specific connection structures */

	con->plugin_ctx = calloc(1, (srv->plugins.used + 1) * sizeof(void *));

	con->cond_cache = calloc(srv->config_context->used, sizeof(cond_cache_t));
	config_setup_connection(srv, con);

	return con;
}
/* 0: everything ok, -1: error, -2: con closed */
static int connection_handle_read(server *srv, connection *con) {
	int len;
	char *mem = NULL;
	size_t mem_len = 0;
	int toread;

	if (con->srv_socket->is_ssl) {
		return connection_handle_read_ssl(srv, con);
	}

	/* default size for chunks is 4kb; only use bigger chunks if FIONREAD tells
	 * us more than 4kb is available
	 * if FIONREAD doesn't signal a big chunk we fill the previous buffer
	 * if it has >= 1kb free
	 */
#if defined(__WIN32)
	chunkqueue_get_memory(con->read_queue, &mem, &mem_len, 0, 4096);

	len = recv(con->fd, mem, mem_len, 0);
#else
	/* fixed: the branches were inverted — on ioctl failure the (then
	 * uninitialized) toread was clamped to MAX_READ_LIMIT and used, while a
	 * large pending read was shrunk to 4096. Initialize toread and pick the
	 * small default when FIONREAD fails or reports <= 4kb. */
	toread = 0;
	if (0 != ioctl(con->fd, FIONREAD, &toread) || toread <= 4*1024) {
		toread = 4096;
	} else if (toread > MAX_READ_LIMIT) {
		toread = MAX_READ_LIMIT;
	}
	chunkqueue_get_memory(con->read_queue, &mem, &mem_len, 0, toread);

	len = read(con->fd, mem, mem_len);
#endif

	/* only mark the bytes that were actually read as used */
	chunkqueue_use_memory(con->read_queue, len > 0 ? len : 0);

	if (len < 0) {
		con->is_readable = 0;

		if (errno == EAGAIN) return 0;
		if (errno == EINTR) {
			/* we have been interrupted before we could read */
			con->is_readable = 1;
			return 0;
		}

		if (errno != ECONNRESET) {
			/* expected for keep-alive */
			log_error_write(srv, __FILE__, __LINE__, "ssd",
					"connection closed - read failed: ", strerror(errno), errno);
		}

		connection_set_state(srv, con, CON_STATE_ERROR);

		return -1;
	} else if (len == 0) {
		con->is_readable = 0;
		/* the other end close the connection -> KEEP-ALIVE */

		/* pipelining */

		return -2;
	} else if (len != (ssize_t) mem_len) {
		/* we got less then expected, wait for the next fd-event */

		con->is_readable = 0;
	}

	con->bytes_read += len;

	return 0;
}
/**
 * Begin tearing a connection down at end-of-response (no keep-alive).
 *
 * Performs the SSL close-notify handshake (logging unexpected errors),
 * notifies plugins, resets per-request state, then shuts down the write side
 * of the socket and enters the CON_STATE_CLOSE linger phase; if shutdown()
 * fails the connection is closed immediately.
 */
static void connection_handle_shutdown(server *srv, connection *con) {
	int r;

#ifdef USE_OPENSSL
	server_socket *srv_sock = con->srv_socket;
	/* only attempt close-notify once the TLS handshake actually completed */
	if (srv_sock->is_ssl && SSL_is_init_finished(con->ssl)) {
		int ret, ssl_r;
		unsigned long err;
		ERR_clear_error();
		switch ((ret = SSL_shutdown(con->ssl))) {
		case 1:
			/* ok */
			break;
		case 0:
			/* wait for fd-event
			 *
			 * FIXME: wait for fdevent and call SSL_shutdown again
			 *
			 */
			ERR_clear_error();
			if (-1 != (ret = SSL_shutdown(con->ssl))) break;

			/* fall through */
		default:

			switch ((ssl_r = SSL_get_error(con->ssl, ret))) {
			case SSL_ERROR_ZERO_RETURN:
				break;
			case SSL_ERROR_WANT_WRITE:
				/*con->is_writable = -1;*//*(no effect; shutdown() called below)*/
			case SSL_ERROR_WANT_READ:
				break;
			case SSL_ERROR_SYSCALL:
				/* perhaps we have error waiting in our error-queue */
				if (0 != (err = ERR_get_error())) {
					do {
						log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
								ssl_r, ret,
								ERR_error_string(err, NULL));
					} while((err = ERR_get_error()));
				} else if (errno != 0) {
					/* ssl bug (see lighttpd ticket #2213): sometimes errno == 0 */
					switch(errno) {
					case EPIPE:
					case ECONNRESET:
						break;
					default:
						log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL (error):",
							ssl_r, ret, errno,
							strerror(errno));
						break;
					}
				}

				break;
			default:
				while((err = ERR_get_error())) {
					log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
							ssl_r, ret,
							ERR_error_string(err, NULL));
				}
				break;
			}
		}
		ERR_clear_error();
	}
#endif

	switch(r = plugins_call_handle_connection_close(srv, con)) {
	case HANDLER_GO_ON:
	case HANDLER_FINISHED:
		break;
	default:
		log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
		break;
	}

	srv->con_closed++;
	connection_reset(srv, con);

	/* plugins should have cleaned themselves up */
	for (size_t i = 0; i < srv->plugins.used; ++i) {
		plugin *p = ((plugin **)(srv->plugins.ptr))[i];
		plugin_data *pd = p->data;
		if (!pd || NULL == con->plugin_ctx[pd->id]) continue;
		/* a leftover ctx means the plugin's close handler forgot to free it */
		log_error_write(srv, __FILE__, __LINE__, "sb",
				"missing cleanup in", p->name);
		con->plugin_ctx[pd->id] = NULL;
	}

	/* close the connection */
	if ((0 == shutdown(con->fd, SHUT_WR))) {
		/* half-close succeeded: linger in CON_STATE_CLOSE to drain the client */
		con->close_timeout_ts = srv->cur_ts;
		connection_set_state(srv, con, CON_STATE_CLOSE);

		if (srv->srvconf.log_state_handling) {
			log_error_write(srv, __FILE__, __LINE__, "sd",
					"shutdown for fd", con->fd);
		}
	} else {
		connection_close(srv, con);
	}
}
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * Variant with the local "X-Pi-Upload" patch: request bodies carrying that
 * header are additionally streamed into /tmp/upload_vid/<id>.
 *
 * Returns 0, or -1 on a read error.
 */
static int connection_handle_read_state(server *srv, connection *con) {
	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	/* the last chunk might be empty */
	for (c = cq->first; c;) {
		if (cq->first == c && c->mem->used == 0) {
			/* the first node is empty, move it to unused */
			cq->first = c->next;
			if (cq->first == NULL) cq->last = NULL;

			c->next = cq->unused;
			cq->unused = c;
			cq->unused_chunks++;

			c = cq->first;
		} else if (c->next && c->next->mem->used == 0) {
			chunk *fc;
			/* next node is empty, unlink and move it to unused */
			fc = c->next;
			c->next = fc->next;

			fc->next = cq->unused;
			cq->unused = fc;
			cq->unused_chunks++;

			/* the last node was empty */
			if (c->next == NULL) {
				cq->last = c;
			}

			c = c->next;
		} else {
			c = c->next;
		}
	}

	/* we might have got several packets at once */
	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 */
		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; c; c = c->next) {
			buffer b;
			size_t i;

			b.ptr = c->mem->ptr + c->offset;
			b.used = c->mem->used - c->offset;
			if (b.used > 0) b.used--; /* buffer "used" includes terminating zero */

			for (i = 0; i < b.used; i++) {
				char ch = b.ptr[i];

				if ('\r' == ch) {
					/* check if \n\r\n follows */
					size_t j = i+1;
					chunk *cc = c;
					const char header_end[] = "\r\n\r\n";
					int header_end_match_pos = 1;

					for ( ; cc; cc = cc->next, j = 0 ) {
						buffer bb;

						bb.ptr = cc->mem->ptr + cc->offset;
						bb.used = cc->mem->used - cc->offset;
						if (bb.used > 0) bb.used--; /* buffer "used" includes terminating zero */

						for ( ; j < bb.used; j++) {
							ch = bb.ptr[j];

							if (ch == header_end[header_end_match_pos]) {
								header_end_match_pos++;
								if (4 == header_end_match_pos) {
									last_chunk = cc;
									last_offset = j+1;
									goto found_header_end;
								}
							} else {
								goto reset_search;
							}
						}
					}
				}
				reset_search: ;
			}
		}
		found_header_end:

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				buffer b;

				b.ptr = c->mem->ptr + c->offset;
				b.used = c->mem->used - c->offset;

				if (c == last_chunk) {
					b.used = last_offset + 1;
				}

				buffer_append_string_buffer(con->request.request, &b);

				if (c == last_chunk) {
					c->offset += last_offset;
					break;
				} else {
					/* the whole packet was copied */
					c->offset = c->mem->used - 1;
				}
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 414");

			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		/* xb: consume body chunks until everything announced by
		 * Content-Length arrived */
		for (c = cq->first; c && (dst_cq->bytes_in != (off_t)con->request.content_length); c = c->next) {
			off_t weWant, weHave, toRead;

			weWant = con->request.content_length - dst_cq->bytes_in;

			assert(c->mem->used);

			weHave = c->mem->used - c->offset - 1;

			toRead = weHave > weWant ? weWant : weHave;

			/* xb mod: side-channel copy of the body for X-Pi-Upload requests */
			data_string *ds_pi = (data_string *)array_get_element(con->request.headers, "X-Pi-Upload");
			if (ds_pi) {
				float id;
				sscanf(ds_pi->value->ptr, "%f", &id);
				if (id != con->upload_id) {
					log_error_write(srv, __FILE__, __LINE__, "s", "newfile");
					char path[256];
					/* fixed: bounded snprintf instead of sprintf */
					snprintf(path, sizeof(path), "/tmp/upload_vid/%f", id);
					/* fixed: open() result was never checked; a failed open
					 * previously left -1 in con->upload_fd and every later
					 * write() silently failed */
					int fd = open(path, O_CREAT | O_WRONLY, 0777);
					if (-1 == fd) {
						log_error_write(srv, __FILE__, __LINE__, "sss",
								"pi-upload: open failed:", path, strerror(errno));
					}
					con->upload_fd = fd;
					con->upload_id = id;
				}
				/* fixed: was (int)ds_pi — truncating a pointer to int; log a
				 * boolean presence flag instead */
				log_error_write(srv, __FILE__, __LINE__, "sboood", "pi-upload:", ds_pi->value,
						weHave, (off_t)dst_cq->bytes_in, (off_t)con->request.content_length,
						(int)(ds_pi != NULL));
				/* best-effort side copy: log failures but don't fail the request */
				if (con->upload_fd != -1 &&
				    toRead != write(con->upload_fd, c->mem->ptr + c->offset, toRead)) {
					log_error_write(srv, __FILE__, __LINE__, "ss",
							"pi-upload: write failed:", strerror(errno));
				}
			}

			/* copy everything into a chunkqueue which might use tempfiles */
			if (con->request.content_length > 64 * 1024) {
				chunk *dst_c = NULL;

				/* copy everything to max 1Mb sized tempfiles */

				/*
				 * if the last chunk is
				 * - smaller than 1Mb (size < 1Mb)
				 * - not read yet (offset == 0)
				 * -> append to it
				 * otherwise
				 * -> create a new chunk
				 */
				if (dst_cq->last &&
				    dst_cq->last->type == FILE_CHUNK &&
				    dst_cq->last->file.is_temp &&
				    dst_cq->last->offset == 0) {
					/* ok, take the last chunk for our job */

					if (dst_cq->last->file.length < 1 * 1024 * 1024) {
						dst_c = dst_cq->last;

						if (dst_c->file.fd == -1) {
							/* this should not happen as we cache the fd, but you never know */
							dst_c->file.fd = open(dst_c->file.name->ptr, O_WRONLY | O_APPEND);
#ifdef FD_CLOEXEC
							fcntl(dst_c->file.fd, F_SETFD, FD_CLOEXEC);
#endif
						}
					} else {
						/* the chunk is too large now, close it */
						dst_c = dst_cq->last;

						if (dst_c->file.fd != -1) {
							close(dst_c->file.fd);
							dst_c->file.fd = -1;
						}
						dst_c = chunkqueue_get_append_tempfile(dst_cq);
					}
				} else {
					dst_c = chunkqueue_get_append_tempfile(dst_cq);
				}

				/* we have a chunk, let's write to it */
				if (dst_c->file.fd == -1) {
					/* we don't have file to write to,
					 * EACCES might be one reason.
					 *
					 * Instead of sending 500 we send 413 and say the request is too large
					 */
					log_error_write(srv, __FILE__, __LINE__, "sbs",
							"denying upload as opening to temp-file for upload failed:",
							dst_c->file.name, strerror(errno));

					con->http_status = 413; /* Request-Entity too large */
					con->keep_alive = 0;
					connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
					break;
				}

				/* xb mod: for X-Pi-Upload requests the body was already
				 * written to the side-channel fd above */
				if (!ds_pi && toRead != write(dst_c->file.fd, c->mem->ptr + c->offset, toRead)) {
					/* write failed for some reason ... disk full ? */
					log_error_write(srv, __FILE__, __LINE__, "sbs",
							"denying upload as writing to file failed:",
							dst_c->file.name, strerror(errno));

					con->http_status = 413; /* Request-Entity too large */
					con->keep_alive = 0;
					connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

					close(dst_c->file.fd);
					dst_c->file.fd = -1;
					break;
				}

				dst_c->file.length += toRead;

				if (dst_cq->bytes_in + toRead == (off_t)con->request.content_length) {
					/* we read everything, close the chunk */
					close(dst_c->file.fd);
					dst_c->file.fd = -1;
				}
			} else {
				buffer *b;

				if (dst_cq->last && dst_cq->last->type == MEM_CHUNK) {
					b = dst_cq->last->mem;
				} else {
					b = chunkqueue_get_append_buffer(dst_cq);
					/* prepare buffer size for remaining POST data; is < 64kb */
					buffer_prepare_copy(b, con->request.content_length - dst_cq->bytes_in + 1);
				}
				buffer_append_string_len(b, c->mem->ptr + c->offset, toRead);
			}

			c->offset += toRead;
			dst_cq->bytes_in += toRead;
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
			log_error_write(srv, __FILE__, __LINE__, "s", "endfile: ok");
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't got enough data to leave one of
	 * the READ states; the only way is to leave here */
	if (is_closed && ostate == con->state) {
		connection_set_state(srv, con, CON_STATE_ERROR);
		log_error_write(srv, __FILE__, __LINE__, "s", "endfile: error");
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
/**
 * fdevent callback for the backend (proxy -> origin server) socket.
 *
 * @param s       the server instance (passed as void* by the fdevent layer)
 * @param ctx     the handler_ctx of this proxy request
 * @param revents bitmask of FDEVENT_IN / FDEVENT_OUT / FDEVENT_HUP / FDEVENT_ERR
 * @return        HANDLER_FINISHED when the event was consumed (always, except
 *                when a pending connect()/write() is continued via
 *                mod_proxy_handle_subrequest(), whose result is forwarded)
 */
static handler_t proxy_handle_fdevent(void *s, void *ctx, int revents)
{
	server *srv = (server *)s;
	handler_ctx *hctx = ctx;
	connection *con = hctx->remote_conn;
	plugin_data *p = hctx->plugin_data;

	/* readable and we are waiting for the backend's response */
	if ((revents & FDEVENT_IN) && hctx->state == PROXY_STATE_READ) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-in", hctx->state);
		}

		switch (proxy_demux_response(srv, hctx)) {
		case 0:
			/* more response data expected; wait for the next event */
			break;
		case 1:
			/* backend response fully read; release the backend slot.
			 * NOTE(review): usage is decremented here and the connection
			 * closed right after - confirm proxy_connection_close() does
			 * not decrement usage again. */
			hctx->host->usage--;

			/* we are done */
			proxy_connection_close(srv, hctx);
			joblist_append(srv, con);
			return HANDLER_FINISHED;
		case -1:
			if (con->file_started == 0) {
				/* nothing has been sent out yet, send a 500 */
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
				con->http_status = 500;
				con->mode = DIRECT;
			} else {
				/* response might have been already started, kill the connection */
				connection_set_state(srv, con, CON_STATE_ERROR);
			}
			joblist_append(srv, con);
			return HANDLER_FINISHED;
		}
	}

	if (revents & FDEVENT_OUT) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-out", hctx->state);
		}

		if (hctx->state == PROXY_STATE_CONNECT || hctx->state == PROXY_STATE_WRITE) {
			/* we are allowed to send something out
			 *
			 * 1. in a unfinished connect() call
			 * 2. in a unfinished write() call (long POST request)
			 */
			return mod_proxy_handle_subrequest(srv, con, p);
		} else {
			/* writable in an unexpected state; log and ignore */
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: out", hctx->state);
		}
	}

	/* perhaps this issue is already handled */
	if (revents & FDEVENT_HUP) {
		if (p->conf.debug) {
			log_error_write(srv, __FILE__, __LINE__, "sd", "proxy: fdevent-hup", hctx->state);
		}

		if (hctx->state == PROXY_STATE_CONNECT) {
			/* connect() -> EINPROGRESS -> HUP: the backend refused or is
			 * unreachable; answer the client with 503.
			 * NOTE(review): http_status/mode are assigned *after*
			 * proxy_connection_close() and joblist_append(); preserved
			 * as-is, but verify the job run picks these values up. */
			proxy_connection_close(srv, hctx);
			joblist_append(srv, con);

			con->http_status = 503;
			con->mode = DIRECT;

			return HANDLER_FINISHED;
		}

		/* HUP after the response started: treat the response as complete */
		con->file_finished = 1;

		proxy_connection_close(srv, hctx);
		joblist_append(srv, con);
	} else if (revents & FDEVENT_ERR) {
		/* kill all connections to the proxy process */
		log_error_write(srv, __FILE__, __LINE__, "sd", "proxy-FDEVENT_ERR, but no HUP", revents);

		joblist_append(srv, con);
		proxy_connection_close(srv, hctx);
	}

	return HANDLER_FINISHED;
}
/* ******************
 * Program entry point.
 *
 * Boot sequence: parse command-line options, read the config, sanity-check
 * privileges, load plugins, open the pid-file, set fd limits, bind the
 * listen sockets, drop root (setgid/chroot/setuid), daemonize, install
 * signal handlers, optionally fork watcher/worker processes, then run the
 * fdevent main loop until shutdown.
 ***************** */
int main(int argc, char **argv)
{
	server *srv = NULL;
	int print_config = 0;
	int test_config = 0;
	int i_am_root;
	int o;
	int num_childs = 0;
	int pid_fd = -1, fd;
	size_t i;
#ifdef HAVE_SIGACTION
	struct sigaction act;
#endif
#ifdef HAVE_GETRLIMIT
	struct rlimit rlim;
#endif

#ifdef USE_ALARM
	/* 1-second periodic timer that drives the timeout handling below */
	struct itimerval interval;

	interval.it_interval.tv_sec = 1;
	interval.it_interval.tv_usec = 0;
	interval.it_value.tv_sec = 1;
	interval.it_value.tv_usec = 0;
#endif

	/* for nice %b handling in strftime() */
	setlocale(LC_TIME, "C");

	if (NULL == (srv = server_init())) {
		fprintf(stderr, "did this really happen?\n");
		return -1;
	}

	/* init structs done */

	srv->srvconf.port = 0;
#ifdef HAVE_GETUID
	i_am_root = (getuid() == 0);
#else
	i_am_root = 0;
#endif
	/* by default the server daemonizes */
	srv->srvconf.dont_daemonize = 0;

	/* parse the command-line options */
	while (-1 != (o = getopt(argc, argv, "f:m:hvVDpt"))) {
		switch (o) {
		case 'f':
			if (config_read(srv, optarg)) {
				server_free(srv);
				return -1;
			}
			break;
		case 'm':
			buffer_copy_string(srv->srvconf.modules_dir, optarg);
			break;
		case 'p':
			print_config = 1;
			break;
		case 't':
			test_config = 1;
			break;
		case 'D':
			srv->srvconf.dont_daemonize = 1;
			break;
		case 'v':
			show_version();
			return 0;
		case 'V':
			show_features();
			return 0;
		case 'h':
			show_help();
			return 0;
		default:
			show_help();
			server_free(srv);
			return -1;
		}
	}

	if (!srv->config_storage) {
		log_error_write(srv, __FILE__, __LINE__, "s", "No configuration available. Try using -f option.");
		server_free(srv);
		return -1;
	}

	if (print_config) {
		data_unset *dc = srv->config_context->data[0];
		if (dc) {
			dc->print(dc, 0);
			fprintf(stdout, "\n");
		} else {
			/* shouldn't happen */
			fprintf(stderr, "global config not found\n");
		}
	}

	/* -t only confirms the config parsed; no deeper checks are run */
	if (test_config) {
		printf("Syntax OK\n");
	}

	if (test_config || print_config) {
		server_free(srv);
		return 0;
	}

	/* close stdin and stdout, as they are not needed */
	openDevNull(STDIN_FILENO);
	openDevNull(STDOUT_FILENO);

	/* fill in defaults for everything the config did not set */
	if (0 != config_set_defaults(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "setting default values failed");
		server_free(srv);
		return -1;
	}

	/* UID handling */
#ifdef HAVE_GETUID
	if (!i_am_root && (geteuid() == 0 || getegid() == 0)) {
		/* we are setuid-root: the real uid is not 0, but the effective
		 * uid/gid is, so the binary could access any file unrestricted.
		 * Refuse to run in that state. */
		log_error_write(srv, __FILE__, __LINE__, "s", "Are you nuts ? Don't apply a SUID bit to this binary");
		server_free(srv);
		return -1;
	}
#endif

	/* check document-root */
	if (srv->config_storage[0]->document_root->used <= 1) {
		log_error_write(srv, __FILE__, __LINE__, "s", "document-root is not set\n");
		server_free(srv);
		return -1;
	}

	/* Load the plugins (shared objects named in the config) and resolve
	 * their entry points.  This happens exactly once, at startup, so
	 * adding a plugin requires a server restart.  (A static-linking
	 * variant of plugins_load exists but has no real implementation.) */
	if (plugins_load(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "loading plugins finally failed");
		plugins_free(srv);
		server_free(srv);
		return -1;
	}

	/* open pid file BEFORE chroot */
	if (srv->srvconf.pid_file->used) {
		/* O_EXCL together with O_CREAT: fail if the file already exists */
		if (-1 == (pid_fd = open(srv->srvconf.pid_file->ptr, O_WRONLY | O_CREAT | O_EXCL | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))) {
			struct stat st;

			if (errno != EEXIST) {
				log_error_write(srv, __FILE__, __LINE__, "sbs", "opening pid-file failed:", srv->srvconf.pid_file, strerror(errno));
				return -1;
			}

			/* the pid-file already exists; inspect it */
			if (0 != stat(srv->srvconf.pid_file->ptr, &st)) {
				log_error_write(srv, __FILE__, __LINE__, "sbs", "stating existing pid-file failed:", srv->srvconf.pid_file, strerror(errno));
			}

			/* NOTE(review): if stat() failed above, st is read here while
			 * uninitialized - execution falls through after only logging */
			if (!S_ISREG(st.st_mode)) {
				log_error_write(srv, __FILE__, __LINE__, "sb", "pid-file exists and isn't regular file:", srv->srvconf.pid_file);
				return -1;
			}

			/* reopen without O_EXCL: the file exists and is regular, so
			 * truncate/overwrite it */
			if (-1 == (pid_fd = open(srv->srvconf.pid_file->ptr, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))) {
				log_error_write(srv, __FILE__, __LINE__, "sbs", "opening pid-file failed:", srv->srvconf.pid_file, strerror(errno));
				return -1;
			}
		}
	}

	if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
		/* select limits itself as it is a hard limit and will lead to a
		 * segfault; we add some safety (the -200 margin) */
		srv->max_fds = FD_SETSIZE - 200;
	} else {
		srv->max_fds = 4096;
	}

	/* started as root */
	if (i_am_root) {
		struct group *grp = NULL;
		struct passwd *pwd = NULL;
		int use_rlimit = 1;

#ifdef HAVE_VALGRIND_VALGRIND_H
		if (RUNNING_ON_VALGRIND) use_rlimit = 0;
#endif

#ifdef HAVE_GETRLIMIT
		/* RLIMIT_NOFILE is one greater than the maximum fd number this
		 * process may open; see getrlimit(2)/setrlimit(2) */
		if (0 != getrlimit(RLIMIT_NOFILE, &rlim)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't get 'max filedescriptors'", strerror(errno));
			return -1;
		}

		if (use_rlimit && srv->srvconf.max_fds) {
			/* set both soft and hard limit to the configured value */
			rlim.rlim_cur = srv->srvconf.max_fds;
			rlim.rlim_max = srv->srvconf.max_fds;
			if (0 != setrlimit(RLIMIT_NOFILE, &rlim)) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't set 'max filedescriptors'", strerror(errno));
				return -1;
			}
		}

		/* adjust max_fds to what was actually granted */
		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			srv->max_fds = rlim.rlim_cur < FD_SETSIZE - 200 ? rlim.rlim_cur : FD_SETSIZE - 200;
		} else {
			srv->max_fds = rlim.rlim_cur;
		}

		/* set core file rlimit, if enable_cores is set */
		if (use_rlimit && srv->srvconf.enable_cores && getrlimit(RLIMIT_CORE, &rlim) == 0) {
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_CORE, &rlim);
		}
#endif
		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			/* don't raise the limit above FD_SET_SIZE */
			if (srv->max_fds > FD_SETSIZE - 200) {
				log_error_write(srv, __FILE__, __LINE__, "sd", "can't raise max filedescriptors above", FD_SETSIZE - 200, "if event-handler is 'select'. Use 'poll' or something else or reduce server.max-fds.");
				return -1;
			}
		}
#ifdef HAVE_PWD_H
		/* set user and group */
		if (srv->srvconf.username->used) {
			/* look up the configured user */
			if (NULL == (pwd = getpwnam(srv->srvconf.username->ptr))) {
				log_error_write(srv, __FILE__, __LINE__, "sb", "can't find username", srv->srvconf.username);
				return -1;
			}

			if (pwd->pw_uid == 0) {
				log_error_write(srv, __FILE__, __LINE__, "s", "I will not set uid to 0\n");
				return -1;
			}
		}

		if (srv->srvconf.groupname->used) {
			/* look up the configured group */
			if (NULL == (grp = getgrnam(srv->srvconf.groupname->ptr))) {
				log_error_write(srv, __FILE__, __LINE__, "sb", "can't find groupname", srv->srvconf.groupname);
				return -1;
			}
			if (grp->gr_gid == 0) {
				log_error_write(srv, __FILE__, __LINE__, "s", "I will not set gid to 0\n");
				return -1;
			}
		}
#endif
		/* we need root-perms for port < 1024: create the listen sockets,
		 * bind and start listening */
		if (0 != network_init(srv)) {
			plugins_free(srv);
			server_free(srv);
			return -1;
		}
#ifdef HAVE_PWD_H
		/* Change group before chroot, when we have access to /etc/group */
		if (srv->srvconf.groupname->used) {
			setgid(grp->gr_gid);
			setgroups(0, NULL); /* drop supplementary groups */
			if (srv->srvconf.username->used) {
				/* initialize the supplementary group list from the group
				 * database for the configured user */
				initgroups(srv->srvconf.username->ptr, grp->gr_gid);
			}
		}
#endif
#ifdef HAVE_CHROOT
		if (srv->srvconf.changeroot->used) {
			/* cache the timezone before chroot: tzset() reads TZ and the
			 * zoneinfo files, which may be unreachable afterwards */
			tzset();

			/* from here on "/" refers to the configured chroot directory,
			 * inherited by all child processes */
			if (-1 == chroot(srv->srvconf.changeroot->ptr)) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "chroot failed: ", strerror(errno));
				return -1;
			}
			/* change into the (new) root directory */
			if (-1 == chdir("/")) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "chdir failed: ", strerror(errno));
				return -1;
			}
		}
#endif
#ifdef HAVE_PWD_H
		/* drop root privs */
		if (srv->srvconf.username->used) {
			setuid(pwd->pw_uid);
		}
#endif
#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_DUMPABLE)
		/* on IRIX 6.5.30 they have prctl() but no DUMPABLE */
		if (srv->srvconf.enable_cores) {
			prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
		}
#endif
	} else {
		/* setup when NOT started as root */
#ifdef HAVE_GETRLIMIT
		if (0 != getrlimit(RLIMIT_NOFILE, &rlim)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't get 'max filedescriptors'", strerror(errno));
			return -1;
		}

		/* we are not root and can't increase the fd-limit, but we can
		 * reduce it */
		if (srv->srvconf.max_fds && srv->srvconf.max_fds < rlim.rlim_cur) {
			/* only the soft limit can be lowered */
			rlim.rlim_cur = srv->srvconf.max_fds;
			if (0 != setrlimit(RLIMIT_NOFILE, &rlim)) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't set 'max filedescriptors'", strerror(errno));
				return -1;
			}
		}

		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			srv->max_fds = rlim.rlim_cur < FD_SETSIZE - 200 ? rlim.rlim_cur : FD_SETSIZE - 200;
		} else {
			srv->max_fds = rlim.rlim_cur;
		}

		/* set core file rlimit, if enable_cores is set */
		if (srv->srvconf.enable_cores && getrlimit(RLIMIT_CORE, &rlim) == 0) {
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_CORE, &rlim);
		}
#endif
		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			/* don't raise the limit above FD_SET_SIZE */
			if (srv->max_fds > FD_SETSIZE - 200) {
				log_error_write(srv, __FILE__, __LINE__, "sd", "can't raise max filedescriptors above", FD_SETSIZE - 200, "if event-handler is 'select'. Use 'poll' or something else or reduce server.max-fds.");
				return -1;
			}
		}

		if (0 != network_init(srv)) {
			plugins_free(srv);
			server_free(srv);
			return -1;
		}
	}

	/* set max-conns */
	if (srv->srvconf.max_conns > srv->max_fds) {
		/* we can't have more connections than max-fds */
		srv->max_conns = srv->max_fds;
	} else if (srv->srvconf.max_conns) {
		/* otherwise respect the wishes of the user */
		srv->max_conns = srv->srvconf.max_conns;
	} else {
		/* or use the default */
		srv->max_conns = srv->max_fds;
	}

	/* plugins_load() resolved the plugin entry points earlier; now let
	 * every plugin register the hooks it implements (recorded in
	 * srv->plugins_slot) */
	if (HANDLER_GO_ON != plugins_call_init(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Initialization of plugins failed. Going down.");
		plugins_free(srv);
		network_close(srv);
		server_free(srv);
		return -1;
	}

#ifdef HAVE_FORK
	/* network is up, let's deamonize ourself */
	if (srv->srvconf.dont_daemonize == 0) daemonize();
#endif

	srv->gid = getgid();
	srv->uid = getuid();

	/* write pid file */
	if (pid_fd != -1) {
		buffer_copy_long(srv->tmp_buf, getpid());
		buffer_append_string_len(srv->tmp_buf, CONST_STR_LEN("\n"));
		write(pid_fd, srv->tmp_buf->ptr, srv->tmp_buf->used - 1);
		close(pid_fd);
		pid_fd = -1;
	}

	/* Close stderr ASAP in the child process to make sure that nothing is
	 * being written to that fd which may not be valid anymore; switch to
	 * the configured errorlog instead. */
	if (-1 == log_error_open(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Opening errorlog failed. Going down.");
		plugins_free(srv);
		network_close(srv);
		server_free(srv);
		return -1;
	}

	/* let every plugin apply its default configuration */
	if (HANDLER_GO_ON != plugins_call_set_defaults(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Configuration of plugins failed. Going down.");
		plugins_free(srv);
		network_close(srv);
		server_free(srv);
		return -1;
	}

	/* dump unused config-keys */
	for (i = 0; i < srv->config_context->used; i++) {
		array *config = ((data_config *) srv->config_context->data[i])->value;
		size_t j;

		for (j = 0; config && j < config->used; j++) {
			data_unset *du = config->data[j];

			/* all var.* is known as user defined variable */
			if (strncmp(du->key->ptr, "var.", sizeof("var.") - 1) == 0) {
				continue;
			}

			if (NULL == array_get_element(srv->config_touched, du->key->ptr)) {
				log_error_write(srv, __FILE__, __LINE__, "sbs", "WARNING: unknown config-key:", du->key, "(ignored)");
			}
		}
	}

	if (srv->config_unsupported) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Configuration contains unsupported keys. Going down.");
	}

	if (srv->config_deprecated) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Configuration contains deprecated keys. Going down.");
	}

	if (srv->config_unsupported || srv->config_deprecated) {
		plugins_free(srv);
		network_close(srv);
		server_free(srv);
		return -1;
	}

	/* install the signal handlers; SIGPIPE (reader of a pipe/socket went
	 * away while we write) is ignored */
#ifdef HAVE_SIGACTION
	memset(&act, 0, sizeof(act));
	act.sa_handler = SIG_IGN;
	sigaction(SIGPIPE, &act, NULL);
	sigaction(SIGUSR1, &act, NULL);
# if defined(SA_SIGINFO)
	act.sa_sigaction = sigaction_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
# else
	act.sa_handler = signal_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
# endif
	sigaction(SIGINT, &act, NULL);
	sigaction(SIGTERM, &act, NULL);
	sigaction(SIGHUP, &act, NULL);
	sigaction(SIGALRM, &act, NULL);
	sigaction(SIGCHLD, &act, NULL);
#elif defined(HAVE_SIGNAL)
	/* ignore the SIGPIPE from sendfile() */
	signal(SIGPIPE, SIG_IGN);
	signal(SIGUSR1, SIG_IGN);
	signal(SIGALRM, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGHUP, signal_handler);
	signal(SIGCHLD, signal_handler);
	signal(SIGINT, signal_handler);
#endif

#ifdef USE_ALARM
	signal(SIGALRM, signal_handler);

	/* setup periodic timer (1 second): ITIMER_REAL decrements in real
	 * time and delivers SIGALRM on expiry - see setitimer(2) */
	if (setitimer(ITIMER_REAL, &interval, NULL)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "setting timer failed");
		return -1;
	}

	getitimer(ITIMER_REAL, &interval);
#endif

#ifdef HAVE_FORK
	/* *************************
	 * start watcher and workers
	 * *************************
	 *
	 * Fork max_worker child processes ("workers") that accept and handle
	 * client connections.  The parent becomes the "watcher": it only
	 * waits for dead workers and respawns them, and on shutdown it stops
	 * all workers and cleans up. */
	num_childs = srv->srvconf.max_worker; /* maximum number of workers */
	if (num_childs > 0) {
		int child = 0;
		while (!child && !srv_shutdown && !graceful_shutdown) {
			if (num_childs > 0) {
				/* still workers left to spawn */
				switch (fork()) {
				case -1:
					return -1;
				case 0:
					child = 1;
					break;
				default:
					num_childs--;
					break;
				}
			} else {
				/* watcher: once enough workers exist, loop here waiting;
				 * as soon as a worker dies, spawn a replacement.  On
				 * SIGHUP, cycle the logs and forward the signal to the
				 * whole process group (including ourselves). */
				int status;

				if (-1 != wait(&status)) {
					/* one of our workers went away */
					num_childs++;
				} else {
					switch (errno) {
					case EINTR:
						/* if we receive a SIGHUP we have to close our logs
						 * ourself as we don't have the mainloop who can
						 * help us here */
						if (handle_sig_hup) {
							handle_sig_hup = 0;
							log_error_cycle(srv);

							/* forward to all procs in the process-group;
							 * we also send it ourself */
							if (!forwarded_sig_hup) {
								forwarded_sig_hup = 1;
								kill(0, SIGHUP);
							}
						}
						break;
					default:
						break;
					}
				}
			}
		}

		/* for the parent (the watcher) this is the exit-point;
		 * everything below runs only in the workers */
		if (!child) {
			/* kill all children too */
			if (graceful_shutdown) {
				kill(0, SIGINT);
			} else if (srv_shutdown) {
				kill(0, SIGTERM);
			}

			log_error_close(srv);
			network_close(srv);
			connections_free(srv);
			plugins_free(srv);
			server_free(srv);
			return 0;
		}
	}
#endif

	/* **************************
	 * worker code starts here
	 * ************************** */

	if (NULL == (srv->ev = fdevent_init(srv->max_fds + 1, srv->event_handler))) {
		log_error_write(srv, __FILE__, __LINE__, "s", "fdevent_init failed");
		return -1;
	}

	/* kqueue() is called here, select resets its internals, all server
	 * sockets get their handlers: each listen socket is registered with
	 * network_server_handle_fdevent(), which accepts new connections */
	if (0 != network_register_fdevents(srv)) {
		plugins_free(srv);
		network_close(srv);
		server_free(srv);
		return -1;
	}

	/* might fail if user is using fam (not gamin) and famd isn't running */
	if (NULL == (srv->stat_cache = stat_cache_init())) {
		log_error_write(srv, __FILE__, __LINE__, "s", "stat-cache could not be setup, dieing.");
		return -1;
	}

#ifdef HAVE_FAM_H
	/* setup FAM */
	if (srv->srvconf.stat_cache_engine == STAT_CACHE_ENGINE_FAM) {
		if (0 != FAMOpen2(srv->stat_cache->fam, "lighttpd")) {
			log_error_write(srv, __FILE__, __LINE__, "s", "could not open a fam connection, dieing.");
			return -1;
		}
#ifdef HAVE_FAMNOEXISTS
		FAMNoExists(srv->stat_cache->fam);
#endif

		srv->stat_cache->fam_fcce_ndx = -1;
		fdevent_register(srv->ev, FAMCONNECTION_GETFD(srv->stat_cache->fam), stat_cache_handle_fdevent, NULL);
		fdevent_event_add(srv->ev, &(srv->stat_cache->fam_fcce_ndx), FAMCONNECTION_GETFD(srv->stat_cache->fam), FDEVENT_IN);
	}
#endif

	/* get the current number of FDs */
	srv->cur_fds = open("/dev/null", O_RDONLY);
	close(srv->cur_fds);

	for (i = 0; i < srv->srv_sockets.used; i++) {
		server_socket *srv_socket = srv->srv_sockets.ptr[i];
		/* close fd on exec (cgi) */
		if (-1 == fdevent_fcntl_set(srv->ev, srv_socket->fd)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed:", strerror(errno));
			return -1;
		}
	}

	/* *******************
	 * main-loop of the worker
	 * ******************* */
	while (!srv_shutdown) {
		int n;
		size_t ndx;
		time_t min_ts;

		/* SIGHUP received: cycle the logfiles and notify the plugins */
		if (handle_sig_hup) {
			handler_t r;

			/* reset notification */
			handle_sig_hup = 0;

			/* cycle logfiles: let the plugins react first */
			switch (r = plugins_call_handle_sighup(srv)) {
			case HANDLER_GO_ON:
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sd", "sighup-handler return with an error", r);
				break;
			}

			if (-1 == log_error_cycle(srv)) {
				log_error_write(srv, __FILE__, __LINE__, "s", "cycling errorlog failed, dying");
				return -1;
			} else {
#ifdef HAVE_SIGACTION
				log_error_write(srv, __FILE__, __LINE__, "sdsd", "logfiles cycled UID =", last_sighup_info.si_uid, "PID =", last_sighup_info.si_pid);
#else
				log_error_write(srv, __FILE__, __LINE__, "s", "logfiles cycled");
#endif
			}
		}

		/* SIGALRM from the 1-second interval timer */
		if (handle_sig_alarm) {
			/* a new second */
#ifdef USE_ALARM
			/* reset notification */
			handle_sig_alarm = 0;
#endif

			/* get current time (1-second resolution) */
			min_ts = time(NULL);

			/* if the second actually changed, run the per-second
			 * housekeeping: plugin triggers, stat-cache cleanup and -
			 * most importantly - the connection timeout checks */
			if (min_ts != srv->cur_ts) {
				int cs = 0;
				connections *conns = srv->conns;
				handler_t r;

				switch (r = plugins_call_handle_trigger(srv)) {
				case HANDLER_GO_ON:
					break;
				case HANDLER_ERROR:
					log_error_write(srv, __FILE__, __LINE__, "s", "one of the triggers failed");
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "d", r);
					break;
				}

				/* trigger waitpid */
				srv->cur_ts = min_ts;

				/* cleanup stat-cache (once per second) */
				stat_cache_trigger_cleanup(srv);

				/* check all connections for timeouts */
				for (ndx = 0; ndx < conns->used; ndx++) {
					int changed = 0;
					connection *con;
					int t_diff;

					con = conns->ptr[ndx];

					/* connection is reading */
					if (con->state == CON_STATE_READ || con->state == CON_STATE_READ_POST) {
						if (con->request_count == 1) {
							/* first request on this connection */
							if (srv->cur_ts - con->read_idle_ts > con->conf.max_read_idle) {
								/* time - out */
#if 0
								log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed - read-timeout:", con->fd);
#endif
								connection_set_state(srv, con, CON_STATE_ERROR);
								changed = 1;
							}
						} else {
							/* keep-alive connection handling further requests */
							if (srv->cur_ts - con->read_idle_ts > con->conf.max_keep_alive_idle) {
								/* time - out */
#if 0
								log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed - read-timeout:", con->fd);
#endif
								connection_set_state(srv, con, CON_STATE_ERROR);
								changed = 1;
							}
						}
					}

					/* connection is writing */
					if ((con->state == CON_STATE_WRITE) && (con->write_request_ts != 0)) {
#if 0
						if (srv->cur_ts - con->write_request_ts > 60) {
							log_error_write(srv, __FILE__, __LINE__, "sdd", "connection closed - pre-write-request-timeout:", con->fd, srv->cur_ts - con->write_request_ts);
						}
#endif
						if (srv->cur_ts - con->write_request_ts > con->conf.max_write_idle) {
							/* time - out */
#if 1
							log_error_write(srv, __FILE__, __LINE__, "sbsosds", "NOTE: a request for", con->request.uri, "timed out after writing", con->bytes_written, "bytes. We waited", (int) con->conf.max_write_idle, "seconds. If this a problem increase server.max-write-idle");
#endif
							connection_set_state(srv, con, CON_STATE_ERROR);
							changed = 1;
						}
					}

					/* we don't like div by zero */
					if (0 == (t_diff = srv->cur_ts - con->connection_start)) t_diff = 1;

					/* This is not a timeout check: each connection has a
					 * kbytes_per_second budget; a connection that exceeded
					 * it was throttled (traffic_limit_reached) and is
					 * re-enabled here once its average rate drops below
					 * the budget again, balancing traffic between
					 * connections. */
					if (con->traffic_limit_reached &&
					    (con->conf.kbytes_per_second == 0 ||
					     ((con->bytes_written / t_diff) < con->conf.kbytes_per_second * 1024))) {
						/* enable connection again */
						con->traffic_limit_reached = 0;
						changed = 1;
					}

					if (changed) {
						connection_state_machine(srv, con);
					}

					con->bytes_written_cur_second = 0;
					*(con->conf.global_bytes_per_second_cnt_ptr) = 0;
#if 0
					if (cs == 0) {
						fprintf(stderr, "connection-state: ");
						cs = 1;
					}
					fprintf(stderr, "c[%d,%d]: %s ", con->fd, con->fcgi.fd, connection_get_state(con->state));
#endif
				} /* end of for (ndx = 0; ... */

				if (cs == 1) fprintf(stderr, "\n");
			} /* end of if (min_ts != srv->cur_ts) */
		} /* end of if (handle_sig_alarm) */

		if (srv->sockets_disabled) {
			/* the listen sockets were disabled by the overload handling
			 * below; re-register them once enough fds and connection
			 * slots are free again */
			if ((srv->cur_fds + srv->want_fds < srv->max_fds * 0.8) &&	/* we have enough unused fds */
			    (srv->conns->used < srv->max_conns * 0.9) &&
			    (0 == graceful_shutdown)) {
				for (i = 0; i < srv->srv_sockets.used; i++) {
					server_socket *srv_socket = srv->srv_sockets.ptr[i];
					fdevent_event_add(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd, FDEVENT_IN);
				}

				log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets enabled again");

				srv->sockets_disabled = 0;
			}
		} else {
			/* overload handling */
			if ((srv->cur_fds + srv->want_fds > srv->max_fds * 0.9) ||	/* out of fds */
			    (srv->conns->used > srv->max_conns) ||	/* out of connections */
			    (graceful_shutdown)) {	/* graceful_shutdown */
				/* disable server-fds: stop accepting new connections */
				for (i = 0; i < srv->srv_sockets.used; i++) {
					server_socket *srv_socket = srv->srv_sockets.ptr[i];

					fdevent_event_del(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd);

					if (graceful_shutdown) {
						/* we don't want this socket anymore, closing it right
						 * away will make it possible for the next lighttpd to
						 * take over (graceful restart) */
						fdevent_unregister(srv->ev, srv_socket->fd);
						close(srv_socket->fd);
						srv_socket->fd = -1;

						/* network_close() will cleanup after us */
						if (srv->srvconf.pid_file->used && srv->srvconf.changeroot->used == 0) {
							if (0 != unlink(srv->srvconf.pid_file->ptr)) {
								if (errno != EACCES && errno != EPERM) {
									log_error_write(srv, __FILE__, __LINE__, "sbds", "unlink failed for:", srv->srvconf.pid_file, errno, strerror(errno));
								}
							}
						}
					}
				} /* end of for (i = 0; ... */

				if (graceful_shutdown) {
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] graceful shutdown started");
				} else if (srv->conns->used > srv->max_conns) {
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets disabled, connection limit reached");
				} else {
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets disabled, out-of-fds");
				}

				/* server is overloaded: listen sockets are now disabled */
				srv->sockets_disabled = 1;
			}
		}

		if (graceful_shutdown && srv->conns->used == 0) {
			/* we are in graceful shutdown phase and all connections are
			 * closed; we are ready to terminate without harming anyone */
			srv_shutdown = 1;
		}

		/* we still have some fds to share */
		if (srv->want_fds) {
			/* check the fdwaitqueue for waiting fds */
			int free_fds = srv->max_fds - srv->cur_fds - 16;
			connection *con;

			for (; free_fds > 0 && NULL != (con = fdwaitqueue_unshift(srv, srv->fdwaitqueue)); free_fds--) {
				connection_state_machine(srv, con);
				srv->want_fds--;
			}
		}

		/* **********************************************************
		 * housekeeping done - now serve the actual requests: poll
		 * for I/O events and dispatch each to its registered handler
		 * ********************************************************** */
		if ((n = fdevent_poll(srv->ev, 1000)) > 0) {
			/* n is the number of events */
			int revents;
			int fd_ndx;
#if 0
			if (n > 0) {
				log_error_write(srv, __FILE__, __LINE__, "sd", "polls:", n);
			}
#endif
			fd_ndx = -1;

			/* handle every ready descriptor in turn */
			do {
				fdevent_handler handler;
				void *context;
				handler_t r;

				fd_ndx = fdevent_event_next_fdndx(srv->ev, fd_ndx);
				revents = fdevent_event_get_revent(srv->ev, fd_ndx);
				fd = fdevent_event_get_fd(srv->ev, fd_ndx);
				handler = fdevent_get_handler(srv->ev, fd);
				context = fdevent_get_context(srv->ev, fd);

				/* connection_handle_fdevent needs a joblist_append */
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sdd", "event for", fd, revents);
#endif
				/* dispatch the event to the handler registered for this fd */
				switch (r = (*handler) (srv, context, revents)) {
				case HANDLER_FINISHED:
				case HANDLER_GO_ON:
				case HANDLER_WAIT_FOR_EVENT:
				case HANDLER_WAIT_FOR_FD:
					break;
				case HANDLER_ERROR:
					/* should never happen */
					SEGFAULT();
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "d", r);
					break;
				}
			} while (--n > 0);
		} else if (n < 0 && errno != EINTR) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fdevent_poll failed:", strerror(errno));
		}

		/* the handlers above only ran for fds with I/O events; connections
		 * queued on the joblist still need their state machine advanced */
		for (ndx = 0; ndx < srv->joblist->used; ndx++) {
			connection *con = srv->joblist->ptr[ndx];
			handler_t r;

			connection_state_machine(srv, con);

			switch (r = plugins_call_handle_joblist(srv, con)) {
			case HANDLER_FINISHED:
			case HANDLER_GO_ON:
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "d", r);
				break;
			}

			con->in_joblist = 0;
		}

		srv->joblist->used = 0;
	} /* end of main loop */

	/* remove the pid-file on regular (non-graceful) shutdown */
	if (srv->srvconf.pid_file->used && srv->srvconf.changeroot->used == 0 && 0 == graceful_shutdown) {
		if (0 != unlink(srv->srvconf.pid_file->ptr)) {
			if (errno != EACCES && errno != EPERM) {
				log_error_write(srv, __FILE__, __LINE__, "sbds", "unlink failed for:", srv->srvconf.pid_file, errno, strerror(errno));
			}
		}
	}

#ifdef HAVE_SIGACTION
	log_error_write(srv, __FILE__, __LINE__, "sdsd", "server stopped by UID =", last_sigterm_info.si_uid, "PID =", last_sigterm_info.si_pid);
#else
	log_error_write(srv, __FILE__, __LINE__, "s", "server stopped");
#endif

	/* clean-up */
	log_error_close(srv);
	network_close(srv);
	connections_free(srv);
	plugins_free(srv);
	server_free(srv);

	return 0;
}
/**
 * Read pending bytes from the client socket into con->read_queue.
 *
 * Returns  0 on success (also for EAGAIN/EINTR, which just mean "try later"),
 *         -1 on a fatal read error (connection state set to CON_STATE_ERROR),
 *         -2 when the peer performed an orderly close (EOF).
 */
static int connection_handle_read(server *srv, connection *con) {
	int len;
	buffer *b;
	int toread;

	/* TLS connections take the dedicated OpenSSL read path */
	if (con->conf.is_ssl) {
		return connection_handle_read_ssl(srv, con);
	}
#if defined(__WIN32)
	b = chunkqueue_get_append_buffer(con->read_queue);
	buffer_prepare_copy(b, 4 * 1024);
	len = recv(con->fd, b->ptr, b->size - 1, 0);
#else
	/* ask the kernel how many bytes are already buffered so the read
	 * buffer can be sized to drain them in one read() call */
	if (ioctl(con->fd, FIONREAD, &toread)) {
		log_error_write(srv, __FILE__, __LINE__, "sd",
				"unexpected end-of-file:",
				con->fd);
		return -1;
	}
	b = chunkqueue_get_append_buffer(con->read_queue);
	/* +1 leaves room for the trailing NUL appended below */
	buffer_prepare_copy(b, toread + 1);

	len = read(con->fd, b->ptr, b->size - 1);
#endif

	if (len < 0) {
		con->is_readable = 0;

		if (errno == EAGAIN) return 0;
		if (errno == EINTR) {
			/* we have been interrupted before we could read */
			con->is_readable = 1;
			return 0;
		}

		if (errno != ECONNRESET) {
			/* expected for keep-alive */
			log_error_write(srv, __FILE__, __LINE__, "ssd", "connection closed - read failed: ", strerror(errno), errno);
		}

		connection_set_state(srv, con, CON_STATE_ERROR);

		return -1;
	} else if (len == 0) {
		con->is_readable = 0;
		/* the other end close the connection -> KEEP-ALIVE */

		/* pipelining */

		return -2;
	} else if ((size_t)len < b->size - 1) {
		/* we got less then expected, wait for the next fd-event */

		con->is_readable = 0;
	}

	/* NUL-terminate so the header parser can treat the chunk as a C string;
	 * note b->used therefore counts the NUL as well */
	b->used = len;
	b->ptr[b->used++] = '\0';
	con->bytes_read += len;
#if 0
	dump_packet(b->ptr, len);
#endif

	return 0;
}
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * Drains readable data into con->read_queue, prunes empty chunks, then
 * depending on the current state either scans for the end-of-header
 * ("\r\n\r\n", possibly split across two chunks) or copies POST body data
 * into con->request_content_queue.
 *
 * Returns 0 normally, -1 on a fatal read error.
 */
int connection_handle_read_state(server *srv, connection *con) {
	/**
	 * Recording the connection activity to be used for the purpose of timeout calc
	 * See '/sbin/timeout'
	 * Touching the file '/var/last_tcp_connection_time'
	 *
	 * NOTE(review): this forks a shell on EVERY read event and the return
	 * value is ignored; consider utime()/open(O_CREAT)+close() instead —
	 * kept as-is because the device timeout mechanism depends on it.
	 */
	system("touch /var/last_tcp_connection_time");
	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			/* remote EOF; only an error if a READ state can't be left below */
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	//log_error_write(srv, __FILE__, __LINE__, "sd", "http_status", con->http_status);
	/* a 416 response is already decided; discard any further input */
	switch(con->http_status) {
	case 416:
		return 0;
	default:
		break;
	}

	/* the last chunk might be empty */
	for (c = cq->first; c;) {
		if (cq->first == c && c->mem->used == 0) {
			/* the first node is empty */
			/* ... and it is empty, move it to unused */
			cq->first = c->next;
			if (cq->first == NULL) cq->last = NULL;

			c->next = cq->unused;
			cq->unused = c;
			cq->unused_chunks++;

			c = cq->first;
		} else if (c->next && c->next->mem->used == 0) {
			chunk *fc;
			/* next node is the last one */
			/* ... and it is empty, move it to unused */

			fc = c->next;
			c->next = fc->next;

			fc->next = cq->unused;
			cq->unused = fc;
			cq->unused_chunks++;

			/* the last node was empty */
			if (c->next == NULL) {
				cq->last = c;
			}

			c = c->next;
		} else {
			c = c->next;
		}
	}

	/* we might have got several packets at once */
	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 *
		 */
		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; !last_chunk && c; c = c->next) {
			buffer b;
			size_t i;

			b.ptr = c->mem->ptr + c->offset;
			b.used = c->mem->used - c->offset;

			for (i = 0; !last_chunk && i < b.used; i++) {
				char ch = b.ptr[i];
				size_t have_chars = 0;

				switch (ch) {
				case '\r':
					/* we have to do a 4 char lookup */
					have_chars = b.used - i - 1;

					if (have_chars >= 4) {
						/* all chars are in this buffer */
						if (0 == strncmp(b.ptr + i, "\r\n\r\n", 4)) {
							/* found */
							last_chunk = c;
							last_offset = i + 4;
							break;
						}
					} else {
						chunk *lookahead_chunk = c->next;
						size_t missing_chars;

						/* looks like the following chars are not in the same chunk */
						missing_chars = 4 - have_chars;

						if (lookahead_chunk && lookahead_chunk->type == MEM_CHUNK) {
							/* is the chunk long enough to contain the other chars ? */
							if (lookahead_chunk->mem->used > missing_chars) {
								if (0 == strncmp(b.ptr + i, "\r\n\r\n", have_chars) &&
								    0 == strncmp(lookahead_chunk->mem->ptr, "\r\n\r\n" + have_chars, missing_chars)) {
									last_chunk = lookahead_chunk;
									last_offset = missing_chars;
									break;
								}
							} else {
								/* a splited \r \n */
								break;
							}
						}
					}
					break;
				}
			}
		}

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				buffer b;

				b.ptr = c->mem->ptr + c->offset;
				b.used = c->mem->used - c->offset;

				if (c == last_chunk) {
					b.used = last_offset + 1;
				}

				buffer_append_string_buffer(con->request.request, &b);

				if (c == last_chunk) {
					c->offset += last_offset;
					break;
				} else {
					/* the whole packet was copied */
					c->offset = c->mem->used - 1;
				}
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "sdd", "oversized request-header", con->http_status, con->file_finished);
			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		/* copy as much body data as is available (and wanted) per chunk */
		for (c = cq->first; c && (dst_cq->bytes_in != (off_t)con->request.content_length); c = c->next) {
			off_t weWant, weHave, toRead;

			weWant = con->request.content_length - dst_cq->bytes_in;

			assert(c->mem->used);

			/* -1: the chunk's trailing NUL is not payload */
			weHave = c->mem->used - c->offset - 1;

			toRead = weHave > weWant ? weWant : weHave;

			buffer *b;

			b = chunkqueue_get_append_buffer(dst_cq);
			buffer_copy_string_len(b, c->mem->ptr + c->offset, toRead);

			c->offset += toRead;
			dst_cq->bytes_in += toRead;
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length ||
		    dst_cq->bytes_in - dst_cq->bytes_out > srv->srvconf.max_request_size * 1024) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't got enough data to leave one of the READ states
	 * the only way is to leave here */
	if (is_closed && ostate == con->state) {
		/* FIX: format was "sdd" (two ints) but FOUR args follow the string;
		 * "sddd" matches is_closed, ostate, con->state */
		log_error_write(srv, __FILE__, __LINE__, "sddd", "Connection got closed, not enough data to leave one of the READ states", is_closed, ostate, con->state);
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
/**
 * Read all pending TLS plaintext from con->ssl into con->read_queue.
 *
 * Returns  0 on success or when SSL wants another read/write event,
 *         -1 on a fatal SSL error (state set to CON_STATE_ERROR),
 *         -2 on a clean remote close (SSL_read() returned 0).
 *
 * On SSL_ERROR_WANT_* the partially prepared buffer is parked in
 * con->ssl_error_want_reuse_buffer and reused on the next call so the
 * buffer is not re-allocated/re-zeroed while the handshake is pending.
 */
static int connection_handle_read_ssl(server *srv, connection *con) {
#ifdef USE_OPENSSL
	int r, ssl_err, len;
	buffer *b = NULL;

	if (!con->conf.is_ssl) return -1;

	/* don't resize the buffer if we were in SSL_ERROR_WANT_* */

	/* clear stale entries so the error-queue reflects only this read */
	ERR_clear_error();
	do {
		if (!con->ssl_error_want_reuse_buffer) {
			b = buffer_init();
			buffer_prepare_copy(b, SSL_pending(con->ssl) + (16 * 1024)); /* the pending bytes + 16kb */

			/* overwrite everything with 0 */
			memset(b->ptr, 0, b->size);
		} else {
			b = con->ssl_error_want_reuse_buffer;
		}

		len = SSL_read(con->ssl, b->ptr, b->size - 1);
		con->ssl_error_want_reuse_buffer = NULL; /* reuse it only once */

		if (len > 0) {
			b->used = len;
			b->ptr[b->used++] = '\0';

			/* we move the buffer to the chunk-queue, no need to free it */

			chunkqueue_append_buffer_weak(con->read_queue, b);
			con->bytes_read += len;
			b = NULL;
		}
	} while (len > 0);  /* loop until SSL_read() drains everything buffered */


	if (len < 0) {
		switch ((r = SSL_get_error(con->ssl, len))) {
		case SSL_ERROR_WANT_READ:
		case SSL_ERROR_WANT_WRITE:
			/* not an error: park the buffer and wait for the next fd event */
			con->is_readable = 0;
			con->ssl_error_want_reuse_buffer = b;

			b = NULL;

			/* we have to steal the buffer from the queue-queue */
			return 0;
		case SSL_ERROR_SYSCALL:
			/**
			 * man SSL_get_error()
			 *
			 * SSL_ERROR_SYSCALL
			 *   Some I/O error occurred.  The OpenSSL error queue may contain more
			 *   information on the error.  If the error queue is empty (i.e.
			 *   ERR_get_error() returns 0), ret can be used to find out more about
			 *   the error: If ret == 0, an EOF was observed that violates the
			 *   protocol.  If ret == -1, the underlying BIO reported an I/O error
			 *   (for socket I/O on Unix systems, consult errno for details).
			 *
			 */
			while((ssl_err = ERR_get_error())) {
				/* get all errors from the error-queue */
				log_error_write(srv, __FILE__, __LINE__, "sds", "SSL:",
						r, ERR_error_string(ssl_err, NULL));
			}

			switch(errno) {
			default:
				log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL:",
						len, r, errno,
						strerror(errno));
				break;
			}

			break;
		case SSL_ERROR_ZERO_RETURN:
			/* clean shutdown on the remote side */

			if (r == 0) {
				/* FIXME: later */
			}

			/* fall thourgh */
		default:
			while((ssl_err = ERR_get_error())) {
				switch (ERR_GET_REASON(ssl_err)) {
				case SSL_R_SSL_HANDSHAKE_FAILURE:
				case SSL_R_TLSV1_ALERT_UNKNOWN_CA:
				case SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN:
				case SSL_R_SSLV3_ALERT_BAD_CERTIFICATE:
					/* scanner/probe noise; only log when configured to */
					if (!con->conf.log_ssl_noise) continue;
					break;
				default:
					break;
				}
				/* get all errors from the error-queue */
				log_error_write(srv, __FILE__, __LINE__, "sds", "SSL:",
						r, ERR_error_string(ssl_err, NULL));
			}
			break;
		}

		connection_set_state(srv, con, CON_STATE_ERROR);

		buffer_free(b);

		return -1;
	} else if (len == 0) {
		con->is_readable = 0;
		/* the other end close the connection -> KEEP-ALIVE */

		/* pipelining */
		buffer_free(b);

		return -2;
	}

	return 0;
#else
	UNUSED(srv);
	UNUSED(con);
	return -1;
#endif
}
/**
 * Drive a connection through its life cycle (legacy variant).
 *
 * Loops over the connection's state until no transition happens anymore,
 * then (re)arms the fd-event interest (FDEVENT_IN/OUT) to match the final
 * state.  Always returns 0.
 *
 * Fixes applied in review:
 *  - CON_STATE_RESPONSE_START: `if (con->http_status = 416)` was an
 *    ASSIGNMENT, forcing every response to status 416 and always resetting
 *    the read queue; changed to the intended comparison `==`.
 *  - `int b` is now zero-initialized: in CON_STATE_CLOSE it is read via
 *    `if (b > 0)` even when ioctl(FIONREAD) fails, which was an
 *    uninitialized read (undefined behavior).
 */
int connection_state_machine(server *srv, connection *con) {
	int done = 0, r;
#ifdef USE_OPENSSL
	server_socket *srv_sock = con->srv_socket;
#endif
	chunkqueue *dst_cq = con->request_content_queue;

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at start",
				con->fd,
				connection_get_state(con->state));
	}

	while (done == 0) {
		size_t ostate = con->state;
		int b = 0; /* FIONREAD byte count; 0 if the ioctl below fails */

		switch (con->state) {
		case CON_STATE_REQUEST_START: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			con->request_start = srv->cur_ts;
			con->read_idle_ts = srv->cur_ts;

			con->request_count++;
			con->loops_per_request = 0;

			connection_set_state(srv, con, CON_STATE_READ);

			/* patch con->conf.is_ssl if the connection is a ssl-socket already */
#ifdef USE_OPENSSL
			con->conf.is_ssl = srv_sock->is_ssl;
#endif
			break;
		case CON_STATE_REQUEST_END: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			if (http_request_parse(srv, con)) {
				/* we have to read some data from the POST request */
				connection_set_state(srv, con, CON_STATE_READ_POST);
				break;
			}

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
			break;
		case CON_STATE_HANDLE_REQUEST:
			/*
			 * the request is parsed
			 *
			 * decided what to do with the request
			 * -
			 *
			 *
			 */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			switch (r = http_response_prepare(srv, con)) {
			case HANDLER_FINISHED:
				if (con->mode == DIRECT) {
					if (con->http_status == 404 ||
					    con->http_status == 403) {
						/* 404 error-handler */

						if (con->in_error_handler == 0 &&
						    (!buffer_is_empty(con->conf.error_handler) ||
						     !buffer_is_empty(con->error_handler))) {
							/* call error-handler */
							con->error_handler_saved_status = con->http_status;
							//log_error_write(srv, __FILE__, __LINE__, "s", "http_status set to 0");
							con->http_status = 0;

							if (buffer_is_empty(con->error_handler)) {
								buffer_copy_string_buffer(con->request.uri, con->conf.error_handler);
							} else {
								buffer_copy_string_buffer(con->request.uri, con->error_handler);
							}
							buffer_reset(con->physical.path);

							con->in_error_handler = 1;

							connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

							done = -1;
							break;
						} else if (con->in_error_handler) {
							/* error-handler is a 404 */
							con->http_status = con->error_handler_saved_status;
						}
					} else if (con->in_error_handler) {
						/* error-handler is back and has generated content */
						/* if Status: was set, take it otherwise use 200 */
					}
				}
				if (con->http_status == 0) con->http_status = 200;

				/* we have something to send, go on */
				connection_set_state(srv, con, CON_STATE_RESPONSE_START);
				break;
			case HANDLER_WAIT_FOR_FD:
				srv->want_fds++;

				fdwaitqueue_append(srv, con);

				connection_set_state(srv, con, CON_STATE_READ_POST);
				break;
			case HANDLER_COMEBACK:
				done = -1;
			case HANDLER_WAIT_FOR_EVENT:
				/* come back here */
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
				break;
			case HANDLER_ERROR:
				/* something went wrong */
				connection_set_state(srv, con, CON_STATE_ERROR);
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sdd", "unknown ret-value: ", con->fd, r);
				break;
			}

			break;
		case CON_STATE_RESPONSE_START:
			/*
			 * the decision is done
			 * - create the HTTP-Response-Header
			 *
			 */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			if (-1 == connection_handle_write_prepare(srv, con)) {
				connection_set_state(srv, con, CON_STATE_ERROR);
				break;
			}

			/* FIX: was `=` (assignment) which made EVERY response a 416
			 * and always discarded the read queue */
			if (con->http_status == 416){
				//log_error_write(srv, __FILE__, __LINE__, "s", "Resetting read_queue");
				chunkqueue_reset(con->read_queue);
			}

			connection_set_state(srv, con, CON_STATE_WRITE);
			break;
		case CON_STATE_RESPONSE_END: /* transient */
			/* log the request */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			plugins_call_handle_request_done(srv, con);

			srv->con_written++;

			if (con->keep_alive) {
				connection_set_state(srv, con, CON_STATE_REQUEST_START);
#if 0
				con->request_start = srv->cur_ts;
				con->read_idle_ts = srv->cur_ts;
#endif
			} else {
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
					break;
				}

#ifdef USE_OPENSSL
				if (srv_sock->is_ssl) {
					switch (SSL_shutdown(con->ssl)) {
					case 1:
						/* done */
						break;
					case 0:
						/* wait for fd-event
						 *
						 * FIXME: wait for fdevent and call SSL_shutdown again
						 *
						 */
						break;
					default:
						log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
								ERR_error_string(ERR_get_error(), NULL));
					}
				}
#endif
				connection_close(srv, con);

				srv->con_closed++;
			}

			connection_reset(srv, con);

			break;
		case CON_STATE_CONNECT:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			/* if (con->read_queue != NULL) {
				chunk *c;
				size_t avail = 0;
				for (c= con->read_queue->first; c != NULL; c = c->next) {
					if(c->type == MEM_CHUNK)
						avail += c->mem->used - c->offset -1;
				}
				log_error_write(srv, __FILE__, __LINE__, "sd", "con->read_queue is not empty!", avail);
			}*/

			chunkqueue_reset(con->read_queue);

			con->request_count = 0;

			break;
		case CON_STATE_CLOSE:
			//log_error_write(srv, __FILE__, __LINE__, "so", "con->range_offset", con->range_offset);
			//log_error_write(srv, __FILE__, __LINE__, "sdsd", "CON_STATE_ERROR: Bytes in", dst_cq->bytes_in, "Bytes out", dst_cq->bytes_out);

			/* Flush out any data that is currently sitting in the chunqueue */
			if ((dst_cq->bytes_in - dst_cq->bytes_out) > 0) {
				log_error_write(srv, __FILE__, __LINE__, "sd", "Flush data, handle remaining chunks", dst_cq->bytes_in);
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
				break;
			}

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			if (con->keep_alive) {
				/* drain whatever the client still sent before closing */
				if (ioctl(con->fd, FIONREAD, &b)) {
					log_error_write(srv, __FILE__, __LINE__, "ss",
							"ioctl() failed", strerror(errno));
				}
				if (b > 0) {
					char buf[1024];
					log_error_write(srv, __FILE__, __LINE__, "sdd",
							"CLOSE-read()", con->fd, b);

					/* */
					read(con->fd, buf, sizeof(buf));
				} else {
					/* nothing to read */

					con->close_timeout_ts = 0;
				}
			} else {
				con->close_timeout_ts = 0;
			}

			if (srv->cur_ts - con->close_timeout_ts > 1) {
				connection_close(srv, con);

				log_error_write(srv, __FILE__, __LINE__, "so", "con->range_offset", con->range_offset);
				log_error_write(srv, __FILE__, __LINE__, "sdsd", "CON_STATE_ERROR: Bytes in", dst_cq->bytes_in, "Bytes out", dst_cq->bytes_out);

				/* Flush out any data that is currently sitting in the chunqueue */
				if ((dst_cq->bytes_in - dst_cq->bytes_out) > 0) {
					log_error_write(srv, __FILE__, __LINE__, "sd", "Flush data, handle remaining chunks", dst_cq->bytes_in);
					connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
					break;
				}

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"connection closed for fd", con->fd);
				}
			}

			break;
		case CON_STATE_READ_POST:
		case CON_STATE_READ:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			connection_handle_read_state(srv, con);
			break;
		case CON_STATE_WRITE:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd,
						connection_get_state(con->state));
			}

			/* only try to write if we have something in the queue */
			if (!chunkqueue_is_empty(con->write_queue)) {
#if 0
				log_error_write(srv, __FILE__, __LINE__, "dsd",
						con->fd,
						"packets to write:",
						con->write_queue->used);
#endif
			}
			if (!chunkqueue_is_empty(con->write_queue) && con->is_writable) {
				if (-1 == connection_handle_write(srv, con)) {
					log_error_write(srv, __FILE__, __LINE__, "ds",
							con->fd,
							"handle write failed.");
					connection_set_state(srv, con, CON_STATE_ERROR);
				} else if (con->state == CON_STATE_WRITE) {
					con->write_request_ts = srv->cur_ts;
				}
			}

			break;
		case CON_STATE_ERROR: /* transient */
			//log_error_write(srv, __FILE__, __LINE__, "so", "con->range_offset", con->range_offset);
			//log_error_write(srv, __FILE__, __LINE__, "sdsd", "CON_STATE_ERROR: Bytes in", dst_cq->bytes_in, "Bytes out", dst_cq->bytes_out);

			/* Flush out any data that is currently sitting in the chunqueue */
			if ((dst_cq->bytes_in - dst_cq->bytes_out) > 0) {
				log_error_write(srv, __FILE__, __LINE__, "sd", "Flush data, handle remaining chunks", dst_cq->bytes_in);
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
				break;
			}

			/* even if the connection was drop we still have to write it to the access log */
			if (con->http_status) {
				plugins_call_handle_request_done(srv, con);
			}
#ifdef USE_OPENSSL
			if (srv_sock->is_ssl) {
				int ret, ssl_r;
				unsigned long err;
				ERR_clear_error();
				switch ((ret = SSL_shutdown(con->ssl))) {
				case 1:
					/* ok */
					break;
				case 0:
					ERR_clear_error();
					if (-1 != (ret = SSL_shutdown(con->ssl))) break;

					/* fall through */
				default:

					switch ((ssl_r = SSL_get_error(con->ssl, ret))) {
					case SSL_ERROR_WANT_WRITE:
					case SSL_ERROR_WANT_READ:
						break;
					case SSL_ERROR_SYSCALL:
						/* perhaps we have error waiting in our error-queue */
						if (0 != (err = ERR_get_error())) {
							do {
								log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
										ssl_r, ret,
										ERR_error_string(err, NULL));
							} while((err = ERR_get_error()));
						} else {
							log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL (error):",
									ssl_r, ret, errno,
									strerror(errno));
						}
						break;
					default:
						while((err = ERR_get_error())) {
							log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
									ssl_r, ret,
									ERR_error_string(err, NULL));
						}
						break;
					}
				}
			}
			ERR_clear_error();
#endif
			switch(con->mode) {
			case DIRECT:
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sd",
						"emergency exit: direct",
						con->fd);
#endif
				break;
			default:
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "");
					break;
				}
				break;
			}

			connection_reset(srv, con);

			/* close the connection */
			if ((con->keep_alive == 1) &&
			    (0 == shutdown(con->fd, SHUT_WR))) {
				con->close_timeout_ts = srv->cur_ts;
				connection_set_state(srv, con, CON_STATE_CLOSE);

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"shutdown for fd", con->fd);
				}
			} else {
				connection_close(srv, con);
			}

			con->keep_alive = 0;

			srv->con_closed++;

			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "sdd",
					"unknown state:", con->fd, con->state);

			break;
		}

		if (done == -1) {
			done = 0;
		} else if (ostate == con->state) {
			done = 1;
		}
	}

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at exit:",
				con->fd,
				connection_get_state(con->state));
	}

	/* (re)arm the fd-event interest to match the final state */
	switch(con->state) {
	case CON_STATE_READ_POST:
	case CON_STATE_READ:
	case CON_STATE_CLOSE:
		fdevent_event_add(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_IN);
		break;
	case CON_STATE_WRITE:
		/* request write-fdevent only if we really need it
		 * - if we have data to write
		 * - if the socket is not writable yet
		 */
		if (!chunkqueue_is_empty(con->write_queue) &&
		    (con->is_writable == 0) &&
		    (con->traffic_limit_reached == 0)) {
			fdevent_event_add(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_OUT);
		} else {
			fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		}
		break;
	default:
		fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		break;
	}

	return 0;
}
handler_t connection_handle_fdevent(void *s, void *context, int revents) { server *srv = (server *)s; connection *con = context; joblist_append(srv, con); if (revents & FDEVENT_IN) { con->is_readable = 1; #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "read-wait - done", con->fd); #endif } if (revents & FDEVENT_OUT) { con->is_writable = 1; /* we don't need the event twice */ } if (revents & ~(FDEVENT_IN | FDEVENT_OUT)) { /* looks like an error */ /* FIXME: revents = 0x19 still means that we should read from the queue */ if (revents & FDEVENT_HUP) { if (con->state == CON_STATE_CLOSE) { con->close_timeout_ts = 0; } else { /* sigio reports the wrong event here * * there was no HUP at all */ #ifdef USE_LINUX_SIGIO if (srv->ev->in_sigio == 1) { log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> HUP", con->fd); } else { connection_set_state(srv, con, CON_STATE_ERROR); } #else connection_set_state(srv, con, CON_STATE_ERROR); #endif } } else if (revents & FDEVENT_ERR) { #ifndef USE_LINUX_SIGIO log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> ERR", con->fd); #endif connection_set_state(srv, con, CON_STATE_ERROR); } else { log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed: poll() -> ???", revents); } } if (con->state == CON_STATE_READ || con->state == CON_STATE_READ_POST) { connection_handle_read_state(srv, con); } if (con->state == CON_STATE_WRITE && !chunkqueue_is_empty(con->write_queue) && con->is_writable) { if (-1 == connection_handle_write(srv, con)) { connection_set_state(srv, con, CON_STATE_ERROR); log_error_write(srv, __FILE__, __LINE__, "ds", con->fd, "handle write failed."); } else if (con->state == CON_STATE_WRITE) { con->write_request_ts = srv->cur_ts; } } if (con->state == CON_STATE_CLOSE) { /* flush the read buffers */ int b; if (ioctl(con->fd, FIONREAD, &b)) { log_error_write(srv, __FILE__, __LINE__, "ss", "ioctl() failed", strerror(errno)); } if (b > 0) { char buf[1024]; 
log_error_write(srv, __FILE__, __LINE__, "sdd", "CLOSE-read()", con->fd, b); /* */ read(con->fd, buf, sizeof(buf)); } else { /* nothing to read */ con->close_timeout_ts = 0; } } return HANDLER_FINISHED; }
/* Open and return the socket. Cf. proxy_establish_connection() in mod_proxy.c.
 *
 * Pooled, non-blocking connect to the backend Lisp process:
 *  - reuse an already-open pooled socket when it is safe,
 *  - otherwise create/register a fresh socket and start connect(),
 *  - on re-entry (after an fd event) finish the connect via getsockopt(SO_ERROR).
 *
 * Returns HANDLER_GO_ON when connected, HANDLER_WAIT_FOR_EVENT while the
 * connect is in progress, HANDLER_WAIT_FOR_FD when the pool is exhausted,
 * HANDLER_FINISHED (with a 503 queued) when the backend is unreachable,
 * HANDLER_ERROR on local failures.
 */
static handler_t lisp_connection_open(server *srv, handler_ctx *hctx) {
	struct sockaddr_in addr;
	int ret, sock;
	plugin_data *p = hctx->plugin;
	connection *con = hctx->connection;

	if (hctx->socket_data) {
		/* re-entry: pick up the socket allocated on a previous call */
		sock = hctx->socket_data->fd;
	} else {
		if (! (hctx->socket_data = mod_lisp_allocate_socket(p))) {
			LOG_ERROR_MAYBE(srv, p, LOGLEVEL_ERR, "Cannot allocate from Lisp socket pool: no free slots");
			return HANDLER_WAIT_FOR_FD;
		}
		LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
				    "Lisp process at %s:%d for %s: allocated fd=%d,"
				    " fde_ndx=%d, state=%d, total socket slot(s) allocated: %d",
				    SPLICE_HOSTPORT(hctx->socket_data),
				    hctx->socket_data->fd, hctx->socket_data->fde_ndx,
				    hctx->socket_data->state, p->LispSocketPoolUsed);
		if ((sock = hctx->socket_data->fd) >= 0) {
			if (FD_STATE_UNSAFE_EV_COUNT(hctx->socket_data->state) > 0) {
				/* pooled socket is in an unsafe state: discard and reconnect */
				fdevent_event_del(srv->ev, SPLICE_FDE(hctx->socket_data));
				fdevent_unregister(srv->ev, sock);
				close(sock);
				srv->cur_fds--;
				LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
						    "Lisp process at %s:%d for %s: close unsafe socket (fd=%d)",
						    SPLICE_HOSTPORT(hctx->socket_data), sock);
				mod_lisp_reset_socket(hctx->socket_data);
				sock = -1;
			} else {
				/* pooled socket is usable as-is */
				return HANDLER_GO_ON;
			}
		}
	}

	/* NOTE(review): this treats fd 0 as "no socket" while the check above
	 * used `>= 0`; confirm fd 0 can never reach this point. */
	if (sock <= 0) {
		if (-1 == (sock = socket(AF_INET, SOCK_STREAM, 0))) {
			LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR, "socket() failed (%s)", strerror(errno));
			return HANDLER_ERROR;
		}
		mod_lisp_reset_socket(hctx->socket_data);
		hctx->socket_data->fd = sock;
		srv->cur_fds++;
		fdevent_register(srv->ev, sock, lisp_handle_fdevent, hctx);
		if (-1 == fdevent_fcntl_set(srv->ev, sock)) {
			LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR, "fcntl() failed (%s)", strerror(errno));
			return HANDLER_ERROR;
		}
		addr.sin_addr.s_addr = inet_addr(hctx->socket_data->ip->ptr);
		addr.sin_port = htons(hctx->socket_data->port);
		addr.sin_family = AF_INET;

		/* Try to connect to Lisp. */
		ret = connect(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_in));
#ifdef WIN32
		if (ret == SOCKET_ERROR) {
			ret = -1;
			errno = WSAGetLastError()-WSABASEERR;
		}
#endif /* WIN32 */
		if (ret == -1 && (errno == EINTR || errno == EINPROGRESS)) {
			/* As soon as something happens on the socket, this function shall
			   be re-entered and follow the getsockopt branch below. */
			fdevent_event_set(srv->ev, SPLICE_FDE(hctx->socket_data), FDEVENT_OUT);
			LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
					    "connection to Lisp process at %s:%d for %s delayed (%s)",
					    SPLICE_HOSTPORT(hctx->socket_data), strerror(errno));
			return HANDLER_WAIT_FOR_EVENT;
		}
	} else {
		int sockerr;
		socklen_t sockerr_len = sizeof(sockerr);

		fdevent_event_del(srv->ev, SPLICE_FDE(hctx->socket_data));

		/* try to finish the connect() */
		if (0 != getsockopt(sock, SOL_SOCKET, SO_ERROR, &sockerr, &sockerr_len)) {
			LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR, "getsockopt() failed (%s)", strerror(errno));
			return HANDLER_ERROR;
		}
		ret = sockerr ? -1 : 0;
	}

	/* Check if we connected */
	if (ret == -1) {
		LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR,
				    "cannot connect socket to Lisp process at %s:%d for %s (%s)",
				    SPLICE_HOSTPORT(hctx->socket_data), strerror(errno));
		hctx->socket_data->fde_ndx = -1;
		mod_lisp_connection_close(srv, con, p);

		/* reset the enviroment and restart the sub-request */
		connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		con->http_status = 503;
		con->mode = DIRECT;
		joblist_append(srv, con);
		return HANDLER_FINISHED;
	}

	hctx->socket_data->state |= (FD_STATE_READ | FD_STATE_WRITE);
	LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
			    "opened socket fd=%d to Lisp process at %s:%d for %s",
			    sock, SPLICE_HOSTPORT(hctx->socket_data));
	return HANDLER_GO_ON;
}
/**
 * fd-event callback for a client connection (modern static variant).
 *
 * Marks the connection readable/writable from the poll result (for SSL
 * any event means both, since TLS may need either direction), advances
 * read/write state, drains a closing socket, and only afterwards maps
 * HUP/ERR to an error state so buffered data is not lost.
 * Always returns HANDLER_FINISHED.
 */
static handler_t connection_handle_fdevent(server *srv, void *context, int revents) {
	connection *con = context;

	joblist_append(srv, con);

	if (con->srv_socket->is_ssl) {
		/* ssl may read and write for both reads and writes */
		if (revents & (FDEVENT_IN | FDEVENT_OUT)) {
			con->is_readable = 1;
			con->is_writable = 1;
		}
	} else {
		if (revents & FDEVENT_IN) {
			con->is_readable = 1;
		}
		if (revents & FDEVENT_OUT) {
			con->is_writable = 1;
			/* we don't need the event twice */
		}
	}

	if (con->state == CON_STATE_READ) {
		connection_handle_read_state(srv, con);
	}

	if (con->state == CON_STATE_WRITE &&
	    !chunkqueue_is_empty(con->write_queue) &&
	    con->is_writable) {

		if (-1 == connection_handle_write(srv, con)) {
			connection_set_state(srv, con, CON_STATE_ERROR);

			log_error_write(srv, __FILE__, __LINE__, "ds",
					con->fd,
					"handle write failed.");
		}
	}

	if (con->state == CON_STATE_CLOSE) {
		/* flush the read buffers */
		connection_read_for_eos(srv, con);
	}

	/* attempt (above) to read data in kernel socket buffers
	 * prior to handling FDEVENT_HUP and FDEVENT_ERR */
	if ((revents & ~(FDEVENT_IN | FDEVENT_OUT)) && con->state != CON_STATE_ERROR) {
		if (con->state == CON_STATE_CLOSE) {
			/* expire the lingering-close timer immediately */
			con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
		} else if (revents & FDEVENT_HUP) {
			if (fdevent_is_tcp_half_closed(con->fd)) {
				/* half-close: client finished sending; response can still go out */
				con->keep_alive = 0;
			} else {
				connection_set_state(srv, con, CON_STATE_ERROR);
			}
		} else if (revents & FDEVENT_ERR) { /* error, connection reset */
			connection_set_state(srv, con, CON_STATE_ERROR);
		} else {
			log_error_write(srv, __FILE__, __LINE__, "sd",
					"connection closed: poll() -> ???", revents);
		}
	}

	return HANDLER_FINISHED;
}
/**
 * Drive a connection through its life cycle (modern variant).
 *
 * Loops until the state stops changing (done flag), delegating the heavy
 * lifting to connection_handle_* helpers, then computes the fd-event
 * interest set (FDEVENT_IN/OUT) from the final state and updates the
 * fdevent registration only if it changed.  Always returns 0.
 */
int connection_state_machine(server *srv, connection *con) {
	int done = 0, r;

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at start",
				con->fd,
				connection_get_state(con->state));
	}

	while (done == 0) {
		size_t ostate = con->state;

		if (srv->srvconf.log_state_handling) {
			log_error_write(srv, __FILE__, __LINE__, "sds",
					"state for fd", con->fd,
					connection_get_state(con->state));
		}

		switch (con->state) {
		case CON_STATE_REQUEST_START: /* transient */
			con->request_start = srv->cur_ts;
			con->read_idle_ts = srv->cur_ts;
			if (con->conf.high_precision_timestamps)
				log_clock_gettime_realtime(&con->request_start_hp);

			con->request_count++;
			con->loops_per_request = 0;

			connection_set_state(srv, con, CON_STATE_READ);
			break;
		case CON_STATE_REQUEST_END: /* transient */
			/* clear per-request URI state before (re)parsing */
			buffer_reset(con->uri.authority);
			buffer_reset(con->uri.path);
			buffer_reset(con->uri.query);
			buffer_reset(con->request.orig_uri);

			if (http_request_parse(srv, con)) {
				/* we have to read some data from the POST request */
				connection_set_state(srv, con, CON_STATE_READ_POST);
				break;
			}

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
			break;
		case CON_STATE_READ_POST:
		case CON_STATE_HANDLE_REQUEST:
			/*
			 * the request is parsed
			 *
			 * decided what to do with the request
			 * -
			 *
			 *
			 */
			switch (r = http_response_prepare(srv, con)) {
			case HANDLER_WAIT_FOR_EVENT:
				if (!con->file_finished && (!con->file_started || 0 == con->conf.stream_response_body)) {
					break; /* come back here */
				}
				/* response headers received from backend; fall through to start response */
			case HANDLER_FINISHED:
				if (con->error_handler_saved_status > 0) {
					con->request.http_method = con->error_handler_saved_method;
				}
				if (con->mode == DIRECT) {
					if (con->error_handler_saved_status) {
						if (con->error_handler_saved_status > 0) {
							con->http_status = con->error_handler_saved_status;
						} else if (con->http_status == 404 || con->http_status == 403) {
							/* error-handler-404 is a 404 */
							con->http_status = -con->error_handler_saved_status;
						} else {
							/* error-handler-404 is back and has generated content */
							/* if Status: was set, take it otherwise use 200 */
						}
					} else if (con->http_status >= 400) {
						buffer *error_handler = NULL;
						if (!buffer_string_is_empty(con->conf.error_handler)) {
							error_handler = con->conf.error_handler;
						} else if ((con->http_status == 404 || con->http_status == 403)
							   && !buffer_string_is_empty(con->conf.error_handler_404)) {
							error_handler = con->conf.error_handler_404;
						}
						if (error_handler) {
							/* call error-handler */

							/* set REDIRECT_STATUS to save current HTTP status code
							 * for access by dynamic handlers
							 * https://redmine.lighttpd.net/issues/1828 */
							data_string *ds;
							if (NULL == (ds = (data_string *)array_get_unused_element(con->environment, TYPE_STRING))) {
								ds = data_string_init();
							}
							buffer_copy_string_len(ds->key, CONST_STR_LEN("REDIRECT_STATUS"));
							buffer_append_int(ds->value, con->http_status);
							array_insert_unique(con->environment, (data_unset *)ds);

							if (error_handler == con->conf.error_handler) {
								/* dynamic error handler: reset request state so the
								 * handler runs as a fresh GET sub-request */
								plugins_call_connection_reset(srv, con);

								if (con->request.content_length) {
									if (con->request.content_length != con->request_content_queue->bytes_in) {
										con->keep_alive = 0;
									}
									con->request.content_length = 0;
									chunkqueue_reset(con->request_content_queue);
								}

								con->is_writable = 1;
								con->file_finished = 0;
								con->file_started = 0;
								con->got_response = 0;
								con->parsed_response = 0;
								con->response.keep_alive = 0;
								con->response.content_length = -1;
								con->response.transfer_encoding = 0;

								con->error_handler_saved_status = con->http_status;
								con->error_handler_saved_method = con->request.http_method;

								con->request.http_method = HTTP_METHOD_GET;
							} else { /*(preserve behavior for server.error-handler-404)*/
								con->error_handler_saved_status = -con->http_status; /*(negative to flag old behavior)*/
							}

							buffer_copy_buffer(con->request.uri, error_handler);
							connection_handle_errdoc_init(srv, con);
							con->http_status = 0; /*(after connection_handle_errdoc_init())*/

							done = -1;
							break;
						}
					}
				}
				if (con->http_status == 0) con->http_status = 200;

				/* we have something to send, go on */
				connection_set_state(srv, con, CON_STATE_RESPONSE_START);
				break;
			case HANDLER_WAIT_FOR_FD:
				srv->want_fds++;

				fdwaitqueue_append(srv, con);

				break;
			case HANDLER_COMEBACK:
				done = -1;
				break;
			case HANDLER_ERROR:
				/* something went wrong */
				connection_set_state(srv, con, CON_STATE_ERROR);
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sdd", "unknown ret-value: ", con->fd, r);
				break;
			}

			/* treat the POST->HANDLE transition as "no change" so the loop
			 * does not terminate before the request is actually handled */
			if (con->state == CON_STATE_HANDLE_REQUEST && ostate == CON_STATE_READ_POST) {
				ostate = CON_STATE_HANDLE_REQUEST;
			}
			break;
		case CON_STATE_RESPONSE_START:
			/*
			 * the decision is done
			 * - create the HTTP-Response-Header
			 *
			 */
			if (-1 == connection_handle_write_prepare(srv, con)) {
				connection_set_state(srv, con, CON_STATE_ERROR);

				break;
			}

			connection_set_state(srv, con, CON_STATE_WRITE);
			break;
		case CON_STATE_RESPONSE_END: /* transient */
		case CON_STATE_ERROR: /* transient */
			connection_handle_response_end_state(srv, con);
			break;
		case CON_STATE_CONNECT:
			chunkqueue_reset(con->read_queue);

			con->request_count = 0;
			break;
		case CON_STATE_CLOSE:
			connection_handle_close_state(srv, con);
			break;
		case CON_STATE_READ:
			connection_handle_read_state(srv, con);
			break;
		case CON_STATE_WRITE:
			do {
				/* only try to write if we have something in the queue */
				if (!chunkqueue_is_empty(con->write_queue)) {
					if (con->is_writable) {
						if (-1 == connection_handle_write(srv, con)) {
							log_error_write(srv, __FILE__, __LINE__, "ds",
									con->fd,
									"handle write failed.");
							connection_set_state(srv, con, CON_STATE_ERROR);
							break;
						}
						if (con->state != CON_STATE_WRITE) break;
					}
				} else if (con->file_finished) {
					connection_set_state(srv, con, CON_STATE_RESPONSE_END);
					break;
				}

				if (con->mode != DIRECT && !con->file_finished) {
					/* let the backend produce more response data */
					switch(r = plugins_call_handle_subrequest(srv, con)) {
					case HANDLER_WAIT_FOR_EVENT:
					case HANDLER_FINISHED:
					case HANDLER_GO_ON:
						break;
					case HANDLER_WAIT_FOR_FD:
						srv->want_fds++;
						fdwaitqueue_append(srv, con);
						break;
					case HANDLER_COMEBACK:
					default:
						log_error_write(srv, __FILE__, __LINE__, "sdd",
								"unexpected subrequest handler ret-value: ",
								con->fd, r);
						/* fall through */
					case HANDLER_ERROR:
						connection_set_state(srv, con, CON_STATE_ERROR);
						break;
					}
				}
			} while (con->state == CON_STATE_WRITE
				 && (!chunkqueue_is_empty(con->write_queue)
				     ? con->is_writable
				     : con->file_finished));
			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "sdd",
					"unknown state:", con->fd, con->state);
			break;
		}

		if (done == -1) {
			done = 0;
		} else if (ostate == con->state) {
			done = 1;
		}
	}

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at exit:",
				con->fd,
				connection_get_state(con->state));
	}

	/* compute the desired fd-event interest set for the final state */
	r = 0;
	switch(con->state) {
	case CON_STATE_READ:
	case CON_STATE_CLOSE:
		r = FDEVENT_IN;
		break;
	case CON_STATE_WRITE:
		/* request write-fdevent only if we really need it
		 * - if we have data to write
		 * - if the socket is not writable yet
		 */
		if (!chunkqueue_is_empty(con->write_queue)
		    && (con->is_writable == 0)
		    && (con->traffic_limit_reached == 0)) {
			r |= FDEVENT_OUT;
		}
		/* fall through */
	case CON_STATE_READ_POST:
		if (con->conf.stream_request_body & FDEVENT_STREAM_REQUEST_POLLIN) {
			r |= FDEVENT_IN;
		}
		break;
	default:
		break;
	}
	if (-1 != con->fd) {
		const int events = fdevent_event_get_interest(srv->ev, con->fd);
		/* is_readable/is_writable < 0 request forced re-polling */
		if (con->is_readable < 0) {
			con->is_readable = 0;
			r |= FDEVENT_IN;
		}
		if (con->is_writable < 0) {
			con->is_writable = 0;
			r |= FDEVENT_OUT;
		}
		if (r != events) {
			/* update timestamps when enabling interest in events */
			if ((r & FDEVENT_IN) && !(events & FDEVENT_IN)) {
				con->read_idle_ts = srv->cur_ts;
			}
			if ((r & FDEVENT_OUT) && !(events & FDEVENT_OUT)) {
				con->write_request_ts = srv->cur_ts;
			}
			fdevent_event_set(srv->ev, &con->fde_ndx, con->fd, r);
		}
	}

	return 0;
}
/* Tear down the CGI side of a connection whose client side has gone away:
 * close the pipe to the CGI child, drop the handler context, and reap or
 * schedule termination of the child process.
 *
 * Returns HANDLER_GO_ON in all normal cases; HANDLER_ERROR only when
 * waitpid() fails for a reason other than EINTR/ECHILD.
 */
static handler_t cgi_connection_close(server *srv, handler_ctx *hctx) {
	int status;
	pid_t pid;
	plugin_data *p;
	connection  *con;

	if (NULL == hctx) return HANDLER_GO_ON;

	p    = hctx->plugin_data;
	con  = hctx->remote_conn;

	/* not our connection mode -> some other module owns it now */
	if (con->mode != p->id) return HANDLER_GO_ON;

#ifndef __WIN32

	/* the connection to the browser went away, but we still have a connection
	 * to the CGI script
	 *
	 * close cgi-connection
	 */

	if (hctx->fd != -1) {
		/* close connection to the cgi-script */
		fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
		fdevent_unregister(srv->ev, hctx->fd);

		if (close(hctx->fd)) {
			log_error_write(srv, __FILE__, __LINE__, "sds", "cgi close failed ", hctx->fd, strerror(errno));
		}

		hctx->fd = -1;
		hctx->fde_ndx = -1;
	}

	pid = hctx->pid;

	con->plugin_ctx[p->id] = NULL;

	/* is this a good idea ? */
	cgi_handler_ctx_free(hctx);

	/* if waitpid hasn't been called by response.c yet, do it here */
	if (pid) {
		/* check if the CGI-script is already gone */
		switch(waitpid(pid, &status, WNOHANG)) {
		case 0:
			/* not finished yet; falls through to kill()/cgi_pid_add() below */
#if 0
			log_error_write(srv, __FILE__, __LINE__, "sd", "(debug) child isn't done yet, pid:", pid);
#endif
			break;
		case -1:
			/* NOTE(review): on EINTR we break out of the switch and still
			 * SIGTERM + queue the pid below, even though its status is
			 * unknown — presumably intentional best-effort cleanup */
			if (errno == EINTR) break;

			/*
			 * errno == ECHILD happens if _subrequest catches the process-status before
			 * we have read the response of the cgi process
			 *
			 * -> catch status
			 * -> WAIT_FOR_EVENT
			 * -> read response
			 * -> we get here with waitpid == ECHILD
			 *
			 */
			if (errno == ECHILD) return HANDLER_GO_ON;

			log_error_write(srv, __FILE__, __LINE__, "ss", "waitpid failed: ", strerror(errno));
			return HANDLER_ERROR;
		default:
			/* child has exited; status was collected above */
			/* Send an error if we haven't sent any data yet */
			if (0 == con->file_started) {
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
				con->http_status = 500;
				con->mode = DIRECT;
			}

			if (WIFEXITED(status)) {
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sd", "(debug) cgi exited fine, pid:", pid);
#endif
				pid = 0;

				return HANDLER_GO_ON;
			} else {
				log_error_write(srv, __FILE__, __LINE__, "sd", "cgi died, pid:", pid);

				pid = 0;

				return HANDLER_GO_ON;
			}
		}

		/* only reached via the "not finished yet" / EINTR breaks above */
		kill(pid, SIGTERM);

		/* cgi-script is still alive, queue the PID for removal */
		cgi_pid_add(srv, p, pid);
	}
#endif
	return HANDLER_GO_ON;
}
/**
 * Accept a pending connection on srv_socket and register it with the
 * fdevent system.
 *
 * Returns the new connection on success, or NULL when nothing usable was
 * accepted (would-block/EINTR/aborted, fd limit, connection limit) or when
 * post-accept setup (non-blocking mode, SSL object) failed.
 *
 * Fix: the original returned NULL on fcntl/SSL setup failures WITHOUT
 * releasing the connection, leaking the accepted fd (still registered with
 * the fdevent system), the connection slot and the srv->cur_fds count.
 * These paths now call connection_close(), matching the error handling of
 * connection_accepted() elsewhere in this file.
 */
connection *connection_accept(server *srv, server_socket *srv_socket) {
	/* accept everything */

	/* search an empty place */
	int cnt;
	sock_addr cnt_addr;
	socklen_t cnt_len;
	/* accept it and register the fd */

	/**
	 * check if we can still open a new connections
	 *
	 * see #1216
	 */

	if (srv->conns->used >= srv->max_conns) {
		return NULL;
	}

	cnt_len = sizeof(cnt_addr);

	if (-1 == (cnt = accept(srv_socket->fd, (struct sockaddr *) &cnt_addr, &cnt_len))) {
		switch (errno) {
		case EAGAIN:
#if EWOULDBLOCK != EAGAIN
		case EWOULDBLOCK:
#endif
		case EINTR:
			/* we were stopped _before_ we had a connection */
		case ECONNABORTED: /* this is a FreeBSD thingy */
			/* we were stopped _after_ we had a connection */
			break;
		case EMFILE:
			/* out of fds */
			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "ssd", "accept failed:", strerror(errno), errno);
		}
		return NULL;
	} else {
		connection *con;

		srv->cur_fds++;

		/* ok, we have the connection, register it */
#if 0
		log_error_write(srv, __FILE__, __LINE__, "sd", "appected()", cnt);
#endif
		srv->con_opened++;

		con = connections_get_new_connection(srv);

		con->fd = cnt;
		con->fde_ndx = -1;
#if 0
		gettimeofday(&(con->start_tv), NULL);
#endif
		fdevent_register(srv->ev, con->fd, connection_handle_fdevent, con);

		connection_set_state(srv, con, CON_STATE_REQUEST_START);

		con->connection_start = srv->cur_ts;
		con->dst_addr = cnt_addr;
		buffer_copy_string(con->dst_addr_buf, inet_ntop_cache_get_ip(srv, &(con->dst_addr)));
		con->srv_socket = srv_socket;

		if (-1 == (fdevent_fcntl_set(srv->ev, con->fd))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));
			connection_close(srv, con); /* release fd + connection slot */
			return NULL;
		}
#ifdef USE_OPENSSL
		/* connect FD to SSL */
		if (srv_socket->is_ssl) {
			if (NULL == (con->ssl = SSL_new(srv_socket->ssl_ctx))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));
				connection_close(srv, con); /* release fd + connection slot */
				return NULL;
			}

			con->renegotiations = 0;
			SSL_set_app_data(con->ssl, con);
			SSL_set_accept_state(con->ssl);

			if (1 != (SSL_set_fd(con->ssl, cnt))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));
				connection_close(srv, con); /* release fd + connection slot */
				return NULL;
			}
		}
#endif
		return con;
	}
}
/* Read available data from the client socket into con->read_queue.
 *
 * Return values: 0: everything ok, -1: error, -2: con closed by peer.
 *
 * Chunk sizing: default chunks are 4kb; a bigger chunk is allocated only
 * when FIONREAD reports more than 4kb pending (capped at MAX_READ_LIMIT).
 * Otherwise the last queue buffer is reused while it has >= 1kb free.
 *
 * NOTE: uses the old buffer API where b->used counts the trailing '\0';
 * the read/recv offset math below compensates for that terminator.
 */
static int connection_handle_read(server *srv, connection *con) {
	int len;
	buffer *b;
	int toread, read_offset;

	/* SSL connections have their own read path */
	if (con->conf.is_ssl) {
		return connection_handle_read_ssl(srv, con);
	}

	/* candidate buffer to append into: the last chunk of the queue, if any */
	b = (NULL != con->read_queue->last) ? con->read_queue->last->mem : NULL;

	/* default size for chunks is 4kb; only use bigger chunks if FIONREAD tells
	 * us more than 4kb is available
	 * if FIONREAD doesn't signal a big chunk we fill the previous buffer
	 * if it has >= 1kb free
	 */
#if defined(__WIN32)
	if (NULL == b || b->size - b->used < 1024) {
		b = chunkqueue_get_append_buffer(con->read_queue);
		buffer_prepare_copy(b, 4 * 1024);
	}

	/* overwrite the existing '\0' terminator, if present */
	read_offset = (b->used == 0) ? 0 : b->used - 1;
	len = recv(con->fd, b->ptr + read_offset, b->size - 1 - read_offset, 0);
#else
	if (ioctl(con->fd, FIONREAD, &toread) || toread == 0 || toread <= 4*1024) {
		if (NULL == b || b->size - b->used < 1024) {
			b = chunkqueue_get_append_buffer(con->read_queue);
			buffer_prepare_copy(b, 4 * 1024);
		}
	} else {
		if (toread > MAX_READ_LIMIT) toread = MAX_READ_LIMIT;
		b = chunkqueue_get_append_buffer(con->read_queue);
		buffer_prepare_copy(b, toread + 1); /* +1 for the '\0' terminator */
	}

	/* overwrite the existing '\0' terminator, if present */
	read_offset = (b->used == 0) ? 0 : b->used - 1;
	len = read(con->fd, b->ptr + read_offset, b->size - 1 - read_offset);
#endif

	if (len < 0) {
		con->is_readable = 0;

		if (errno == EAGAIN) return 0;
		if (errno == EINTR) {
			/* we have been interrupted before we could read */
			con->is_readable = 1;
			return 0;
		}

		if (errno != ECONNRESET) {
			/* expected for keep-alive */
			log_error_write(srv, __FILE__, __LINE__, "ssd", "connection closed - read failed: ", strerror(errno), errno);
		}

		connection_set_state(srv, con, CON_STATE_ERROR);

		return -1;
	} else if (len == 0) {
		con->is_readable = 0;
		/* the other end close the connection -> KEEP-ALIVE */

		/* pipelining */

		return -2;
	} else if ((size_t)len < b->size - 1) {
		/* we got less then expected, wait for the next fd-event */

		con->is_readable = 0;
	}

	/* drop old terminator, account for the new bytes, re-terminate */
	if (b->used > 0) b->used--;
	b->used += len;
	b->ptr[b->used++] = '\0';

	con->bytes_read += len;
#if 0
	dump_packet(b->ptr, len);
#endif

	return 0;
}
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * Reads from the socket (if readable), then scans the read queue for the
 * end-of-header marker "\r\n\r\n".  When found, the header bytes are copied
 * into con->request.request and the state advances to CON_STATE_REQUEST_END.
 * Oversized headers are answered with 431; a peer close without a complete
 * header is an error.
 */
static int connection_handle_read_state(server *srv, connection *con)  {
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */
	/* when in CON_STATE_READ: about to receive first byte for a request: */
	int is_request_start = chunkqueue_is_empty(cq);

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	chunkqueue_remove_finished_chunks(cq);

	/* we might have got several packets at once
	 */

	/* update request_start timestamp when first byte of
	 * next request is received on a keep-alive connection */
	if (con->request_count > 1 && is_request_start) {
		con->request_start = srv->cur_ts;
		if (con->conf.high_precision_timestamps)
			log_clock_gettime_realtime(&con->request_start_hp);
	}

	/* if there is a \r\n\r\n in the chunkqueue
	 *
	 * scan the chunk-queue twice
	 * 1. to find the \r\n\r\n
	 * 2. to copy the header-packet
	 *
	 */

	last_chunk = NULL;
	last_offset = 0;

	for (c = cq->first; c; c = c->next) {
		size_t i;
		size_t len = buffer_string_length(c->mem) - c->offset;
		const char *b = c->mem->ptr + c->offset;

		for (i = 0; i < len; ++i) {
			char ch = b[i];

			if ('\r' == ch) {
				/* check if "\n\r\n" follows, possibly spanning chunk
				 * boundaries (hence the inner walk over cc) */
				size_t j = i+1;
				chunk *cc = c;
				const char header_end[] = "\r\n\r\n";
				int header_end_match_pos = 1;

				for ( ; cc; cc = cc->next, j = 0 ) {
					size_t bblen = buffer_string_length(cc->mem) - cc->offset;
					const char *bb = cc->mem->ptr + cc->offset;

					for ( ; j < bblen; j++) {
						ch = bb[j];

						if (ch == header_end[header_end_match_pos]) {
							header_end_match_pos++;
							if (4 == header_end_match_pos) {
								last_chunk = cc;
								last_offset = j+1;
								goto found_header_end;
							}
						} else {
							goto reset_search;
						}
					}
				}
			}
reset_search: ;
		}
	}
found_header_end:

	/* found: copy everything up to (and including) the marker into
	 * con->request.request and consume it from the queue */
	if (last_chunk) {
		buffer_reset(con->request.request);

		for (c = cq->first; c; c = c->next) {
			size_t len = buffer_string_length(c->mem) - c->offset;

			if (c == last_chunk) {
				len = last_offset;
			}

			buffer_append_string_len(con->request.request, c->mem->ptr + c->offset, len);
			c->offset += len;
			cq->bytes_out += len;

			if (c == last_chunk) break;
		}

		connection_set_state(srv, con, CON_STATE_REQUEST_END);
	} else if (is_closed) {
		/* the connection got closed and we didn't get enough data to leave CON_STATE_READ;
		 * the only way is to leave here */
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	if ((last_chunk ? buffer_string_length(con->request.request) : (size_t)chunkqueue_length(cq)) > srv->srvconf.max_request_field_size) {
		log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 431");
		con->http_status = 431; /* Request Header Fields Too Large */
		con->keep_alive = 0;
		connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
/* Drive a connection through its state transitions until it settles.
 *
 * The loop runs until a pass leaves the state unchanged (done = 1).
 * done = -1 is used by handlers (error-handler restart, HANDLER_COMEBACK)
 * to force one more iteration even though the state did not change.
 * After the loop, fdevent interest (FDEVENT_IN/OUT) is registered to
 * match the final state.  Always returns 0.
 */
int connection_state_machine(server *srv, connection *con) {
	int done = 0, r;
#ifdef USE_OPENSSL
	server_socket *srv_sock = con->srv_socket;
#endif

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds", "state at start", con->fd, connection_get_state(con->state));
	}

	while (done == 0) {
		size_t ostate = con->state; /* state before this pass; loop exits when unchanged */

		switch (con->state) {
		case CON_STATE_REQUEST_START: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			con->request_start = srv->cur_ts;
			con->read_idle_ts = srv->cur_ts;

			con->request_count++;
			con->loops_per_request = 0;

			connection_set_state(srv, con, CON_STATE_READ);

			break;
		case CON_STATE_REQUEST_END: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			buffer_reset(con->uri.authority);
			buffer_reset(con->uri.path);
			buffer_reset(con->uri.query);
			buffer_reset(con->request.orig_uri);

			if (http_request_parse(srv, con)) {
				/* we have to read some data from the POST request */

				connection_set_state(srv, con, CON_STATE_READ_POST);

				break;
			}

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

			break;
		case CON_STATE_HANDLE_REQUEST:
			/*
			 * the request is parsed
			 *
			 * decided what to do with the request
			 * -
			 *
			 *
			 */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			switch (r = http_response_prepare(srv, con)) {
			case HANDLER_FINISHED:
				if (con->mode == DIRECT) {
					if (con->http_status == 404 || con->http_status == 403) {
						/* 404 error-handler */

						if (con->in_error_handler == 0 &&
						    (!buffer_string_is_empty(con->conf.error_handler) ||
						     !buffer_string_is_empty(con->error_handler))) {
							/* call error-handler: restart request
							 * handling with the error-handler URI */

							con->error_handler_saved_status = con->http_status;
							con->http_status = 0;

							if (buffer_string_is_empty(con->error_handler)) {
								buffer_copy_buffer(con->request.uri, con->conf.error_handler);
							} else {
								buffer_copy_buffer(con->request.uri, con->error_handler);
							}
							buffer_reset(con->physical.path);

							con->in_error_handler = 1;

							connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

							/* state unchanged -> force another pass */
							done = -1;
							break;
						} else if (con->in_error_handler) {
							/* error-handler is a 404 */

							con->http_status = con->error_handler_saved_status;
						}
					} else if (con->in_error_handler) {
						/* error-handler is back and has generated content */
						/* if Status: was set, take it otherwise use 200 */
					}
				}
				if (con->http_status == 0) con->http_status = 200;

				/* we have something to send, go on */
				connection_set_state(srv, con, CON_STATE_RESPONSE_START);
				break;
			case HANDLER_WAIT_FOR_FD:
				srv->want_fds++;

				fdwaitqueue_append(srv, con);

				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

				break;
			case HANDLER_COMEBACK:
				done = -1;
				/* fallthrough */
			case HANDLER_WAIT_FOR_EVENT:
				/* come back here */
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

				break;
			case HANDLER_ERROR:
				/* something went wrong */
				connection_set_state(srv, con, CON_STATE_ERROR);
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sdd", "unknown ret-value: ", con->fd, r);
				break;
			}

			break;
		case CON_STATE_RESPONSE_START:
			/*
			 * the decision is done
			 * - create the HTTP-Response-Header
			 *
			 */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			if (-1 == connection_handle_write_prepare(srv, con)) {
				connection_set_state(srv, con, CON_STATE_ERROR);

				break;
			}

			connection_set_state(srv, con, CON_STATE_WRITE);
			break;
		case CON_STATE_RESPONSE_END: /* transient */
			/* log the request */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			plugins_call_handle_request_done(srv, con);

			srv->con_written++;

			if (con->keep_alive) {
				/* start the next request on the same connection */
				connection_set_state(srv, con, CON_STATE_REQUEST_START);
#if 0
				con->request_start = srv->cur_ts;
				con->read_idle_ts = srv->cur_ts;
#endif
			} else {
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
					break;
				}

#ifdef USE_OPENSSL
				if (srv_sock->is_ssl) {
					switch (SSL_shutdown(con->ssl)) {
					case 1:
						/* done */
						break;
					case 0:
						/* wait for fd-event
						 *
						 * FIXME: wait for fdevent and call SSL_shutdown again
						 *
						 */
						break;
					default:
						log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
								ERR_error_string(ERR_get_error(), NULL));
					}
				}
#endif
				/* half-close and linger; full close if shutdown() fails */
				if ((0 == shutdown(con->fd, SHUT_WR))) {
					con->close_timeout_ts = srv->cur_ts;
					connection_set_state(srv, con, CON_STATE_CLOSE);
				} else {
					connection_close(srv, con);
				}

				srv->con_closed++;
			}

			connection_reset(srv, con);

			break;
		case CON_STATE_CONNECT:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			chunkqueue_reset(con->read_queue);

			con->request_count = 0;

			break;
		case CON_STATE_CLOSE:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			/* we have to do the linger_on_close stuff regardless
			 * of con->keep_alive; even non-keepalive sockets may
			 * still have unread data, and closing before reading
			 * it will make the client not see all our output.
			 */
			{
				int len;
				char buf[1024];

				len = read(con->fd, buf, sizeof(buf));
				if (len == 0 || (len < 0 && errno != EAGAIN && errno != EINTR) ) {
					/* peer drained / gone: expire the linger timeout now */
					con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
				}
			}

			if (srv->cur_ts - con->close_timeout_ts > HTTP_LINGER_TIMEOUT) {
				connection_close(srv, con);

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed for fd", con->fd);
				}
			}

			break;
		case CON_STATE_READ_POST:
		case CON_STATE_READ:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			connection_handle_read_state(srv, con);
			break;
		case CON_STATE_WRITE:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds", "state for fd", con->fd, connection_get_state(con->state));
			}

			/* only try to write if we have something in the queue */
			if (!chunkqueue_is_empty(con->write_queue)) {
				if (con->is_writable) {
					if (-1 == connection_handle_write(srv, con)) {
						log_error_write(srv, __FILE__, __LINE__, "ds", con->fd, "handle write failed.");
						connection_set_state(srv, con, CON_STATE_ERROR);
					}
				}
			} else if (con->file_finished) {
				connection_set_state(srv, con, CON_STATE_RESPONSE_END);
			}

			break;
		case CON_STATE_ERROR: /* transient */
			/* even if the connection was drop we still have to write it to the access log */
			if (con->http_status) {
				plugins_call_handle_request_done(srv, con);
			}
#ifdef USE_OPENSSL
			if (srv_sock->is_ssl) {
				int ret, ssl_r;
				unsigned long err;
				ERR_clear_error();
				switch ((ret = SSL_shutdown(con->ssl))) {
				case 1:
					/* ok */
					break;
				case 0:
					ERR_clear_error();
					if (-1 != (ret = SSL_shutdown(con->ssl))) break;

					/* fall through */
				default:

					switch ((ssl_r = SSL_get_error(con->ssl, ret))) {
					case SSL_ERROR_WANT_WRITE:
					case SSL_ERROR_WANT_READ:
						break;
					case SSL_ERROR_SYSCALL:
						/* perhaps we have error waiting in our error-queue */
						if (0 != (err = ERR_get_error())) {
							do {
								log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
										ssl_r, ret,
										ERR_error_string(err, NULL));
							} while((err = ERR_get_error()));
						} else if (errno != 0) { /* ssl bug (see lighttpd ticket #2213): sometimes errno == 0 */
							switch(errno) {
							case EPIPE:
							case ECONNRESET:
								break;
							default:
								log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL (error):",
										ssl_r, ret, errno,
										strerror(errno));
								break;
							}
						}

						break;
					default:
						while((err = ERR_get_error())) {
							log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
									ssl_r, ret,
									ERR_error_string(err, NULL));
						}
						break;
					}
				}
				ERR_clear_error();
			}
#endif

			switch(con->mode) {
			case DIRECT:
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sd", "emergency exit: direct", con->fd);
#endif
				break;
			default:
				/* let the owning module clean up its per-connection state */
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
					break;
				}
				break;
			}

			connection_reset(srv, con);

			/* close the connection */
			if ((0 == shutdown(con->fd, SHUT_WR))) {
				con->close_timeout_ts = srv->cur_ts;
				connection_set_state(srv, con, CON_STATE_CLOSE);

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd", "shutdown for fd", con->fd);
				}
			} else {
				connection_close(srv, con);
			}

			con->keep_alive = 0;

			srv->con_closed++;

			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "sdd", "unknown state:", con->fd, con->state);

			break;
		}

		if (done == -1) {
			done = 0;
		} else if (ostate == con->state) {
			done = 1;
		}
	}

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds", "state at exit:", con->fd, connection_get_state(con->state));
	}

	/* register the fdevent interest matching the final state */
	switch(con->state) {
	case CON_STATE_READ_POST:
	case CON_STATE_READ:
	case CON_STATE_CLOSE:
		fdevent_event_set(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_IN);
		break;
	case CON_STATE_WRITE:
		/* request write-fdevent only if we really need it
		 * - if we have data to write
		 * - if the socket is not writable yet
		 */
		if (!chunkqueue_is_empty(con->write_queue) &&
		    (con->is_writable == 0) &&
		    (con->traffic_limit_reached == 0)) {
			fdevent_event_set(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_OUT);
		} else {
			fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		}
		break;
	default:
		fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		break;
	}

	return 0;
}
int main (int argc, char **argv) { server *srv = NULL; int print_config = 0; int test_config = 0; int i_am_root; int o; int num_childs = 0; int pid_fd = -1, fd; size_t i; #ifdef HAVE_SIGACTION struct sigaction act; #endif #ifdef HAVE_GETRLIMIT struct rlimit rlim; #endif #ifdef USE_ALARM struct itimerval interval; interval.it_interval.tv_sec = 1; interval.it_interval.tv_usec = 0; interval.it_value.tv_sec = 1; interval.it_value.tv_usec = 0; #endif /* for nice %b handling in strfime() */ setlocale(LC_TIME, "C"); if (NULL == (srv = server_init())) { fprintf(stderr, "did this really happen?\n"); return -1; } /* init structs done */ srv->srvconf.port = 0; #ifdef HAVE_GETUID i_am_root = (getuid() == 0); #else i_am_root = 0; #endif srv->srvconf.dont_daemonize = 0; while(-1 != (o = getopt(argc, argv, "f:m:hvVDpt"))) { switch(o) { case 'f': if (srv->config_storage) { log_error_write(srv, __FILE__, __LINE__, "s", "Can only read one config file. Use the include command to use multiple config files."); server_free(srv); return -1; } if (config_read(srv, optarg)) { server_free(srv); return -1; } break; case 'm': buffer_copy_string(srv->srvconf.modules_dir, optarg); break; case 'p': print_config = 1; break; case 't': test_config = 1; break; case 'D': srv->srvconf.dont_daemonize = 1; break; case 'v': show_version(); return 0; case 'V': show_features(); return 0; case 'h': show_help(); return 0; default: show_help(); server_free(srv); return -1; } } if (!srv->config_storage) { log_error_write(srv, __FILE__, __LINE__, "s", "No configuration available. 
Try using -f option."); server_free(srv); return -1; } if (print_config) { data_unset *dc = srv->config_context->data[0]; if (dc) { dc->print(dc, 0); fprintf(stdout, "\n"); } else { /* shouldn't happend */ fprintf(stderr, "global config not found\n"); } } if (test_config) { printf("Syntax OK\n"); } if (test_config || print_config) { server_free(srv); return 0; } /* close stdin and stdout, as they are not needed */ openDevNull(STDIN_FILENO); openDevNull(STDOUT_FILENO); if (0 != config_set_defaults(srv)) { log_error_write(srv, __FILE__, __LINE__, "s", "setting default values failed"); server_free(srv); return -1; } /* UID handling */ #ifdef HAVE_GETUID if (!i_am_root && issetugid()) { /* we are setuid-root */ log_error_write(srv, __FILE__, __LINE__, "s", "Are you nuts ? Don't apply a SUID bit to this binary"); server_free(srv); return -1; } #endif /* check document-root */ if (srv->config_storage[0]->document_root->used <= 1) { log_error_write(srv, __FILE__, __LINE__, "s", "document-root is not set\n"); server_free(srv); return -1; } if (plugins_load(srv)) { log_error_write(srv, __FILE__, __LINE__, "s", "loading plugins finally failed"); plugins_free(srv); server_free(srv); return -1; } /* open pid file BEFORE chroot */ if (srv->srvconf.pid_file->used) { if (-1 == (pid_fd = open(srv->srvconf.pid_file->ptr, O_WRONLY | O_CREAT | O_EXCL | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))) { struct stat st; if (errno != EEXIST) { log_error_write(srv, __FILE__, __LINE__, "sbs", "opening pid-file failed:", srv->srvconf.pid_file, strerror(errno)); return -1; } if (0 != stat(srv->srvconf.pid_file->ptr, &st)) { log_error_write(srv, __FILE__, __LINE__, "sbs", "stating existing pid-file failed:", srv->srvconf.pid_file, strerror(errno)); } if (!S_ISREG(st.st_mode)) { log_error_write(srv, __FILE__, __LINE__, "sb", "pid-file exists and isn't regular file:", srv->srvconf.pid_file); return -1; } if (-1 == (pid_fd = open(srv->srvconf.pid_file->ptr, O_WRONLY | O_CREAT | O_TRUNC, 
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))) { log_error_write(srv, __FILE__, __LINE__, "sbs", "opening pid-file failed:", srv->srvconf.pid_file, strerror(errno)); return -1; } } } if (srv->event_handler == FDEVENT_HANDLER_SELECT) { /* select limits itself * * as it is a hard limit and will lead to a segfault we add some safety * */ srv->max_fds = FD_SETSIZE - 200; } else { srv->max_fds = 4096; } if (i_am_root) { struct group *grp = NULL; struct passwd *pwd = NULL; int use_rlimit = 1; #ifdef HAVE_VALGRIND_VALGRIND_H if (RUNNING_ON_VALGRIND) use_rlimit = 0; #endif #ifdef HAVE_GETRLIMIT if (0 != getrlimit(RLIMIT_NOFILE, &rlim)) { log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't get 'max filedescriptors'", strerror(errno)); return -1; } if (use_rlimit && srv->srvconf.max_fds) { /* set rlimits */ rlim.rlim_cur = srv->srvconf.max_fds; rlim.rlim_max = srv->srvconf.max_fds; if (0 != setrlimit(RLIMIT_NOFILE, &rlim)) { log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't set 'max filedescriptors'", strerror(errno)); return -1; } } if (srv->event_handler == FDEVENT_HANDLER_SELECT) { srv->max_fds = rlim.rlim_cur < ((int)FD_SETSIZE) - 200 ? rlim.rlim_cur : FD_SETSIZE - 200; } else { srv->max_fds = rlim.rlim_cur; } /* set core file rlimit, if enable_cores is set */ if (use_rlimit && srv->srvconf.enable_cores && getrlimit(RLIMIT_CORE, &rlim) == 0) { rlim.rlim_cur = rlim.rlim_max; setrlimit(RLIMIT_CORE, &rlim); } #endif if (srv->event_handler == FDEVENT_HANDLER_SELECT) { /* don't raise the limit above FD_SET_SIZE */ if (srv->max_fds > ((int)FD_SETSIZE) - 200) { log_error_write(srv, __FILE__, __LINE__, "sd", "can't raise max filedescriptors above", FD_SETSIZE - 200, "if event-handler is 'select'. 
Use 'poll' or something else or reduce server.max-fds."); return -1; } } #ifdef HAVE_PWD_H /* set user and group */ if (srv->srvconf.username->used) { if (NULL == (pwd = getpwnam(srv->srvconf.username->ptr))) { log_error_write(srv, __FILE__, __LINE__, "sb", "can't find username", srv->srvconf.username); return -1; } if (pwd->pw_uid == 0) { log_error_write(srv, __FILE__, __LINE__, "s", "I will not set uid to 0\n"); return -1; } } if (srv->srvconf.groupname->used) { if (NULL == (grp = getgrnam(srv->srvconf.groupname->ptr))) { log_error_write(srv, __FILE__, __LINE__, "sb", "can't find groupname", srv->srvconf.groupname); return -1; } if (grp->gr_gid == 0) { log_error_write(srv, __FILE__, __LINE__, "s", "I will not set gid to 0\n"); return -1; } } #endif /* we need root-perms for port < 1024 */ if (0 != network_init(srv)) { plugins_free(srv); server_free(srv); return -1; } #ifdef HAVE_PWD_H /* * Change group before chroot, when we have access * to /etc/group * */ if (NULL != grp) { setgid(grp->gr_gid); setgroups(0, NULL); if (srv->srvconf.username->used) { initgroups(srv->srvconf.username->ptr, grp->gr_gid); } } #endif #ifdef HAVE_CHROOT if (srv->srvconf.changeroot->used) { tzset(); if (-1 == chroot(srv->srvconf.changeroot->ptr)) { log_error_write(srv, __FILE__, __LINE__, "ss", "chroot failed: ", strerror(errno)); return -1; } if (-1 == chdir("/")) { log_error_write(srv, __FILE__, __LINE__, "ss", "chdir failed: ", strerror(errno)); return -1; } } #endif #ifdef HAVE_PWD_H /* drop root privs */ if (NULL != pwd) { setuid(pwd->pw_uid); } #endif #if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_DUMPABLE) /** * on IRIX 6.5.30 they have prctl() but no DUMPABLE */ if (srv->srvconf.enable_cores) { prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); } #endif } else { #ifdef HAVE_GETRLIMIT if (0 != getrlimit(RLIMIT_NOFILE, &rlim)) { log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't get 'max filedescriptors'", strerror(errno)); return -1; } /** * we are not root can can't increase the 
fd-limit, but we can reduce it */ if (srv->srvconf.max_fds && srv->srvconf.max_fds < rlim.rlim_cur) { /* set rlimits */ rlim.rlim_cur = srv->srvconf.max_fds; if (0 != setrlimit(RLIMIT_NOFILE, &rlim)) { log_error_write(srv, __FILE__, __LINE__, "ss", "couldn't set 'max filedescriptors'", strerror(errno)); return -1; } } if (srv->event_handler == FDEVENT_HANDLER_SELECT) { srv->max_fds = rlim.rlim_cur < ((int)FD_SETSIZE) - 200 ? rlim.rlim_cur : FD_SETSIZE - 200; } else { srv->max_fds = rlim.rlim_cur; } /* set core file rlimit, if enable_cores is set */ if (srv->srvconf.enable_cores && getrlimit(RLIMIT_CORE, &rlim) == 0) { rlim.rlim_cur = rlim.rlim_max; setrlimit(RLIMIT_CORE, &rlim); } #endif if (srv->event_handler == FDEVENT_HANDLER_SELECT) { /* don't raise the limit above FD_SET_SIZE */ if (srv->max_fds > ((int)FD_SETSIZE) - 200) { log_error_write(srv, __FILE__, __LINE__, "sd", "can't raise max filedescriptors above", FD_SETSIZE - 200, "if event-handler is 'select'. Use 'poll' or something else or reduce server.max-fds."); return -1; } } if (0 != network_init(srv)) { plugins_free(srv); server_free(srv); return -1; } } /* set max-conns */ if (srv->srvconf.max_conns > srv->max_fds/2) { /* we can't have more connections than max-fds/2 */ log_error_write(srv, __FILE__, __LINE__, "sdd", "can't have more connections than fds/2: ", srv->srvconf.max_conns, srv->max_fds); srv->max_conns = srv->max_fds/2; } else if (srv->srvconf.max_conns) { /* otherwise respect the wishes of the user */ srv->max_conns = srv->srvconf.max_conns; } else { /* or use the default: we really don't want to hit max-fds */ srv->max_conns = srv->max_fds/3; } if (HANDLER_GO_ON != plugins_call_init(srv)) { log_error_write(srv, __FILE__, __LINE__, "s", "Initialization of plugins failed. 
Going down."); plugins_free(srv); network_close(srv); server_free(srv); return -1; } #ifdef HAVE_FORK /* network is up, let's deamonize ourself */ if (srv->srvconf.dont_daemonize == 0) daemonize(); #endif srv->gid = getgid(); srv->uid = getuid(); /* write pid file */ if (pid_fd != -1) { buffer_copy_long(srv->tmp_buf, getpid()); buffer_append_string_len(srv->tmp_buf, CONST_STR_LEN("\n")); write(pid_fd, srv->tmp_buf->ptr, srv->tmp_buf->used - 1); close(pid_fd); pid_fd = -1; } /* Close stderr ASAP in the child process to make sure that nothing * is being written to that fd which may not be valid anymore. */ if (-1 == log_error_open(srv)) { log_error_write(srv, __FILE__, __LINE__, "s", "Opening errorlog failed. Going down."); plugins_free(srv); network_close(srv); server_free(srv); return -1; } if (HANDLER_GO_ON != plugins_call_set_defaults(srv)) { log_error_write(srv, __FILE__, __LINE__, "s", "Configuration of plugins failed. Going down."); plugins_free(srv); network_close(srv); server_free(srv); return -1; } /* dump unused config-keys */ for (i = 0; i < srv->config_context->used; i++) { array *config = ((data_config *)srv->config_context->data[i])->value; size_t j; for (j = 0; config && j < config->used; j++) { data_unset *du = config->data[j]; /* all var.* is known as user defined variable */ if (strncmp(du->key->ptr, "var.", sizeof("var.") - 1) == 0) { continue; } if (NULL == array_get_element(srv->config_touched, du->key->ptr)) { log_error_write(srv, __FILE__, __LINE__, "sbs", "WARNING: unknown config-key:", du->key, "(ignored)"); } } } if (srv->config_unsupported) { log_error_write(srv, __FILE__, __LINE__, "s", "Configuration contains unsupported keys. Going down."); } if (srv->config_deprecated) { log_error_write(srv, __FILE__, __LINE__, "s", "Configuration contains deprecated keys. 
Going down."); } if (srv->config_unsupported || srv->config_deprecated) { plugins_free(srv); network_close(srv); server_free(srv); return -1; } #ifdef HAVE_SIGACTION memset(&act, 0, sizeof(act)); act.sa_handler = SIG_IGN; sigaction(SIGPIPE, &act, NULL); sigaction(SIGUSR1, &act, NULL); # if defined(SA_SIGINFO) act.sa_sigaction = sigaction_handler; sigemptyset(&act.sa_mask); act.sa_flags = SA_SIGINFO; # else act.sa_handler = signal_handler; sigemptyset(&act.sa_mask); act.sa_flags = 0; # endif sigaction(SIGINT, &act, NULL); sigaction(SIGTERM, &act, NULL); sigaction(SIGHUP, &act, NULL); sigaction(SIGALRM, &act, NULL); sigaction(SIGCHLD, &act, NULL); #elif defined(HAVE_SIGNAL) /* ignore the SIGPIPE from sendfile() */ signal(SIGPIPE, SIG_IGN); signal(SIGUSR1, SIG_IGN); signal(SIGALRM, signal_handler); signal(SIGTERM, signal_handler); signal(SIGHUP, signal_handler); signal(SIGCHLD, signal_handler); signal(SIGINT, signal_handler); #endif #ifdef USE_ALARM signal(SIGALRM, signal_handler); /* setup periodic timer (1 second) */ if (setitimer(ITIMER_REAL, &interval, NULL)) { log_error_write(srv, __FILE__, __LINE__, "s", "setting timer failed"); return -1; } getitimer(ITIMER_REAL, &interval); #endif #ifdef HAVE_FORK /* start watcher and workers */ num_childs = srv->srvconf.max_worker; if (num_childs > 0) { int child = 0; while (!child && !srv_shutdown && !graceful_shutdown) { if (num_childs > 0) { switch (fork()) { case -1: return -1; case 0: child = 1; break; default: num_childs--; break; } } else { int status; if (-1 != wait(&status)) { /** * one of our workers went away */ num_childs++; } else { switch (errno) { case EINTR: /** * if we receive a SIGHUP we have to close our logs ourself as we don't * have the mainloop who can help us here */ if (handle_sig_hup) { handle_sig_hup = 0; log_error_cycle(srv); /** * forward to all procs in the process-group * * we also send it ourself */ if (!forwarded_sig_hup) { forwarded_sig_hup = 1; kill(0, SIGHUP); } } break; default: break; } } 
} } /** * for the parent this is the exit-point */ if (!child) { /** * kill all children too */ if (graceful_shutdown) { kill(0, SIGINT); } else if (srv_shutdown) { kill(0, SIGTERM); } log_error_close(srv); network_close(srv); connections_free(srv); plugins_free(srv); server_free(srv); return 0; } } #endif if (NULL == (srv->ev = fdevent_init(srv, srv->max_fds + 1, srv->event_handler))) { log_error_write(srv, __FILE__, __LINE__, "s", "fdevent_init failed"); return -1; } /* libev backend overwrites our SIGCHLD handler and calls waitpid on SIGCHLD; we want our own SIGCHLD handling. */ #ifdef HAVE_SIGACTION sigaction(SIGCHLD, &act, NULL); #elif defined(HAVE_SIGNAL) signal(SIGCHLD, signal_handler); #endif /* * kqueue() is called here, select resets its internals, * all server sockets get their handlers * * */ if (0 != network_register_fdevents(srv)) { plugins_free(srv); network_close(srv); server_free(srv); return -1; } /* might fail if user is using fam (not gamin) and famd isn't running */ if (NULL == (srv->stat_cache = stat_cache_init())) { log_error_write(srv, __FILE__, __LINE__, "s", "stat-cache could not be setup, dieing."); return -1; } #ifdef HAVE_FAM_H /* setup FAM */ if (srv->srvconf.stat_cache_engine == STAT_CACHE_ENGINE_FAM) { if (0 != FAMOpen2(srv->stat_cache->fam, "lighttpd")) { log_error_write(srv, __FILE__, __LINE__, "s", "could not open a fam connection, dieing."); return -1; } #ifdef HAVE_FAMNOEXISTS FAMNoExists(srv->stat_cache->fam); #endif srv->stat_cache->fam_fcce_ndx = -1; fdevent_register(srv->ev, FAMCONNECTION_GETFD(srv->stat_cache->fam), stat_cache_handle_fdevent, NULL); fdevent_event_set(srv->ev, &(srv->stat_cache->fam_fcce_ndx), FAMCONNECTION_GETFD(srv->stat_cache->fam), FDEVENT_IN); } #endif /* get the current number of FDs */ srv->cur_fds = open("/dev/null", O_RDONLY); close(srv->cur_fds); for (i = 0; i < srv->srv_sockets.used; i++) { server_socket *srv_socket = srv->srv_sockets.ptr[i]; if (-1 == fdevent_fcntl_set(srv->ev, srv_socket->fd)) { 
log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed:", strerror(errno)); return -1; } } /* main-loop */ while (!srv_shutdown) { int n; size_t ndx; time_t min_ts; if (handle_sig_hup) { handler_t r; /* reset notification */ handle_sig_hup = 0; /* cycle logfiles */ switch(r = plugins_call_handle_sighup(srv)) { case HANDLER_GO_ON: break; default: log_error_write(srv, __FILE__, __LINE__, "sd", "sighup-handler return with an error", r); break; } if (-1 == log_error_cycle(srv)) { log_error_write(srv, __FILE__, __LINE__, "s", "cycling errorlog failed, dying"); return -1; } else { #ifdef HAVE_SIGACTION log_error_write(srv, __FILE__, __LINE__, "sdsd", "logfiles cycled UID =", last_sighup_info.si_uid, "PID =", last_sighup_info.si_pid); #else log_error_write(srv, __FILE__, __LINE__, "s", "logfiles cycled"); #endif } } if (handle_sig_alarm) { /* a new second */ #ifdef USE_ALARM /* reset notification */ handle_sig_alarm = 0; #endif /* get current time */ min_ts = time(NULL); if (min_ts != srv->cur_ts) { int cs = 0; connections *conns = srv->conns; handler_t r; switch(r = plugins_call_handle_trigger(srv)) { case HANDLER_GO_ON: break; case HANDLER_ERROR: log_error_write(srv, __FILE__, __LINE__, "s", "one of the triggers failed"); break; default: log_error_write(srv, __FILE__, __LINE__, "d", r); break; } /* trigger waitpid */ srv->cur_ts = min_ts; /* cleanup stat-cache */ stat_cache_trigger_cleanup(srv); /** * check all connections for timeouts * */ for (ndx = 0; ndx < conns->used; ndx++) { int changed = 0; connection *con; int t_diff; con = conns->ptr[ndx]; if (con->state == CON_STATE_READ || con->state == CON_STATE_READ_POST) { if (con->request_count == 1) { if (srv->cur_ts - con->read_idle_ts > con->conf.max_read_idle) { /* time - out */ #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed - read-timeout:", con->fd); #endif connection_set_state(srv, con, CON_STATE_ERROR); changed = 1; } } else { if (srv->cur_ts - con->read_idle_ts > 
con->keep_alive_idle) { /* time - out */ #if 0 log_error_write(srv, __FILE__, __LINE__, "sd", "connection closed - read-timeout:", con->fd); #endif connection_set_state(srv, con, CON_STATE_ERROR); changed = 1; } } } if ((con->state == CON_STATE_WRITE) && (con->write_request_ts != 0)) { #if 0 if (srv->cur_ts - con->write_request_ts > 60) { log_error_write(srv, __FILE__, __LINE__, "sdd", "connection closed - pre-write-request-timeout:", con->fd, srv->cur_ts - con->write_request_ts); } #endif if (srv->cur_ts - con->write_request_ts > con->conf.max_write_idle) { /* time - out */ if (con->conf.log_timeouts) { log_error_write(srv, __FILE__, __LINE__, "sbsosds", "NOTE: a request for", con->request.uri, "timed out after writing", con->bytes_written, "bytes. We waited", (int)con->conf.max_write_idle, "seconds. If this a problem increase server.max-write-idle"); } connection_set_state(srv, con, CON_STATE_ERROR); changed = 1; } } if (con->state == CON_STATE_CLOSE && (srv->cur_ts - con->close_timeout_ts > HTTP_LINGER_TIMEOUT)) { changed = 1; } /* we don't like div by zero */ if (0 == (t_diff = srv->cur_ts - con->connection_start)) t_diff = 1; if (con->traffic_limit_reached && (con->conf.kbytes_per_second == 0 || ((con->bytes_written / t_diff) < con->conf.kbytes_per_second * 1024))) { /* enable connection again */ con->traffic_limit_reached = 0; changed = 1; } if (changed) { connection_state_machine(srv, con); } con->bytes_written_cur_second = 0; *(con->conf.global_bytes_per_second_cnt_ptr) = 0; #if 0 if (cs == 0) { fprintf(stderr, "connection-state: "); cs = 1; } fprintf(stderr, "c[%d,%d]: %s ", con->fd, con->fcgi.fd, connection_get_state(con->state)); #endif } if (cs == 1) fprintf(stderr, "\n"); } } if (srv->sockets_disabled) { /* our server sockets are disabled, why ? 
*/ if ((srv->cur_fds + srv->want_fds < srv->max_fds * 8 / 10) && /* we have enough unused fds */ (srv->conns->used <= srv->max_conns * 9 / 10) && (0 == graceful_shutdown)) { for (i = 0; i < srv->srv_sockets.used; i++) { server_socket *srv_socket = srv->srv_sockets.ptr[i]; fdevent_event_set(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd, FDEVENT_IN); } log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets enabled again"); srv->sockets_disabled = 0; } } else { if ((srv->cur_fds + srv->want_fds > srv->max_fds * 9 / 10) || /* out of fds */ (srv->conns->used >= srv->max_conns) || /* out of connections */ (graceful_shutdown)) { /* graceful_shutdown */ /* disable server-fds */ for (i = 0; i < srv->srv_sockets.used; i++) { server_socket *srv_socket = srv->srv_sockets.ptr[i]; fdevent_event_del(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd); if (graceful_shutdown) { /* we don't want this socket anymore, * * closing it right away will make it possible for * the next lighttpd to take over (graceful restart) * */ fdevent_unregister(srv->ev, srv_socket->fd); close(srv_socket->fd); srv_socket->fd = -1; /* network_close() will cleanup after us */ if (srv->srvconf.pid_file->used && srv->srvconf.changeroot->used == 0) { if (0 != unlink(srv->srvconf.pid_file->ptr)) { if (errno != EACCES && errno != EPERM) { log_error_write(srv, __FILE__, __LINE__, "sbds", "unlink failed for:", srv->srvconf.pid_file, errno, strerror(errno)); } } } } } if (graceful_shutdown) { log_error_write(srv, __FILE__, __LINE__, "s", "[note] graceful shutdown started"); } else if (srv->conns->used >= srv->max_conns) { log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets disabled, connection limit reached"); } else { log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets disabled, out-of-fds"); } srv->sockets_disabled = 1; } } if (graceful_shutdown && srv->conns->used == 0) { /* we are in graceful shutdown phase and all connections are closed * we are ready to terminate without harming 
anyone */ srv_shutdown = 1; } /* we still have some fds to share */ if (srv->want_fds) { /* check the fdwaitqueue for waiting fds */ int free_fds = srv->max_fds - srv->cur_fds - 16; connection *con; for (; free_fds > 0 && NULL != (con = fdwaitqueue_unshift(srv, srv->fdwaitqueue)); free_fds--) { connection_state_machine(srv, con); srv->want_fds--; } } if ((n = fdevent_poll(srv->ev, 1000)) > 0) { /* n is the number of events */ int revents; int fd_ndx; #if 0 if (n > 0) { log_error_write(srv, __FILE__, __LINE__, "sd", "polls:", n); } #endif fd_ndx = -1; do { fdevent_handler handler; void *context; handler_t r; fd_ndx = fdevent_event_next_fdndx (srv->ev, fd_ndx); if (-1 == fd_ndx) break; /* not all fdevent handlers know how many fds got an event */ revents = fdevent_event_get_revent (srv->ev, fd_ndx); fd = fdevent_event_get_fd (srv->ev, fd_ndx); handler = fdevent_get_handler(srv->ev, fd); context = fdevent_get_context(srv->ev, fd); /* connection_handle_fdevent needs a joblist_append */ #if 0 log_error_write(srv, __FILE__, __LINE__, "sdd", "event for", fd, revents); #endif switch (r = (*handler)(srv, context, revents)) { case HANDLER_FINISHED: case HANDLER_GO_ON: case HANDLER_WAIT_FOR_EVENT: case HANDLER_WAIT_FOR_FD: break; case HANDLER_ERROR: /* should never happen */ SEGFAULT(); break; default: log_error_write(srv, __FILE__, __LINE__, "d", r); break; } } while (--n > 0); } else if (n < 0 && errno != EINTR) { log_error_write(srv, __FILE__, __LINE__, "ss", "fdevent_poll failed:", strerror(errno)); } for (ndx = 0; ndx < srv->joblist->used; ndx++) { connection *con = srv->joblist->ptr[ndx]; handler_t r; connection_state_machine(srv, con); switch(r = plugins_call_handle_joblist(srv, con)) { case HANDLER_FINISHED: case HANDLER_GO_ON: break; default: log_error_write(srv, __FILE__, __LINE__, "d", r); break; } con->in_joblist = 0; } srv->joblist->used = 0; } if (srv->srvconf.pid_file->used && srv->srvconf.changeroot->used == 0 && 0 == graceful_shutdown) { if (0 != 
unlink(srv->srvconf.pid_file->ptr)) { if (errno != EACCES && errno != EPERM) { log_error_write(srv, __FILE__, __LINE__, "sbds", "unlink failed for:", srv->srvconf.pid_file, errno, strerror(errno)); } } } #ifdef HAVE_SIGACTION log_error_write(srv, __FILE__, __LINE__, "sdsd", "server stopped by UID =", last_sigterm_info.si_uid, "PID =", last_sigterm_info.si_pid); #else log_error_write(srv, __FILE__, __LINE__, "s", "server stopped"); #endif /* clean-up */ log_error_close(srv); network_close(srv); connections_free(srv); plugins_free(srv); server_free(srv); return 0; }
/**
 * read from an SSL socket into con->read_queue.
 *
 * returns  0 on success (or WANT_READ/WANT_WRITE; caller retries on next event),
 *         -1 on error (connection moved to CON_STATE_ERROR, or not an ssl socket),
 *         -2 on clean remote close (EOF).
 */
static int connection_handle_read_ssl(server *srv, connection *con) {
#ifdef USE_OPENSSL
	int r, ssl_err, len, count = 0;
	char *mem = NULL;
	size_t mem_len = 0;

	if (!con->srv_socket->is_ssl) return -1;

	/* clear the queue before the loop so SSL_get_error() below only sees
	 * errors raised by *this* read */
	ERR_clear_error();
	do {
		/* size the buffer with SSL_pending() so already-decrypted bytes
		 * can be drained in one SSL_read() call */
		chunkqueue_get_memory(con->read_queue, &mem, &mem_len, 0, SSL_pending(con->ssl));
#if 0
		/* overwrite everything with 0 */
		memset(mem, 0, mem_len);
#endif

		len = SSL_read(con->ssl, mem, mem_len);
		/* only commit the bytes actually read; on error commit nothing */
		chunkqueue_use_memory(con->read_queue, len > 0 ? len : 0);

		/* con->renegotiations is incremented by the SSL info callback;
		 * a client-initiated renegotiation is treated as an attack
		 * (CVE-2009-3555 era mitigation) */
		if (con->renegotiations > 1 && con->conf.ssl_disable_client_renegotiation) {
			log_error_write(srv, __FILE__, __LINE__, "s", "SSL: renegotiation initiated by client, killing connection");
			connection_set_state(srv, con, CON_STATE_ERROR);
			return -1;
		}

		if (len > 0) {
			con->bytes_read += len;
			count += len;
		}
		/* keep reading while the buffer was filled completely (more data
		 * is likely pending), but cap at MAX_READ_LIMIT per event so one
		 * connection cannot starve the others */
	} while (len == (ssize_t) mem_len && count < MAX_READ_LIMIT);

	if (len < 0) {
		/* save errno before any call that may clobber it */
		int oerrno = errno;

		switch ((r = SSL_get_error(con->ssl, len))) {
		case SSL_ERROR_WANT_READ:
		case SSL_ERROR_WANT_WRITE:
			con->is_readable = 0;

			/* the manual says we have to call SSL_read with the same arguments next time.
			 * we ignore this restriction; no one has complained about it in 1.5 yet, so it probably works anyway.
			 */

			return 0;
		case SSL_ERROR_SYSCALL:
			/**
			 * man SSL_get_error()
			 *
			 * SSL_ERROR_SYSCALL
			 *   Some I/O error occurred.  The OpenSSL error queue may contain more
			 *   information on the error.  If the error queue is empty (i.e.
			 *   ERR_get_error() returns 0), ret can be used to find out more about
			 *   the error: If ret == 0, an EOF was observed that violates the
			 *   protocol.  If ret == -1, the underlying BIO reported an I/O error
			 *   (for socket I/O on Unix systems, consult errno for details).
			 *
			 */
			while((ssl_err = ERR_get_error())) {
				/* get all errors from the error-queue */
				log_error_write(srv, __FILE__, __LINE__, "sds", "SSL:",
						r, ERR_error_string(ssl_err, NULL));
			}

			switch(oerrno) {
			default:
				log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL:",
						len, r, oerrno,
						strerror(oerrno));
				break;
			}

			break;
		case SSL_ERROR_ZERO_RETURN:
			/* clean shutdown on the remote side */

			if (r == 0) {
				/* FIXME: later */
			}

			/* fall through */
		default:
			while((ssl_err = ERR_get_error())) {
				/* suppress common client-side handshake noise unless
				 * explicitly requested via ssl.log-noise */
				switch (ERR_GET_REASON(ssl_err)) {
				case SSL_R_SSL_HANDSHAKE_FAILURE:
				case SSL_R_TLSV1_ALERT_UNKNOWN_CA:
				case SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN:
				case SSL_R_SSLV3_ALERT_BAD_CERTIFICATE:
					if (!con->conf.log_ssl_noise) continue;
					break;
				default:
					break;
				}
				/* get all errors from the error-queue */
				log_error_write(srv, __FILE__, __LINE__, "sds", "SSL:",
				                r, ERR_error_string(ssl_err, NULL));
			}
			break;
		}

		connection_set_state(srv, con, CON_STATE_ERROR);

		return -1;
	} else if (len == 0) {
		con->is_readable = 0;
		/* the other end close the connection -> KEEP-ALIVE */

		return -2;
	} else {
		/* data was read; schedule the connection for state-machine work */
		joblist_append(srv, con);
	}

	return 0;
#else
	UNUSED(srv);
	UNUSED(con);

	return -1;
#endif
}
/**
 * fdevent callback for the CGI child's output pipe.
 *
 * s   is the server, ctx the per-request handler_ctx; revents carries the
 * FDEVENT_* flags for hctx->fd (the pipe from the CGI process).
 *
 * Order matters: FDEVENT_IN is drained before FDEVENT_HUP is handled, so a
 * combined IN|HUP event reads remaining data first; a FINISHED demux result
 * returns early so cgi_connection_close() is not executed twice.
 */
static handler_t cgi_handle_fdevent(void *s, void *ctx, int revents) {
	server      *srv  = (server *)s;
	handler_ctx *hctx = ctx;
	connection  *con  = hctx->remote_conn;

	/* make sure the connection's state machine runs after this event */
	joblist_append(srv, con);

	if (hctx->fd == -1) {
		log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), "invalid cgi-fd");

		return HANDLER_ERROR;
	}

	if (revents & FDEVENT_IN) {
		switch (cgi_demux_response(srv, hctx)) {
		case FDEVENT_HANDLED_NOT_FINISHED:
			break;
		case FDEVENT_HANDLED_FINISHED:
			/* we are done */

#if 0
			log_error_write(srv, __FILE__, __LINE__, "ddss", con->fd, hctx->fd, connection_get_state(con->state), "finished");
#endif
			cgi_connection_close(srv, hctx);

			/* if we get a IN|HUP and have read everything don't exec the close twice */
			return HANDLER_FINISHED;
		case FDEVENT_HANDLED_ERROR:
			/* demux failed -> answer with a plain 500 generated by the core */
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
			con->http_status = 500;
			con->mode = DIRECT;

			log_error_write(srv, __FILE__, __LINE__, "s", "demuxer failed: ");
			break;
		}
	}

	if (revents & FDEVENT_OUT) {
		/* nothing to do */
	}

	/* perhaps this issue is already handled */
	if (revents & FDEVENT_HUP) {
		/* check if we still have a unfinished header package which is a body in reality */
		if (con->file_started == 0 && hctx->response_header->used) {
			/* the CGI never sent a blank line: treat the buffered "header"
			 * bytes as the response body */
			con->file_started = 1;
			http_chunk_append_mem(srv, con, hctx->response_header->ptr, hctx->response_header->used);
			joblist_append(srv, con);
		}

		if (con->file_finished == 0) {
			/* terminate the chunked response */
			http_chunk_append_mem(srv, con, NULL, 0);
			joblist_append(srv, con);
		}
		con->file_finished = 1;

		if (chunkqueue_is_empty(con->write_queue)) {
			/* there is nothing left to write */
			connection_set_state(srv, con, CON_STATE_RESPONSE_END);
		} else {
			/* used the write-handler to finish the request on demand */
		}

# if 0
		log_error_write(srv, __FILE__, __LINE__, "sddd", "got HUP from cgi", con->fd, hctx->fd, revents);
# endif

		/* rtsigs didn't liked the close */
		cgi_connection_close(srv, hctx);
	} else if (revents & FDEVENT_ERR) {
		con->file_finished = 1;

		/* kill all connections to the cgi process */
		cgi_connection_close(srv, hctx);
#if 1
		log_error_write(srv, __FILE__, __LINE__, "s", "cgi-FDEVENT_ERR");
#endif
		return HANDLER_ERROR;
	}

	return HANDLER_FINISHED;
}
/**
 * accept a new connection on a listening socket.
 *
 * returns the new connection, or NULL if nothing was accepted (transient
 * accept() errors, connection limit reached) or setup failed.
 *
 * FIX: the failure paths after connections_get_new_connection() previously
 * returned NULL *without* releasing the connection, leaking the accepted fd,
 * the registered fdevent handler and the srv->cur_fds/srv->con_opened
 * accounting.  They now call connection_close() first, matching the pattern
 * used by connection_accepted().
 */
connection *connection_accept(server *srv, server_socket *srv_socket) {
	/* accept everything */

	/* search an empty place */
	int cnt;
	sock_addr cnt_addr;
#ifndef HAVE_LIBMTCP
	socklen_t cnt_len;
#endif
	/* accept it and register the fd */

	/**
	 * check if we can still open a new connections
	 *
	 * see #1216
	 */

	if (srv->conns->used >= srv->max_conns) {
		return NULL;
	}

#ifdef HAVE_LIBMTCP
	if (-1 == (cnt = mtcp_accept(srv->mctx, srv_socket->fd, NULL, NULL))) {
#else
	cnt_len = sizeof(cnt_addr);

	if (-1 == (cnt = accept(srv_socket->fd, (struct sockaddr *) &cnt_addr, &cnt_len))) {
#endif
		switch (errno) {
		case EAGAIN:
#if EWOULDBLOCK != EAGAIN
		case EWOULDBLOCK:
#endif
		case EINTR:
			/* we were stopped _before_ we had a connection */
		case ECONNABORTED: /* this is a FreeBSD thingy */
			/* we were stopped _after_ we had a connection */
			break;
		case EMFILE:
			/* out of fds */
			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "ssd", "accept failed:", strerror(errno), errno);
		}
		return NULL;
	} else {
		connection *con;

		srv->cur_fds++;

		/* ok, we have the connection, register it */
#if 0
		log_error_write(srv, __FILE__, __LINE__, "sd", "appected()", cnt);
#endif
		srv->con_opened++;

		con = connections_get_new_connection(srv);

		con->fd = cnt;
		con->fde_ndx = -1;
#if 0
		gettimeofday(&(con->start_tv), NULL);
#endif
		fdevent_register(srv->ev, con->fd, connection_handle_fdevent, con);
		connection_set_state(srv, con, CON_STATE_REQUEST_START);

		con->connection_start = srv->cur_ts;
		con->dst_addr = cnt_addr;
		buffer_copy_string(con->dst_addr_buf, inet_ntop_cache_get_ip(srv, &(con->dst_addr)));
		con->srv_socket = srv_socket;

		if (-1 == (fdevent_fcntl_set(srv->ev, con->fd))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));
			/* release the connection, fd and fd-accounting (was leaked before) */
			connection_close(srv, con);
			return NULL;
		}
#ifdef USE_OPENSSL
		/* connect FD to SSL */
		if (srv_socket->is_ssl) {
			if (NULL == (con->ssl = SSL_new(srv_socket->ssl_ctx))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));
				/* release the connection (was leaked before) */
				connection_close(srv, con);
				return NULL;
			}

			con->renegotiations = 0;
			SSL_set_app_data(con->ssl, con);
			SSL_set_accept_state(con->ssl);

			con->conf.is_ssl=1;

			if (1 != (SSL_set_fd(con->ssl, cnt))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));
				/* release the connection (was leaked before) */
				connection_close(srv, con);
				return NULL;
			}
		}
#endif
		return con;
	}
}

/**
 * drive a connection through its state machine until it settles.
 *
 * Loops over con->state, executing the work for the current state; a state
 * change keeps the loop running (transient states), while reaching the same
 * state twice means the connection is waiting on I/O and the loop stops.
 * On exit the fdevent interest set is updated to match the final state.
 *
 * always returns 0.
 */
int connection_state_machine(server *srv, connection *con) {
	int done = 0, r;
#ifdef USE_OPENSSL
	server_socket *srv_sock = con->srv_socket;
#endif

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at start",
				con->fd,
				connection_get_state(con->state));
	}

	while (done == 0) {
		size_t ostate = con->state;

		switch (con->state) {
		case CON_STATE_REQUEST_START: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			con->request_start = srv->cur_ts;
			con->read_idle_ts = srv->cur_ts;

			con->request_count++;
			con->loops_per_request = 0;

			connection_set_state(srv, con, CON_STATE_READ);

			/* patch con->conf.is_ssl if the connection is a ssl-socket already */
#ifdef USE_OPENSSL
			con->conf.is_ssl = srv_sock->is_ssl;
#endif

			break;
		case CON_STATE_REQUEST_END: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			buffer_reset(con->uri.authority);
			buffer_reset(con->uri.path);
			buffer_reset(con->uri.query);
			buffer_reset(con->request.orig_uri);

			if (http_request_parse(srv, con)) {
				/* we have to read some data from the POST request */

				connection_set_state(srv, con, CON_STATE_READ_POST);

				break;
			}

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

			break;
		case CON_STATE_HANDLE_REQUEST:
			/*
			 * the request is parsed
			 *
			 * decided what to do with the request
			 * -
			 *
			 *
			 */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			switch (r = http_response_prepare(srv, con)) {
			case HANDLER_FINISHED:
				if (con->mode == DIRECT) {
					if (con->http_status == 404 ||
					    con->http_status == 403) {
						/* 404 error-handler */

						if (con->in_error_handler == 0 &&
						    (!buffer_is_empty(con->conf.error_handler) ||
						     !buffer_is_empty(con->error_handler))) {
							/* call error-handler */

							con->error_handler_saved_status = con->http_status;
							con->http_status = 0;

							if (buffer_is_empty(con->error_handler)) {
								buffer_copy_string_buffer(con->request.uri, con->conf.error_handler);
							} else {
								buffer_copy_string_buffer(con->request.uri, con->error_handler);
							}
							buffer_reset(con->physical.path);

							con->in_error_handler = 1;

							/* restart the request so the error-handler URI is resolved */
							connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

							done = -1;
							break;
						} else if (con->in_error_handler) {
							/* error-handler is a 404 */

							con->http_status = con->error_handler_saved_status;
						}
					} else if (con->in_error_handler) {
						/* error-handler is back and has generated content */
						/* if Status: was set, take it otherwise use 200 */
					}
				}
				if (con->http_status == 0) con->http_status = 200;

				/* we have something to send, go on */
				connection_set_state(srv, con, CON_STATE_RESPONSE_START);
				break;
			case HANDLER_WAIT_FOR_FD:
				srv->want_fds++;

				fdwaitqueue_append(srv, con);

				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

				break;
			case HANDLER_COMEBACK:
				done = -1;
				/* fall through */
			case HANDLER_WAIT_FOR_EVENT:
				/* come back here */
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

				break;
			case HANDLER_ERROR:
				/* something went wrong */
				connection_set_state(srv, con, CON_STATE_ERROR);

				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sdd", "unknown ret-value: ", con->fd, r);
				break;
			}

			break;
		case CON_STATE_RESPONSE_START:
			/*
			 * the decision is done
			 * - create the HTTP-Response-Header
			 *
			 */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			if (-1 == connection_handle_write_prepare(srv, con)) {
				connection_set_state(srv, con, CON_STATE_ERROR);

				break;
			}

			connection_set_state(srv, con, CON_STATE_WRITE);
			break;
		case CON_STATE_RESPONSE_END: /* transient */
			/* log the request */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			plugins_call_handle_request_done(srv, con);

			srv->con_written++;

			if (con->keep_alive) {
				connection_set_state(srv, con, CON_STATE_REQUEST_START);

#if 0
				con->request_start = srv->cur_ts;
				con->read_idle_ts = srv->cur_ts;
#endif
			} else {
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
					break;
				}

#ifdef USE_OPENSSL
				if (srv_sock->is_ssl) {
					switch (SSL_shutdown(con->ssl)) {
					case 1:
						/* done */
						break;
					case 0:
						/* wait for fd-event
						 *
						 * FIXME: wait for fdevent and call SSL_shutdown again
						 *
						 */
						break;
					default:
						log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
								ERR_error_string(ERR_get_error(), NULL));
					}
				}
#endif
				/* half-close and linger so the client sees all output;
				 * if shutdown() fails, close immediately */
				if ((0 == shutdown(con->fd, SHUT_WR))) {
					con->close_timeout_ts = srv->cur_ts;
					connection_set_state(srv, con, CON_STATE_CLOSE);
				} else {
					connection_close(srv, con);
				}

				srv->con_closed++;
			}

			connection_reset(srv, con);

			break;
		case CON_STATE_CONNECT:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			chunkqueue_reset(con->read_queue);

			con->request_count = 0;

			break;
		case CON_STATE_CLOSE:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			/* we have to do the linger_on_close stuff regardless
			 * of con->keep_alive; even non-keepalive sockets may
			 * still have unread data, and closing before reading
			 * it will make the client not see all our output.
			 */
			{
				int len;
				char buf[1024];
#ifdef HAVE_LIBMTCP
				len = mtcp_read(srv->mctx, con->fd, buf, sizeof(buf));
#else
				len = read(con->fd, buf, sizeof(buf));
#endif
				if (len == 0 || (len < 0 && errno != EAGAIN && errno != EINTR) ) {
					/* EOF or hard error: expire the linger timeout right away */
					con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
				}
			}

			if (srv->cur_ts - con->close_timeout_ts > HTTP_LINGER_TIMEOUT) {
				connection_close(srv, con);

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"connection closed for fd", con->fd);
				}
			}

			break;
		case CON_STATE_READ_POST:
		case CON_STATE_READ:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			connection_handle_read_state(srv, con);
			break;
		case CON_STATE_WRITE:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			/* only try to write if we have something in the queue */
			if (!chunkqueue_is_empty(con->write_queue)) {
#if 0
				log_error_write(srv, __FILE__, __LINE__, "dsd",
						con->fd,
						"packets to write:",
						con->write_queue->used);
#endif
			}
			if (!chunkqueue_is_empty(con->write_queue) && con->is_writable) {
				if (-1 == connection_handle_write(srv, con)) {
					log_error_write(srv, __FILE__, __LINE__, "ds",
							con->fd,
							"handle write failed.");
					connection_set_state(srv, con, CON_STATE_ERROR);
				}
			}

			break;
		case CON_STATE_ERROR: /* transient */

			/* even if the connection was drop we still have to write it to the access log */
			if (con->http_status) {
				plugins_call_handle_request_done(srv, con);
			}
#ifdef USE_OPENSSL
			if (srv_sock->is_ssl) {
				int ret, ssl_r;
				unsigned long err;
				ERR_clear_error();
				switch ((ret = SSL_shutdown(con->ssl))) {
				case 1:
					/* ok */
					break;
				case 0:
					/* first shutdown only sent our close_notify; retry once
					 * to also process the peer's */
					ERR_clear_error();
					if (-1 != (ret = SSL_shutdown(con->ssl))) break;

					/* fall through */
				default:

					switch ((ssl_r = SSL_get_error(con->ssl, ret))) {
					case SSL_ERROR_WANT_WRITE:
					case SSL_ERROR_WANT_READ:
						break;
					case SSL_ERROR_SYSCALL:
						/* perhaps we have error waiting in our error-queue */
						if (0 != (err = ERR_get_error())) {
							do {
								log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
										ssl_r, ret,
										ERR_error_string(err, NULL));
							} while((err = ERR_get_error()));
						} else if (errno != 0) { /* ssl bug (see lighttpd ticket #2213): sometimes errno == 0 */
							switch(errno) {
							case EPIPE:
							case ECONNRESET:
								break;
							default:
								log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL (error):",
									ssl_r, ret, errno,
									strerror(errno));
								break;
							}
						}

						break;
					default:
						while((err = ERR_get_error())) {
							log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
									ssl_r, ret,
									ERR_error_string(err, NULL));
						}

						break;
					}
				}
			}
			ERR_clear_error();
#endif

			switch(con->mode) {
			case DIRECT:
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sd",
						"emergency exit: direct",
						con->fd);
#endif
				break;
			default:
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
					break;
				}
				break;
			}

			connection_reset(srv, con);

			/* close the connection */
			if ((0 == shutdown(con->fd, SHUT_WR))) {
				con->close_timeout_ts = srv->cur_ts;
				connection_set_state(srv, con, CON_STATE_CLOSE);

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"shutdown for fd", con->fd);
				}
			} else {
				connection_close(srv, con);
			}

			con->keep_alive = 0;

			srv->con_closed++;

			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "sdd",
					"unknown state:", con->fd, con->state);

			break;
		}

		if (done == -1) {
			/* forced re-run (error-handler restart / HANDLER_COMEBACK) */
			done = 0;
		} else if (ostate == con->state) {
			/* no state change this iteration -> we are waiting on I/O */
			done = 1;
		}
	}

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at exit:",
				con->fd,
				connection_get_state(con->state));
	}

	/* sync the fdevent interest set with the final state */
	switch(con->state) {
	case CON_STATE_READ_POST:
	case CON_STATE_READ:
	case CON_STATE_CLOSE:
		fdevent_event_set(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_IN);
		break;
	case CON_STATE_WRITE:
		/* request write-fdevent only if we really need it
		 * - if we have data to write
		 * - if the socket is not writable yet
		 */
		if (!chunkqueue_is_empty(con->write_queue) &&
		    (con->is_writable == 0) &&
		    (con->traffic_limit_reached == 0)) {
			fdevent_event_set(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_OUT);
		} else {
			fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		}
		break;
	default:
		fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		break;
	}

	return 0;
}