/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * Reads available bytes into con->read_queue, then scans the queue for the
 * end-of-header marker "\r\n\r\n".  When found, the header bytes are copied
 * into con->request.request and the state machine advances to
 * CON_STATE_REQUEST_END.
 *
 * Returns 0 on success, -1 on fatal read error.
 */
static int connection_handle_read_state(server *srv, connection *con)  {
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */
	/* when in CON_STATE_READ: about to receive first byte for a request: */
	int is_request_start = chunkqueue_is_empty(cq);

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			/* remote peer closed the connection; whether that is an error
			 * depends on whether a complete header arrives below */
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	chunkqueue_remove_finished_chunks(cq);

	/* we might have got several packets at once
	 */

	/* update request_start timestamp when first byte of
	 * next request is received on a keep-alive connection */
	if (con->request_count > 1 && is_request_start) {
		con->request_start = srv->cur_ts;
		if (con->conf.high_precision_timestamps)
			log_clock_gettime_realtime(&con->request_start_hp);
	}

	/* if there is a \r\n\r\n in the chunkqueue
	 *
	 * scan the chunk-queue twice
	 * 1. to find the \r\n\r\n
	 * 2. to copy the header-packet
	 *
	 */

	last_chunk = NULL;
	last_offset = 0;

	for (c = cq->first; c; c = c->next) {
		size_t i;
		size_t len = buffer_string_length(c->mem) - c->offset;
		const char *b = c->mem->ptr + c->offset;

		for (i = 0; i < len; ++i) {
			char ch = b[i];

			if ('\r' == ch) {
				/* check if \n\r\n follows; the sequence may be split
				 * across chunk boundaries, so continue matching into
				 * the following chunks (j restarts at 0 per chunk) */
				size_t j = i+1;
				chunk *cc = c;
				const char header_end[] = "\r\n\r\n";
				int header_end_match_pos = 1;

				for ( ; cc; cc = cc->next, j = 0 ) {
					size_t bblen = buffer_string_length(cc->mem) - cc->offset;
					const char *bb = cc->mem->ptr + cc->offset;

					for ( ; j < bblen; j++) {
						ch = bb[j];

						if (ch == header_end[header_end_match_pos]) {
							header_end_match_pos++;
							if (4 == header_end_match_pos) {
								/* full "\r\n\r\n" matched; remember where it ends */
								last_chunk = cc;
								last_offset = j+1;
								goto found_header_end;
							}
						} else {
							/* mismatch: resume the outer scan after this '\r' */
							goto reset_search;
						}
					}
				}
			}
reset_search: ;
		}
	}
found_header_end:

	/* found */
	if (last_chunk) {
		buffer_reset(con->request.request);

		/* pass 2: copy everything up to (and including) the header end
		 * into con->request.request, consuming the bytes from the queue */
		for (c = cq->first; c; c = c->next) {
			size_t len = buffer_string_length(c->mem) - c->offset;
			if (c == last_chunk) {
				len = last_offset;
			}

			buffer_append_string_len(con->request.request, c->mem->ptr + c->offset, len);
			c->offset += len;
			cq->bytes_out += len;

			if (c == last_chunk) break;
		}

		connection_set_state(srv, con, CON_STATE_REQUEST_END);
	} else if (is_closed) {
		/* the connection got closed and we didn't got enough data to leave CON_STATE_READ;
		 * the only way is to leave here */
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	/* limit check applies to the copied header when complete, otherwise to
	 * the still-pending queue length */
	if ((last_chunk ? buffer_string_length(con->request.request) : (size_t)chunkqueue_length(cq))
	    > srv->srvconf.max_request_field_size) {
		log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 431");
		con->http_status = 431; /* Request Header Fields Too Large */
		con->keep_alive = 0;
		connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * Reads available bytes into con->read_queue; in CON_STATE_READ scans for the
 * "\r\n\r\n" end-of-header marker and copies the header into
 * con->request.request; in CON_STATE_READ_POST copies request body data into
 * con->request_content_queue.
 *
 * Returns 0 on success, -1 on fatal read error.
 */
int connection_handle_read_state(server *srv, connection *con) {
	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	/*
	 * Recording the connection activity to be used for the purpose of timeout calc
	 * See '/sbin/timeout'
	 * Touching the file '/var/last_tcp_connection_time'
	 *
	 * FIX: this previously used system("touch /var/last_tcp_connection_time"),
	 * which forked /bin/sh + touch on every read event of the (single-threaded)
	 * event loop, and ignored failure.  An O_TRUNC open updates the file's
	 * mtime directly with no subprocess.
	 */
	{
		int tfd = open("/var/last_tcp_connection_time", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		if (tfd != -1) close(tfd);
	}

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			/* remote peer closed the connection */
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	//log_error_write(srv, __FILE__, __LINE__, "sd", "http_status", con->http_status);
	/* a 416 response is already pending for this connection; nothing more to parse */
	switch(con->http_status) {
	case 416:
		return 0;
	default:
		break;
	}

	/* the last chunk might be empty */
	for (c = cq->first; c;) {
		if (cq->first == c && c->mem->used == 0) {
			/* the first node is empty */
			/* ... and it is empty, move it to unused */
			cq->first = c->next;
			if (cq->first == NULL) cq->last = NULL;

			c->next = cq->unused;
			cq->unused = c;
			cq->unused_chunks++;

			c = cq->first;
		} else if (c->next && c->next->mem->used == 0) {
			chunk *fc;
			/* next node is the last one */
			/* ... and it is empty, move it to unused */
			fc = c->next;
			c->next = fc->next;

			fc->next = cq->unused;
			cq->unused = fc;
			cq->unused_chunks++;

			/* the last node was empty */
			if (c->next == NULL) {
				cq->last = c;
			}
			c = c->next;
		} else {
			c = c->next;
		}
	}

	/* we might have got several packets at once */
	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 *
		 */

		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; !last_chunk && c; c = c->next) {
			buffer b;
			size_t i;

			b.ptr = c->mem->ptr + c->offset;
			b.used = c->mem->used - c->offset;

			for (i = 0; !last_chunk && i < b.used; i++) {
				char ch = b.ptr[i];
				size_t have_chars = 0;

				switch (ch) {
				case '\r':
					/* we have to do a 4 char lookup */
					have_chars = b.used - i - 1;

					if (have_chars >= 4) {
						/* all chars are in this buffer */
						if (0 == strncmp(b.ptr + i, "\r\n\r\n", 4)) {
							/* found */
							last_chunk = c;
							last_offset = i + 4;
							break;
						}
					} else {
						chunk *lookahead_chunk = c->next;
						size_t missing_chars;

						/* looks like the following chars are not in the same chunk */
						missing_chars = 4 - have_chars;

						if (lookahead_chunk && lookahead_chunk->type == MEM_CHUNK) {
							/* is the chunk long enough to contain the other chars ? */
							if (lookahead_chunk->mem->used > missing_chars) {
								if (0 == strncmp(b.ptr + i, "\r\n\r\n", have_chars) &&
								    0 == strncmp(lookahead_chunk->mem->ptr, "\r\n\r\n" + have_chars, missing_chars)) {
									last_chunk = lookahead_chunk;
									last_offset = missing_chars;
									break;
								}
							} else {
								/* a splited \r \n */
								break;
							}
						}
					}
					break;
				}
			}
		}

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				buffer b;

				b.ptr = c->mem->ptr + c->offset;
				b.used = c->mem->used - c->offset;

				if (c == last_chunk) {
					/* +1: this buffer's "used" includes the terminating zero */
					b.used = last_offset + 1;
				}

				buffer_append_string_buffer(con->request.request, &b);

				if (c == last_chunk) {
					c->offset += last_offset;
					break;
				} else {
					/* the whole packet was copied */
					c->offset = c->mem->used - 1;
				}
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "sdd", "oversized request-header", con->http_status, con->file_finished);
			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		/* drain the read queue into the request-content queue until the
		 * announced Content-Length has been received */
		for (c = cq->first; c && (dst_cq->bytes_in != (off_t)con->request.content_length); c = c->next) {
			off_t weWant, weHave, toRead;

			weWant = con->request.content_length - dst_cq->bytes_in;

			assert(c->mem->used);

			/* -1: chunk buffers carry a terminating zero in "used" */
			weHave = c->mem->used - c->offset - 1;

			toRead = weHave > weWant ? weWant : weHave;

			buffer *b;

			b = chunkqueue_get_append_buffer(dst_cq);
			buffer_copy_string_len(b, c->mem->ptr + c->offset, toRead);

			c->offset += toRead;
			dst_cq->bytes_in += toRead;
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length ||
		    dst_cq->bytes_in - dst_cq->bytes_out > srv->srvconf.max_request_size * 1024) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't got enough data to leave one of the READ states
	 * the only way is to leave here */
	if (is_closed && ostate == con->state) {
		/* FIX: format was "sdd" with four variadic args; "sddd" matches
		 * (is_closed, ostate, con->state) */
		log_error_write(srv, __FILE__, __LINE__, "sddd",
				"Connection got closed, not enough data to leave one of the READ states",
				is_closed, ostate, con->state);
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * Reads available bytes into con->read_queue; in CON_STATE_READ scans for
 * "\r\n\r\n" (possibly split across chunks) and copies the header into
 * con->request.request; in CON_STATE_READ_POST moves body data into
 * con->request_content_queue (spilling to tempfiles).
 *
 * Returns 0 on success, -1 on fatal read error.
 */
static int connection_handle_read_state(server *srv, connection *con)  {
	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			/* remote peer closed the connection */
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	chunkqueue_remove_finished_chunks(cq);

	/* we might have got several packets at once */
	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 *
		 */

		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; c; c = c->next) {
			size_t i;
			size_t len = buffer_string_length(c->mem) - c->offset;
			const char *b = c->mem->ptr + c->offset;

			for (i = 0; i < len; ++i) {
				char ch = b[i];

				if ('\r' == ch) {
					/* check if \n\r\n follows; the sequence may span
					 * chunk boundaries, so continue into later chunks */
					size_t j = i+1;
					chunk *cc = c;
					const char header_end[] = "\r\n\r\n";
					int header_end_match_pos = 1;

					for ( ; cc; cc = cc->next, j = 0 ) {
						size_t bblen = buffer_string_length(cc->mem) - cc->offset;
						/* FIX: was c->mem->ptr + cc->offset, i.e. chunk c's
						 * memory indexed by chunk cc's offset — an
						 * out-of-bounds / wrong-chunk read whenever the
						 * header end spans chunks.  Must read from cc. */
						const char *bb = cc->mem->ptr + cc->offset;

						for ( ; j < bblen; j++) {
							ch = bb[j];

							if (ch == header_end[header_end_match_pos]) {
								header_end_match_pos++;
								if (4 == header_end_match_pos) {
									last_chunk = cc;
									last_offset = j+1;
									goto found_header_end;
								}
							} else {
								goto reset_search;
							}
						}
					}
				}
reset_search: ;
			}
		}
found_header_end:

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				size_t len = buffer_string_length(c->mem) - c->offset;
				if (c == last_chunk) {
					len = last_offset;
				}

				buffer_append_string_len(con->request.request, c->mem->ptr + c->offset, len);
				c->offset += len;

				if (c == last_chunk) break;
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 414");
			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		if (0 != chunkqueue_steal_with_tempfiles(srv, dst_cq, cq,
				con->request.content_length - dst_cq->bytes_in )) {
			con->http_status = 413; /* Request-Entity too large */
			con->keep_alive = 0;
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't got enough data to leave one of the READ states
	 * the only way is to leave here */
	if (is_closed && ostate == con->state) {
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 *
 * Reads available bytes into con->read_queue; in CON_STATE_READ scans for
 * "\r\n\r\n" and copies the header into con->request.request; in
 * CON_STATE_READ_POST copies body data into con->request_content_queue
 * (spilling to tempfiles for bodies > 64kB), with a vendor "X-Pi-Upload"
 * side channel that additionally writes the body to /tmp/upload_vid/.
 *
 * Returns 0 on success, -1 on fatal read error.
 */
static int connection_handle_read_state(server *srv, connection *con)  {
	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			/* remote peer closed the connection */
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	/* the last chunk might be empty */
	for (c = cq->first; c;) {
		if (cq->first == c && c->mem->used == 0) {
			/* the first node is empty */
			/* ... and it is empty, move it to unused */
			cq->first = c->next;
			if (cq->first == NULL) cq->last = NULL;

			c->next = cq->unused;
			cq->unused = c;
			cq->unused_chunks++;

			c = cq->first;
		} else if (c->next && c->next->mem->used == 0) {
			chunk *fc;
			/* next node is the last one */
			/* ... and it is empty, move it to unused */
			fc = c->next;
			c->next = fc->next;

			fc->next = cq->unused;
			cq->unused = fc;
			cq->unused_chunks++;

			/* the last node was empty */
			if (c->next == NULL) {
				cq->last = c;
			}
			c = c->next;
		} else {
			c = c->next;
		}
	}

	/* we might have got several packets at once */
	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 *
		 */

		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; c; c = c->next) {
			buffer b;
			size_t i;

			b.ptr = c->mem->ptr + c->offset;
			b.used = c->mem->used - c->offset;
			if (b.used > 0) b.used--; /* buffer "used" includes terminating zero */

			for (i = 0; i < b.used; i++) {
				char ch = b.ptr[i];

				if ('\r' == ch) {
					/* check if \n\r\n follows; the sequence may span
					 * chunk boundaries (j restarts at 0 per chunk) */
					size_t j = i+1;
					chunk *cc = c;
					const char header_end[] = "\r\n\r\n";
					int header_end_match_pos = 1;

					for ( ; cc; cc = cc->next, j = 0 ) {
						buffer bb;

						bb.ptr = cc->mem->ptr + cc->offset;
						bb.used = cc->mem->used - cc->offset;
						if (bb.used > 0) bb.used--; /* buffer "used" includes terminating zero */

						for ( ; j < bb.used; j++) {
							ch = bb.ptr[j];

							if (ch == header_end[header_end_match_pos]) {
								header_end_match_pos++;
								if (4 == header_end_match_pos) {
									last_chunk = cc;
									last_offset = j+1;
									goto found_header_end;
								}
							} else {
								goto reset_search;
							}
						}
					}
				}
reset_search: ;
			}
		}
found_header_end:

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				buffer b;

				b.ptr = c->mem->ptr + c->offset;
				b.used = c->mem->used - c->offset;

				if (c == last_chunk) {
					/* +1: re-include the terminating zero for the copy */
					b.used = last_offset + 1;
				}

				buffer_append_string_buffer(con->request.request, &b);

				if (c == last_chunk) {
					c->offset += last_offset;
					break;
				} else {
					/* the whole packet was copied */
					c->offset = c->mem->used - 1;
				}
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 414");
			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		// xb ready to mod
		for (c = cq->first; c && (dst_cq->bytes_in != (off_t)con->request.content_length); c = c->next) {
			off_t weWant, weHave, toRead;

			weWant = con->request.content_length - dst_cq->bytes_in;

			assert(c->mem->used);

			/* -1: chunk buffers carry a terminating zero in "used" */
			weHave = c->mem->used - c->offset - 1;

			toRead = weHave > weWant ? weWant : weHave;

			/* vendor side channel: if the request carries an "X-Pi-Upload"
			 * header, mirror the body into /tmp/upload_vid/<id>.
			 * NOTE(review): the previous con->upload_fd is never closed when
			 * a new id arrives — fd leak across uploads; open() and write()
			 * return values are unchecked; verify intent with the author. */
			data_string *ds_pi = (data_string *)array_get_element(con->request.headers, "X-Pi-Upload");
			if (ds_pi) {
				float id;
				sscanf(ds_pi->value->ptr, "%f", &id);
				if (id != con->upload_id) {
					log_error_write(srv, __FILE__, __LINE__, "s", "newfile");
					char path[256];
					/* %f of a float fits well within 256 bytes */
					sprintf(path, "/tmp/upload_vid/%f", id);
					int fd = open(path, O_CREAT | O_WRONLY, 0777);
					con->upload_fd = fd;
					con->upload_id = id;
				}
				/* NOTE(review): final arg casts a pointer to int ("d");
				 * truncates on 64-bit — presumably only a debug marker */
				log_error_write(srv, __FILE__, __LINE__, "sboood", "pi-upload:", ds_pi->value, weHave, (off_t)dst_cq->bytes_in, (off_t)con->request.content_length, (int)ds_pi);
				write(con->upload_fd, c->mem->ptr + c->offset, toRead);
				/*
				dst_cq->bytes_in += toRead;
				if (dst_cq->bytes_in == (off_t)con->request.content_length) {
					connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
					break;
				}
				continue;
				*/
			}

			/* the new way, copy everything into a chunkqueue whcih might use tempfiles */
			if (con->request.content_length > 64 * 1024) {
				chunk *dst_c = NULL;
				/* copy everything to max 1Mb sized tempfiles */

				/*
				 * if the last chunk is
				 * - smaller than 1Mb (size < 1Mb)
				 * - not read yet (offset == 0)
				 * -> append to it
				 * otherwise
				 * -> create a new chunk
				 *
				 * */

				// xb mod: my upload file
				if (dst_cq->last &&
				    dst_cq->last->type == FILE_CHUNK &&
				    dst_cq->last->file.is_temp &&
				    dst_cq->last->offset == 0) {
					/* ok, take the last chunk for our job */

					// xb mod: my upload file
					/* dead branch kept for reference by the vendor mod */
					if (0) {
						/*
						dst_c = chunkqueue_get_append_tempfile(dst_cq);
						close(dst_c->file.fd);
						buffer *path = buffer_init_string("/var/cache/lighttpd/uploads/my-upload-");
						buffer_append_string_buffer(path, ds->value);
						dst_c->file.fd = open(path->ptr, O_WRONLY | O_CREAT, 0777);
						log_error_write(srv, __FILE__, __LINE__, "sb", "pi-upload: path", path);
#ifdef FD_CLOEXEC
						fcntl(dst_c->file.fd, F_SETFD, FD_CLOEXEC);
#endif
						buffer_free(path);
						*/
					} else {
						if (dst_cq->last->file.length < 1 * 1024 * 1024) {
							dst_c = dst_cq->last;

							if (dst_c->file.fd == -1) {
								/* this should not happen as we cache the fd, but you never know */
								dst_c->file.fd = open(dst_c->file.name->ptr, O_WRONLY | O_APPEND);
#ifdef FD_CLOEXEC
								fcntl(dst_c->file.fd, F_SETFD, FD_CLOEXEC);
#endif
							}
						} else {
							/* the chunk is too large now, close it */
							dst_c = dst_cq->last;

							if (dst_c->file.fd != -1) {
								close(dst_c->file.fd);
								dst_c->file.fd = -1;
							}
							dst_c = chunkqueue_get_append_tempfile(dst_cq);
						}
					}
				} else {
					dst_c = chunkqueue_get_append_tempfile(dst_cq);
				}

				/* we have a chunk, let's write to it */
				if (dst_c->file.fd == -1) {
					/* we don't have file to write to,
					 * EACCES might be one reason.
					 *
					 * Instead of sending 500 we send 413 and say the request is too large
					 * */
					log_error_write(srv, __FILE__, __LINE__, "sbs",
							"denying upload as opening to temp-file for upload failed:",
							dst_c->file.name, strerror(errno));

					con->http_status = 413; /* Request-Entity too large */
					con->keep_alive = 0;
					connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

					break;
				}

				// xb mod
				//if (toRead != write(dst_c->file.fd, c->mem->ptr + c->offset, toRead)) {
				/* NOTE(review): when ds_pi is set, the tempfile write is
				 * skipped but file.length/bytes_in are still advanced below —
				 * the content queue then claims bytes it never stored;
				 * confirm this is intentional for the pi-upload path */
				if (!ds_pi && toRead != write(dst_c->file.fd, c->mem->ptr + c->offset, toRead)) {
					/* write failed for some reason ... disk full ? */
					log_error_write(srv, __FILE__, __LINE__, "sbs",
							"denying upload as writing to file failed:",
							dst_c->file.name, strerror(errno));

					con->http_status = 413; /* Request-Entity too large */
					con->keep_alive = 0;
					connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

					close(dst_c->file.fd);
					dst_c->file.fd = -1;

					break;
				}

				dst_c->file.length += toRead;

				if (dst_cq->bytes_in + toRead == (off_t)con->request.content_length) {
					/* we read everything, close the chunk */
					close(dst_c->file.fd);
					dst_c->file.fd = -1;
				}
			} else {
				buffer *b;

				if (dst_cq->last && dst_cq->last->type == MEM_CHUNK) {
					b = dst_cq->last->mem;
				} else {
					b = chunkqueue_get_append_buffer(dst_cq);
					/* prepare buffer size for remaining POST data; is < 64kb */
					buffer_prepare_copy(b, con->request.content_length - dst_cq->bytes_in + 1);
				}
				buffer_append_string_len(b, c->mem->ptr + c->offset, toRead);
			}

			c->offset += toRead;
			dst_cq->bytes_in += toRead;
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
			log_error_write(srv, __FILE__, __LINE__, "s", "endfile: ok");
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't got enough data to leave one of the READ states
	 * the only way is to leave here */
	if (is_closed && ostate == con->state) {
		connection_set_state(srv, con, CON_STATE_ERROR);
		log_error_write(srv, __FILE__, __LINE__, "s", "endfile: error");
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}