void chunkqueue_reset(chunkqueue *cq) { chunk *c; /* move everything to the unused queue */ /* mark all read written */ for (c = cq->first; c; c = c->next) { switch(c->type) { case MEM_CHUNK: c->offset = c->mem->used - 1; break; case FILE_CHUNK: c->offset = c->file.length; break; case SMB_CHUNK: c->offset = c->file.length; break; default: break; } } chunkqueue_remove_finished_chunks(cq); cq->bytes_in = 0; cq->bytes_out = 0; }
int network_writev_mem_chunks(server *srv, connection *con, int fd, chunkqueue *cq, off_t *p_max_bytes) { struct iovec chunks[MAX_CHUNKS]; size_t num_chunks; off_t max_bytes = *p_max_bytes; off_t toSend; ssize_t r; UNUSED(con); force_assert(NULL != cq->first); force_assert(MEM_CHUNK == cq->first->type); { chunk const *c; toSend = 0; num_chunks = 0; for (c = cq->first; NULL != c && MEM_CHUNK == c->type && num_chunks < MAX_CHUNKS && toSend < max_bytes; c = c->next) { size_t c_len; force_assert(c->offset >= 0 && c->offset <= (off_t)buffer_string_length(c->mem)); c_len = buffer_string_length(c->mem) - c->offset; if (c_len > 0) { toSend += c_len; chunks[num_chunks].iov_base = c->mem->ptr + c->offset; chunks[num_chunks].iov_len = c_len; ++num_chunks; } } } if (0 == num_chunks) { chunkqueue_remove_finished_chunks(cq); return 0; } r = writev(fd, chunks, num_chunks); if (r < 0) switch (errno) { case EAGAIN: case EINTR: break; case EPIPE: case ECONNRESET: return -2; default: log_error_write(srv, __FILE__, __LINE__, "ssd", "writev failed:", strerror(errno), fd); return -1; } if (r >= 0) { *p_max_bytes -= r; chunkqueue_mark_written(cq, r); } return (r > 0 && r == toSend) ? 0 : -3; }
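/*
 * Note (added; not part of the sources above): the write helpers in this
 * collection share one return convention -- 0 = chunk(s) fully sent,
 * -1 = fatal error, -2 = remote side closed the connection, -3 = partial
 * write, try again later.  A minimal, hypothetical sketch of a dispatcher
 * driving them per chunk type, assuming the lighttpd-style declarations used
 * above are in scope; the loop is illustrative, not the project's actual
 * backend selection code.
 */
static int example_network_write_dispatch(server *srv, connection *con, int fd, chunkqueue *cq, off_t max_bytes) {
	while (max_bytes > 0 && NULL != cq->first) {
		int r = -1;

		switch (cq->first->type) {
		case MEM_CHUNK:
			r = network_writev_mem_chunks(srv, con, fd, cq, &max_bytes);
			break;
		case FILE_CHUNK:
			r = network_write_file_chunk_sendfile(srv, con, fd, cq, &max_bytes);
			break;
		default:
			return -1;
		}

		if (-3 == r) return 0; /* partial write; wait for the next fd event */
		if (0 != r) return r;  /* -1 = error, -2 = connection reset by peer */
	}
	return 0;
}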
int network_write_mem_chunk(server *srv, connection *con, int fd, chunkqueue *cq, off_t *p_max_bytes) {
	chunk* const c = cq->first;
	off_t c_len;
	ssize_t r;
	UNUSED(con);
	force_assert(NULL != c);
	force_assert(MEM_CHUNK == c->type);
	force_assert(c->offset >= 0 && c->offset <= (off_t)buffer_string_length(c->mem));

	c_len = buffer_string_length(c->mem) - c->offset;
	if (c_len > *p_max_bytes) c_len = *p_max_bytes;
	if (0 == c_len) {
		chunkqueue_remove_finished_chunks(cq);
		return 0;
	}

#if defined(__WIN32)
	if ((r = send(fd, c->mem->ptr + c->offset, c_len, 0)) < 0) {
		int lastError = WSAGetLastError();
		switch (lastError) {
		case WSAEINTR:
		case WSAEWOULDBLOCK:
			break;
		case WSAECONNRESET:
		case WSAETIMEDOUT:
		case WSAECONNABORTED:
			return -2;
		default:
			log_error_write(srv, __FILE__, __LINE__, "sdd",
				"send failed: ", lastError, fd);
			return -1;
		}
	}
#else /* __WIN32 */
	if ((r = write(fd, c->mem->ptr + c->offset, c_len)) < 0) {
		switch (errno) {
		case EAGAIN:
		case EINTR:
			break;
		case EPIPE:
		case ECONNRESET:
			return -2;
		default:
			log_error_write(srv, __FILE__, __LINE__, "ssd",
				"write failed:", strerror(errno), fd);
			return -1;
		}
	}
#endif /* __WIN32 */

	if (r >= 0) {
		*p_max_bytes -= r;
		chunkqueue_mark_written(cq, r);
	}

	return (r > 0 && r == c_len) ? 0 : -3;
}
/** * Copy decoded response content to client connection. */ static int cgi_copy_response(server *srv, connection *con, cgi_session *sess) { chunk *c; int we_have = 0; UNUSED(srv); chunkqueue_remove_finished_chunks(sess->rb); /* copy the content to the next cq */ for (c = sess->rb->first; c; c = c->next) { if (c->mem->used == 0) continue; we_have = chunkqueue_steal_chunk(con->send, c); sess->rb->bytes_out += we_have; con->send->bytes_in += we_have; } chunkqueue_remove_finished_chunks(sess->rb); if(sess->rb->is_closed) { con->send->is_closed = 1; } return 0; }
/** * reset all chunks of the queue */ void chunkqueue_reset(chunkqueue *cq) { chunk *c; /* mark all read done */ for (c = cq->first; c; c = c->next) { chunk_set_done(c); } chunkqueue_remove_finished_chunks(cq); cq->bytes_in = 0; cq->bytes_out = 0; cq->is_closed = 0; }
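/*
 * Note (added): chunk_set_done() is called above and in cgi_copy_err() but is
 * not shown in this collection.  A hypothetical sketch of what it is assumed
 * to do, mirroring the per-type logic of the older chunkqueue_reset() variant
 * at the top of this section: advance the offset so the chunk counts as fully
 * consumed and gets collected by chunkqueue_remove_finished_chunks().
 */
static void chunk_set_done(chunk *c) {
	switch (c->type) {
	case MEM_CHUNK:
		if (c->mem->used) c->offset = c->mem->used - 1; /* all buffered bytes read */
		break;
	case FILE_CHUNK:
		c->offset = c->file.length; /* whole file range written */
		break;
	default:
		break;
	}
}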
int network_write_file_chunk_sendfile(server *srv, connection *con, int fd, chunkqueue *cq, off_t *p_max_bytes) { chunk* const c = cq->first; off_t offset, written = 0; off_t toSend; int r; force_assert(NULL != c); force_assert(FILE_CHUNK == c->type); force_assert(c->offset >= 0 && c->offset <= c->file.length); offset = c->file.start + c->offset; toSend = c->file.length - c->offset; if (toSend > *p_max_bytes) toSend = *p_max_bytes; if (0 == toSend) { chunkqueue_remove_finished_chunks(cq); return 0; } if (0 != network_open_file_chunk(srv, con, cq)) return -1; /* Darwin sendfile() */ written = toSend; if (-1 == (r = sendfile(c->file.fd, fd, offset, &written, NULL, 0))) { switch(errno) { case EAGAIN: case EINTR: /* for EAGAIN/EINTR written still contains the sent bytes */ break; /* try again later */ case EPIPE: case ENOTCONN: return -2; default: log_error_write(srv, __FILE__, __LINE__, "ssd", "sendfile: ", strerror(errno), errno); return -1; } } if (written >= 0) { chunkqueue_mark_written(cq, written); *p_max_bytes -= written; } return (r >= 0 && written == toSend) ? 0 : -3; }
static void cgi_copy_err(chunkqueue *cq) { buffer *line = buffer_init(); chunk *c; for (c = cq->first; c; c = c->next) { off_t we_have; char *str, *nl; if (c->type != MEM_CHUNK) { ERROR("%s", "wrong chunk type"); chunk_set_done(c); continue; } we_have = c->mem->used - 1 - c->offset; str = c->mem->ptr + c->offset; if (we_have <= 0) continue; for ( ; NULL != (nl = strchr(str, '\n')); str = nl+1) { *nl = '\0'; if (!buffer_is_empty(line)) { buffer_append_string(line, str); cgi_log_err(SAFE_BUF_STR(line)); buffer_reset(line); } else { cgi_log_err(str); } } if (*str) { buffer_append_string(line, str); } chunk_set_done(c); } if (!buffer_is_empty(line)) { cgi_log_err(SAFE_BUF_STR(line)); } chunkqueue_remove_finished_chunks(cq); }
int consume_bytes(server *srv, int op, int fd, void *dst, chunkqueue *cq, size_t num_bytes) {
	chunk *c;
	size_t written, bytes_this_chunk, bytes_consumed = 0;
	int gc = 0;

	if (!num_bytes) return 0;

	for (c = cq->first; c != NULL; c = c->next) {
		if (c->type != MEM_CHUNK) {
			LOG("sd", "ERROR: chunk not MEM_CHUNK:", c->type);
			break;
		}
		if (c->mem->used - c->offset <= 1) {
			LOG("s", "WARNING: empty chunk");
			gc = 1;
			continue;
		}

		bytes_this_chunk = c->mem->used - c->offset - 1;
		if (bytes_this_chunk > num_bytes) bytes_this_chunk = num_bytes;

		if (op == OP_COPY) {
			memcpy((char *)dst + bytes_consumed, c->mem->ptr + c->offset, bytes_this_chunk);
		} else if (op == OP_WRITE) {
			errno = 0;
			written = write(fd, c->mem->ptr + c->offset, bytes_this_chunk);
			//cq->bytes_out += written;
			if (written != bytes_this_chunk) {
				LOG("sdsdsd", "ERROR:", errno, "wrote", written, "instead of", bytes_this_chunk);
				break;
			}
		}

		c->offset += bytes_this_chunk;
		if (c->mem->used - c->offset <= 1) {
			gc = 1;
		}

		bytes_consumed += bytes_this_chunk;
		num_bytes -= bytes_this_chunk;
		if (num_bytes == 0) break;
	}

	/*if (num_bytes) DEBUGLOG("sdsd", "Still need", num_bytes, "cq_len:", chunkqueue_avail(cq)); */

	if (gc) chunkqueue_remove_finished_chunks(cq);

	//TODO: Rename bytes_out to bytes_consumed
	cq->bytes_out += bytes_consumed;

	return bytes_consumed;
}
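/*
 * Hypothetical usage sketch for consume_bytes() (added; not from the original
 * source): copy up to buf_len bytes of already-buffered data out of a
 * connection's read queue.  OP_COPY ignores the fd argument, so -1 is passed;
 * assumes the declarations above are in scope.
 */
static int read_queue_copy(server *srv, connection *con, char *buf, size_t buf_len) {
	return consume_bytes(srv, OP_COPY, -1, buf, con->read_queue, buf_len);
}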
int network_write_chunkqueue(server *srv, connection *con, chunkqueue *cq, off_t max_bytes) {
	int ret = -1;
	off_t written = 0;
#ifdef TCP_CORK
	int corked = 0;
#endif
	server_socket *srv_socket = con->srv_socket;

	if (con->conf.global_kbytes_per_second) {
		off_t limit = con->conf.global_kbytes_per_second * 1024 - *(con->conf.global_bytes_per_second_cnt_ptr);
		if (limit <= 0) {
			/* we reached the global traffic limit */
			con->traffic_limit_reached = 1;
			joblist_append(srv, con);

			return 1;
		} else {
			if (max_bytes > limit) max_bytes = limit;
		}
	}

	if (con->conf.kbytes_per_second) {
		off_t limit = con->conf.kbytes_per_second * 1024 - con->bytes_written_cur_second;
		if (limit <= 0) {
			/* we reached the traffic limit */
			con->traffic_limit_reached = 1;
			joblist_append(srv, con);

			return 1;
		} else {
			if (max_bytes > limit) max_bytes = limit;
		}
	}

	written = cq->bytes_out;

#ifdef TCP_CORK
	/* Linux: put a cork into the socket as we want to combine the write() calls
	 * but only if we really have multiple chunks */
	if (cq->first && cq->first->next) {
		corked = 1;
		setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked));
	}
#endif

	if (srv_socket->is_ssl) {
#ifdef USE_OPENSSL
		ret = srv->network_ssl_backend_write(srv, con, con->ssl, cq, max_bytes);
#endif
	} else {
		ret = srv->network_backend_write(srv, con, con->fd, cq, max_bytes);
	}

	if (ret >= 0) {
		chunkqueue_remove_finished_chunks(cq);
		ret = chunkqueue_is_empty(cq) ? 0 : 1;
	}

#ifdef TCP_CORK
	if (corked) {
		corked = 0;
		setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked));
	}
#endif

	written = cq->bytes_out - written;
	con->bytes_written += written;
	con->bytes_written_cur_second += written;

	*(con->conf.global_bytes_per_second_cnt_ptr) += written;

	return ret;
}
static handler_t cgi_handle_fdevent(void *s, void *ctx, int revents) { server *srv = (server *)s; cgi_session *sess = ctx; connection *con = sess->remote_con; if (revents & FDEVENT_IN) { switch (sess->state) { case CGI_STATE_READ_RESPONSE_HEADER: /* parse the header and set file-started, the demuxer will care about it */ joblist_append(srv, con); break; case CGI_STATE_READ_RESPONSE_CONTENT: /* just forward the content to the out-going queue */ chunkqueue_remove_finished_chunks(sess->rb); switch (srv->network_backend_read(srv, con, sess->sock, sess->rb)) { case NETWORK_STATUS_CONNECTION_CLOSE: fdevent_event_del(srv->ev, sess->sock); /* connection closed. close the read chunkqueue. */ sess->rb->is_closed = 1; case NETWORK_STATUS_SUCCESS: /* read even more, do we have all the content */ /* how much do we want to read ? */ /* copy the resopnse content */ cgi_copy_response(srv, con, sess); break; default: ERROR("%s", "oops, we failed to read"); break; } joblist_append(srv, con); break; default: TRACE("unexpected state for a FDEVENT_IN: %d", sess->state); break; } } if (revents & FDEVENT_OUT) { /* nothing to do */ } /* perhaps this issue is already handled */ if (revents & FDEVENT_HUP) { con->send->is_closed = 1; fdevent_event_del(srv->ev, sess->sock); joblist_append(srv, con); } else if (revents & FDEVENT_ERR) { con->send->is_closed = 1; /* kill all connections to the cgi process */ fdevent_event_del(srv->ev, sess->sock); joblist_append(srv, con); } return HANDLER_FINISHED; }
static int cgi_demux_response(server *srv, connection *con, plugin_data *p) { cgi_session *sess = con->plugin_ctx[p->id]; switch(srv->network_backend_read(srv, con, sess->sock, sess->rb)) { case NETWORK_STATUS_CONNECTION_CLOSE: fdevent_event_del(srv->ev, sess->sock); /* connection closed. close the read chunkqueue. */ sess->rb->is_closed = 1; case NETWORK_STATUS_SUCCESS: /* we got content */ break; case NETWORK_STATUS_WAIT_FOR_EVENT: return 0; default: /* oops */ ERROR("%s", "oops, read-pipe-read failed and I don't know why"); return -1; } /* looks like we got some content * * split off the header from the incoming stream */ if (con->file_started == 0) { size_t i; int have_content_length = 0; http_response_reset(p->resp); /* the response header is not fully received yet, * * extract the http-response header from the rb-cq */ switch (http_response_parse_cq(sess->rb, p->resp)) { case PARSE_UNSET: case PARSE_ERROR: /* parsing failed */ TRACE("%s", "response parser failed"); con->http_status = 502; /* Bad Gateway */ return -1; case PARSE_NEED_MORE: if (sess->rb->is_closed) { /* backend died before sending a header */ con->http_status = 502; /* Bad Gateway */ return -1; } return 0; case PARSE_SUCCESS: con->http_status = p->resp->status; chunkqueue_remove_finished_chunks(sess->rb); /* copy the http-headers */ for (i = 0; i < p->resp->headers->used; i++) { const char *ign[] = { "Status", "Connection", NULL }; size_t j; data_string *ds; data_string *header = (data_string *)p->resp->headers->data[i]; /* some headers are ignored by default */ for (j = 0; ign[j]; j++) { if (0 == strcasecmp(ign[j], header->key->ptr)) break; } if (ign[j]) continue; if (0 == buffer_caseless_compare(CONST_BUF_LEN(header->key), CONST_STR_LEN("Location"))) { /* CGI/1.1 rev 03 - 7.2.1.2 */ if (con->http_status == 0) con->http_status = 302; } else if (0 == buffer_caseless_compare(CONST_BUF_LEN(header->key), CONST_STR_LEN("Content-Length"))) { have_content_length = 1; } if (NULL == (ds = (data_string *)array_get_unused_element(con->response.headers, TYPE_STRING))) { ds = data_response_init(); } buffer_copy_string_buffer(ds->key, header->key); buffer_copy_string_buffer(ds->value, header->value); array_insert_unique(con->response.headers, (data_unset *)ds); } con->file_started = 1; /* if Status: ... is not set, 200 is our default status-code */ if (con->http_status == 0) con->http_status = 200; sess->state = CGI_STATE_READ_RESPONSE_CONTENT; if (con->request.http_version == HTTP_VERSION_1_1 && !have_content_length) { con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; } break; } } /* FIXME: pass the response-header to the other plugins to * setup the filter-queue * * - use next-queue instead of con->write_queue */ /* copy the resopnse content */ cgi_copy_response(srv, con, sess); joblist_append(srv, con); return 0; }
static handler_t fdt_check_service(server *srv, connection *con, void *p_d) {
	plugin_data *p = p_d;
	size_t s_len;
	size_t k;
	buffer *fn;
	fdt_server *m_server = NULL;

	if (con->mode != DIRECT) return HANDLER_GO_ON;

	/* Possibly, we processed already this request */
	if (con->file_started == 1) return HANDLER_GO_ON;

	fn = con->request.uri;
	if (buffer_is_empty(fn)) return HANDLER_GO_ON;

	s_len = fn->used - 1;

	mod_fd_transfer_patch_connection(srv, con, p);

	/* check if service matches */
	for (k = 0; k < p->conf.services->used; k++) {
		size_t ct_len; /* length of the config entry */
		fdt_server *s_server = p->conf.services->servers[k];

		if (s_server->service_name->used == 0) continue;
		ct_len = s_server->service_name->used - 1;

		if ((ct_len <= s_len) && (0 == strncmp(fn->ptr, s_server->service_name->ptr, ct_len))) {
			/* check service in the form "/google/" */
			m_server = s_server;
			break;
		}
	}

	/* service doesn't match */
	if (NULL == m_server) {
		return HANDLER_GO_ON;
	}

	con->mode = p->id;

	if(m_server->state == PROC_STATE_INIT || m_server->state == PROC_STATE_DIED) {
		if(-1 == fdt_spawn_connection(srv, m_server)) {
			log_error_write(srv, __FILE__, __LINE__, "s", "Could not spawn connection");
			close(m_server->fdt_sfd);
			con->file_finished = 1;
			con->http_status = 500;
			return HANDLER_ERROR;
		}
	}

	chunk *c;
	chunkqueue *cq = con->read_queue;
	buffer *data = buffer_init();
	buffer_copy_string_buffer(data, con->request.request);

	for (c = cq->first; c; c = c->next) {
		buffer b;

		b.ptr = c->mem->ptr + c->offset;
		b.used = c->mem->used - c->offset;
		buffer_append_string_buffer(data, &b);

		/* the whole packet was copied */
		c->offset = c->mem->used - 1;
	}

	// log_error_write(srv, __FILE__, __LINE__, "sb", "Request Data - ", data);

	mod_fd_transfer_send_fd(m_server->fdt_sfd, con->fd, data->ptr);
	con->file_finished = 1;

	buffer_free(data);
	chunkqueue_remove_finished_chunks(cq);

	return HANDLER_FORWARD;
}
static int nlua_eval(FCGX_Request *request) {
	return make_error(request, "Unimplemented", ENOSYS);
#if 0
	char *program;
	int prog_ptr = 0;
	lua_State *lua;
	int streamer[2];
	int in, out;
	int content_length;
	char *tmp = FCGX_GetParam("CONTENT_LENGTH", request->envp);

	close(0);
	content_length = strtoul(content_length, 0, NULL);

	/* If there's no program, there's nothing to do */
	if (content_length <= 0) {
		FCGX_FPrintF(request, "Content-Type: text/plain\r\n\r\n");
		return 0;
	}

	program = malloc(con->request.content_length+1);
	if (!program)
		return make_error(request, "Program size too large", ENOMEM);
	program[content_length] = '\0';

	/* there is content to eval */
	for (c = cq->first; c; c = cq->first) {
		int r = 0;

		/* copy all chunks */
		switch(c->type) {
		case FILE_CHUNK:
			if (c->file.mmap.start == MAP_FAILED) {
				if (-1 == c->file.fd &&  /* open the file if not already open */
				    -1 == (c->file.fd = open(c->file.name->ptr, O_RDONLY))) {
					//log_error_write(srv, __FILE__, __LINE__, "ss", "open failed: ", strerror(errno));
					free(program);
					return -1;
				}

				c->file.mmap.length = c->file.length;

				if (MAP_FAILED == (c->file.mmap.start = mmap(0, c->file.mmap.length, PROT_READ, MAP_SHARED, c->file.fd, 0))) {
					//log_error_write(srv, __FILE__, __LINE__, "ssbd", "mmap failed: ", strerror(errno), c->file.name, c->file.fd);
					free(program);
					return -1;
				}

				close(c->file.fd);
				c->file.fd = -1;

				/* chunk_reset() or chunk_free() will cleanup for us */
			}

			memcpy(program+prog_ptr, c->file.mmap.start+c->offset, c->file.length - c->offset);
			r = c->file.length - c->offset;
			break;
		case MEM_CHUNK:
			memcpy(program+prog_ptr, c->mem->ptr + c->offset, c->mem->used - c->offset - 1);
			r = c->mem->used - c->offset - 1;
			break;
		case UNUSED_CHUNK:
			break;
		}

		c->offset += r;
		cq->bytes_out += r;
		prog_ptr += r;

		chunkqueue_remove_finished_chunks(cq);
	}

	lua = lua_open();
	if (!lua)
		return make_error(con, "Unable to open lua", errno);
	luaL_openlibs(lua);

	pipe(streamer);
	out = dup2(streamer[1], 0);
	in = streamer[0];
	if (streamer[1] != out)
		close(streamer[1]);

	if (luaL_dostring(lua, program)) {
		char errmsg[2048];
		snprintf(errmsg, sizeof(errmsg)-1, "LUA program \"%s\" encountered an error: %s", program, lua_tostring(lua, 1));
		make_error(con, errmsg, 1);
		con->http_status = 200;
	} else {
		char data[4096];
		int len;

		bzero(data, sizeof(data));
		len = read(in, data, sizeof(data));

		b = chunkqueue_get_append_buffer(con->write_queue);
		buffer_copy_string_len(b, data, len);
		con->http_status = 200;
	}

	lua_close(lua);
	free(program);
	close(streamer[1]);
	close(streamer[0]);
	close(out);
	close(in);
	return HANDLER_FINISHED;
#endif
}
static int cgi_create_env(server *srv, connection *con, plugin_data *p, buffer *cgi_handler) { pid_t pid; #ifdef HAVE_IPV6 char b2[INET6_ADDRSTRLEN + 1]; #endif int to_cgi_fds[2]; int from_cgi_fds[2]; struct stat st; #ifndef __WIN32 if (cgi_handler->used > 1) { /* stat the exec file */ if (-1 == (stat(cgi_handler->ptr, &st))) { log_error_write(srv, __FILE__, __LINE__, "sbss", "stat for cgi-handler", cgi_handler, "failed:", strerror(errno)); return -1; } } if (pipe(to_cgi_fds)) { log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno)); return -1; } if (pipe(from_cgi_fds)) { log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno)); return -1; } /* fork, execve */ switch (pid = fork()) { case 0: { /* child */ char **args; int argc; int i = 0; char buf[32]; size_t n; char_array env; char *c; const char *s; server_socket *srv_sock = con->srv_socket; /* move stdout to from_cgi_fd[1] */ close(STDOUT_FILENO); dup2(from_cgi_fds[1], STDOUT_FILENO); close(from_cgi_fds[1]); /* not needed */ close(from_cgi_fds[0]); /* move the stdin to to_cgi_fd[0] */ close(STDIN_FILENO); dup2(to_cgi_fds[0], STDIN_FILENO); close(to_cgi_fds[0]); /* not needed */ close(to_cgi_fds[1]); /* HACK: * this is not nice, but it works * * we feed the stderr of the CGI to our errorlog, if possible */ if (srv->errorlog_mode == ERRORLOG_FILE) { close(STDERR_FILENO); dup2(srv->errorlog_fd, STDERR_FILENO); } /* create environment */ env.ptr = NULL; env.size = 0; env.used = 0; cgi_env_add(&env, CONST_STR_LEN("SERVER_SOFTWARE"), CONST_STR_LEN(PACKAGE_NAME"/"PACKAGE_VERSION)); if (!buffer_is_empty(con->server_name)) { cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), CONST_BUF_LEN(con->server_name)); } else { #ifdef HAVE_IPV6 s = inet_ntop(srv_sock->addr.plain.sa_family, srv_sock->addr.plain.sa_family == AF_INET6 ? (const void *) &(srv_sock->addr.ipv6.sin6_addr) : (const void *) &(srv_sock->addr.ipv4.sin_addr), b2, sizeof(b2)-1); #else s = inet_ntoa(srv_sock->addr.ipv4.sin_addr); #endif cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), s, strlen(s)); } cgi_env_add(&env, CONST_STR_LEN("GATEWAY_INTERFACE"), CONST_STR_LEN("CGI/1.1")); s = get_http_version_name(con->request.http_version); cgi_env_add(&env, CONST_STR_LEN("SERVER_PROTOCOL"), s, strlen(s)); ltostr(buf, #ifdef HAVE_IPV6 ntohs(srv_sock->addr.plain.sa_family == AF_INET6 ? srv_sock->addr.ipv6.sin6_port : srv_sock->addr.ipv4.sin_port) #else ntohs(srv_sock->addr.ipv4.sin_port) #endif ); cgi_env_add(&env, CONST_STR_LEN("SERVER_PORT"), buf, strlen(buf)); #ifdef HAVE_IPV6 s = inet_ntop(srv_sock->addr.plain.sa_family, srv_sock->addr.plain.sa_family == AF_INET6 ? 
(const void *) &(srv_sock->addr.ipv6.sin6_addr) : (const void *) &(srv_sock->addr.ipv4.sin_addr), b2, sizeof(b2)-1); #else s = inet_ntoa(srv_sock->addr.ipv4.sin_addr); #endif cgi_env_add(&env, CONST_STR_LEN("SERVER_ADDR"), s, strlen(s)); s = get_http_method_name(con->request.http_method); cgi_env_add(&env, CONST_STR_LEN("REQUEST_METHOD"), s, strlen(s)); if (!buffer_is_empty(con->request.pathinfo)) { cgi_env_add(&env, CONST_STR_LEN("PATH_INFO"), CONST_BUF_LEN(con->request.pathinfo)); } cgi_env_add(&env, CONST_STR_LEN("REDIRECT_STATUS"), CONST_STR_LEN("200")); if (!buffer_is_empty(con->uri.query)) { cgi_env_add(&env, CONST_STR_LEN("QUERY_STRING"), CONST_BUF_LEN(con->uri.query)); } if (!buffer_is_empty(con->request.orig_uri)) { cgi_env_add(&env, CONST_STR_LEN("REQUEST_URI"), CONST_BUF_LEN(con->request.orig_uri)); } #ifdef HAVE_IPV6 s = inet_ntop(con->dst_addr.plain.sa_family, con->dst_addr.plain.sa_family == AF_INET6 ? (const void *) &(con->dst_addr.ipv6.sin6_addr) : (const void *) &(con->dst_addr.ipv4.sin_addr), b2, sizeof(b2)-1); #else s = inet_ntoa(con->dst_addr.ipv4.sin_addr); #endif cgi_env_add(&env, CONST_STR_LEN("REMOTE_ADDR"), s, strlen(s)); ltostr(buf, #ifdef HAVE_IPV6 ntohs(con->dst_addr.plain.sa_family == AF_INET6 ? con->dst_addr.ipv6.sin6_port : con->dst_addr.ipv4.sin_port) #else ntohs(con->dst_addr.ipv4.sin_port) #endif ); cgi_env_add(&env, CONST_STR_LEN("REMOTE_PORT"), buf, strlen(buf)); if (!buffer_is_empty(con->authed_user)) { cgi_env_add(&env, CONST_STR_LEN("REMOTE_USER"), CONST_BUF_LEN(con->authed_user)); } /* request.content_length < SSIZE_MAX, see request.c */ ltostr(buf, con->request.content_length); cgi_env_add(&env, CONST_STR_LEN("CONTENT_LENGTH"), buf, strlen(buf)); cgi_env_add(&env, CONST_STR_LEN("SCRIPT_FILENAME"), CONST_BUF_LEN(con->physical.path)); cgi_env_add(&env, CONST_STR_LEN("SCRIPT_NAME"), CONST_BUF_LEN(con->uri.path)); cgi_env_add(&env, CONST_STR_LEN("DOCUMENT_ROOT"), CONST_BUF_LEN(con->physical.doc_root)); /* for valgrind */ if (NULL != (s = getenv("LD_PRELOAD"))) { cgi_env_add(&env, CONST_STR_LEN("LD_PRELOAD"), s, strlen(s)); } if (NULL != (s = getenv("LD_LIBRARY_PATH"))) { cgi_env_add(&env, CONST_STR_LEN("LD_LIBRARY_PATH"), s, strlen(s)); } #ifdef __CYGWIN__ /* CYGWIN needs SYSTEMROOT */ if (NULL != (s = getenv("SYSTEMROOT"))) { cgi_env_add(&env, CONST_STR_LEN("SYSTEMROOT"), s, strlen(s)); } #endif for (n = 0; n < con->request.headers->used; n++) { data_string *ds; ds = (data_string *)con->request.headers->data[n]; if (ds->value->used && ds->key->used) { size_t j; buffer_reset(p->tmp_buf); if (0 != strcasecmp(ds->key->ptr, "CONTENT-TYPE")) { buffer_copy_string(p->tmp_buf, "HTTP_"); p->tmp_buf->used--; /* strip \0 after HTTP_ */ } buffer_prepare_append(p->tmp_buf, ds->key->used + 2); for (j = 0; j < ds->key->used - 1; j++) { char cr = '_'; if (light_isalpha(ds->key->ptr[j])) { /* upper-case */ cr = ds->key->ptr[j] & ~32; } else if (light_isdigit(ds->key->ptr[j])) { /* copy */ cr = ds->key->ptr[j]; } p->tmp_buf->ptr[p->tmp_buf->used++] = cr; } p->tmp_buf->ptr[p->tmp_buf->used++] = '\0'; cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value)); } } for (n = 0; n < con->environment->used; n++) { data_string *ds; ds = (data_string *)con->environment->data[n]; if (ds->value->used && ds->key->used) { size_t j; buffer_reset(p->tmp_buf); buffer_prepare_append(p->tmp_buf, ds->key->used + 2); for (j = 0; j < ds->key->used - 1; j++) { p->tmp_buf->ptr[p->tmp_buf->used++] = isalpha((unsigned char)ds->key->ptr[j]) ? 
toupper((unsigned char)ds->key->ptr[j]) : '_'; } p->tmp_buf->ptr[p->tmp_buf->used++] = '\0'; cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value)); } } if (env.size == env.used) { env.size += 16; env.ptr = realloc(env.ptr, env.size * sizeof(*env.ptr)); } env.ptr[env.used] = NULL; /* set up args */ argc = 3; args = malloc(sizeof(*args) * argc); i = 0; if (cgi_handler->used > 1) { args[i++] = cgi_handler->ptr; } args[i++] = con->physical.path->ptr; args[i++] = NULL; /* search for the last / */ if (NULL != (c = strrchr(con->physical.path->ptr, '/'))) { *c = '\0'; /* change to the physical directory */ if (-1 == chdir(con->physical.path->ptr)) { log_error_write(srv, __FILE__, __LINE__, "ssb", "chdir failed:", strerror(errno), con->physical.path); } *c = '/'; } /* we don't need the client socket */ for (i = 3; i < 256; i++) { if (i != srv->errorlog_fd) close(i); } /* exec the cgi */ execve(args[0], args, env.ptr); log_error_write(srv, __FILE__, __LINE__, "sss", "CGI failed:", strerror(errno), args[0]); /* */ SEGFAULT(); break; } case -1: /* error */ log_error_write(srv, __FILE__, __LINE__, "ss", "fork failed:", strerror(errno)); break; default: { handler_ctx *hctx; /* father */ close(from_cgi_fds[1]); close(to_cgi_fds[0]); if (con->request.content_length) { chunkqueue *cq = con->request_content_queue; chunk *c; assert(chunkqueue_length(cq) == (off_t)con->request.content_length); /* there is content to send */ for (c = cq->first; c; c = cq->first) { int r = 0; /* copy all chunks */ switch(c->type) { case FILE_CHUNK: if (c->file.mmap.start == MAP_FAILED) { if (-1 == c->file.fd && /* open the file if not already open */ -1 == (c->file.fd = open(c->file.name->ptr, O_RDONLY))) { log_error_write(srv, __FILE__, __LINE__, "ss", "open failed: ", strerror(errno)); close(from_cgi_fds[0]); close(to_cgi_fds[1]); return -1; } c->file.mmap.length = c->file.length; if (MAP_FAILED == (c->file.mmap.start = mmap(0, c->file.mmap.length, PROT_READ, MAP_SHARED, c->file.fd, 0))) { log_error_write(srv, __FILE__, __LINE__, "ssbd", "mmap failed: ", strerror(errno), c->file.name, c->file.fd); close(from_cgi_fds[0]); close(to_cgi_fds[1]); return -1; } close(c->file.fd); c->file.fd = -1; /* chunk_reset() or chunk_free() will cleanup for us */ } if ((r = write(to_cgi_fds[1], c->file.mmap.start + c->offset, c->file.length - c->offset)) < 0) { switch(errno) { case ENOSPC: con->http_status = 507; break; default: con->http_status = 403; break; } } break; case MEM_CHUNK: if ((r = write(to_cgi_fds[1], c->mem->ptr + c->offset, c->mem->used - c->offset - 1)) < 0) { switch(errno) { case ENOSPC: con->http_status = 507; break; default: con->http_status = 403; break; } } break; case UNUSED_CHUNK: break; } if (r > 0) { c->offset += r; cq->bytes_out += r; } else { break; } chunkqueue_remove_finished_chunks(cq); } } close(to_cgi_fds[1]); /* register PID and wait for them asyncronously */ con->mode = p->id; buffer_reset(con->physical.path); hctx = cgi_handler_ctx_init(); hctx->remote_conn = con; hctx->plugin_data = p; hctx->pid = pid; hctx->fd = from_cgi_fds[0]; hctx->fde_ndx = -1; con->plugin_ctx[p->id] = hctx; fdevent_register(srv->ev, hctx->fd, cgi_handle_fdevent, hctx); fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN); if (-1 == fdevent_fcntl_set(srv->ev, hctx->fd)) { log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno)); fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd); fdevent_unregister(srv->ev, hctx->fd); log_error_write(srv, __FILE__, __LINE__, "sd", "cgi 
close:", hctx->fd); close(hctx->fd); cgi_handler_ctx_free(hctx); con->plugin_ctx[p->id] = NULL; return -1; } break; } } return 0; #else return -1; #endif }
static handler_t proxy_write_request(server *srv, handler_ctx *hctx) {
	data_proxy *host = hctx->host;
	connection *con = hctx->remote_conn;
	int ret;

	if (!host || (!host->host->used || !host->port)) return -1;

	switch(hctx->state) {
	case PROXY_STATE_CONNECT:
		/* wait for the connect() to finish */

		/* connect failed ? */
		if (-1 == hctx->fde_ndx) return HANDLER_ERROR;

		/* wait */
		return HANDLER_WAIT_FOR_EVENT;

		break;

	case PROXY_STATE_INIT:
#if defined(HAVE_IPV6) && defined(HAVE_INET_PTON)
		if (strstr(host->host->ptr, ":")) {
			if (-1 == (hctx->fd = socket(AF_INET6, SOCK_STREAM, 0))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "socket failed: ", strerror(errno));
				return HANDLER_ERROR;
			}
		} else
#endif
		{
			if (-1 == (hctx->fd = socket(AF_INET, SOCK_STREAM, 0))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "socket failed: ", strerror(errno));
				return HANDLER_ERROR;
			}
		}
		hctx->fde_ndx = -1;

		srv->cur_fds++;

		fdevent_register(srv->ev, hctx->fd, proxy_handle_fdevent, hctx);

		if (-1 == fdevent_fcntl_set(srv->ev, hctx->fd)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));

			return HANDLER_ERROR;
		}

		switch (proxy_establish_connection(srv, hctx)) {
		case 1:
			proxy_set_state(srv, hctx, PROXY_STATE_CONNECT);

			/* connection is in progress, wait for an event and call getsockopt() below */

			fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT);

			return HANDLER_WAIT_FOR_EVENT;
		case -1:
			/* if ECONNREFUSED choose another connection -> FIXME */
			hctx->fde_ndx = -1;

			return HANDLER_ERROR;
		default:
			/* everything is ok, go on */
			proxy_set_state(srv, hctx, PROXY_STATE_PREPARE_WRITE);
			break;
		}
		/* fall through */

	case PROXY_STATE_PREPARE_WRITE:
		proxy_create_env(srv, hctx);

		proxy_set_state(srv, hctx, PROXY_STATE_WRITE);

		/* fall through */
	case PROXY_STATE_WRITE:;
		ret = srv->network_backend_write(srv, con, hctx->fd, hctx->wb, MAX_WRITE_LIMIT);

		chunkqueue_remove_finished_chunks(hctx->wb);

		if (-1 == ret) { /* error on our side */
			log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed:", strerror(errno), errno);

			return HANDLER_ERROR;
		} else if (-2 == ret) { /* remote close */
			log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed, remote connection close:", strerror(errno), errno);

			return HANDLER_ERROR;
		}

		if (hctx->wb->bytes_out == hctx->wb->bytes_in) {
			proxy_set_state(srv, hctx, PROXY_STATE_READ);

			fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
			fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN);
		} else {
			fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT);

			return HANDLER_WAIT_FOR_EVENT;
		}

		return HANDLER_WAIT_FOR_EVENT;
	case PROXY_STATE_READ:
		/* waiting for a response */
		return HANDLER_WAIT_FOR_EVENT;
	default:
		log_error_write(srv, __FILE__, __LINE__, "s", "(debug) unknown state");
		return HANDLER_ERROR;
	}

	return HANDLER_GO_ON;
}
/** * handle all header and content read * * we get called by the state-engine and by the fdevent-handler */ static int connection_handle_read_state(server *srv, connection *con) { connection_state_t ostate = con->state; chunk *c, *last_chunk; off_t last_offset; chunkqueue *cq = con->read_queue; chunkqueue *dst_cq = con->request_content_queue; int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */ if (con->is_readable) { con->read_idle_ts = srv->cur_ts; switch(connection_handle_read(srv, con)) { case -1: return -1; case -2: is_closed = 1; break; default: break; } } /* the last chunk might be empty */ for (c = cq->first; c;) { if (cq->first == c && c->mem->used == 0) { /* the first node is empty */ /* ... and it is empty, move it to unused */ cq->first = c->next; if (cq->first == NULL) cq->last = NULL; c->next = cq->unused; cq->unused = c; cq->unused_chunks++; c = cq->first; } else if (c->next && c->next->mem->used == 0) { chunk *fc; /* next node is the last one */ /* ... and it is empty, move it to unused */ fc = c->next; c->next = fc->next; fc->next = cq->unused; cq->unused = fc; cq->unused_chunks++; /* the last node was empty */ if (c->next == NULL) { cq->last = c; } c = c->next; } else { c = c->next; } } /* we might have got several packets at once */ switch(ostate) { case CON_STATE_READ: /* if there is a \r\n\r\n in the chunkqueue * * scan the chunk-queue twice * 1. to find the \r\n\r\n * 2. to copy the header-packet * */ last_chunk = NULL; last_offset = 0; for (c = cq->first; c; c = c->next) { buffer b; size_t i; b.ptr = c->mem->ptr + c->offset; b.used = c->mem->used - c->offset; if (b.used > 0) b.used--; /* buffer "used" includes terminating zero */ for (i = 0; i < b.used; i++) { char ch = b.ptr[i]; if ('\r' == ch) { /* chec if \n\r\n follows */ size_t j = i+1; chunk *cc = c; const char header_end[] = "\r\n\r\n"; int header_end_match_pos = 1; for ( ; cc; cc = cc->next, j = 0 ) { buffer bb; bb.ptr = cc->mem->ptr + cc->offset; bb.used = cc->mem->used - cc->offset; if (bb.used > 0) bb.used--; /* buffer "used" includes terminating zero */ for ( ; j < bb.used; j++) { ch = bb.ptr[j]; if (ch == header_end[header_end_match_pos]) { header_end_match_pos++; if (4 == header_end_match_pos) { last_chunk = cc; last_offset = j+1; goto found_header_end; } } else { goto reset_search; } } } } reset_search: ; } } found_header_end: /* found */ if (last_chunk) { buffer_reset(con->request.request); for (c = cq->first; c; c = c->next) { buffer b; b.ptr = c->mem->ptr + c->offset; b.used = c->mem->used - c->offset; if (c == last_chunk) { b.used = last_offset + 1; } buffer_append_string_buffer(con->request.request, &b); if (c == last_chunk) { c->offset += last_offset; break; } else { /* the whole packet was copied */ c->offset = c->mem->used - 1; } } connection_set_state(srv, con, CON_STATE_REQUEST_END); } else if (chunkqueue_length(cq) > 64 * 1024) { log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 414"); con->http_status = 414; /* Request-URI too large */ con->keep_alive = 0; connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); } break; case CON_STATE_READ_POST: // xb ready to mod for (c = cq->first; c && (dst_cq->bytes_in != (off_t)con->request.content_length); c = c->next) { off_t weWant, weHave, toRead; weWant = con->request.content_length - dst_cq->bytes_in; assert(c->mem->used); weHave = c->mem->used - c->offset - 1; toRead = weHave > weWant ? 
weWant : weHave; data_string *ds_pi = (data_string *)array_get_element(con->request.headers, "X-Pi-Upload"); if (ds_pi) { float id; sscanf(ds_pi->value->ptr, "%f", &id); if (id != con->upload_id) { log_error_write(srv, __FILE__, __LINE__, "s", "newfile"); char path[256]; sprintf(path, "/tmp/upload_vid/%f", id); int fd = open(path, O_CREAT | O_WRONLY, 0777); con->upload_fd = fd; con->upload_id = id; } log_error_write(srv, __FILE__, __LINE__, "sboood", "pi-upload:", ds_pi->value, weHave, (off_t)dst_cq->bytes_in, (off_t)con->request.content_length, (int)ds_pi); write(con->upload_fd, c->mem->ptr + c->offset, toRead); /* dst_cq->bytes_in += toRead; if (dst_cq->bytes_in == (off_t)con->request.content_length) { connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); break; } continue; */ } /* the new way, copy everything into a chunkqueue whcih might use tempfiles */ if (con->request.content_length > 64 * 1024) { chunk *dst_c = NULL; /* copy everything to max 1Mb sized tempfiles */ /* * if the last chunk is * - smaller than 1Mb (size < 1Mb) * - not read yet (offset == 0) * -> append to it * otherwise * -> create a new chunk * * */ // xb mod: my upload file if (dst_cq->last && dst_cq->last->type == FILE_CHUNK && dst_cq->last->file.is_temp && dst_cq->last->offset == 0) { /* ok, take the last chunk for our job */ // xb mod: my upload file if (0) { /* dst_c = chunkqueue_get_append_tempfile(dst_cq); close(dst_c->file.fd); buffer *path = buffer_init_string("/var/cache/lighttpd/uploads/my-upload-"); buffer_append_string_buffer(path, ds->value); dst_c->file.fd = open(path->ptr, O_WRONLY | O_CREAT, 0777); log_error_write(srv, __FILE__, __LINE__, "sb", "pi-upload: path", path); #ifdef FD_CLOEXEC fcntl(dst_c->file.fd, F_SETFD, FD_CLOEXEC); #endif buffer_free(path); */ } else { if (dst_cq->last->file.length < 1 * 1024 * 1024) { dst_c = dst_cq->last; if (dst_c->file.fd == -1) { /* this should not happen as we cache the fd, but you never know */ dst_c->file.fd = open(dst_c->file.name->ptr, O_WRONLY | O_APPEND); #ifdef FD_CLOEXEC fcntl(dst_c->file.fd, F_SETFD, FD_CLOEXEC); #endif } } else { /* the chunk is too large now, close it */ dst_c = dst_cq->last; if (dst_c->file.fd != -1) { close(dst_c->file.fd); dst_c->file.fd = -1; } dst_c = chunkqueue_get_append_tempfile(dst_cq); } } } else { dst_c = chunkqueue_get_append_tempfile(dst_cq); } /* we have a chunk, let's write to it */ if (dst_c->file.fd == -1) { /* we don't have file to write to, * EACCES might be one reason. * * Instead of sending 500 we send 413 and say the request is too large * */ log_error_write(srv, __FILE__, __LINE__, "sbs", "denying upload as opening to temp-file for upload failed:", dst_c->file.name, strerror(errno)); con->http_status = 413; /* Request-Entity too large */ con->keep_alive = 0; connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); break; } // xb mod //if (toRead != write(dst_c->file.fd, c->mem->ptr + c->offset, toRead)) { if (!ds_pi && toRead != write(dst_c->file.fd, c->mem->ptr + c->offset, toRead)) { /* write failed for some reason ... disk full ? 
*/ log_error_write(srv, __FILE__, __LINE__, "sbs", "denying upload as writing to file failed:", dst_c->file.name, strerror(errno)); con->http_status = 413; /* Request-Entity too large */ con->keep_alive = 0; connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); close(dst_c->file.fd); dst_c->file.fd = -1; break; } dst_c->file.length += toRead; if (dst_cq->bytes_in + toRead == (off_t)con->request.content_length) { /* we read everything, close the chunk */ close(dst_c->file.fd); dst_c->file.fd = -1; } } else { buffer *b; if (dst_cq->last && dst_cq->last->type == MEM_CHUNK) { b = dst_cq->last->mem; } else { b = chunkqueue_get_append_buffer(dst_cq); /* prepare buffer size for remaining POST data; is < 64kb */ buffer_prepare_copy(b, con->request.content_length - dst_cq->bytes_in + 1); } buffer_append_string_len(b, c->mem->ptr + c->offset, toRead); } c->offset += toRead; dst_cq->bytes_in += toRead; } /* Content is ready */ if (dst_cq->bytes_in == (off_t)con->request.content_length) { connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST); log_error_write(srv, __FILE__, __LINE__, "s", "endfile: ok"); } break; default: break; } /* the connection got closed and we didn't got enough data to leave one of the READ states * the only way is to leave here */ if (is_closed && ostate == con->state) { connection_set_state(srv, con, CON_STATE_ERROR); log_error_write(srv, __FILE__, __LINE__, "s", "endfile: error"); } chunkqueue_remove_finished_chunks(cq); return 0; }
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 */
static int connection_handle_read_state(server *srv, connection *con)  {
	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	chunkqueue_remove_finished_chunks(cq);

	/* we might have got several packets at once */

	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 *
		 */

		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; c; c = c->next) {
			size_t i;
			size_t len = buffer_string_length(c->mem) - c->offset;
			const char *b = c->mem->ptr + c->offset;

			for (i = 0; i < len; ++i) {
				char ch = b[i];

				if ('\r' == ch) {
					/* check if \n\r\n follows */
					size_t j = i+1;
					chunk *cc = c;
					const char header_end[] = "\r\n\r\n";
					int header_end_match_pos = 1;

					for ( ; cc; cc = cc->next, j = 0 ) {
						size_t bblen = buffer_string_length(cc->mem) - cc->offset;
						const char *bb = cc->mem->ptr + cc->offset;

						for ( ; j < bblen; j++) {
							ch = bb[j];

							if (ch == header_end[header_end_match_pos]) {
								header_end_match_pos++;
								if (4 == header_end_match_pos) {
									last_chunk = cc;
									last_offset = j+1;
									goto found_header_end;
								}
							} else {
								goto reset_search;
							}
						}
					}
				}
reset_search: ;
			}
		}
found_header_end:

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				size_t len = buffer_string_length(c->mem) - c->offset;

				if (c == last_chunk) {
					len = last_offset;
				}

				buffer_append_string_len(con->request.request, c->mem->ptr + c->offset, len);
				c->offset += len;

				if (c == last_chunk) break;
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 414");

			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		if (0 != chunkqueue_steal_with_tempfiles(srv, dst_cq, cq, con->request.content_length - dst_cq->bytes_in)) {
			con->http_status = 413; /* Request-Entity too large */
			con->keep_alive = 0;
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't get enough data to leave one of the READ states
	 * the only way is to leave here */
	if (is_closed && ostate == con->state) {
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
static handler_t proxy_write_request(server *srv, handler_ctx *hctx) { data_proxy *host= hctx->host; plugin_data *p = hctx->plugin_data; connection *con = hctx->remote_conn; int ret; if (!host || (!host->host->used || !host->port)) return -1; switch(hctx->state) { case PROXY_STATE_INIT: if (-1 == (hctx->fd = socket(AF_INET, SOCK_STREAM, 0))) { log_error_write(srv, __FILE__, __LINE__, "ss", "socket failed: ", strerror(errno)); return HANDLER_ERROR; } hctx->fde_ndx = -1; srv->cur_fds++; fdevent_register(srv->ev, hctx->fd, proxy_handle_fdevent, hctx); if (-1 == fdevent_fcntl_set(srv->ev, hctx->fd)) { log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno)); return HANDLER_ERROR; } /* fall through */ case PROXY_STATE_CONNECT: /* try to finish the connect() */ if (hctx->state == PROXY_STATE_INIT) { /* first round */ switch (proxy_establish_connection(srv, hctx)) { case 1: proxy_set_state(srv, hctx, PROXY_STATE_CONNECT); /* connection is in progress, wait for an event and call getsockopt() below */ fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT); return HANDLER_WAIT_FOR_EVENT; case -1: /* if ECONNREFUSED choose another connection -> FIXME */ hctx->fde_ndx = -1; return HANDLER_ERROR; default: /* everything is ok, go on */ break; } } else { int socket_error; socklen_t socket_error_len = sizeof(socket_error); /* we don't need it anymore */ fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd); /* try to finish the connect() */ if (0 != getsockopt(hctx->fd, SOL_SOCKET, SO_ERROR, &socket_error, &socket_error_len)) { log_error_write(srv, __FILE__, __LINE__, "ss", "getsockopt failed:", strerror(errno)); return HANDLER_ERROR; } if (socket_error != 0) { log_error_write(srv, __FILE__, __LINE__, "ss", "establishing connection failed:", strerror(socket_error), "port:", hctx->host->port); return HANDLER_ERROR; } if (p->conf.debug) { log_error_write(srv, __FILE__, __LINE__, "s", "proxy - connect - delayed success"); } } proxy_set_state(srv, hctx, PROXY_STATE_PREPARE_WRITE); /* fall through */ case PROXY_STATE_PREPARE_WRITE: proxy_create_env(srv, hctx); proxy_set_state(srv, hctx, PROXY_STATE_WRITE); /* fall through */ case PROXY_STATE_WRITE:; ret = srv->network_backend_write(srv, con, hctx->fd, hctx->wb); chunkqueue_remove_finished_chunks(hctx->wb); if (-1 == ret) { if (errno != EAGAIN && errno != EINTR) { log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed:", strerror(errno), errno); return HANDLER_ERROR; } else { fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT); return HANDLER_WAIT_FOR_EVENT; } } if (hctx->wb->bytes_out == hctx->wb->bytes_in) { proxy_set_state(srv, hctx, PROXY_STATE_READ); fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd); fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN); } else { fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT); return HANDLER_WAIT_FOR_EVENT; } return HANDLER_WAIT_FOR_EVENT; case PROXY_STATE_READ: /* waiting for a response */ return HANDLER_WAIT_FOR_EVENT; default: log_error_write(srv, __FILE__, __LINE__, "s", "(debug) unknown state"); return HANDLER_ERROR; } return HANDLER_GO_ON; }
/**
 * handle all header and content read
 *
 * we get called by the state-engine and by the fdevent-handler
 */
int connection_handle_read_state(server *srv, connection *con)  {
	/**
	 * Recording the connection activity to be used for the purpose of timeout calc
	 * See '/sbin/timeout'
	 * Touching the file '/var/last_tcp_connection_time'
	 */
	system("touch /var/last_tcp_connection_time");

	connection_state_t ostate = con->state;
	chunk *c, *last_chunk;
	off_t last_offset;
	chunkqueue *cq = con->read_queue;
	chunkqueue *dst_cq = con->request_content_queue;
	int is_closed = 0; /* the connection got closed, if we don't have a complete header, -> error */

	if (con->is_readable) {
		con->read_idle_ts = srv->cur_ts;

		switch(connection_handle_read(srv, con)) {
		case -1:
			return -1;
		case -2:
			is_closed = 1;
			break;
		default:
			break;
		}
	}

	//log_error_write(srv, __FILE__, __LINE__, "sd", "http_status", con->http_status);
	switch(con->http_status) {
	case 416:
		return 0;
	default:
		break;
	}

	/* the last chunk might be empty */
	for (c = cq->first; c;) {
		if (cq->first == c && c->mem->used == 0) {
			/* the first node is empty */
			/* ... and it is empty, move it to unused */

			cq->first = c->next;
			if (cq->first == NULL) cq->last = NULL;

			c->next = cq->unused;
			cq->unused = c;
			cq->unused_chunks++;

			c = cq->first;
		} else if (c->next && c->next->mem->used == 0) {
			chunk *fc;
			/* next node is the last one */
			/* ... and it is empty, move it to unused */

			fc = c->next;
			c->next = fc->next;

			fc->next = cq->unused;
			cq->unused = fc;
			cq->unused_chunks++;

			/* the last node was empty */
			if (c->next == NULL) {
				cq->last = c;
			}

			c = c->next;
		} else {
			c = c->next;
		}
	}

	/* we might have got several packets at once */

	switch(ostate) {
	case CON_STATE_READ:
		/* if there is a \r\n\r\n in the chunkqueue
		 *
		 * scan the chunk-queue twice
		 * 1. to find the \r\n\r\n
		 * 2. to copy the header-packet
		 *
		 */

		last_chunk = NULL;
		last_offset = 0;

		for (c = cq->first; !last_chunk && c; c = c->next) {
			buffer b;
			size_t i;

			b.ptr = c->mem->ptr + c->offset;
			b.used = c->mem->used - c->offset;

			for (i = 0; !last_chunk && i < b.used; i++) {
				char ch = b.ptr[i];
				size_t have_chars = 0;

				switch (ch) {
				case '\r':
					/* we have to do a 4 char lookup */
					have_chars = b.used - i - 1;

					if (have_chars >= 4) {
						/* all chars are in this buffer */

						if (0 == strncmp(b.ptr + i, "\r\n\r\n", 4)) {
							/* found */
							last_chunk = c;
							last_offset = i + 4;

							break;
						}
					} else {
						chunk *lookahead_chunk = c->next;
						size_t missing_chars;
						/* looks like the following chars are not in the same chunk */

						missing_chars = 4 - have_chars;

						if (lookahead_chunk && lookahead_chunk->type == MEM_CHUNK) {
							/* is the chunk long enough to contain the other chars ? */

							if (lookahead_chunk->mem->used > missing_chars) {
								if (0 == strncmp(b.ptr + i, "\r\n\r\n", have_chars) &&
								    0 == strncmp(lookahead_chunk->mem->ptr, "\r\n\r\n" + have_chars, missing_chars)) {
									last_chunk = lookahead_chunk;
									last_offset = missing_chars;

									break;
								}
							} else {
								/* a split \r \n */
								break;
							}
						}
					}

					break;
				}
			}
		}

		/* found */
		if (last_chunk) {
			buffer_reset(con->request.request);

			for (c = cq->first; c; c = c->next) {
				buffer b;

				b.ptr = c->mem->ptr + c->offset;
				b.used = c->mem->used - c->offset;

				if (c == last_chunk) {
					b.used = last_offset + 1;
				}

				buffer_append_string_buffer(con->request.request, &b);

				if (c == last_chunk) {
					c->offset += last_offset;

					break;
				} else {
					/* the whole packet was copied */
					c->offset = c->mem->used - 1;
				}
			}

			connection_set_state(srv, con, CON_STATE_REQUEST_END);
		} else if (chunkqueue_length(cq) > 64 * 1024) {
			log_error_write(srv, __FILE__, __LINE__, "sdd", "oversized request-header", con->http_status, con->file_finished);
			con->http_status = 414; /* Request-URI too large */
			con->keep_alive = 0;
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	case CON_STATE_READ_POST:
		for (c = cq->first; c && (dst_cq->bytes_in != (off_t)con->request.content_length); c = c->next) {
			off_t weWant, weHave, toRead;

			weWant = con->request.content_length - dst_cq->bytes_in;

			assert(c->mem->used);

			weHave = c->mem->used - c->offset - 1;

			toRead = weHave > weWant ? weWant : weHave;

			buffer *b;

			b = chunkqueue_get_append_buffer(dst_cq);
			buffer_copy_string_len(b, c->mem->ptr + c->offset, toRead);

			c->offset += toRead;
			dst_cq->bytes_in += toRead;
		}

		/* Content is ready */
		if (dst_cq->bytes_in == (off_t)con->request.content_length ||
		    dst_cq->bytes_in - dst_cq->bytes_out > srv->srvconf.max_request_size * 1024) {
			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
		}
		break;
	default:
		break;
	}

	/* the connection got closed and we didn't get enough data to leave one of the READ states
	 * the only way is to leave here */
	if (is_closed && ostate == con->state) {
		log_error_write(srv, __FILE__, __LINE__, "sddd",
				"Connection got closed, not enough data to leave one of the READ states",
				is_closed, ostate, con->state);
		connection_set_state(srv, con, CON_STATE_ERROR);
	}

	chunkqueue_remove_finished_chunks(cq);

	return 0;
}
static handler_t deflate_compress_response(server *srv, connection *con, handler_ctx *hctx) {
	off_t len, max;
	int close_stream;

	/* move all chunks from write_queue into our in_queue, then adjust
	 * counters since con->write_queue is reused for compressed output */
	len = chunkqueue_length(con->write_queue);
	chunkqueue_remove_finished_chunks(con->write_queue);
	chunkqueue_append_chunkqueue(hctx->in_queue, con->write_queue);
	con->write_queue->bytes_in -= len;
	con->write_queue->bytes_out -= len;

	max = chunkqueue_length(hctx->in_queue);
#if 0
	/* calculate max bytes to compress for this call */
	if (p->conf.sync_flush && max > (len = p->conf.work_block_size << 10)) {
		max = len;
	}
#endif

	/* Compress chunks from in_queue into chunks for write_queue */
	while (max) {
		chunk *c = hctx->in_queue->first;

		switch(c->type) {
		case MEM_CHUNK:
			len = buffer_string_length(c->mem) - c->offset;
			if (len > max) len = max;
			if (mod_deflate_compress(srv, con, hctx, (unsigned char *)c->mem->ptr+c->offset, len) < 0) {
				log_error_write(srv, __FILE__, __LINE__, "s", "compress failed.");
				return HANDLER_ERROR;
			}
			break;
		case FILE_CHUNK:
			len = c->file.length - c->offset;
			if (len > max) len = max;
			if ((len = mod_deflate_file_chunk(srv, con, hctx, c, len)) < 0) {
				log_error_write(srv, __FILE__, __LINE__, "s", "compress file chunk failed.");
				return HANDLER_ERROR;
			}
			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "ds", c, "type not known");
			return HANDLER_ERROR;
		}

		max -= len;
		chunkqueue_mark_written(hctx->in_queue, len);
	}

	/*(currently should always be true)*/
	/*(current implementation requires response be complete)*/
	close_stream = (con->file_finished && chunkqueue_is_empty(hctx->in_queue));
	if (mod_deflate_stream_flush(srv, con, hctx, close_stream) < 0) {
		log_error_write(srv, __FILE__, __LINE__, "s", "flush error");
		return HANDLER_ERROR;
	}

	return close_stream ? HANDLER_FINISHED : HANDLER_GO_ON;
}
/* similar to network_write_file_chunk_mmap, but doesn't use send on windows (because we're on pipes), * also mmaps and sends complete chunk instead of only small parts - the files * are supposed to be temp files with reasonable chunk sizes. * * Also always use mmap; the files are "trusted", as we created them. */ static ssize_t cgi_write_file_chunk_mmap(server *srv, connection *con, int fd, chunkqueue *cq) { chunk* const c = cq->first; off_t offset, toSend, file_end; ssize_t r; size_t mmap_offset, mmap_avail; char *data; force_assert(NULL != c); force_assert(FILE_CHUNK == c->type); force_assert(c->offset >= 0 && c->offset <= c->file.length); offset = c->file.start + c->offset; toSend = c->file.length - c->offset; file_end = c->file.start + c->file.length; /* offset to file end in this chunk */ if (0 == toSend) { chunkqueue_remove_finished_chunks(cq); return 0; } /*(simplified from network_write_no_mmap.c:network_open_file_chunk())*/ UNUSED(con); if (-1 == c->file.fd) { if (-1 == (c->file.fd = fdevent_open_cloexec(c->file.name->ptr, O_RDONLY, 0))) { log_error_write(srv, __FILE__, __LINE__, "ssb", "open failed:", strerror(errno), c->file.name); return -1; } } /* (re)mmap the buffer if range is not covered completely */ if (MAP_FAILED == c->file.mmap.start || offset < c->file.mmap.offset || file_end > (off_t)(c->file.mmap.offset + c->file.mmap.length)) { if (MAP_FAILED != c->file.mmap.start) { munmap(c->file.mmap.start, c->file.mmap.length); c->file.mmap.start = MAP_FAILED; } c->file.mmap.offset = mmap_align_offset(offset); c->file.mmap.length = file_end - c->file.mmap.offset; if (MAP_FAILED == (c->file.mmap.start = mmap(NULL, c->file.mmap.length, PROT_READ, MAP_PRIVATE, c->file.fd, c->file.mmap.offset))) { if (toSend > 65536) toSend = 65536; data = malloc(toSend); force_assert(data); if (-1 == lseek(c->file.fd, offset, SEEK_SET) || 0 >= (toSend = read(c->file.fd, data, toSend))) { if (-1 == toSend) { log_error_write(srv, __FILE__, __LINE__, "ssbdo", "lseek/read failed:", strerror(errno), c->file.name, c->file.fd, offset); } else { /*(0 == toSend)*/ log_error_write(srv, __FILE__, __LINE__, "sbdo", "unexpected EOF (input truncated?):", c->file.name, c->file.fd, offset); } free(data); return -1; } } } if (MAP_FAILED != c->file.mmap.start) { force_assert(offset >= c->file.mmap.offset); mmap_offset = offset - c->file.mmap.offset; force_assert(c->file.mmap.length > mmap_offset); mmap_avail = c->file.mmap.length - mmap_offset; force_assert(toSend <= (off_t) mmap_avail); data = c->file.mmap.start + mmap_offset; } r = write(fd, data, toSend); if (MAP_FAILED == c->file.mmap.start) free(data); if (r < 0) { switch (errno) { case EAGAIN: case EINTR: return 0; case EPIPE: case ECONNRESET: return -2; default: log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed:", strerror(errno), fd); return -1; } } if (r >= 0) { chunkqueue_mark_written(cq, r); } return r; }
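/*
 * Note (added): mmap_align_offset() is used above but not defined in this
 * collection.  mmap() requires the file offset to be a multiple of the page
 * size, so the helper is assumed to round the requested offset down to a page
 * boundary; a minimal self-contained sketch of that assumption:
 */
#include <sys/types.h>
#include <unistd.h>

static off_t mmap_align_offset(off_t start) {
	static off_t pagemask = 0;
	if (0 == pagemask) {
		long pagesize = sysconf(_SC_PAGESIZE);
		pagemask = ~((off_t)pagesize - 1); /* page size is a power of two */
	}
	return start & pagemask;
}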
/* send the chunkqueue to the client; called from connection_handle_write() */
int network_write_chunkqueue(server *srv, connection *con, chunkqueue *cq) {
	int ret = -1;
	off_t written = 0;
#ifdef TCP_CORK
	int corked = 0;
#endif
	server_socket *srv_socket = con->srv_socket;

	/* the configured maximum transfer rate has been reached */
	if (con->conf.global_kbytes_per_second &&
	    *(con->conf.global_bytes_per_second_cnt_ptr) > con->conf.global_kbytes_per_second * 1024) {
		con->traffic_limit_reached = 1;
		joblist_append(srv, con);

		return 1;
	}

	written = cq->bytes_out;

	/* set the TCP_CORK socket option */
#ifdef TCP_CORK
	/* Linux: put a cork into the socket as we want to combine the write() calls
	 * but only if we really have multiple chunks */
	if (cq->first && cq->first->next) {
		corked = 1;
		setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked));
	}
#endif

	/* call the write backend */
	if (srv_socket->is_ssl) {
#ifdef USE_OPENSSL
		ret = srv->network_ssl_backend_write(srv, con, con->ssl, cq);
#endif
	} else {
		/* the backend reports how many chunks of the cq chain were fully sent in this call */
		ret = srv->network_backend_write(srv, con, con->fd, cq);
	}

	/* remove the chunks of cq that were fully sent in this call, and set ret
	 * depending on whether all chunks have been sent */
	if (ret >= 0) {
		chunkqueue_remove_finished_chunks(cq);
		ret = chunkqueue_is_empty(cq) ? 0 : 1;
	}

	/* clear the TCP_CORK socket option again */
#ifdef TCP_CORK
	if (corked) {
		corked = 0;
		setsockopt(con->fd, IPPROTO_TCP, TCP_CORK, &corked, sizeof(corked));
	}
#endif

	written = cq->bytes_out - written;
	con->bytes_written += written;
	con->bytes_written_cur_second += written;

	*(con->conf.global_bytes_per_second_cnt_ptr) += written;

	if (con->conf.kbytes_per_second &&
	    (con->bytes_written_cur_second > con->conf.kbytes_per_second * 1024)) {
		/* we reached the traffic limit */
		con->traffic_limit_reached = 1;
		joblist_append(srv, con);
	}

	return ret;
}
int network_write_file_chunk_sendfile(server *srv, connection *con, int fd, chunkqueue *cq, off_t *p_max_bytes) {
	chunk* const c = cq->first;
	off_t offset, written = 0;
	off_t toSend;
	int r;

	force_assert(NULL != c);
	force_assert(FILE_CHUNK == c->type);
	force_assert(c->offset >= 0 && c->offset <= c->file.length);

	offset = c->file.start + c->offset;
	toSend = c->file.length - c->offset;
	if (toSend > *p_max_bytes) toSend = *p_max_bytes;

	if (0 == toSend) {
		chunkqueue_remove_finished_chunks(cq);
		return 0;
	}

	if (0 != network_open_file_chunk(srv, con, cq)) return -1;

	/* FreeBSD sendfile() */
	if (-1 == (r = sendfile(c->file.fd, fd, offset, toSend, NULL, &written, 0))) {
		switch(errno) {
		case EAGAIN:
		case EINTR:
			/* for EAGAIN/EINTR written still contains the sent bytes */
			break; /* try again later */
		case EPIPE:
		case ENOTCONN:
			return -2;
		case EINVAL:
		case ENOSYS:
#if defined(ENOTSUP) \
 && (!defined(EOPNOTSUPP) || EOPNOTSUPP != ENOTSUP)
		case ENOTSUP:
#endif
#ifdef EOPNOTSUPP
		case EOPNOTSUPP:
#endif
#ifdef ESOCKTNOSUPPORT
		case ESOCKTNOSUPPORT:
#endif
#ifdef EAFNOSUPPORT
		case EAFNOSUPPORT:
#endif
#ifdef USE_MMAP
			return network_write_file_chunk_mmap(srv, con, fd, cq, p_max_bytes);
#else
			return network_write_file_chunk_no_mmap(srv, con, fd, cq, p_max_bytes);
#endif
		default:
			log_error_write(srv, __FILE__, __LINE__, "ssd",
					"sendfile: ", strerror(errno), errno);
			return -1;
		}
	}

	if (written >= 0) {
		chunkqueue_mark_written(cq, written);
		*p_max_bytes -= written;
	}

	return (r >= 0 && written == toSend) ? 0 : -3;
}
int network_write_file_chunk_sendfile(server *srv, connection *con, int fd, chunkqueue *cq, off_t *p_max_bytes) {
    chunk* const c = cq->first;
    ssize_t r;
    off_t offset;
    off_t toSend;

    force_assert(NULL != c);
    force_assert(FILE_CHUNK == c->type);
    force_assert(c->offset >= 0 && c->offset <= c->file.length);

    offset = c->file.start + c->offset;
    toSend = c->file.length - c->offset;
    if (toSend > *p_max_bytes) toSend = *p_max_bytes;

    if (0 == toSend) {
        chunkqueue_remove_finished_chunks(cq);
        return 0;
    }

    if (0 != network_open_file_chunk(srv, con, cq)) return -1;

    if (-1 == (r = sendfile(fd, c->file.fd, &offset, toSend))) {
        switch (errno) {
        case EAGAIN:
        case EINTR:
            break;
        case EPIPE:
        case ECONNRESET:
            return -2;
        case EINVAL:
        case ENOSYS:
#if defined(ENOTSUP) \
 && (!defined(EOPNOTSUPP) || EOPNOTSUPP != ENOTSUP)
        case ENOTSUP:
#endif
#ifdef EOPNOTSUPP
        case EOPNOTSUPP:
#endif
#ifdef ESOCKTNOSUPPORT
        case ESOCKTNOSUPPORT:
#endif
#ifdef EAFNOSUPPORT
        case EAFNOSUPPORT:
#endif
#ifdef USE_MMAP
            return network_write_file_chunk_mmap(srv, con, fd, cq, p_max_bytes);
#else
            return network_write_file_chunk_no_mmap(srv, con, fd, cq, p_max_bytes);
#endif
        default:
            log_error_write(srv, __FILE__, __LINE__, "ssd", "sendfile failed:", strerror(errno), fd);
            return -1;
        }
    }

    if (r >= 0) {
        chunkqueue_mark_written(cq, r);
        *p_max_bytes -= r;
    }

    return (r > 0 && r == toSend) ? 0 : -3;
}
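/* The two sendfile-based functions above wrap the same syscall name with
 * different platform ABIs.  For orientation only (see the respective man
 * pages), the prototypes differ roughly as follows:
 *
 *   FreeBSD: int     sendfile(int fd, int s, off_t offset, size_t nbytes,
 *                             struct sf_hdtr *hdtr, off_t *sbytes, int flags);
 *   Linux:   ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count);
 *
 * This is why the FreeBSD variant reads the transferred byte count back
 * through the `written` out-parameter, while the Linux variant takes it from
 * the return value and lets the kernel advance `offset` in place. */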
static handler_t proxy_http_parse_chunked_stream(server *srv, proxy_session *sess, chunkqueue *in, chunkqueue *out) {
    protocol_state_data *data = (protocol_state_data *)sess->proxy_con->protocol_data;
    char *err = NULL;
    off_t we_have = 0, we_want = 0;
    off_t chunk_len = 0;
    off_t offset = 0;
    buffer *b;
    chunk *c;
    char ch = '\0';
    int finished = 0;

    UNUSED(srv);

    for (c = in->first; c && !finished;) {
        if (c->mem->used == 0) {
            c = c->next;
            continue;
        }

        switch (data->chunk_parse_state) {
        case HTTP_CHUNK_LEN:
            /* parse the chunk length */
            for (offset = c->offset; (size_t)(offset) < (c->mem->used - 1); offset++) {
                ch = c->mem->ptr[offset];
                if (!light_isxdigit(ch)) break;
            }
            if (offset > c->offset) {
                buffer_append_string_len(data->buf, (c->mem->ptr + c->offset), offset - c->offset);
                in->bytes_out += (offset - c->offset);
                c->offset = offset;
            }
            if (!(ch == ' ' || ch == '\r' || ch == ';')) {
                if (ch == '\0') {
                    /* get next chunk from queue */
                    break;
                }
                /* protocol error: bad http-chunk length */
                return HANDLER_ERROR;
            }
            data->chunk_len = strtol(BUF_STR(data->buf), &err, 16);
            data->chunk_offset = 0;
            buffer_reset(data->buf);

            data->chunk_parse_state = HTTP_CHUNK_EXTENSION;
            /* fall through */
        case HTTP_CHUNK_EXTENSION:
            /* find CRLF; discard the chunk-extension */
            for (ch = 0; (size_t)(c->offset) < (c->mem->used - 1) && ch != '\n';) {
                ch = c->mem->ptr[c->offset];
                c->offset++;
                in->bytes_out++;
            }
            if (ch != '\n') {
                /* get next chunk from queue */
                break;
            }
            if (data->chunk_len > 0) {
                data->chunk_parse_state = HTTP_CHUNK_DATA;
            } else {
                data->chunk_parse_state = HTTP_CHUNK_END;
            }
            /* fall through */
        case HTTP_CHUNK_DATA:
            chunk_len = data->chunk_len - data->chunk_offset;
            /* copy chunk_len bytes from the in queue to the out queue */
            we_have = c->mem->used - c->offset - 1;
            we_want = chunk_len > we_have ? we_have : chunk_len;

            if (c->offset == 0 && we_want == we_have) {
                /* we are copying the whole buffer, just steal it */
                chunkqueue_steal_chunk(out, c);
                /* c is an empty chunk now */
            } else {
                b = chunkqueue_get_append_buffer(out);
                buffer_copy_string_len(b, c->mem->ptr + c->offset, we_want);
                c->offset += we_want;
            }

            chunk_len -= we_want;
            out->bytes_in += we_want;
            in->bytes_out += we_want;
            data->chunk_offset += we_want;

            if (chunk_len > 0) {
                /* get next chunk from queue */
                break;
            }

            data->chunk_offset = 0;
            data->chunk_parse_state = HTTP_CHUNK_END;
            /* fall through */
        case HTTP_CHUNK_END:
            /* discard the trailing CRLF */
            for (ch = 0; c->mem->used > 0 && (size_t)(c->offset) < (c->mem->used - 1) && ch != '\n';) {
                ch = c->mem->ptr[c->offset];
                c->offset++;
                in->bytes_out++;
            }

            if (ch != '\n') {
                /* get next chunk from queue */
                break;
            }

            /* final chunk */
            if (data->chunk_len == 0) {
                finished = 1;
            }

            /* finished this http-chunk; reset and parse the next chunk */
            protocol_state_data_reset(data);
            break;
        }

        if ((size_t)(c->offset) == c->mem->used - 1) {
            c = c->next;
        }
    }

    chunkqueue_remove_finished_chunks(in);

    if (finished) {
        sess->is_request_finished = 1;
        return HANDLER_FINISHED;
    }

    /* ran out of data */
    return HANDLER_GO_ON;
}
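/* For reference, the parser above consumes standard HTTP/1.1 chunked framing
 * (RFC 7230, section 4.1).  The annotated literal below is only an example of
 * the kind of byte stream the state machine walks, mapping each piece to the
 * parse states used above: */
static const char example_chunked_body[] =
    "4\r\n"        /* HTTP_CHUNK_LEN: hex chunk size, terminated by CRLF      */
    "Wiki\r\n"     /* HTTP_CHUNK_DATA: 4 payload bytes; HTTP_CHUNK_END: CRLF  */
    "6;x=y\r\n"    /* a chunk size may carry a chunk-extension (";x=y"),
                    * which HTTP_CHUNK_EXTENSION discards                     */
    "pedia!\r\n"   /* 6 payload bytes, then the trailing CRLF                 */
    "0\r\n"        /* last-chunk: size 0 marks the end of the body            */
    "\r\n";        /* final CRLF (trailer section omitted here)               */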
/**
 * handle all header and content reads
 *
 * we get called by the state-engine and by the fdevent-handler
 */
static int connection_handle_read_state(server *srv, connection *con) {
    chunk *c, *last_chunk;
    off_t last_offset;
    chunkqueue *cq = con->read_queue;
    int is_closed = 0; /* if the connection got closed and we don't have a complete header -> error */
    /* when in CON_STATE_READ: about to receive the first byte of a request */
    int is_request_start = chunkqueue_is_empty(cq);

    if (con->is_readable) {
        con->read_idle_ts = srv->cur_ts;

        switch (connection_handle_read(srv, con)) {
        case -1: return -1;
        case -2: is_closed = 1; break;
        default: break;
        }
    }

    chunkqueue_remove_finished_chunks(cq);

    /* we might have got several packets at once */

    /* update the request_start timestamp when the first byte of
     * the next request is received on a keep-alive connection */
    if (con->request_count > 1 && is_request_start) {
        con->request_start = srv->cur_ts;
        if (con->conf.high_precision_timestamps)
            log_clock_gettime_realtime(&con->request_start_hp);
    }

    /* if there is a \r\n\r\n in the chunkqueue
     *
     * scan the chunk-queue twice:
     * 1. to find the \r\n\r\n
     * 2. to copy the header-packet
     */

    last_chunk = NULL;
    last_offset = 0;

    for (c = cq->first; c; c = c->next) {
        size_t i;
        size_t len = buffer_string_length(c->mem) - c->offset;
        const char *b = c->mem->ptr + c->offset;

        for (i = 0; i < len; ++i) {
            char ch = b[i];

            if ('\r' == ch) {
                /* check if \n\r\n follows */
                size_t j = i + 1;
                chunk *cc = c;
                const char header_end[] = "\r\n\r\n";
                int header_end_match_pos = 1;

                for (; cc; cc = cc->next, j = 0) {
                    size_t bblen = buffer_string_length(cc->mem) - cc->offset;
                    const char *bb = cc->mem->ptr + cc->offset;

                    for (; j < bblen; j++) {
                        ch = bb[j];

                        if (ch == header_end[header_end_match_pos]) {
                            header_end_match_pos++;
                            if (4 == header_end_match_pos) {
                                last_chunk = cc;
                                last_offset = j + 1;
                                goto found_header_end;
                            }
                        } else {
                            goto reset_search;
                        }
                    }
                }
            }
            reset_search: ;
        }
    }
    found_header_end:

    /* found */
    if (last_chunk) {
        buffer_reset(con->request.request);

        for (c = cq->first; c; c = c->next) {
            size_t len = buffer_string_length(c->mem) - c->offset;

            if (c == last_chunk) {
                len = last_offset;
            }

            buffer_append_string_len(con->request.request, c->mem->ptr + c->offset, len);
            c->offset += len;
            cq->bytes_out += len;

            if (c == last_chunk) break;
        }

        connection_set_state(srv, con, CON_STATE_REQUEST_END);
    } else if (is_closed) {
        /* the connection got closed and we did not get enough data to leave
         * CON_STATE_READ; the only way out is to leave here */
        connection_set_state(srv, con, CON_STATE_ERROR);
    }

    if ((last_chunk ? buffer_string_length(con->request.request) : (size_t)chunkqueue_length(cq))
        > srv->srvconf.max_request_field_size) {
        log_error_write(srv, __FILE__, __LINE__, "s", "oversized request-header -> sending Status 431");
        con->http_status = 431; /* Request Header Fields Too Large */
        con->keep_alive = 0;
        connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
    }

    chunkqueue_remove_finished_chunks(cq);

    return 0;
}
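/* The nested loops above search for the "\r\n\r\n" header terminator across
 * chunk boundaries.  As a simplified illustration only (not the project's
 * code), the same match on a single contiguous buffer reduces to tracking how
 * many characters of the terminator have been seen so far: */
#include <stddef.h>

/* returns the offset just past "\r\n\r\n", or -1 if it is not present */
static long example_find_header_end(const char *buf, size_t len) {
    static const char header_end[] = "\r\n\r\n";
    size_t match = 0;
    for (size_t i = 0; i < len; ++i) {
        if (buf[i] == header_end[match]) {
            if (++match == 4) return (long)(i + 1);
        } else {
            /* restart the match; a '\r' may itself begin a new terminator */
            match = (buf[i] == '\r') ? 1 : 0;
        }
    }
    return -1;
}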
int network_write_chunkqueue_openssl(server *srv, connection *con, SSL *ssl, chunkqueue *cq, off_t max_bytes) {
    /* the remote side closed the connection before, without a shutdown request
     * - IE
     * - wget
     * if keep-alive is disabled */

    if (con->keep_alive == 0) {
        SSL_set_shutdown(ssl, SSL_RECEIVED_SHUTDOWN);
    }

    chunkqueue_remove_finished_chunks(cq);

    while (max_bytes > 0 && NULL != cq->first) {
        const char *data;
        size_t data_len;
        int r;

        if (0 != load_next_chunk(srv, con, cq, max_bytes, &data, &data_len)) return -1;

        /**
         * SSL_write man-page
         *
         * WARNING
         *        When an SSL_write() operation has to be repeated because of
         *        SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE, it must be
         *        repeated with the same arguments.
         */

        ERR_clear_error();
        r = SSL_write(ssl, data, data_len);

        if (con->renegotiations > 1 && con->conf.ssl_disable_client_renegotiation) {
            log_error_write(srv, __FILE__, __LINE__, "s",
                            "SSL: renegotiation initiated by client, killing connection");
            return -1;
        }

        if (r <= 0) {
            int ssl_r;
            unsigned long err;

            switch ((ssl_r = SSL_get_error(ssl, r))) {
            case SSL_ERROR_WANT_WRITE:
                return 0; /* try again later */
            case SSL_ERROR_SYSCALL:
                /* perhaps we have an error waiting in our error-queue */
                if (0 != (err = ERR_get_error())) {
                    do {
                        log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
                                        ssl_r, r, ERR_error_string(err, NULL));
                    } while ((err = ERR_get_error()));
                } else if (r == -1) {
                    /* no, but we have errno */
                    switch (errno) {
                    case EPIPE:
                    case ECONNRESET:
                        return -2;
                    default:
                        log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL:",
                                        ssl_r, r, errno, strerror(errno));
                        break;
                    }
                } else {
                    /* neither error-queue nor errno ? */
                    log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL (error):",
                                    ssl_r, r, errno, strerror(errno));
                }
                break;
            case SSL_ERROR_ZERO_RETURN:
                /* clean shutdown on the remote side */
                if (r == 0) return -2;
                /* fall through */
            default:
                while ((err = ERR_get_error())) {
                    log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
                                    ssl_r, r, ERR_error_string(err, NULL));
                }
                break;
            }
            return -1;
        }

        chunkqueue_mark_written(cq, r);
        max_bytes -= r;

        if ((size_t) r < data_len) break; /* try again later */
    }

    return 0;
}
int network_write_file_chunk_no_mmap(server *srv, connection *con, int fd, chunkqueue *cq, off_t *p_max_bytes) {
    chunk* const c = cq->first;
    off_t offset, toSend;
    ssize_t r;

    force_assert(NULL != c);
    force_assert(FILE_CHUNK == c->type);
    force_assert(c->offset >= 0 && c->offset <= c->file.length);

    offset = c->file.start + c->offset;
    toSend = c->file.length - c->offset;
    if (toSend > 64*1024) toSend = 64*1024; /* max read 64kb in one step */
    if (toSend > *p_max_bytes) toSend = *p_max_bytes;

    if (0 == toSend) {
        chunkqueue_remove_finished_chunks(cq);
        return 0;
    }

    if (0 != network_open_file_chunk(srv, con, cq)) return -1;

    buffer_string_prepare_copy(srv->tmp_buf, toSend);

    if (-1 == lseek(c->file.fd, offset, SEEK_SET)) {
        log_error_write(srv, __FILE__, __LINE__, "ss", "lseek: ", strerror(errno));
        return -1;
    }
    if (-1 == (toSend = read(c->file.fd, srv->tmp_buf->ptr, toSend))) {
        log_error_write(srv, __FILE__, __LINE__, "ss", "read: ", strerror(errno));
        return -1;
    }

#if defined(__WIN32)
    if ((r = send(fd, srv->tmp_buf->ptr, toSend, 0)) < 0) {
        int lastError = WSAGetLastError();
        switch (lastError) {
        case WSAEINTR:
        case WSAEWOULDBLOCK:
            break;
        case WSAECONNRESET:
        case WSAETIMEDOUT:
        case WSAECONNABORTED:
            return -2;
        default:
            log_error_write(srv, __FILE__, __LINE__, "sdd", "send failed: ", lastError, fd);
            return -1;
        }
    }
#else /* __WIN32 */
    if ((r = write(fd, srv->tmp_buf->ptr, toSend)) < 0) {
        switch (errno) {
        case EAGAIN:
        case EINTR:
            break;
        case EPIPE:
        case ECONNRESET:
            return -2;
        default:
            log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed:", strerror(errno), fd);
            return -1;
        }
    }
#endif /* __WIN32 */

    if (r >= 0) {
        *p_max_bytes -= r;
        chunkqueue_mark_written(cq, r);
    }

    return (r > 0 && r == toSend) ? 0 : -3;
}