/* Lua binding: push the parser's last error as three results —
 * the numeric http_errno, its symbolic name, and its description. */
static int lhp_error(lua_State* L) {
    lhttp_parser* lparser = check_parser(L, 1);
    enum http_errno err = lparser->parser.http_errno;

    lua_pushnumber(L, err);
    lua_pushstring(L, http_errno_name(err));
    lua_pushstring(L, http_errno_description(err));
    return 3; /* number of Lua return values */
}
/*
 * Incrementally parse the HTTP request bytes accumulated in p_node->recv.
 *
 * Resumable across calls: p_stat->dosize tracks how many buffered bytes have
 * already been fed to the parser, p_stat->step counts invocations, and
 * p_stat->over is set (presumably by a parser callback in `settings` — not
 * visible here; confirm) once a full message has been seen.
 *
 * Returns TRUE when processing of the current request is finished (either a
 * complete message or a parse error), FALSE otherwise.
 */
int http_handle(struct data_node *p_node)
{
    http_parser *hp = &p_node->http_info.hp;
    struct http_status *p_stat = &p_node->http_info.hs;

    /* Fresh request (nothing consumed yet, or first call): reset the parser. */
    if ((p_stat->dosize == 0) || (p_stat->step == 0)){
        http_parser_init(hp, HTTP_REQUEST);
    }

    /* Bytes received but not yet handed to the parser. */
    int todo = (p_node->recv.get_size - p_stat->dosize);
    // printf("recv.get_size %d dosize %d\n", p_node->recv.get_size, p_stat->dosize);
    int done = http_parser_execute(hp, &settings, (p_node->recv.buf_addr + p_stat->dosize), todo);
    p_stat->step++;

    if (p_stat->over) {
        /* Message complete: rewind so the next request restarts the parser. */
        p_stat->dosize = 0;
    }else{
        /* Partial message: remember how far we got for the next call. */
        p_stat->dosize += done;
    }
    // x_printf("%d of %d\n", done, todo);

    if (hp->upgrade) {
        /* handle new protocol */
        //TODO
        // x_printf("upgrade!\n");
        return FALSE;
    } else {
        if (done == todo) {
            /* All pending bytes consumed; done iff the message completed. */
            return (p_stat->over);
        }else{
            /* it's error request, change to over and should break the socket */
            /* Handle error. Usually just close the connection. */
            p_stat->err = HTTP_PARSER_ERRNO(hp);
            if (HPE_OK != p_stat->err) {
                /* NOTE(review): the %s tail prints the unconsumed buffer,
                 * which is not guaranteed to be NUL-terminated — verify. */
                fprintf(stderr, "\n*** server expected %s, but saw %s ***\n%s\n", http_errno_name(HPE_OK), http_errno_name(p_stat->err), (p_node->recv.buf_addr + p_stat->dosize));
            }
            p_stat->dosize = 0;
            return TRUE;
        }
    }
}
/*
 * Peek at the next HTTP event on the connection without consuming it.
 *
 * Blocks (via async_read) until the parser produces an event, then reports
 * the event type in *type and its payload buffer in *buf. Returns 0 on
 * success, UV_EINVAL on bad arguments, UV_EOF at end of stream, a negative
 * libuv error from the read, or UV_UNKNOWN on an HTTP parse error.
 * Previous parse errors are sticky and reported again on each call.
 */
int HTTPConnectionPeek(HTTPConnectionRef const conn, HTTPEvent *const type, uv_buf_t *const buf) {
	if(!conn) return UV_EINVAL;
	if(!type) return UV_EINVAL;
	if(!buf) return UV_EINVAL;
	size_t len;
	int rc;

	if(HTTPStreamEOF & conn->flags) return UV_EOF;

	// Repeat previous errors.
	rc = HTTP_PARSER_ERRNO(conn->parser);
	if(HPE_OK != rc && HPE_PAUSED != rc) return UV_UNKNOWN;

	/* Pump the parser until a callback records an event in conn->type. */
	while(HTTPNothing == conn->type) {
		if(!conn->raw->len) {
			// It might seem counterintuitive to free the buffer
			// just before we could reuse it, but the one time we
			// don't need it is while blocking. We could free it
			// after a timeout to give us a chance to reuse it,
			// but even the two second timeout Apache uses causes
			// a lot of problems...
			FREE(&conn->buf);
			*conn->raw = uv_buf_init(NULL, 0);
			*conn->out = uv_buf_init(NULL, 0);
			rc = async_read((uv_stream_t *)conn->stream, conn->raw);
			if(UV_EOF == rc) conn->flags |= HTTPStreamEOF;
			if(rc < 0) return rc;
			conn->buf = conn->raw->base;
		}
		/* Clear any pause left by a callback, then resume parsing. */
		http_parser_pause(conn->parser, 0);
		len = http_parser_execute(conn->parser, &settings, conn->raw->base, conn->raw->len);
		rc = HTTP_PARSER_ERRNO(conn->parser);
		// HACK: http_parser returns 1 when the input length is 0 (EOF).
		if(len > conn->raw->len) len = conn->raw->len;
		/* Advance past the consumed bytes. */
		conn->raw->base += len;
		conn->raw->len -= len;
		if(HPE_OK != rc && HPE_PAUSED != rc) {
			// TODO: We should convert HPE_* and return them
			// instead of logging and returning UV_UNKNOWN.
			fprintf(stderr, "HTTP parse error %s (%d)\n",
				http_errno_name(rc),
				HTTP_PARSER_ERRNO_LINE(conn->parser));
			// fprintf(stderr, "%s (%lu)\n", strndup(conn->raw->base, conn->raw->len), conn->raw->len);
			return UV_UNKNOWN;
		}
	}
	assertf(HTTPNothing != conn->type, "HTTPConnectionPeek must return an event");
	*type = conn->type;
	*buf = *conn->out;
	return 0;
}
/* Log the parser's current error, if any; silent when the parser is clean. */
void log_http_errors(http_parser *parser)
{
    enum http_errno err = HTTP_PARSER_ERRNO(parser);

    if (err == HPE_OK) {
        return; /* no errors */
    }
    log(log_error, "http_parser error code = %s", http_errno_name(err));
}
/* Lua binding: feed the string argument (arg 2) into the http_parser
 * userdata (arg 1). Raises a Lua error on a short parse; returns nothing. */
static int mooa_http_parser_execute(lua_State *L)
{
    luaL_checkudata(L, 1, "mooa_http_parser");
    luaL_checkstring(L, 2);

    http_parser *parser = lua_touserdata(L, 1);
    size_t len = 0;
    const char *chunk = lua_tolstring(L, 2, &len);

    /* Callbacks retrieve the Lua state through parser->data. */
    parser->data = L;

    size_t consumed = http_parser_execute(parser, &mooa_http_parser_settings, chunk, len);
    if (consumed == len)
        return 0;

    /* Short parse => protocol error; surface it as a Lua error. */
    enum http_errno err = HTTP_PARSER_ERRNO(parser);
    return luaL_error(L, "HTTP parsing failed (error %d): %s", err, http_errno_name(err));
}
static int parser_parse_request(struct http_server_ctx *ctx, struct net_buf *rx) { int rc; ctx->field_values_ctr = 0; rc = http_parser_execute(&ctx->parser, &ctx->parser_settings, rx->data, rx->len); if (rc < 0) { printf("[%s:%d] http_parser_execute: %s\n\t%s\n", __func__, __LINE__, http_errno_name(ctx->parser.http_errno), http_errno_description(ctx->parser.http_errno)); rc = -EINVAL; goto exit_routine; } exit_routine: return rc; }
/*
 * Parse one HTTP request buffer and populate the global `http` record.
 *
 * Relies on file-scope state not visible here: `start`, `end`, `t` (timing
 * instrumentation), `http`, `content_length`, `con_len`, and the on_*
 * callbacks — TODO confirm their definitions against the rest of the file.
 *
 * Returns FALSE when the body was copied into http.content (message still
 * incomplete per the length check below), TRUE otherwise.
 */
int processhttp(char* data, int http_length)
{
    /* Timing instrumentation: first-call timestamp plus a call counter. */
    if(!start) start = time(NULL);
    printf("t=%d\n",t++);
    _init_c_info();

    http_parser_settings settings;
    size_t nparsed;
    memset(&settings, 0, sizeof(settings));
    settings.on_url = on_url;
    settings.on_header_field = on_header_field;
    settings.on_header_value = on_header_value;
    settings.on_body = on_body;

    /* Fresh parser per call — no state carries over between buffers. */
    http_parser parser;
    http_parser_init(&parser, HTTP_REQUEST);
    nparsed = http_parser_execute(&parser, &settings, data, (size_t)http_length);
    http.method = parser.method;

    /* NOTE(review): difftime()/time() have one-second resolution, so the
     * "%fms" label is misleading — this prints seconds, not ms. */
    end = time(NULL);
    printf("%fms\n", difftime(end, start)/t);
    //test
    _print_c_info();

    if (nparsed != (size_t)http_length) {
        printf( "Error: %s (%s)\n",
            http_errno_description(HTTP_PARSER_ERRNO(&parser)),
            http_errno_name(HTTP_PARSER_ERRNO(&parser)));
    }

    /* Presumably method 3 == HTTP_POST and this buffers an incomplete body —
     * TODO confirm; the memcpy does not NUL-terminate http.content. */
    if(content_length != con_len && http.method == 3 && http_length < 4096) {
        memcpy(http.content, data, http_length);
        return FALSE;
    }
    return TRUE;
}
void *hpcd_server_handle_connection ( void *arg ) { char buffer[80 * 1024]; int n, newsockfd = * ( ( int * ) arg ); free ( arg ); /** Set time limit on execution of thread **/ clock_t begin, end; double time_spent = 0; begin = clock(); http_parser_settings settings; hpcd_server_http_request *request_container = (hpcd_server_http_request *) malloc ( sizeof ( hpcd_server_http_request ) ); request_container->complete = 0; memset ( &settings, 0, sizeof ( settings ) ); settings.on_url = hpcd_server_handle_on_url; settings.on_message_complete = hpcd_server_handle_on_message_complete; settings.on_headers_complete = hpcd_server_handle_on_headers_complete; settings.on_header_field = hpcd_server_handle_on_header_field; settings.on_header_value = hpcd_server_handle_on_header_value; /* Clear the buffer */ bzero ( buffer, 80 * 1024 ); http_parser *parser = malloc ( sizeof ( http_parser ) ); http_parser_init ( parser, HTTP_REQUEST ); request_container->sock_fd = &newsockfd; parser->data = request_container; while(!request_container->complete) { /* Reading from buffer */ //printf ( "Reading from buffer: %d\n ", request_container->complete ); n = recv ( newsockfd, buffer, 80 * 1024, 0 ); if ( n < 0 ) { printf ( "ERROR reading from socket %d", n ); exit ( 1 ); } //printf("captured n %d\n", n); size_t nparsed = http_parser_execute ( parser, &settings, buffer, n ); if ( nparsed != ( size_t ) n ) { fprintf ( stderr, "Error: %s (%s)\n", http_errno_description ( HTTP_PARSER_ERRNO ( parser ) ), http_errno_name ( HTTP_PARSER_ERRNO ( parser ) ) ); } bzero ( buffer, n ); /** Thread execution time **/ end = clock(); if (((double)(end - begin) / CLOCKS_PER_SEC) > 60) { printf("Request timed out\n"); close(*request_container->sock_fd); break; } } printf("Loop Closed\n"); return NULL; }
int main(int argc, char* argv[]) { enum http_parser_type file_type; if (argc != 3) { usage(argv[0]); } char* type = argv[1]; if (type[0] != '-') { usage(argv[0]); } switch (type[1]) { /* in the case of "-", type[1] will be NUL */ case 'r': file_type = HTTP_RESPONSE; break; case 'q': file_type = HTTP_REQUEST; break; case 'b': file_type = HTTP_BOTH; break; default: usage(argv[0]); } char* filename = argv[2]; FILE* file = fopen(filename, "r"); if (file == NULL) { perror("fopen"); goto fail; } fseek(file, 0, SEEK_END); long file_length = ftell(file); if (file_length == -1) { perror("ftell"); goto fail; } fseek(file, 0, SEEK_SET); char* data = malloc(file_length); if (fread(data, 1, file_length, file) != (size_t)file_length) { fprintf(stderr, "couldn't read entire file\n"); free(data); goto fail; } http_parser_settings settings; memset(&settings, 0, sizeof(settings)); settings.on_message_begin = on_message_begin; settings.on_url = on_url; settings.on_header_field = on_header_field; settings.on_header_value = on_header_value; settings.on_headers_complete = on_headers_complete; settings.on_body = on_body; settings.on_message_complete = on_message_complete; http_parser parser; http_parser_init(&parser, file_type); size_t nparsed = http_parser_execute(&parser, &settings, data, file_length); free(data); if (nparsed != (size_t)file_length) { fprintf(stderr, "Error: %s (%s)\n", http_errno_description(HTTP_PARSER_ERRNO(&parser)), http_errno_name(HTTP_PARSER_ERRNO(&parser))); goto fail; } return EXIT_SUCCESS; fail: fclose(file); return EXIT_FAILURE; }
/* Symbolic name (e.g. "HPE_OK") of the tokenizer's last parser error. */
const char *http_tokenizer_error_name(http_tokenizer* tokenizer)
{
    enum http_errno err = tokenizer->parser.http_errno;
    return http_errno_name(err);
}
/*
 * The passive http extraction code works by alternately parsing the
 * passively reconstructed request and response streams. The same callback
 * (below) is used to drive the parsing of each stream. Parsing begins with
 * the request stream, and once a complete request has been parsed, the
 * parser and read watcher for the request stream are paused and the parser
 * and read watcher for the response stream are activated. Once an entire
 * response is parsed, the parser and read watcher for the response stream
 * are paused, and the parser and read watcher for the request stream are
 * activated. Along the way, response bodies that match the supplied list
 * of content types are extracted to files.
 *
 * This is example code whose purpose is to demonstrate upper layer protocol
 * processing using libuinet passive sockets functionality. Little to no
 * attempt is made to deal with a number of ugly realities involved in
 * robustly parsing http streams in the wild.
 */
static void passive_extract_cb(struct ev_loop *loop, ev_uinet *w, int revents)
{
	struct connection_context *conn = (struct connection_context *)w->data;
	struct uinet_iovec iov;
	struct uinet_uio uio;
	int max_read;
	int read_size;
	int bytes_read;
	int error;
	int flags;
	size_t nparsed; /* NOTE(review): declared but unused in this function */

	max_read = uinet_soreadable(w->so, 0);
	if (max_read <= 0) {
		/* the watcher should never be invoked if there is no error and there no bytes to be read */
		assert(max_read != 0);
		/*
		 * There are no more complete requests/responses to be had, shut everything down.
		 */
		if (conn->verbose)
			printf("%s: can't read, closing\n", conn->label);
		goto err;
	} else {
		/* Read as much as fits in the remaining buffer space. */
		read_size = imin(max_read, conn->buffer_size - conn->buffer_index);

		uio.uio_iov = &iov;
		iov.iov_base = &conn->buffer[conn->buffer_index];
		iov.iov_len = read_size;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_resid = read_size;
		flags = UINET_MSG_HOLE_BREAK;
		error = uinet_soreceive(w->so, NULL, &uio, &flags);
		if (0 != error) {
			printf("%s: read error (%d), closing\n", conn->label, error);
			goto err;
		}
		/* A hole means the passive reassembly lost bytes — give up. */
		if (flags & UINET_MSG_HOLE_BREAK) {
			printf("%s: hole in data, closing connections\n", conn->label);
			goto err;
		}

		bytes_read = read_size - uio.uio_resid;
		conn->buffer_count += bytes_read;
		conn->bytes_read += bytes_read;

		/* Drain the buffer, alternating between the two stream parsers. */
		do {
			passive_extract_parse_buffer(conn);
			if (HTTP_PARSER_ERRNO(conn->parser) != HPE_OK) {
				if (HTTP_PARSER_ERRNO(conn->parser) == HPE_PAUSED) {
					/* HPE_PAUSED signals a complete message,
					 * so hand control to the peer stream. */
					if (conn->verbose > 1)
						printf("%s: completed parsing request or response\n", conn->label);
					http_parser_pause(conn->peer->parser, 0);
					passive_extract_parse_buffer(conn->peer);
					if (HTTP_PARSER_ERRNO(conn->peer->parser) == HPE_OK) {
						if (conn->verbose > 1)
							printf("%s: peer needs more data\n", conn->label);
						/* Peer parser needs more data */
						ev_uinet_stop(conn->server->loop, &conn->watcher);
						ev_uinet_start(conn->server->loop, &conn->peer->watcher);
						break;
					} else if (HTTP_PARSER_ERRNO(conn->peer->parser) != HPE_PAUSED) {
						printf("Peer parse failure %s, closing connections\n",
						       http_errno_name(HTTP_PARSER_ERRNO(conn->peer->parser)));
						goto err;
					} else {
						if (conn->verbose > 1)
							printf("%s: peer completed parsing request or response\n", conn->label);
						/*
						 * The other parser has paused, so it's time for us to continue
						 * parsing/receiving.
						 */
						http_parser_pause(conn->parser, 0);
					}
				} else {
					printf("Parse failure %s, closing connections\n",
					       http_errno_name(HTTP_PARSER_ERRNO(conn->parser)));
					goto err;
				}
			}
		} while (conn->buffer_count);
	}

	return;

err:
	/*
	 * Deliver EOS to each parser. If a parser is paused or otherwise
	 * in an error state, no work will be done. The main reason for
	 * doing this is to correctly handle the case where response parsing
	 * requires an EOS to complete. Under such circumstances, one of
	 * the calls below will complete the work.
	 */
	http_parser_execute(conn->parser, conn->parser_settings, NULL, 0);
	http_parser_execute(conn->peer->parser, conn->peer->parser_settings, NULL, 0);
	destroy_conn(conn->peer);
	destroy_conn(conn);
}
/*
 * libuv read callback: dispatch incoming bytes either to the HTTP parser
 * (websocket handshake path) or to the plain message accumulator.
 *
 * Ownership: this callback owns buf.base and must free it on EVERY path.
 * Fixes over the previous version: buf.base was leaked on the
 * parser->upgrade early return and on the HTTP-parse-error early return;
 * the redundant `if (buf.base)` guard before free() is also gone
 * (free(NULL) is a no-op).
 */
static void on_read(uv_stream_t* stream, ssize_t nread, uv_buf_t buf)
{
    printf("on_read: <<%.*s\n>>", (int) nread, buf.base);

    if (nread < 0) {
        /* Read error / EOF: drop the buffer and shut the stream down. */
        free(buf.base);
        printf("uv_shutdown\n");
        uv_shutdown_t* req = (uv_shutdown_t*) malloc(sizeof *req);
        uv_shutdown(req, stream, after_shutdown);
        return;
    }

    if (nread == 0) {
        /* Everything OK, but nothing read. */
        free(buf.base);
        return;
    }

    if (stream->data == NULL)
        internal_error("stream->data is null in on_read");

    Connection* connection = (Connection*) stream->data;

    if (connection->server != NULL && connection->server->serverType == WEBSOCK) {
        http_parser* parser = &connection->parser;
        printf("parsing as http\n");
        int parsed = http_parser_execute(parser, &connection->server->parser_settings,
                                         buf.base, nread);

        if (parser->upgrade) {
            /* Protocol switch: subsequent traffic is raw websocket frames. */
            connection->state = WEBSOCK_DUPLEX_STATE;
            free(buf.base); /* BUG FIX: was leaked on this path */
            return;
        }

        if (HTTP_PARSER_ERRNO(parser) != HPE_OK) {
            printf("http parse error: [%s] %s\n",
                http_errno_name(HTTP_PARSER_ERRNO(parser)),
                http_errno_description(HTTP_PARSER_ERRNO(parser))
                );
            // handle parse error
            free(buf.base); /* BUG FIX: was leaked on this path */
            return;
        }

        if (parsed < nread) {
            printf("TODO: Handle second message?\n");
        }
    } else {
        /* Non-websocket server: buffer the bytes and try to parse messages. */
        circa_string_append_len(&connection->incomingStr, buf.base, nread);
        try_parse(&connection->incomingStr, &connection->incomingMsgs);
    }

    free(buf.base);
}
/* Symbolic name of the request parser's last error; request must be non-NULL. */
const char * http_request_get_error_name(http_request_t *request)
{
    assert(request);

    enum http_errno err = HTTP_PARSER_ERRNO(&request->parser);
    return http_errno_name(err);
}
/* Symbolic name (e.g. "HPE_OK") of the wrapped parser's last error. */
const char *hm_parser_error_name(HMParser *hm_parser)
{
    enum http_errno err = hm_parser->parser.http_errno;
    return http_errno_name(err);
}