// Content phase handler for the parallel module.
// Splits a GET request into several concurrent ranged upstream subrequests
// ("fibers"): decides the fiber count and per-fiber chunk size (optionally
// seeded by a cached response length), allocates the per-request context,
// builds the subrequest uri and starts the fibers.  Returns NGX_AGAIN to
// keep the request pending while the fibers run.
static ngx_int_t
ngx_http_parallel_handler(ngx_http_request_t *r)
{
    ngx_http_parallel_loc_conf_t *conf;
    ngx_http_parallel_ctx_t* ctx;
    ngx_http_range_t range = { 0, 0, 0 };
    ngx_uint_t header_in_count;
    ngx_uint_t fiber_count;
    ngx_uint_t i;
    ngx_flag_t key_inited = 0;
    ngx_int_t rc;
    u_char key[NGX_FIXED_BUFFER_CACHE_KEY_SIZE];
    size_t initial_chunk_size;
    u_char* p;
    off_t cached_response_length = -1;      // length found in cache, -1 = none
    off_t expected_response_length = -1;    // best known length, -1 = unknown

    // validate method - only GET / HEAD are supported
    if (!(r->method & (NGX_HTTP_GET | NGX_HTTP_HEAD)))
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_handler: "
            "unsupported method %ui", r->method);
        return NGX_HTTP_NOT_ALLOWED;
    }

    // discard request body, since we don't need it here
    rc = ngx_http_discard_request_body(r);
    if (rc != NGX_OK)
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_handler: "
            "ngx_http_discard_request_body failed %i", rc);
        return rc;
    }

    // get fiber count and initial chunk size
    conf = ngx_http_get_module_loc_conf(r, ngx_http_parallel_module);
    fiber_count = conf->fiber_count;
    initial_chunk_size = conf->min_chunk_size;

    if (r->method == NGX_HTTP_HEAD)
    {
        // no body to split - one upstream request is enough
        fiber_count = 1;
    }
    else if (r->headers_in.range != NULL)
    {
        rc = ngx_http_parallel_range_parse(r, &range);
        if (rc != NGX_OK)
        {
            // unsupported/invalid range header - fall back to a single
            // pass-through subrequest and let the upstream handle it
            ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "ngx_http_parallel_handler: "
                "ngx_http_parallel_range_parse failed \"%V\", "
                "proxying the request as is", &r->headers_in.range->value);
            fiber_count = 1;
        }
        else
        {
            range.is_range_request = 1;
            if (range.end != 0)
            {
                // bounded range - the response length is known up front
                expected_response_length = range.end - range.start;
            }
        }
    }

    if (expected_response_length < 0 && fiber_count != 1 &&
        conf->content_length_cache_zone != NULL)
    {
        // try the content length cache; the fetch result is not checked -
        // on a miss cached_response_length presumably stays -1 (TODO confirm
        // ngx_fixed_buffer_cache_fetch leaves the buffer untouched on miss)
        key_inited = 1;
        ngx_http_parallel_calculate_key(key, r);
        ngx_fixed_buffer_cache_fetch(
            conf->content_length_cache_zone,
            key,
            (u_char*)&cached_response_length);
        expected_response_length = cached_response_length;
    }

    if (expected_response_length >= 0)
    {
        // optimize the initial chunk size according to the response length
        // (clamp length / fiber_count into [min_chunk_size, max_chunk_size])
        if (expected_response_length <=
            (off_t)(fiber_count * conf->min_chunk_size))
        {
            initial_chunk_size = conf->min_chunk_size;
        }
        else if (expected_response_length >=
            (off_t)(fiber_count * conf->max_chunk_size))
        {
            initial_chunk_size = conf->max_chunk_size;
        }
        else
        {
            initial_chunk_size = DIV_CEIL(expected_response_length, fiber_count);
        }

        // optimize the fiber count according to the response length
        if (expected_response_length == 0)
        {
            fiber_count = 1;
        }
        else if (expected_response_length <
            (off_t)(fiber_count * initial_chunk_size))
        {
            // fewer fibers suffice to cover the whole response
            fiber_count = DIV_CEIL(expected_response_length, initial_chunk_size);
        }
    }

    // allocate context
    // (the fiber_count - 1 extra entries suggest ctx ends with a one-element
    //  fibers array used as a variable-length tail - TODO confirm struct)
    ctx = ngx_pcalloc(r->pool, sizeof(*ctx) +
        sizeof(ngx_http_parallel_fiber_ctx_t) * (fiber_count - 1));
    if (ctx == NULL)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_handler: ngx_pcalloc failed");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    ctx->error_code = NGX_AGAIN;
    ctx->initial_chunk_size = initial_chunk_size;
    ctx->chunk_size = initial_chunk_size;
    ctx->range = range;
    ctx->fiber_count = fiber_count;
    ctx->initial_requested_size = fiber_count * initial_chunk_size;
    ctx->cached_response_length = cached_response_length;
    if (key_inited)
    {
        // keep the cache key so the real length can be stored later
        ngx_memcpy(ctx->key, key, sizeof(ctx->key));
        ctx->key_inited = 1;
    }

    ngx_http_set_ctx(r, ctx, ngx_http_parallel_module);

    // count the number of input headers
    header_in_count = ngx_http_parallel_list_get_count(&r->headers_in.headers);

    ctx->original_headers_in = r->headers_in;

    // build the subrequest uri: configured prefix + original uri, null
    // terminated (the terminator is not counted in sr_uri.len)
    ctx->sr_uri.data = ngx_pnalloc(r->pool,
        conf->uri_prefix.len + r->uri.len + 1);
    if (ctx->sr_uri.data == NULL)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_handler: ngx_pnalloc failed");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    p = ngx_copy(ctx->sr_uri.data, conf->uri_prefix.data, conf->uri_prefix.len);
    p = ngx_copy(p, r->uri.data, r->uri.len);
    *p = '\0';
    ctx->sr_uri.len = p - ctx->sr_uri.data;

    // init and start the fibers
    for (i = 0; i < fiber_count; i++)
    {
        rc = ngx_http_parallel_init_fiber(
            r, header_in_count, fiber_count == 1, &ctx->fibers[i]);
        if (rc != NGX_OK)
        {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        rc = ngx_http_parallel_start_fiber(r, &ctx->fibers[i], i);
        if (rc != NGX_OK)
        {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    // the first fiber_count chunks were just requested
    ctx->next_request_chunk = fiber_count;

    return NGX_AGAIN;
}
// Called once the first upstream (ranged) response headers are available.
// Derives the full instance length from the Content-Range header, refreshes
// the content-length cache, computes the final chunk size/count for the rest
// of the transfer, allocates the chunks array, saves ETag / Last-Modified
// for cross-chunk consistency checks, fixes up the client response headers
// (206 with an adjusted Content-Range for range requests, plain 200
// otherwise) and sends them.
static ngx_int_t
ngx_http_parallel_init_chunks(
    ngx_http_parallel_ctx_t *ctx,
    ngx_http_request_t *r,
    ngx_http_headers_out_t* headers_out)
{
    ngx_http_parallel_loc_conf_t *conf;
    ngx_table_elt_t *h;
    ngx_str_t* content_range;
    uint64_t missing_chunks_mask;
    off_t instance_length;
    off_t remaining_length;
    off_t content_length;
    ngx_int_t rc;

    // find the instance length (total size of the upstream entity)
    content_range = ngx_http_parallel_header_get_value(
        &headers_out->headers, &content_range_name, content_range_hash);
    if (content_range == NULL)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: failed to get content-range header");
        return NGX_HTTP_BAD_GATEWAY;
    }

    instance_length = ngx_http_parallel_get_instance_length(
        content_range, r->connection->log);
    if (instance_length < 0)
    {
        // already logged by the parser
        return NGX_HTTP_BAD_GATEWAY;
    }

    conf = ngx_http_get_module_loc_conf(r, ngx_http_parallel_module);

    // update cache when the real length differs from the cached value
    if (conf->content_length_cache_zone != NULL &&
        instance_length != ctx->cached_response_length)
    {
        if (!ctx->key_inited)
        {
            ngx_http_parallel_calculate_key(ctx->key, r);
        }

        ngx_fixed_buffer_cache_store(
            conf->content_length_cache_zone,
            ctx->key,
            (u_char*)&instance_length,
            1);
    }

    // find the content length (bytes this response will actually carry),
    // clipping the requested range against the instance length
    content_length = instance_length;
    if (ctx->range.end != 0 && ctx->range.end < content_length)
    {
        content_length = ctx->range.end;
    }

    if (content_length < ctx->range.start)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: "
            "unexpected, content length %O less than range start %O",
            content_length, ctx->range.start);
        return NGX_HTTP_BAD_GATEWAY;
    }

    content_length -= ctx->range.start;

    // find the chunk size and count
    if (content_length <= (off_t)ctx->initial_requested_size)
    {
        // the initially requested chunks already cover the whole response
        ctx->chunk_count = DIV_CEIL(content_length, ctx->chunk_size);
        ctx->last_chunk_size = content_length +
            ctx->chunk_size - ctx->chunk_count * ctx->chunk_size;
    }
    else
    {
        // size the remaining chunks: clamp remaining / fiber_count into
        // [min_chunk_size, max_chunk_size], same scheme as the handler
        remaining_length = content_length - ctx->initial_requested_size;
        if (remaining_length <= (off_t)(ctx->fiber_count * conf->min_chunk_size))
        {
            ctx->chunk_size = conf->min_chunk_size;
        }
        else if (remaining_length >= (off_t)(ctx->fiber_count * conf->max_chunk_size))
        {
            ctx->chunk_size = conf->max_chunk_size;
        }
        else
        {
            ctx->chunk_size = DIV_CEIL(remaining_length, ctx->fiber_count);
        }
        ctx->chunk_count = DIV_CEIL(remaining_length, ctx->chunk_size);
        ctx->last_chunk_size = remaining_length +
            ctx->chunk_size - ctx->chunk_count * ctx->chunk_size;
        // count the initially requested chunks as well
        ctx->chunk_count += ctx->fiber_count;
    }

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
        "ngx_http_parallel_init_chunks: "
        "chunk count %uL chunk size %uz last chunk size %uz",
        ctx->chunk_count, ctx->chunk_size, ctx->last_chunk_size);

    // check for missing chunks - fail if a bit is set for a chunk index
    // that is within the now-known chunk count
    missing_chunks_mask = ULLONG_MAX;
    if (ctx->chunk_count < 64)
    {
        // mask covering only the valid chunk indexes
        missing_chunks_mask = ((1ULL << ctx->chunk_count) - 1);
    }

    if ((ctx->missing_chunks & missing_chunks_mask) != 0)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: "
            "missing chunks 0x%uxL chunk count %uL",
            ctx->missing_chunks, ctx->chunk_count);
        return NGX_HTTP_BAD_GATEWAY;
    }

    // initialize the chunks array (null terminated)
    ctx->chunks = ngx_pcalloc(r->pool,
        sizeof(ctx->chunks[0]) * (ctx->chunk_count + 1));
    if (ctx->chunks == NULL)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: ngx_pcalloc failed");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    // save headers for consistency check against later chunk responses
    if (conf->consistency_check_etag)
    {
        h = headers_out->etag;
        if (h != NULL)
        {
            ctx->etag.data = ngx_pstrdup(r->pool, &h->value);
            if (ctx->etag.data == NULL)
            {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "ngx_http_parallel_init_chunks: ngx_pstrdup failed (1)");
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }
            ctx->etag.len = h->value.len;
        }
    }

    if (conf->consistency_check_last_modified)
    {
        h = headers_out->last_modified;
        if (h != NULL)
        {
            ctx->last_modified.data = ngx_pstrdup(r->pool, &h->value);
            if (ctx->last_modified.data == NULL)
            {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "ngx_http_parallel_init_chunks: ngx_pstrdup failed (2)");
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }
            ctx->last_modified.len = h->value.len;
        }
    }

    // build the response headers, starting from the upstream's headers
    r->headers_out = *headers_out;

    if (ctx->range.is_range_request)
    {
        // leave the status as 206 and update the content range
        content_range->data = ngx_pnalloc(
            r->pool, sizeof(CONTENT_RANGE_FORMAT) + 3 * NGX_OFF_T_LEN);
        if (content_range->data == NULL)
        {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "ngx_http_parallel_init_chunks: ngx_pnalloc failed");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        content_range->len = ngx_sprintf(content_range->data,
            CONTENT_RANGE_FORMAT,
            ctx->range.start,
            ctx->range.start + content_length - 1,
            instance_length) - content_range->data;
    }
    else
    {
        // change status to 200 and clear the content range
        r->headers_out.status = NGX_HTTP_OK;
        r->headers_out.status_line.len = 0;

        ngx_http_parallel_header_clear_value(
            &r->headers_out.headers, &content_range_name, content_range_hash);
    }

    ngx_http_clear_content_length(r);
    r->headers_out.content_length_n = content_length;

    // send the response headers
    rc = ngx_http_send_header(r);
    if (rc == NGX_ERROR || rc > NGX_OK)
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: ngx_http_send_header failed %i",
            rc);
        return rc;
    }

    return NGX_OK;
}
/* TODO */
/*
 * Runs one regex test file through the whole pipeline:
 * regex lexer -> NFA parser -> subset construction (nfa2dfa) ->
 * character equivalence classes (dfa_c2s) -> DFA minimization,
 * then matches the file's test strings against the resulting DFA.
 *
 * File format (as consumed below):
 *   line 1:           the regex
 *   a '%' line:       separator (only whitespace may follow the '%')
 *   following lines:  strings expected to be ACCEPTED, until the next '%'
 *   another '%' line: separator
 *   remaining lines:  strings expected to be REJECTED, until EOF
 *
 * Returns 0 when every expectation holds, 1 on any mismatch or on a
 * lex/parse/convert/read error; failures are reported through p / stde().
 * NOTE: the big work buffers are function-local statics, so this function
 * is not reentrant and not thread safe.
 */
u32 run_testfile(struct Lexer *lexer, struct Printer *p)
{
    u32 err = 0;
    u32 lineno = 1;
    /* TODO Arena */
    /* large buffers are static to keep them off the stack */
    static u32 c2s[256];    /* char -> equivalence class map */
    static u32 eqc_bitmat[DIV_CEIL(8 * ARRAY_COUNT(c2s), BITSOF(u64))];
    struct regex_eqc eqc;
    static struct regex_nfaparser_stackpost nfaparser_stack[4096];
    static struct NFA_state nfa_states[4*4096];
    struct NFA nfa;
    u32 nfa_fin_state;
    struct regex_nfaparser parser;
    u32 regex_lex_state = REGEX_DFA_START_STATE;
    static u32 dfa_acc[POW2(20)];
    static u32 dfa_mat[ARRAY_COUNT(dfa_acc) * ARRAY_COUNT(c2s)];
    struct DFA_mat dfa;
    struct nfa2dfa_data nfa2dfa_dat;
    /* nfa2dfa scratch space; some of it is reused later for dfa_c2s /
     * dfa_minimize (guarded by the STATIC_ASSERT_LOCALs below) */
    static u32 nfa2dfa_dat_epsstack[ARRAY_COUNT(nfa_states)];
    static u32 nfa2dfa_dat_visited[DIV_CEIL(ARRAY_COUNT(nfa_states), BITSOF(u32))];
    static u32 nfa2dfa_dat_setoff[ARRAY_COUNT(dfa_acc)];
    static u32 nfa2dfa_dat_hashtable[ARRAY_COUNT(dfa_acc)];
    static u32 nfa2dfa_dat_stack[ARRAY_COUNT(dfa_acc)];
    static u32 nfa2dfa_dat_setdata[16 * ARRAY_COUNT(dfa_acc)];

    { /* wire up NFA / eqc / parser and push the parser's root stack entry */
        nfa.states = nfa_states;
        nfa.cap = ARRAY_COUNT(nfa_states);
        nfa.len = 0;
        nfa.start_state = MAX_U32;
        eqc.bitmat = eqc_bitmat;
        eqc.width = ARRAY_COUNT(c2s);
        eqc.cap = 8;
        eqc.len = 0;
        parser.nfa = &nfa;
        parser.stack = nfaparser_stack;
        parser.stack_cap = ARRAY_COUNT(nfaparser_stack);
        parser.stack_len = 0;
        parser.eqc = &eqc;
        /* state 0 is the single final (FIN) state */
        assert(nfa.len < nfa.cap);
        nfa_fin_state = nfa.len++;
        nfa.states[nfa_fin_state].c = NFA_FIN;
        nfa.states[nfa_fin_state].val.fin = 0;
        parser.stack[parser.stack_len].end = nfa_fin_state;
        parser.stack[parser.stack_len].start = MAX_U32;
        parser.stack[parser.stack_len].seq_first = MAX_U32;
        parser.stack[parser.stack_len].seq_last = MAX_U32;
        parser.stack[parser.stack_len].unary_first = MAX_U32;
        parser.stack[parser.stack_len].unary_last = MAX_U32;
        parser.stack[parser.stack_len].eqc = MAX_U32;
        /* no need to set eqc_* if eqc == MAX_U32 */
        ++parser.stack_len;
    }

    /* lex + parse the regex on the first line; the terminating '\n' is fed
     * to the regex lexer DFA as EOF to flush the last token */
    while (1) {
        u32 acc_val;
        u32 c = lexer->c;
        switch (c) {
        case '\n': c = READER_EOF; break;
        case READER_EOF: return PUTERR("error: unexpected EOF"), 1;
        case READER_ERROR: return PUTERRNO("read"), 1;
        }
        regex_lex_state = regex_dfa_delta(regex_lex_state, c);
        acc_val = regex_dfa_acc(regex_lex_state);
        if (REGEX_DFA_ERROR_STATE(regex_lex_state)) {
            PUTERR("error: regex_lex");
            return 1;
        }
        if (acc_val) {
            /* accept values encode the token: values below
             * REGEX_TOKEN_TYPE_COUNT (after the -1 shift) are token types,
             * higher values are literal characters (REGEX_TOK_C) */
            struct regex_token tok;
            if (acc_val - 1 < REGEX_TOKEN_TYPE_COUNT) {
                tok.type = acc_val - 1;
                tok.c = 0; /* tok.c may be whatever */
                assert(tok.type != REGEX_TOK_C);
            } else {
                tok.type = REGEX_TOK_C;
                tok.c = (acc_val - 1) - REGEX_TOKEN_TYPE_COUNT;
            }
            regex_lex_state = REGEX_DFA_START_STATE;
            if (regex_nfaparse(&parser, tok)) {
                (void)writer_puterr_prefix(stde(), __FILE__, __LINE__);
                (void)writer_puts(stde(), "error: ");
                (void)writer_puts(stde(),
                        regex_nfaparse_status2string(parser.error));
                (void)writer_putc(stde(), '\n');
                (void)writer_flush(stde());
                return 1;
            }
        }
        if (lexer->c == '\n') {
            lexer_next(lexer);
            break;
        }
        lexer_next(lexer);
    }
    ++lineno;

    /* a completely parsed regex leaves the parser stack empty */
    if (parser.stack_len) {
        PUTERR("error: regex_parse unexpected end of regex");
        return 1;
    }
    nfa.start_state = parser.start;

    if (0&&DEBUG) {
        pr_str(p, "nfalen = "); pr_u32(p, nfa.len); pr_str(p, "\n");
    }
    if (0&&DEBUG) {
        p->err = p->err || nfa_print_dot(&nfa, p->writer,
                nfa2dfa_dat_epsstack, nfa2dfa_dat_visited);
    }

    { /* wire up the DFA output buffers and the nfa2dfa scratch space */
        dfa.mat = dfa_mat;
        dfa.width = ARRAY_COUNT(c2s);
        dfa.tot_elems = ARRAY_COUNT(dfa_mat);
        dfa.len = 0;
        dfa.acc = dfa_acc;
        dfa.acc_cap = ARRAY_COUNT(dfa_acc);
        STRUCT_ZERO(nfa2dfa_dat);
        nfa2dfa_dat.nfa = &nfa;
        nfa2dfa_dat.dfa = &dfa;
        nfa2dfa_dat.eqc = &eqc;
        nfa2dfa_dat.epsstack = nfa2dfa_dat_epsstack;
        nfa2dfa_dat.visited = nfa2dfa_dat_visited;
        nfa2dfa_dat.setoff = nfa2dfa_dat_setoff;
        nfa2dfa_dat.hashtable = nfa2dfa_dat_hashtable;
        nfa2dfa_dat.stack = nfa2dfa_dat_stack;
        nfa2dfa_dat.stack_cap = ARRAY_COUNT(nfa2dfa_dat_stack);
        nfa2dfa_dat.setdata = nfa2dfa_dat_setdata;
        nfa2dfa_dat.setdata_cap = ARRAY_COUNT(nfa2dfa_dat_setdata);
    }

    /* subset construction */
    switch (nfa2dfa(&nfa2dfa_dat)) {
    case NFA2DFA_STATUS_OK: break;
    case NFA2DFA_STATUS_OUT_OF_SETDATA_MEM:
        return PUTERR("error: nfa2dfa: out of setdata mem"), 1;
    case NFA2DFA_STATUS_OUT_OF_STACK_MEM:
        return PUTERR("error: nfa2dfa: out of stack mem"), 1;
    case NFA2DFA_STATUS_OUT_OF_DFA_MEM:
        return PUTERR("error: nfa2dfa: out of dfa mem"), 1;
    case NFA2DFA_STATUS_BROKEN:
        return assert(0), PUTERR("error: nfa2dfa: broken"), 1;
    }

    if (1) {
        /* compress DFA columns into character equivalence classes;
         * epsstack is no longer needed and is large enough to reuse */
        STATIC_ASSERT_LOCAL(
                ARRAY_COUNT(nfa2dfa_dat_epsstack) >= ARRAY_COUNT(c2s),
                can_reuse_epsstack_in_c2s);
        dfa_c2s(&dfa, nfa2dfa_dat_epsstack, c2s);
    } else {
        /* identity map - kept for debugging the compression step */
        u32 i;
        for (i = 0; i < ARRAY_COUNT(c2s); ++i) {
            c2s[i] = i;
        }
    }
    if (1&&DEBUG) {
        pr_str(p, "dfalen = "); pr_u32(p, dfa.len); pr_str(p, "\n");
        pr_str(p, "dfawidth = "); pr_u32(p, dfa.width); pr_str(p, "\n");
    }
    if (1&&DEBUG) {
        p->err = p->err || dfa_print_dot(&dfa, p->writer, NULL);
    }
    if (1) {
        dfa_minimize(&dfa, nfa2dfa_dat_setoff, nfa2dfa_dat_stack,
                nfa2dfa_dat_hashtable);
        /* do c2s again */
        if (0) {
            u32 i;
            u32 *extra_c2s = nfa2dfa_dat_setoff;
            STATIC_ASSERT_LOCAL(
                    ARRAY_COUNT(nfa2dfa_dat_epsstack) >= ARRAY_COUNT(c2s),
                    can_reuse_epsstack_in_min_c2s);
            STATIC_ASSERT_LOCAL(
                    ARRAY_COUNT(nfa2dfa_dat_setoff) >= ARRAY_COUNT(c2s),
                    can_reuse_setoff_in_min_c2s);
            dfa_c2s(&dfa, nfa2dfa_dat_epsstack, extra_c2s);
            /* compose the new class map with the existing one */
            for (i = 0; i < ARRAY_COUNT(c2s); ++i) {
                c2s[i] = extra_c2s[c2s[i]];
            }
        }
        if (1&&DEBUG) {
            pr_str(p, "mindfalen = "); pr_u32(p, dfa.len); pr_str(p, "\n");
            pr_str(p, "mindfawidth = "); pr_u32(p, dfa.width); pr_str(p, "\n");
        }
        if (1&&DEBUG) {
            p->err = p->err || dfa_print_dot(&dfa, p->writer, NULL);
        }
    }

    /* expect the first '%' separator line after the regex */
    lexer_skip_space(lexer);
    switch (lexer->c) {
    case '%': lexer_next(lexer); break;
    case READER_EOF: return PUTERR("error: unexpected EOF"), 1;
    case READER_ERROR: return PUTERRNO("read"), 1;
    default: return PUTERR("error: expected '%'"), 1;
    }
    /* only whitespace may follow the '%' on the separator line */
    while (lexer->c != '\n') {
        if (lexer->c == READER_EOF) {
            return PUTERR("error: unexpected EOF"), 1;
        } else if (lexer->c == READER_ERROR) {
            return PUTERRNO("read"), 1;
        } else if (!lexer_is_space(lexer->c)) {
            return PUTERR("error: expected whitespace or EOL"), 1;
        }
        lexer_next(lexer);
    }
    lexer_next(lexer);
    ++lineno;

    { /* strings expected to be accepted */
        while (lexer->c != '%') {
            /* run one line through the DFA */
            u32 state = dfa.start_state;
            while (lexer->c != '\n') {
                if (lexer->c == READER_EOF) {
                    return PUTERR("error: unexpected EOF"), 1;
                } else if (lexer->c == READER_ERROR) {
                    return PUTERRNO("read"), 1;
                }
                state = dfa_delta(&dfa, state, c2s[lexer->c]);
                lexer_next(lexer);
            }
            if (!(state < dfa.len && dfa.acc[state])) {
                err = 1;
                pr_str(p, "fail: line ");
                pr_u32(p, lineno);
                pr_str(p, ": expected ACC got REJ\n");
            }
            ++lineno;
            lexer_next(lexer);
        }
        lexer_next(lexer); /* consume the second '%' */
    }

    /* rest of the second separator line must be whitespace */
    while (lexer->c != '\n') {
        if (lexer->c == READER_EOF) {
            return PUTERR("error: unexpected EOF"), 1;
        } else if (lexer->c == READER_ERROR) {
            return PUTERRNO("read"), 1;
        } else if (!lexer_is_space(lexer->c)) {
            return PUTERR("error: expected whitespace or EOL"), 1;
        }
        lexer_next(lexer);
    }
    lexer_next(lexer);
    ++lineno;

    { /* strings expected to be rejected */
        while (lexer->c != READER_EOF) {
            /* run one line through the DFA; EOF may end the last line */
            u32 state = dfa.start_state;
            while (lexer->c != '\n' && lexer->c != READER_EOF) {
                if (lexer->c == READER_ERROR) {
                    return PUTERRNO("read"), 1;
                }
                if (lexer->c == '%') {
                    return PUTERR("error: % in pattern"), 1;
                }
                state = dfa_delta(&dfa, state, c2s[lexer->c]);
                lexer_next(lexer);
            }
            if (state < dfa.len && dfa.acc[state]) {
                err = 1;
                pr_str(p, "fail: line ");
                pr_u32(p, lineno);
                pr_str(p, ": expected REJ got ACC\n");
            }
            if (lexer->c != READER_EOF) {
                lexer_next(lexer);
                ++lineno;
            }
        }
    }
    return err;
}