// Header filter for vod child requests: before the chain continues, copy the
// subrequest's response headers onto the parent request and send them (unless
// the child is handled in memory, or the parent already sent its headers).
static ngx_int_t
ngx_child_request_header_filter(ngx_http_request_t *r)
{
    ngx_child_request_context_t*  ctx;
    ngx_http_request_t*           parent = r->parent;

    // only act on subrequests whose parent is a vod request that has not
    // sent its headers yet
    if (parent == NULL ||
        parent->header_sent ||
        ngx_http_get_module_ctx(parent, ngx_http_vod_module) == NULL)
    {
        return ngx_http_next_header_filter(r);
    }

    ctx = ngx_http_get_module_ctx(r, ngx_http_vod_module);
    if (ctx == NULL || is_in_memory(ctx))
    {
        // not a vod child request, or the response is read fully into memory
        return ngx_http_next_header_filter(r);
    }

    if (r->headers_out.status == 0)
    {
        // no status code - can happen when the proxy module gets an invalid
        // status line and falls back to HTTP/0.9; send no header and close
        // the connection when done
        ctx->dont_send_header = 1;
        parent->keepalive = 0;
    }
    else
    {
        // propagate the child headers to the parent request and send them
        parent->headers_out = r->headers_out;
        ctx->send_header_result = ngx_http_send_header(parent);
    }

    return ngx_http_next_header_filter(r);
}
void envTest() { ups_key_t key = {}; ups_record_t rec = {}; RecnoType recno; key.data = &recno; key.size = sizeof(recno); key.flags = UPS_KEY_USER_ALLOC; teardown(); if (sizeof(RecnoType) == 4) require_create(m_flags, nullptr, UPS_RECORD_NUMBER32, nullptr); else require_create(m_flags, nullptr, UPS_RECORD_NUMBER64, nullptr); REQUIRE(0 == ups_db_insert(db, 0, &key, &rec, 0)); REQUIRE((RecnoType)1ull == *(RecnoType *)key.data); if (!is_in_memory()) { reopen(); REQUIRE(0 == ups_db_insert(db, 0, &key, &rec, 0)); REQUIRE((RecnoType)2ull == *(RecnoType *)key.data); } }
// Write event handler invoked on the parent request after a child request
// completes. Restores the parent's original write handler and module
// context, maps upstream failures to an error code, and either invokes the
// registered completion callback or finalizes the request itself.
static void
ngx_child_request_wev_handler(ngx_http_request_t *r)
{
    ngx_child_request_context_t* ctx;
    ngx_http_upstream_t *u;
    ngx_int_t rc;
    off_t content_length;

    ctx = ngx_http_get_module_ctx(r, ngx_http_vod_module);

    // restore the write event handler
    r->write_event_handler = ctx->original_write_event_handler;
    ctx->original_write_event_handler = NULL;

    // restore the original context
    ngx_http_set_ctx(r, ctx->original_context, ngx_http_vod_module);

    // get the completed upstream (cleared so it is consumed only once)
    u = ctx->upstream;
    ctx->upstream = NULL;
    if (u == NULL)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_child_request_wev_handler: unexpected, upstream is null");
        return;
    }

    // code taken from echo-nginx-module to work around nginx subrequest issues:
    // if this request is the active one and has postponed work, hand off the
    // connection to the postponed request (or flush postponed data)
    if (r == r->connection->data && r->postponed)
    {
        if (r->postponed->request)
        {
            r->connection->data = r->postponed->request;
#if defined(nginx_version) && nginx_version >= 8012
            ngx_http_post_request(r->postponed->request, NULL);
#else
            ngx_http_post_request(r->postponed->request);
#endif
        }
        else
        {
            ngx_http_output_filter(r, NULL);
        }
    }

    // get the final error code
    rc = ctx->error_code;
    if (rc == NGX_OK && is_in_memory(ctx))
    {
        // in-memory fetch: only 200/206 are acceptable, and the response
        // must have been read to completion
        if (u->headers_in.status_n != NGX_HTTP_OK &&
            u->headers_in.status_n != NGX_HTTP_PARTIAL_CONTENT)
        {
            if (u->headers_in.status_n != 0)
            {
                ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                    "ngx_child_request_wev_handler: upstream returned a bad status %ui",
                    u->headers_in.status_n);
            }
            else
            {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "ngx_child_request_wev_handler: failed to get upstream status");
            }
            rc = NGX_HTTP_BAD_GATEWAY;
        }
        else if (u->length != 0 && u->length != -1 && !u->headers_in.chunked)
        {
            // u->length is the number of bytes still expected; nonzero here
            // means the upstream connection was closed prematurely
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                "ngx_child_request_wev_handler: upstream connection was closed with %O bytes left to read",
                u->length);
            rc = NGX_HTTP_BAD_GATEWAY;
        }
    }
    else if (rc == NGX_ERROR)
    {
        // map the generic internal error to 502 for the caller
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_child_request_wev_handler: got error -1, changing to 502");
        rc = NGX_HTTP_BAD_GATEWAY;
    }

    // a failure to send the parent headers takes precedence
    if (ctx->send_header_result != NGX_OK)
    {
        rc = ctx->send_header_result;
    }

    // get the content length (buffered bytes for in-memory requests,
    // upstream-reported response length otherwise)
    if (is_in_memory(ctx))
    {
        content_length = u->buffer.last - u->buffer.pos;
    }
    else if (u->state != NULL)
    {
        content_length = u->state->response_length;
    }
    else
    {
        content_length = 0;
    }

    if (ctx->callback != NULL)
    {
        // notify the caller
        ctx->callback(ctx->callback_context, rc, &u->buffer, content_length);
    }
    else
    {
        if (r->header_sent || ctx->dont_send_header)
        {
            // headers already out (or deliberately suppressed):
            // flush the buffer and close the request
            ngx_http_send_special(r, NGX_HTTP_LAST);
            ngx_http_finalize_request(r, NGX_OK);
        }
        else
        {
            // finalize the request with the computed status
            ngx_http_finalize_request(r, rc);
        }
    }
}
ngx_int_t ngx_child_request_start( ngx_http_request_t *r, ngx_child_request_callback_t callback, void* callback_context, ngx_str_t* internal_location, ngx_child_request_params_t* params, ngx_buf_t* response_buffer) { ngx_child_request_context_t* child_ctx; ngx_http_post_subrequest_t *psr; ngx_http_request_t *sr; ngx_uint_t flags; ngx_str_t uri; ngx_int_t rc; u_char* p; // create the child context child_ctx = ngx_pcalloc(r->pool, sizeof(*child_ctx)); if (child_ctx == NULL) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "ngx_child_request_start: ngx_pcalloc failed"); return NGX_ERROR; } child_ctx->callback = callback; child_ctx->callback_context = callback_context; child_ctx->response_buffer = response_buffer; // build the subrequest uri uri.data = ngx_pnalloc(r->pool, internal_location->len + params->base_uri.len + 1); if (uri.data == NULL) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "ngx_child_request_start: ngx_palloc failed (2)"); return NGX_ERROR; } p = ngx_copy(uri.data, internal_location->data, internal_location->len); p = ngx_copy(p, params->base_uri.data, params->base_uri.len); *p = '\0'; uri.len = p - uri.data; // create the subrequest psr = ngx_palloc(r->pool, sizeof(ngx_http_post_subrequest_t)); if (psr == NULL) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "ngx_child_request_start: ngx_palloc failed (3)"); return NGX_ERROR; } psr->handler = ngx_child_request_finished_handler; psr->data = r; if (is_in_memory(child_ctx)) { if (ngx_list_init(&child_ctx->upstream_headers, r->pool, 8, sizeof(ngx_table_elt_t)) != NGX_OK) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "ngx_child_request_start: ngx_list_init failed"); return NGX_ERROR; } flags = NGX_HTTP_SUBREQUEST_WAITED | NGX_HTTP_SUBREQUEST_IN_MEMORY; } else { flags = NGX_HTTP_SUBREQUEST_WAITED; } rc = ngx_http_subrequest(r, &uri, ¶ms->extra_args, &sr, psr, flags); if (rc == NGX_ERROR) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, 
"ngx_child_request_start: ngx_http_subrequest failed %i", rc); return rc; } // set the context of the subrequest ngx_http_set_ctx(sr, child_ctx, ngx_http_vod_module); // change the write_event_handler in order to inject the response buffer into the upstream // (this can be done only after the proxy module allocates the upstream) if (is_in_memory(child_ctx)) { sr->write_event_handler = ngx_child_request_initial_wev_handler; } // Note: ngx_http_subrequest always sets the subrequest method to GET if (params->method == NGX_HTTP_HEAD) { sr->method = NGX_HTTP_HEAD; sr->method_name = ngx_http_vod_head_method; } // build the request headers rc = ngx_child_request_copy_headers(r, params, &sr->headers_in, &r->headers_in); if (rc != NGX_OK) { return rc; } ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "ngx_child_request_start: completed successfully sr=%p", sr); return NGX_AGAIN; }
/**
 * @brief Acquire the next page for reading.
 *
 * Use a timeout_ms value of 0 to wait until a page appears or the
 * tuple_fifo is closed (normal behavior). Use a negative timeout_ms
 * value to avoid waiting. If the tuple_fifo contains no pages, we
 * return immediately with a value of 0. Use a positive timeout_ms
 * value to wait for a max length of time.
 *
 * @return 1 if we got a page. -1 if the tuple_fifo has been
 * closed. If 'timeout_ms' is negative and this method returns 0, it
 * means the tuple_fifo is empty. If the timeout_ms value is positive
 * and we return 0, it means we timed out.
 */
int tuple_fifo::_get_read_page(int timeout_ms) {

    // * * * BEGIN CRITICAL SECTION * * *
    critical_section_t cs(_lock);

    _termination_check();

    /* Free the page so the writer can use it. */
    if (is_in_memory() && (_read_page != SENTINEL_PAGE)) {
        /* We are still maintaining an in-memory page list from
           which we are pulling pages. We release them to
           _free_pages as we are done with them. */
        _read_page->clear();
        _free_pages.push_back(_read_page.release());
        _set_read_page(SENTINEL_PAGE);
    }

    /* If 'wait_on_empty' and the buffer is currently empty, we
       must wait for space to open up. Once we start waiting we
       continue waiting until either space for '_threshold' pages
       is available OR the writer has invoked send_eof() or
       terminate().
       Note: 't' starts at 1 and is raised to _threshold after the
       first iteration, so the first wakeup needs only one page. */
    for(size_t t=1;
        (timeout_ms >= 0) && !is_done_writing() && (_available_fifo_reads() < t);
        t = _threshold) {
        /* We are to either wait for a page or wait for timeout_ms. */
        if(!wait_for_writer(timeout_ms))
            /* Timed out! */
            break;
        _termination_check();
    }

    TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "available reads = %d\n",
          (int)_available_fifo_reads());

    if(_available_fifo_reads() == 0) {
        /* If we are here, we exited the loop above because one of
           the other conditions failed. We either noticed that the
           tuple_fifo has been closed or we've timed out. */
        if(is_done_writing()) {
            /* notify caller that the tuple_fifo is closed */
            TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "Returning -1\n");
            return -1;
        }
        if(timeout_ms != 0)
            /* notify caller that we timed out */
            return 0;
        /* timeout_ms == 0 callers wait indefinitely, so reaching
           this point with no readable page is a logic error */
        unreachable();
    }

    /* Fetch the next page according to where the fifo currently
       lives (in-memory page list vs. backing disk file). */
    switch(_state.current()) {

    case tuple_fifo_state_t::IN_MEMORY:
    case tuple_fifo_state_t::IN_MEMORY_DONE_WRITING:
    {
        /* pull the page from page_list */
        assert(!_pages.empty());
        _set_read_page(_pages.front());
        _pages.pop_front();
        assert(_pages_in_memory > 0);
        _pages_in_memory--;
        break;
    }

    case tuple_fifo_state_t::ON_DISK:
    case tuple_fifo_state_t::ON_DISK_DONE_WRITING:
    {
        /* We are on disk. We should not be releasing _read_page
           after iterating over its entries. However, we still need
           to be prepared against code which extracts pages from
           the tuple_fifo using get_page(). get_page() sets
           _read_page to the SENTINEL_PAGE. */
        if (_read_page == SENTINEL_PAGE)
            _set_read_page(_alloc_page());
        else {
            /* We are reusing the same read page... do a reset */
            _read_page->clear();
            /* release()+set of the same pointer re-seats the guard
               without freeing the page */
            _set_read_page(_read_page.release());
        }

        /* Make sure that at this point, we are not dealing with
           the SENTINEL_PAGE. */
        assert(_read_page != SENTINEL_PAGE);
        assert(_read_page->page_size() ==
               malloc_page_pool::instance()->page_size());

        /* read page from disk file */
        _read_page->clear();

        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "_next_page = %d\n",
              (int)_next_page);
        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "_file_head_page = %d\n",
              (int)_file_head_page);

        /* pages are stored sequentially starting at _file_head_page */
        unsigned long seek_pos =
            (_next_page - _file_head_page) * get_default_page_size();
        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "fseek to %lu\n", seek_pos);

        int fseek_ret = fseek(_page_file, seek_pos, SEEK_SET);
        /* NOTE(review): the assert fires before THROW2 in debug
           builds, so the exception path is reachable only with
           NDEBUG - confirm this is intended */
        assert(!fseek_ret);
        if (fseek_ret)
            THROW2(FileException, "fseek to %lu", seek_pos);

        int fread_ret = _read_page->fread_full_page(_page_file);
        assert(fread_ret);
        _set_read_page(_read_page.release());

        size_t page_size = _read_page->page_size();
        if (TRACE_ALWAYS&TRACE_MASK_DISK) {
            /* debug hexdump of the raw page bytes; the page is
               temporarily released from the guard and re-seated
               afterwards.
               NOTE(review): the tab/newline separators are printed
               when i%2==0 / i%16==0, i.e. before byte 0 and after
               odd positions - presumably cosmetic only */
            page* pg = _read_page.release();
            unsigned char* pg_bytes = (unsigned char*)pg;
            for (size_t i = 0; i < page_size; i++) {
                printf("%02x", pg_bytes[i]);
                if (i % 2 == 0) printf("\t");
                if (i % 16 == 0) printf("\n");
            }
            _set_read_page(pg);
        }

        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "Read %d %d-byte tuples\n",
              (int)_read_page->tuple_count(), (int)_read_page->tuple_size());
        break;
    }

    default:
        unreachable();

    } /* endof switch statement */

    /* bookkeeping: one fewer page in the fifo, advance read cursor */
    assert(_pages_in_fifo > 0);
    _pages_in_fifo--;
    _next_page++;

    /* wake the writer if necessary */
    if(!FLUSH_TO_DISK_ON_FULL
       && (_available_in_memory_writes() >= _threshold)
       && !is_done_writing())
        ensure_writer_running();

    // * * * END CRITICAL SECTION * * *
    return 1;
}