static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct req *req;
	enum req_fsm_nxt s;
	struct transport xp;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	if (preq->esi_level >= cache_param->max_esi_depth)
		return;

	req = Req_New(wrk, preq->sp);
	req->req_body_status = REQ_BODY_NONE;
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	VSLb(req->vsl, SLT_Begin, "req %u esi", VXID(preq->vsl->wid));
	VSLb(preq->vsl, SLT_Link, "req %u esi", VXID(req->vsl->wid));

	req->esi_level = preq->esi_level + 1;

	if (preq->esi_level == 0)
		assert(preq->top == preq);
	else
		CHECK_OBJ_NOTNULL(preq->top, REQ_MAGIC);

	req->top = preq->top;

	HTTP_Copy(req->http0, preq->http0);

	req->http0->ws = req->ws;
	req->http0->vsl = req->vsl;
	req->http0->logtag = SLT_ReqMethod;
	req->http0->conds = 0;

	http_SetH(req->http0, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0') {
		http_Unset(req->http0, H_Host);
		http_SetHeader(req->http0, host);
	}

	http_ForceField(req->http0, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http0, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http0, H_If_Modified_Since);
	http_Unset(req->http0, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http0, H_Range);

	/* Set Accept-Encoding according to what we want */
	http_Unset(req->http0, H_Accept_Encoding);
	if (ecx->isgzip)
		http_ForceHeader(req->http0, H_Accept_Encoding, "gzip");

	/* Client content already taken care of */
	http_Unset(req->http0, H_Content_Length);

	/* Reset request to status before we started messing with it */
	HTTP_Copy(req->http, req->http0);

	req->vcl = preq->vcl;
	preq->vcl = NULL;
	req->wrk = preq->wrk;

	/*
	 * XXX: We should decide if we should cache the director
	 * XXX: or not (for session/backend coupling).  Until then
	 * XXX: make sure we don't trip up the check in vcl_recv.
	 */
	req->req_step = R_STP_RECV;
	req->t_req = preq->t_req;
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));

	INIT_OBJ(&xp, TRANSPORT_MAGIC);
	xp.deliver = VED_Deliver;
	req->transport = &xp;
	req->transport_priv = ecx;

	THR_SetRequest(req);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	req->ws_req = WS_Snapshot(req->ws);

	while (1) {
		req->wrk = wrk;
		s = CNT_Request(wrk, req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "loop waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		AZ(req->wrk);
		(void)usleep(10000);
	}

	VRTPRIV_dynamic_kill(req->sp->privs, (uintptr_t)req);
	CNT_AcctLogCharge(wrk->stats, req);
	VSL_End(req->vsl);

	preq->vcl = req->vcl;
	req->vcl = NULL;

	req->wrk = NULL;

	THR_SetRequest(preq);
	Req_Release(req);
}
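
The interesting move above is the stack-allocated struct transport whose deliver callback (VED_Deliver) sends the sub-request's output into the parent delivery instead of onto the client connection, with transport_priv carrying the ESI context. The stand-alone sketch below illustrates only that pattern under invented names (my_transport, my_req, esi_deliver, run_include); none of it is Varnish API.

#include <stdio.h>

/* Hypothetical stand-ins for the request and the per-include ESI context. */
struct my_transport;

struct my_req {
	const char *url;
	const struct my_transport *transport;	/* delivery hook for this request */
	void *transport_priv;			/* here: the parent's ESI context */
};

struct my_esi_ctx {
	int isgzip;
};

/* A "transport" is just a table of callbacks; deliver is the one being swapped. */
struct my_transport {
	void (*deliver)(struct my_req *req, const char *body);
};

/* Deliver into the parent response instead of onto the client socket. */
static void esi_deliver(struct my_req *req, const char *body)
{
	struct my_esi_ctx *ecx = req->transport_priv;

	printf("embedding '%s' into parent (gzip=%d)\n", body, ecx->isgzip);
}

static void run_include(struct my_esi_ctx *ecx, const char *src)
{
	/* Stack-allocated transport, alive only for this synchronous include. */
	struct my_transport xp = { .deliver = esi_deliver };
	struct my_req sub = { .url = src, .transport = &xp, .transport_priv = ecx };

	/* ...the request state machine would run here; at delivery time: */
	sub.transport->deliver(&sub, "<p>fragment</p>");
}

int main(void)
{
	struct my_esi_ctx ecx = { .isgzip = 0 };

	run_include(&ecx, "/fragment.html");
	return 0;
}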
void
HTTP1_Session(struct worker *wrk, struct req *req)
{
	enum htc_status_e hs;
	struct sess *sp;
	const char *st;
	int i;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	/*
	 * Whenever we come in from the acceptor or waiter, we need to set
	 * blocking mode.  It would be simpler to do this in the acceptor
	 * or waiter, but we'd rather do the syscall in the worker thread.
	 * On systems which return errors for ioctl, we close early
	 */
	if (http1_getstate(sp) == H1NEWREQ && VTCP_blocking(sp->fd)) {
		if (errno == ECONNRESET)
			SES_Close(sp, SC_REM_CLOSE);
		else
			SES_Close(sp, SC_TX_ERROR);
		AN(Req_Cleanup(sp, wrk, req));
		return;
	}

	while (1) {
		st = http1_getstate(sp);
		if (st == H1NEWREQ) {
			assert(isnan(req->t_prev));
			assert(isnan(req->t_req));
			AZ(req->vcl);
			AZ(req->esi_level);

			hs = HTC_RxStuff(req->htc, HTTP1_Complete,
			    &req->t_first, &req->t_req,
			    sp->t_idle + cache_param->timeout_linger,
			    sp->t_idle + cache_param->timeout_idle,
			    cache_param->http_req_size);
			XXXAZ(req->htc->ws->r);
			if (hs < HTC_S_EMPTY) {
				req->acct.req_hdrbytes +=
				    req->htc->rxbuf_e - req->htc->rxbuf_b;
				CNT_AcctLogCharge(wrk->stats, req);
				Req_Release(req);
				switch (hs) {
				case HTC_S_CLOSE:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				case HTC_S_TIMEOUT:
					SES_Delete(sp, SC_RX_TIMEOUT, NAN);
					return;
				case HTC_S_OVERFLOW:
					SES_Delete(sp, SC_RX_OVERFLOW, NAN);
					return;
				case HTC_S_EOF:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				default:
					WRONG("htc_status (bad)");
				}
			}
			if (hs == HTC_S_IDLE) {
				wrk->stats->sess_herd++;
				Req_Release(req);
				SES_Wait(sp, &HTTP1_transport);
				return;
			}
			if (hs != HTC_S_COMPLETE)
				WRONG("htc_status (nonbad)");

			i = http1_dissect(wrk, req);
			req->acct.req_hdrbytes +=
			    req->htc->rxbuf_e - req->htc->rxbuf_b;
			if (i) {
				SES_Close(req->sp, req->doclose);
				http1_setstate(sp, H1CLEANUP);
			} else {
				req->req_step = R_STP_RECV;
				http1_setstate(sp, H1PROC);
			}
		} else if (st == H1BUSY) {
			/*
			 * Return from waitinglist.
			 * Check to see if the remote has left.
			 */
			if (VTCP_check_hup(sp->fd)) {
				AN(req->hash_objhead);
				(void)HSH_DerefObjHead(wrk, &req->hash_objhead);
				AZ(req->hash_objhead);
				SES_Close(sp, SC_REM_CLOSE);
				AN(Req_Cleanup(sp, wrk, req));
				return;
			}
			http1_setstate(sp, H1PROC);
		} else if (st == H1PROC) {
			req->transport = &HTTP1_transport;
			if (CNT_Request(wrk, req) == REQ_FSM_DISEMBARK) {
				req->task.func = http1_req;
				req->task.priv = req;
				http1_setstate(sp, H1BUSY);
				return;
			}
			req->transport = NULL;
			http1_setstate(sp, H1CLEANUP);
		} else if (st == H1CLEANUP) {
			if (Req_Cleanup(sp, wrk, req))
				return;
			HTC_RxInit(req->htc, req->ws);
			if (req->htc->rxbuf_e != req->htc->rxbuf_b)
				wrk->stats->sess_readahead++;
			http1_setstate(sp, H1NEWREQ);
		} else {
			WRONG("Wrong H1 session state");
		}
	}
}
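
The H1BUSY branch relies on being able to ask, after a waiting-list wakeup, whether the client has hung up without consuming any request bytes. The sketch below shows one conventional way to make that check with recv(MSG_PEEK | MSG_DONTWAIT); it is an illustration of the idea only, not how VTCP_check_hup is actually implemented, and check_hup is a hypothetical name.

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/*
 * Return 1 if the peer has closed the connection, 0 if it still looks
 * alive; MSG_PEEK leaves any queued request bytes untouched.
 */
static int check_hup(int fd)
{
	char c;
	ssize_t r = recv(fd, &c, 1, MSG_PEEK | MSG_DONTWAIT);

	if (r == 0)
		return 1;			/* orderly shutdown by the peer */
	if (r < 0 && errno != EAGAIN && errno != EWOULDBLOCK)
		return 1;			/* hard error: treat as gone */
	return 0;				/* still open (possibly with data) */
}

int main(void)
{
	int fds[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
		return 1;
	printf("before close: hup=%d\n", check_hup(fds[0]));	/* 0 */
	close(fds[1]);
	printf("after close:  hup=%d\n", check_hup(fds[0]));	/* 1 */
	close(fds[0]);
	return 0;
}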
static void
HTTP1_Session(struct worker *wrk, struct req *req)
{
	enum htc_status_e hs;
	struct sess *sp;
	const char *st;
	int i;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	/*
	 * Whenever we come in from the acceptor or waiter, we need to set
	 * blocking mode.  It would be simpler to do this in the acceptor
	 * or waiter, but we'd rather do the syscall in the worker thread.
	 */
	if (http1_getstate(sp) == H1NEWREQ)
		VTCP_blocking(sp->fd);

	req->transport = &HTTP1_transport;

	while (1) {
		st = http1_getstate(sp);
		if (st == H1NEWREQ) {
			CHECK_OBJ_NOTNULL(req->transport, TRANSPORT_MAGIC);
			assert(isnan(req->t_prev));
			assert(isnan(req->t_req));
			AZ(req->vcl);
			AZ(req->esi_level);
			AN(req->htc->ws->r);

			hs = HTC_RxStuff(req->htc, HTTP1_Complete,
			    &req->t_first, &req->t_req,
			    sp->t_idle + cache_param->timeout_linger,
			    sp->t_idle + cache_param->timeout_idle,
			    NAN,
			    cache_param->http_req_size);
			AZ(req->htc->ws->r);
			if (hs < HTC_S_EMPTY) {
				req->acct.req_hdrbytes +=
				    req->htc->rxbuf_e - req->htc->rxbuf_b;
				Req_Cleanup(sp, wrk, req);
				Req_Release(req);
				switch (hs) {
				case HTC_S_CLOSE:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				case HTC_S_TIMEOUT:
					SES_Delete(sp, SC_RX_TIMEOUT, NAN);
					return;
				case HTC_S_OVERFLOW:
					SES_Delete(sp, SC_RX_OVERFLOW, NAN);
					return;
				case HTC_S_EOF:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				default:
					WRONG("htc_status (bad)");
				}
			}
			if (hs == HTC_S_IDLE) {
				wrk->stats->sess_herd++;
				Req_Cleanup(sp, wrk, req);
				Req_Release(req);
				SES_Wait(sp, &HTTP1_transport);
				return;
			}
			if (hs != HTC_S_COMPLETE)
				WRONG("htc_status (nonbad)");

			if (H2_prism_complete(req->htc) == HTC_S_COMPLETE) {
				if (!FEATURE(FEATURE_HTTP2)) {
					SES_Close(req->sp, SC_REQ_HTTP20);
					AZ(req->ws->r);
					AZ(wrk->aws->r);
					http1_setstate(sp, H1CLEANUP);
					continue;
				}
				http1_setstate(sp, NULL);
				H2_PU_Sess(wrk, sp, req);
				return;
			}

			i = http1_dissect(wrk, req);
			req->acct.req_hdrbytes +=
			    req->htc->rxbuf_e - req->htc->rxbuf_b;
			if (i) {
				assert(req->doclose > 0);
				SES_Close(req->sp, req->doclose);
				AZ(req->ws->r);
				AZ(wrk->aws->r);
				http1_setstate(sp, H1CLEANUP);
				continue;
			}
			if (http_HdrIs(req->http, H_Upgrade, "h2c")) {
				if (!FEATURE(FEATURE_HTTP2)) {
					VSLb(req->vsl, SLT_Debug,
					    "H2 upgrade attempt");
				} else if (req->htc->body_status != BS_NONE) {
					VSLb(req->vsl, SLT_Debug,
					    "H2 upgrade attempt has body");
				} else {
					http1_setstate(sp, NULL);
					req->err_code = 2;
					H2_OU_Sess(wrk, sp, req);
					return;
				}
			}
			req->req_step = R_STP_TRANSPORT;
			http1_setstate(sp, H1PROC);
		} else if (st == H1PROC) {
			req->task.func = http1_req;
			req->task.priv = req;
			wrk->stats->client_req++;
			CNT_Embark(wrk, req);
			if (req->req_step == R_STP_TRANSPORT)
				VCL_TaskEnter(req->vcl, req->privs);
			if (CNT_Request(req) == REQ_FSM_DISEMBARK)
				return;
			AZ(req->vcl0);
			req->task.func = NULL;
			req->task.priv = NULL;
			AZ(req->ws->r);
			AZ(wrk->aws->r);
			http1_setstate(sp, H1CLEANUP);
		} else if (st == H1CLEANUP) {
			AZ(wrk->aws->r);
			AZ(req->ws->r);
			if (sp->fd >= 0 && req->doclose != SC_NULL)
				SES_Close(sp, req->doclose);
			if (sp->fd < 0) {
				wrk->stats->sess_closed++;
				Req_Cleanup(sp, wrk, req);
				Req_Release(req);
				SES_Delete(sp, SC_NULL, NAN);
				return;
			}
			Req_Cleanup(sp, wrk, req);
			HTC_RxInit(req->htc, req->ws);
			if (req->htc->rxbuf_e != req->htc->rxbuf_b)
				wrk->stats->sess_readahead++;
			http1_setstate(sp, H1NEWREQ);
		} else {
			WRONG("Wrong H1 session state");
		}
	}
}
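
H2_prism_complete decides whether the bytes already buffered form the fixed HTTP/2 client connection preface ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n", RFC 7540 section 3.5). The preface string in the sketch below is that RFC constant; the helper around it (check_h2_preface) is a hypothetical illustration of this kind of three-way check (complete / not a preface / need more data), not the function used above.

#include <stdio.h>
#include <string.h>

/* RFC 7540 section 3.5: the fixed client connection preface. */
static const char h2_preface[] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";

enum preface_state { PREFACE_MORE, PREFACE_NO, PREFACE_YES };

/*
 * Classify the bytes received so far: a complete preface, definitely not
 * a preface, or "keep reading".
 */
static enum preface_state check_h2_preface(const char *buf, size_t len)
{
	size_t want = sizeof h2_preface - 1;
	size_t have = len < want ? len : want;

	if (memcmp(buf, h2_preface, have) != 0)
		return PREFACE_NO;
	return len >= want ? PREFACE_YES : PREFACE_MORE;
}

int main(void)
{
	const char *partial = "PRI * HTT";
	const char *full = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nrest-of-frames";
	const char *http1 = "GET / HTTP/1.1\r\n";

	printf("%d %d %d\n",
	    check_h2_preface(partial, strlen(partial)),	/* PREFACE_MORE */
	    check_h2_preface(full, strlen(full)),	/* PREFACE_YES */
	    check_h2_preface(http1, strlen(http1)));	/* PREFACE_NO */
	return 0;
}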
void
HTTP1_Session(struct worker *wrk, struct req *req)
{
	enum req_fsm_nxt nxt = REQ_FSM_MORE;
	struct sess *sp;
	enum http1_cleanup_ret sdr;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	/*
	 * Whenever we come in from the acceptor or waiter, we need to set
	 * blocking mode.  It would be simpler to do this in the acceptor
	 * or waiter, but we'd rather do the syscall in the worker thread.
	 * On systems which return errors for ioctl, we close early
	 */
	if (sp->sess_step == S_STP_NEWREQ && VTCP_blocking(sp->fd)) {
		if (errno == ECONNRESET)
			SES_Close(sp, SC_REM_CLOSE);
		else
			SES_Close(sp, SC_TX_ERROR);
		sdr = http1_cleanup(sp, wrk, req);
		assert(sdr == SESS_DONE_RET_GONE);
		return;
	}

	/*
	 * Return from waitinglist.  Check to see if the remote has left.
	 */
	if (req->req_step == R_STP_LOOKUP && VTCP_check_hup(sp->fd)) {
		AN(req->hash_objhead);
		(void)HSH_DerefObjHead(wrk, &req->hash_objhead);
		AZ(req->hash_objhead);
		SES_Close(sp, SC_REM_CLOSE);
		sdr = http1_cleanup(sp, wrk, req);
		assert(sdr == SESS_DONE_RET_GONE);
		return;
	}

	if (sp->sess_step == S_STP_NEWREQ) {
		HTTP1_Init(req->htc, req->ws, sp->fd, req->vsl,
		    cache_param->http_req_size,
		    cache_param->http_req_hdr_len);
	}

	while (1) {
		assert(
		    sp->sess_step == S_STP_NEWREQ ||
		    req->req_step == R_STP_LOOKUP ||
		    req->req_step == R_STP_RECV);

		if (sp->sess_step == S_STP_WORKING) {
			if (req->req_step == R_STP_RECV)
				nxt = http1_dissect(wrk, req);
			if (nxt == REQ_FSM_MORE)
				nxt = CNT_Request(wrk, req);
			if (nxt == REQ_FSM_DISEMBARK)
				return;
			assert(nxt == REQ_FSM_DONE);
			sdr = http1_cleanup(sp, wrk, req);
			switch (sdr) {
			case SESS_DONE_RET_GONE:
				return;
			case SESS_DONE_RET_WAIT:
				sp->sess_step = S_STP_NEWREQ;
				break;
			case SESS_DONE_RET_START:
				sp->sess_step = S_STP_WORKING;
				req->req_step = R_STP_RECV;
				break;
			default:
				WRONG("Illegal enum http1_cleanup_ret");
			}
		}

		if (sp->sess_step == S_STP_NEWREQ) {
			nxt = http1_wait(sp, wrk, req);
			if (nxt != REQ_FSM_MORE)
				return;
			sp->sess_step = S_STP_WORKING;
			req->req_step = R_STP_RECV;
		}
	}
}
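
This oldest variant drives two nested state machines: a session-level step (S_STP_NEWREQ / S_STP_WORKING) chooses between waiting for a new request and working on the current one, while the request-level step is preserved across a REQ_FSM_DISEMBARK so the function can simply return and be re-entered later. The toy below sketches only that two-level shape; its states, fields, and transitions are invented for illustration and carry none of Varnish's semantics.

#include <stdio.h>

/* Hypothetical two-level state machine mirroring the shape above. */
enum sess_step { S_NEWREQ, S_WORKING, S_GONE };
enum req_step  { R_RECV, R_LOOKUP, R_DELIVER, R_DONE };

struct conn {
	enum sess_step sess_step;
	enum req_step req_step;
	int requests_left;	/* pretend input: how many requests will arrive */
};

/* One request-level transition; returns 1 while there is more to do. */
static int run_req_step(struct conn *c)
{
	switch (c->req_step) {
	case R_RECV:	c->req_step = R_LOOKUP;		return 1;
	case R_LOOKUP:	c->req_step = R_DELIVER;	return 1;
	case R_DELIVER:	c->req_step = R_DONE;		return 1;
	case R_DONE:	return 0;
	}
	return 0;
}

int main(void)
{
	struct conn c = { S_NEWREQ, R_RECV, 2 };

	while (c.sess_step != S_GONE) {
		if (c.sess_step == S_NEWREQ) {
			if (c.requests_left-- == 0) {
				c.sess_step = S_GONE;	/* idle: drop the session */
				break;
			}
			c.sess_step = S_WORKING;	/* a request arrived */
			c.req_step = R_RECV;
		} else if (c.sess_step == S_WORKING) {
			if (!run_req_step(&c)) {
				printf("request finished\n");
				c.sess_step = S_NEWREQ;	/* back to waiting */
			}
		}
	}
	printf("session closed\n");
	return 0;
}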