/*
 * Release the backend connection held by *bo after a fetch completes.
 *
 * If the fetch demands closing (bo->htc->doclose != SC_NULL) the
 * connection is handed to the TCP pool for closing, otherwise it is
 * recycled for reuse.  Either way the backend's open-connection count
 * and per-backend request byte accounting are updated under bp->mtx.
 */
vbe_dir_finish(const struct director *d, struct worker *wrk,
    struct busyobj *bo)
{
	struct backend *bp;
	struct vbc *vbc;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);
	CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
	CAST_OBJ_NOTNULL(vbc, bo->htc->priv, VBC_MAGIC);

	/* Detach the connection from the fetch before disposing of it. */
	bo->htc->priv = NULL;

	/* NOTE(review): assumes VBT_Wait() settles a not-yet-USED
	 * (stolen) connection before it can be closed or recycled —
	 * confirm against the VBT implementation. */
	if (vbc->state != VBC_STATE_USED)
		VBT_Wait(wrk, vbc);
	if (bo->htc->doclose != SC_NULL) {
		VSLb(bo->vsl, SLT_BackendClose, "%d %s", vbc->fd,
		    bp->display_name);
		VBT_Close(bp->tcp_pool, &vbc);
		Lck_Lock(&bp->mtx);
	} else {
		VSLb(bo->vsl, SLT_BackendReuse, "%d %s", vbc->fd,
		    bp->display_name);
		Lck_Lock(&bp->mtx);
		VSC_C_main->backend_recycle++;
		VBT_Recycle(wrk, bp->tcp_pool, &vbc);
	}
	assert(bp->n_conn > 0);
	bp->n_conn--;

	/* Charge this fetch's byte accounting to the backend counters;
	 * the field list is generated from tbl/acct_fields_bereq.h. */
#define ACCT(foo)	bp->vsc->foo += bo->acct.foo;
#include "tbl/acct_fields_bereq.h"
#undef ACCT
	Lck_Unlock(&bp->mtx);
	bo->htc = NULL;
}
/*
 * Feed one string into the request's hash context.
 *
 * A NULL str is treated as the empty string.  A '#' separator byte is
 * appended after every string so concatenated inputs cannot collide,
 * and the string is logged as SLT_Hash.
 */
void
HSH_AddString(struct req *req, const char *str)
{
	size_t l;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	if (str == NULL)
		str = "";
	/* size_t, not int: strlen() returns size_t; an int would
	 * silently truncate the length of very large inputs. */
	l = strlen(str);
	AN(req->sha256ctx);
	SHA256_Update(req->sha256ctx, str, l);
	SHA256_Update(req->sha256ctx, "#", 1);
	VSLb(req->vsl, SLT_Hash, "%s", str);
}
/*
 * Format a varargs string into hp's workspace and install it as header
 * field `fld`.  A NULL or empty result is treated as workspace
 * exhaustion: log `err` as a lost header and mark the workspace
 * overflowed instead of setting the field.
 */
static void
vrt_do_string(const struct http *hp, int fld, const char *err,
    const char *p, va_list ap)
{
	const char *formatted;

	CHECK_OBJ_NOTNULL(hp, HTTP_MAGIC);

	formatted = VRT_String(hp->ws, NULL, p, ap);
	if (formatted != NULL && *formatted != '\0') {
		http_SetH(hp, fld, formatted);
		return;
	}
	VSLb(hp->vsl, SLT_LostHeader, "%s", err);
	WS_MarkOverflow(hp->ws);
}
/*
 * Try to open a connection to vs's backend for busyobj bo.
 *
 * The backend's refcount and open-connection count are bumped
 * optimistically up front so the mutex is not held across the connect
 * attempts (which can block), and rolled back if every attempt fails.
 * Address-family order: IPv6 first when prefer_ipv6 is set, then IPv4,
 * then IPv6 as the final fallback.  On success vc->fd/vc->addr
 * describe the connection and a BackendOpen record is logged.
 */
static void
bes_conn_try(struct busyobj *bo, struct vbc *vc, const struct vdi_simple *vs)
{
	int s;
	struct backend *bp = vs->backend;
	char abuf1[VTCP_ADDRBUFSIZE];
	char pbuf1[VTCP_PORTBUFSIZE];

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);

	Lck_Lock(&bp->mtx);
	bp->refcount++;
	bp->n_conn++;			/* It mostly works */
	Lck_Unlock(&bp->mtx);

	s = -1;
	assert(bp->ipv6 != NULL || bp->ipv4 != NULL);

	/* release lock during stuff that can take a long time */

	if (cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
		s = vbe_TryConnect(bo, PF_INET6, bp->ipv6, vs);
		vc->addr = bp->ipv6;
	}
	if (s == -1 && bp->ipv4 != NULL) {
		s = vbe_TryConnect(bo, PF_INET, bp->ipv4, vs);
		vc->addr = bp->ipv4;
	}
	if (s == -1 && !cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
		s = vbe_TryConnect(bo, PF_INET6, bp->ipv6, vs);
		vc->addr = bp->ipv6;
	}
	vc->fd = s;
	if (s < 0) {
		/* Roll back the optimistic bump from above. */
		Lck_Lock(&bp->mtx);
		bp->n_conn--;
		bp->refcount--;		/* Only keep ref on success */
		Lck_Unlock(&bp->mtx);
		vc->addr = NULL;
	} else {
		VTCP_myname(s, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
		VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s ",
		    vc->fd, vs->backend->display_name, abuf1, pbuf1);
	}
}
/*
 * Emit a VCL trace record for VCL reference `u`: the source file
 * number, line and position of the currently executing VCL statement.
 * Logged to the task's VSL buffer when one exists, otherwise to the
 * global log with vxid 0.
 */
void
VRT_count(VRT_CTX, unsigned u)
{
	const struct vrt_ref *vr;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->vcl->conf, VCL_CONF_MAGIC);
	assert(u < ctx->vcl->conf->nref);

	vr = &ctx->vcl->conf->ref[u];
	if (ctx->vsl != NULL)
		VSLb(ctx->vsl, SLT_VCL_trace, "%s %u %u.%u.%u",
		    ctx->vcl->loaded_name, u, vr->source, vr->line, vr->pos);
	else
		VSL(SLT_VCL_trace, 0, "%s %u %u.%u.%u",
		    ctx->vcl->loaded_name, u, vr->source, vr->line, vr->pos);
}
/*
 * Copy `string` into to's workspace and install it as header field
 * `field` (b/e pointing at the copy, flags cleared).
 * On workspace exhaustion the http is marked failed, the string is
 * logged as a lost header and the field is left untouched.
 */
static void
http_PutField(const struct http *to, int field, const char *string)
{
	char *p;

	CHECK_OBJ_NOTNULL(to, HTTP_MAGIC);
	/* len -1: presumably "copy through the NUL terminator" —
	 * confirm against the WS_Copy() contract. */
	p = WS_Copy(to->ws, string, -1);
	if (p == NULL) {
		http_fail(to);
		VSLb(to->vsl, SLT_LostHeader, "%s", string);
		return;
	}
	to->hd[field].b = p;
	to->hd[field].e = strchr(p, '\0');
	to->hdf[field] = 0;
	http_VSLH(to, field);
}
/*
 * Match string s against the precompiled regex re.
 * Returns 1 on a match, 0 otherwise; a NULL subject matches as "".
 * VRE errors other than plain no-match are logged as VCL errors.
 */
int
VRT_re_match(struct req *req, const char *s, void *re)
{
	vre_t *vre;
	int rc;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(re);
	vre = re;
	if (s == NULL)
		s = "";
	rc = VRE_exec(vre, s, strlen(s), 0, 0, NULL, 0,
	    &cache_param->vre_limits);
	if (rc >= 0)
		return (1);
	if (rc < VRE_ERROR_NOMATCH)
		VSLb(req->vsl, SLT_VCL_Error,
		    "Regexp matching returned %d", rc);
	return (0);
}
/*
 * VCL setter for client.identity: format the varargs string into the
 * request's workspace and point req->client_identity at it.  On
 * workspace exhaustion, log a lost header and mark the workspace
 * overflowed, leaving the current identity untouched.
 */
void
VRT_l_client_identity(const struct vrt_ctx *ctx, const char *str, ...)
{
	va_list ap;
	const char *id;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);

	va_start(ap, str);
	id = VRT_String(ctx->req->http->ws, NULL, str, ap);
	va_end(ap);
	if (id != NULL) {
		ctx->req->client_identity = id;
		return;
	}
	VSLb(ctx->vsl, SLT_LostHeader, "client.identity");
	WS_MarkOverflow(ctx->req->http->ws);
}
/*
 * VCL setter for beresp.storage_hint: format the varargs string into
 * the busyobj's workspace and record it as the storage hint.  On
 * workspace exhaustion, log a lost header and mark overflow.
 */
void
VRT_l_beresp_storage_hint(const struct vrt_ctx *ctx, const char *str, ...)
{
	va_list ap;
	const char *b;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
	va_start(ap, str);
	b = VRT_String(ctx->bo->ws, NULL, str, ap);	// XXX: ctx->ws ?
	va_end(ap);
	if (b == NULL) {
		VSLb(ctx->vsl, SLT_LostHeader, "storage.hint");
		/* NOTE(review): the allocation that failed came from
		 * ctx->bo->ws, but overflow is marked on beresp->ws —
		 * verify these are the same workspace, otherwise the
		 * wrong one gets flagged. */
		WS_MarkOverflow(ctx->bo->beresp->ws);
		return;
	}
	ctx->bo->storage_hint = b;
}
int STV_NewObject(struct worker *wrk, struct objcore *oc, const struct stevedore *stv, unsigned wsl) { CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC); assert(wsl > 0); wrk->strangelove = cache_param->nuke_limit; AN(stv->allocobj); if (stv->allocobj(wrk, stv, oc, wsl) == 0) return (0); wrk->stats->n_object++; VSLb(wrk->vsl, SLT_Storage, "%s %s", oc->stobj->stevedore->name, oc->stobj->stevedore->ident); return (1); }
/*
 * VFP init for ESI processing: refuse partial (206) responses,
 * allocate the per-fetch vef private state, flag the object as
 * ESI-processed and set up the VEP parser.  Returns VFP_ERROR on a
 * 206 response or allocation failure, VFP_OK otherwise.
 */
vfp_esi_init(struct vfp_ctx *vc, struct vfp_entry *vfe)
{
	struct vef_priv *vef;

	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vc->req, HTTP_MAGIC);
	if (http_GetStatus(vc->resp) == 206) {
		/* A partial body cannot be ESI-parsed coherently. */
		VSLb(vc->wrk->vsl, SLT_VCL_Error,
		    "Attempted ESI on partial (206) response");
		return (VFP_ERROR);
	}
	ALLOC_OBJ(vef, VEF_MAGIC);
	if (vef == NULL)
		return (VFP_ERROR);
	vc->obj_flags |= OF_ESIPROC;
	vef->vep = VEP_Init(vc, vc->req, NULL, NULL);
	vfe->priv1 = vef;
	return (VFP_OK);
}
/*
 * header.append(): format the varargs string and append it as a new
 * header on the http selected by hdr->where.  On workspace exhaustion
 * the header is logged as lost.
 */
vmod_append(VRT_CTX, VCL_HEADER hdr, const char *fmt, ...)
{
	va_list ap;
	struct http *hp;
	const char *b;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	assert(fmt != NULL);
	hp = VRT_selecthttp(ctx, hdr->where);
	va_start(ap, fmt);
	/* hdr->what + 1: presumably skips a leading length/marker byte
	 * of the header-name string — confirm against VCL_HEADER layout. */
	b = VRT_String(hp->ws, hdr->what + 1, fmt, ap);
	if (b == NULL)
		VSLb(ctx->vsl, SLT_LostHeader,
		    "vmod_header: %s", hdr->what + 1);
	else
		http_SetHeader(hp, b);
	va_end(ap);
}
/*
 * std.ip(): resolve string s to an IP address, falling back to d when
 * s is NULL, does not resolve, or the workspace is exhausted.  The
 * returned address (resolved or a copy of d) lives in ctx->ws.
 */
VCL_IP
vmod_ip(VRT_CTX, VCL_STRING s, VCL_IP d)
{
	struct addrinfo hints, *res0 = NULL;
	const struct addrinfo *res;
	int error;
	void *p;
	struct suckaddr *r;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(d);
	assert(VSA_Sane(d));

	p = WS_Alloc(ctx->ws, vsa_suckaddr_len);
	if (p == NULL) {
		VSLb(ctx->vsl, SLT_VCL_Error,
		    "vmod std.ip(): insufficient workspace");
		return d;
	}
	r = NULL;

	if (s != NULL) {
		memset(&hints, 0, sizeof(hints));
		hints.ai_family = PF_UNSPEC;
		hints.ai_socktype = SOCK_STREAM;
		/* "80" is presumably just a placeholder service so
		 * getaddrinfo() succeeds; verify the port is ignored
		 * by callers of the returned address. */
		error = getaddrinfo(s, "80", &hints, &res0);
		if (!error) {
			/* Use the first result VSA_Build() accepts. */
			for (res = res0; res != NULL; res = res->ai_next) {
				r = VSA_Build(p, res->ai_addr,
				    res->ai_addrlen);
				if (r != NULL)
					break;
			}
		}
	}
	if (r == NULL) {
		/* Resolution failed: hand back a copy of the default. */
		r = p;
		memcpy(r, d, vsa_suckaddr_len);
	}
	if (res0 != NULL)
		freeaddrinfo(res0);
	return (r);
}
/*
 * Pipe mode through the backend director: get a backend connection,
 * send the bereq, then shovel bytes both ways via V1P_Process().
 * Returns the session-close reason; all byte counts are charged
 * through V1P_Charge() at the end.
 */
static enum sess_close
vbe_dir_http1pipe(const struct director *d, struct req *req,
    struct busyobj *bo)
{
	int i;
	enum sess_close retval;
	struct backend *bp;
	struct v1p_acct v1a;
	struct vbc *vbc;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	memset(&v1a, 0, sizeof v1a);

	/* This is hackish... */
	/* Move the already-counted request header bytes into the pipe
	 * accounting so they are not charged twice. */
	v1a.req = req->acct.req_hdrbytes;
	req->acct.req_hdrbytes = 0;

	req->res_mode = RES_PIPE;

	vbc = vbe_dir_getfd(req->wrk, bp, bo);

	if (vbc == NULL) {
		VSLb(bo->vsl, SLT_FetchError, "no backend connection");
		retval = SC_TX_ERROR;
	} else {
		i = V1F_SendReq(req->wrk, bo, &v1a.bereq, 1);
		VSLb_ts_req(req, "Pipe", W_TIM_real(req->wrk));
		/* A stolen connection must be settled before use. */
		if (vbc->state == VBC_STATE_STOLEN)
			VBT_Wait(req->wrk, vbc);
		if (i == 0)
			V1P_Process(req, vbc->fd, &v1a);
		VSLb_ts_req(req, "PipeSess", W_TIM_real(req->wrk));
		/* Pipe connections are never reusable. */
		bo->htc->doclose = SC_TX_PIPE;
		vbe_dir_finish(d, req->wrk, bo);
		retval = SC_TX_PIPE;
	}
	V1P_Charge(req, &v1a, bp->vsc);
	return (retval);
}
/* Possible error returns:
 *
 * H2E_COMPRESSION_ERROR: Lost compression state due to incomplete header
 * block. This is a connection level error.
 *
 * H2E_ENHANCE_YOUR_CALM: Ran out of workspace or http header space. This
 * is a stream level error.
 */

/*
 * Finish an HPACK header-block decode: release the workspace used for
 * the decoded output, map a dangling VHD state to a compression
 * error, and invalidate the decoder object.  Returns the error
 * described above, or d->error from earlier decoding.
 */
int
h2h_decode_fini(const struct h2_sess *h2, struct h2h_decode *d)
{
	int ret;

	CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
	CHECK_OBJ_NOTNULL(h2->new_req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(d, H2H_DECODE_MAGIC);
	WS_ReleaseP(h2->new_req->http->ws, d->out);
	if (d->vhd_ret != VHD_OK) {
		/* HPACK header block didn't finish at an instruction
		 * boundary */
		VSLb(h2->new_req->http->vsl, SLT_BogoHeader,
		    "HPACK compression error (%s)", VHD_Error(d->vhd_ret));
		ret = H2E_COMPRESSION_ERROR;
	} else
		ret = d->error;
	/* Clearing the magic invalidates d against further use. */
	d->magic = 0;
	return (ret);
}
static uint16_t htc_request_check_host_hdr(const struct http *hp) { int u; int seen_host = 0; for (u = HTTP_HDR_FIRST; u < hp->nhd; u++) { if (hp->hd[u].b == NULL) continue; AN(hp->hd[u].b); AN(hp->hd[u].e); if (http_IsHdr(&hp->hd[u], H_Host)) { if (seen_host) { VSLb(hp->vsl, SLT_Error, "Duplicated Host header"); return (400); } seen_host = 1; } } return (0); }
/*
 * Charge pipe-session byte counts to the global and per-backend
 * counters, and log a PipeAcct record.
 *
 * Direction note: the swap below (b->pipe_out += a->in and vice
 * versa) suggests a->in/a->out are counted from the client session's
 * point of view, so they reverse for the backend — NOTE(review):
 * confirm against struct acct_pipe's definition.
 */
static void
pipecharge(struct req *req, const struct acct_pipe *a, struct VSC_C_vbe *b)
{
	VSLb(req->vsl, SLT_PipeAcct, "%ju %ju %ju %ju",
	    (uintmax_t)a->req,
	    (uintmax_t)a->bereq,
	    (uintmax_t)a->in,
	    (uintmax_t)a->out);

	Lck_Lock(&pipestat_mtx);
	VSC_C_main->s_pipe_hdrbytes += a->req;
	VSC_C_main->s_pipe_in += a->in;
	VSC_C_main->s_pipe_out += a->out;
	if (b != NULL) {
		b->pipe_hdrbytes += a->bereq;
		b->pipe_out += a->in;
		b->pipe_in += a->out;
	}
	Lck_Unlock(&pipestat_mtx);
}
/*
 * Copy `string` into to's workspace and install it as header field
 * `field`.  On workspace exhaustion the string is logged as a lost
 * header and the field is cleared.
 */
static void
http_PutField(const struct http *to, int field, const char *string)
{
	char *p;
	unsigned l;

	CHECK_OBJ_NOTNULL(to, HTTP_MAGIC);
	/* NOTE(review): strlen() returns size_t; storing it in an
	 * unsigned truncates on absurdly long strings — consider
	 * size_t. */
	l = strlen(string);
	p = WS_Alloc(to->ws, l + 1);
	if (p == NULL) {
		VSLb(to->vsl, SLT_LostHeader, "%s", string);
		to->hd[field].b = NULL;
		to->hd[field].e = NULL;
		to->hdf[field] = 0;
	} else {
		/* l + 1L: the copy includes the terminating NUL. */
		memcpy(p, string, l + 1L);
		to->hd[field].b = p;
		to->hd[field].e = p + l;
		to->hdf[field] = 0;
	}
}
/*
 * Worker-thread body for a fetch: drive the fetch state machine from
 * MKBEREQ until DONE, logging every step transition, then release the
 * busyobj and any remaining references.
 */
static void
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct busyobj *bo;
	enum fetch_step stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;

	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		bo->step = stp;
		/* Dispatch to the vbf_stp_* handler for this step; the
		 * case list is generated from tbl/steps.h. */
		switch(stp) {
#define FETCH_STEP(l, U, arg)						\
		case F_STP_##U:						\
			stp = vbf_stp_##l arg;				\
			break;
#include "tbl/steps.h"
#undef FETCH_STEP
		default:
			WRONG("Illegal fetch_step");
		}
		VSLb(bo->vsl, SLT_Debug, "%s -> %s",
		    vbf_step_name(bo->step), vbf_step_name(stp));
	}

	assert(WRW_IsReleased(wrk));

	if (bo->state == BOS_FAILED)
		assert(bo->fetch_objcore->flags & OC_F_FAILED);

	if (bo->ims_obj != NULL)
		(void)HSH_DerefObj(&wrk->stats, &bo->ims_obj);
	VBO_DerefBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}
/*
 * Run one deflate() step over the data already staged in vg->vz.
 *
 * `flags` selects the zlib flush mode.  On Z_OK or Z_STREAM_END,
 * *pptr/*plen describe the output bytes produced by this call (the
 * advance of next_out).  Returns VGZ_OK, VGZ_END on stream end,
 * VGZ_STUCK on Z_BUF_ERROR (no progress possible), and VGZ_ERROR
 * (logged) for anything else.
 */
enum vgzret_e
VGZ_Gzip(struct vgz *vg, const void **pptr, ssize_t *plen,
    enum vgz_flag flags)
{
	int i;
	int zflg;
	ssize_t l;
	const uint8_t *before;

	CHECK_OBJ_NOTNULL(vg, VGZ_MAGIC);

	*pptr = NULL;
	*plen = 0;
	AN(vg->vz.next_out);
	AN(vg->vz.avail_out);
	before = vg->vz.next_out;
	switch(flags) {
	case VGZ_NORMAL:	zflg = Z_NO_FLUSH; break;
	case VGZ_ALIGN:		zflg = Z_SYNC_FLUSH; break;
	case VGZ_RESET:		zflg = Z_FULL_FLUSH; break;
	case VGZ_FINISH:	zflg = Z_FINISH; break;
	default:		INCOMPL();
	}
	i = deflate(&vg->vz, zflg);
	if (i == Z_OK || i == Z_STREAM_END) {
		*pptr = before;
		l = (const uint8_t *)vg->vz.next_out - before;
		*plen = l;
	}
	/* Remembered so VGZ_Destroy() can report a clean stream end. */
	vg->last_i = i;
	if (i == Z_OK)
		return (VGZ_OK);
	if (i == Z_STREAM_END)
		return (VGZ_END);
	if (i == Z_BUF_ERROR)
		return (VGZ_STUCK);
	VSLb(vg->vsl, SLT_Gzip, "Gzip error: %d (%s)", i, vgz_msg(vg));
	return (VGZ_ERROR);
}
void VRT_hashdata(const struct vrt_ctx *ctx, const char *str, ...) { va_list ap; const char *p; CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC); HSH_AddString(ctx->req, str); va_start(ap, str); while (1) { p = va_arg(ap, const char *); if (p == vrt_magic_string_end) break; HSH_AddString(ctx->req, p); VSLb(ctx->vsl, SLT_Hash, "%s", str); } /* * Add a 'field-separator' to make it more difficult to * manipulate the hash. */ HSH_AddString(ctx->req, NULL); }
/*
 * Ensure every header of hp points into hp's own workspace.
 * Headers still referencing foreign memory are copied home; on
 * workspace exhaustion the http is failed and the offending header
 * logged as lost.
 */
void
http_CopyHome(const struct http *hp)
{
	unsigned u, l;
	char *p;

	for (u = 0; u < hp->nhd; u++) {
		if (hp->hd[u].b == NULL)
			continue;
		/* Already inside our workspace: nothing to do. */
		if (hp->hd[u].b >= hp->ws->s && hp->hd[u].e <= hp->ws->e)
			continue;

		l = Tlen(hp->hd[u]);
		/* l + 1L: presumably includes the NUL terminator in the
		 * copy — confirm against the WS_Copy() contract. */
		p = WS_Copy(hp->ws, hp->hd[u].b, l + 1L);
		if (p == NULL) {
			http_fail(hp);
			VSLb(hp->vsl, SLT_LostHeader, "%s", hp->hd[u].b);
			return;
		}
		hp->hd[u].b = p;
		hp->hd[u].e = p + l;
	}
}
static enum req_fsm_nxt cnt_restart(struct worker *wrk, struct req *req) { CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CHECK_OBJ_NOTNULL(req, REQ_MAGIC); req->director_hint = NULL; if (++req->restarts >= cache_param->max_restarts) { VSLb(req->vsl, SLT_VCL_Error, "Too many restarts"); req->err_code = 503; req->req_step = R_STP_SYNTH; } else { // XXX: ReqEnd + ReqAcct ? VSLb_ts_req(req, "Restart", W_TIM_real(wrk)); VSL_ChgId(req->vsl, "req", "restart", VXID_Get(wrk, VSL_CLIENTMARKER)); VSLb_ts_req(req, "Start", req->t_prev); req->err_code = 0; req->req_step = R_STP_RECV; } return (REQ_FSM_MORE); }
/*
 * Close a backend connection and release it.
 *
 * *vbp is cleared, the fd closed, the backend's connection reference
 * dropped (charging acct_bereq) and the vbc itself released.
 */
void
VBE_CloseFd(struct vbc **vbp, const struct acct_bereq *acct_bereq)
{
	struct backend *bp;
	struct vbc *vc;

	AN(vbp);
	vc = *vbp;
	*vbp = NULL;		/* the caller's pointer is dead from here */
	CHECK_OBJ_NOTNULL(vc, VBC_MAGIC);
	CHECK_OBJ_NOTNULL(vc->backend, BACKEND_MAGIC);
	assert(vc->fd >= 0);

	bp = vc->backend;

	VSLb(vc->vsl, SLT_BackendClose, "%d %s", vc->fd, bp->display_name);
	vc->vsl = NULL;

	VTCP_close(&vc->fd);
	VBE_DropRefConn(bp, acct_bereq);
	vc->backend = NULL;
	VBE_ReleaseConn(vc);
}
void http_PrintfHeader(struct http *to, const char *fmt, ...) { va_list ap; unsigned l, n; CHECK_OBJ_NOTNULL(to, HTTP_MAGIC); l = WS_Reserve(to->ws, 0); va_start(ap, fmt); n = vsnprintf(to->ws->f, l, fmt, ap); va_end(ap); if (n + 1 >= l || to->nhd >= to->shd) { VSC_C_main->losthdr++; VSLb(to->vsl, SLT_LostHeader, "%s", to->ws->f); WS_Release(to->ws, 0); } else { to->hd[to->nhd].b = to->ws->f; to->hd[to->nhd].e = to->ws->f + n; to->hdf[to->nhd] = 0; WS_Release(to->ws, n + 1); to->nhd++; } }
int VGZ_Destroy(struct vgz **vgp) { struct vgz *vg; int i; vg = *vgp; CHECK_OBJ_NOTNULL(vg, VGZ_MAGIC); *vgp = NULL; VSLb(vg->vsl, SLT_Gzip, "%s %jd %jd %jd %jd %jd", vg->id, (intmax_t)vg->vz.total_in, (intmax_t)vg->vz.total_out, (intmax_t)vg->vz.start_bit, (intmax_t)vg->vz.last_bit, (intmax_t)vg->vz.stop_bit); if (vg->tmp != NULL) WS_Reset(vg->tmp, vg->tmp_snapshot); if (vg->dir == VGZ_GZ) i = deflateEnd(&vg->vz); else i = inflateEnd(&vg->vz); if (vg->last_i == Z_STREAM_END && i == Z_OK) i = Z_STREAM_END; if (vg->m_buf) free(vg->m_buf); FREE_OBJ(vg); if (i == Z_OK) return (VGZ_OK); if (i == Z_STREAM_END) return (VGZ_END); if (i == Z_BUF_ERROR) return (VGZ_STUCK); return (VGZ_ERROR); }
/*
 * Return a backend connection to its backend's reuse list.
 *
 * *vbp is cleared and the vbc inserted at the head of bp->connlist
 * (LIFO: the most recently used connection is reused first).
 * NOTE(review): bp->mtx is locked here but never unlocked in this
 * function — presumably VBE_DropRefLocked() releases it; confirm.
 */
static void
vbe_RecycleFd(struct vbc **vbp, const struct acct_bereq *acct_bereq)
{
	struct backend *bp;
	struct vbc *vc;

	AN(vbp);
	vc = *vbp;
	*vbp = NULL;
	CHECK_OBJ_NOTNULL(vc, VBC_MAGIC);
	CHECK_OBJ_NOTNULL(vc->backend, BACKEND_MAGIC);
	assert(vc->fd >= 0);

	bp = vc->backend;

	VSLb(vc->vsl, SLT_BackendReuse, "%d %s", vc->fd, bp->display_name);
	vc->vsl = NULL;

	Lck_Lock(&bp->mtx);
	VSC_C_main->backend_recycle++;
	VTAILQ_INSERT_HEAD(&bp->connlist, vc, list);
	VBE_DropRefLocked(bp, acct_bereq);
}
/*
 * Execute one ESI <esi:include>: build a sub-request for `src` (with
 * optional Host override `host`), run it synchronously through the
 * request state machine, and deliver its output through the parent's
 * ECX transport.  Gives up silently when max_esi_depth is exceeded.
 */
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct req *req;
	enum req_fsm_nxt s;
	struct transport xp;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	/* Recursion guard: silently drop includes nested too deep. */
	if (preq->esi_level >= cache_param->max_esi_depth)
		return;

	req = Req_New(wrk, preq->sp);
	req->req_body_status = REQ_BODY_NONE;
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	/* Cross-link parent and child in the log. */
	VSLb(req->vsl, SLT_Begin, "req %u esi", VXID(preq->vsl->wid));
	VSLb(preq->vsl, SLT_Link, "req %u esi", VXID(req->vsl->wid));

	req->esi_level = preq->esi_level + 1;

	if (preq->esi_level == 0)
		assert(preq->top == preq);
	else
		CHECK_OBJ_NOTNULL(preq->top, REQ_MAGIC);

	req->top = preq->top;

	HTTP_Copy(req->http0, preq->http0);

	req->http0->ws = req->ws;
	req->http0->vsl = req->vsl;
	req->http0->logtag = SLT_ReqMethod;
	req->http0->conds = 0;

	/* Rewrite the copied request into a plain GET for src. */
	http_SetH(req->http0, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0') {
		http_Unset(req->http0, H_Host);
		http_SetHeader(req->http0, host);
	}

	http_ForceField(req->http0, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http0, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http0, H_If_Modified_Since);
	http_Unset(req->http0, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http0, H_Range);

	/* Set Accept-Encoding according to what we want */
	http_Unset(req->http0, H_Accept_Encoding);
	if (ecx->isgzip)
		http_ForceHeader(req->http0, H_Accept_Encoding, "gzip");

	/* Client content already taken care of */
	http_Unset(req->http0, H_Content_Length);

	/* Reset request to status before we started messing with it */
	HTTP_Copy(req->http, req->http0);

	/* The VCL reference is borrowed by the child for the duration. */
	req->vcl = preq->vcl;
	preq->vcl = NULL;
	req->wrk = preq->wrk;

	/*
	 * XXX: We should decide if we should cache the director
	 * XXX: or not (for session/backend coupling). Until then
	 * XXX: make sure we don't trip up the check in vcl_recv.
	 */
	req->req_step = R_STP_RECV;
	req->t_req = preq->t_req;
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));

	/* Deliver through the ESI transport into the parent's output. */
	INIT_OBJ(&xp, TRANSPORT_MAGIC);
	xp.deliver = VED_Deliver;
	req->transport = &xp;
	req->transport_priv = ecx;

	THR_SetRequest(req);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	req->ws_req = WS_Snapshot(req->ws);

	/* Run the child request to completion on this thread; a
	 * DISEMBARK (e.g. waiting list) is retried after a short sleep. */
	while (1) {
		req->wrk = wrk;
		s = CNT_Request(wrk, req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "loop waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		AZ(req->wrk);
		(void)usleep(10000);
	}

	VRTPRIV_dynamic_kill(req->sp->privs, (uintptr_t)req);
	CNT_AcctLogCharge(wrk->stats, req);
	VSL_End(req->vsl);

	/* Hand the VCL reference back to the parent. */
	preq->vcl = req->vcl;
	req->vcl = NULL;
	req->wrk = NULL;

	THR_SetRequest(preq);
	Req_Release(req);
}
VDP_ESI(struct req *req, enum vdp_action act, void **priv, const void *ptr, ssize_t len) { uint8_t *q, *r; ssize_t l = 0; uint32_t icrc = 0; uint8_t tailbuf[8 + 5]; const uint8_t *pp; struct ecx *ecx, *pecx; int retval = 0; if (act == VDP_INIT) { AZ(*priv); ALLOC_OBJ(ecx, ECX_MAGIC); AN(ecx); ecx->preq = req; *priv = ecx; return (0); } CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC); if (act == VDP_FINI) { FREE_OBJ(ecx); *priv = NULL; return (0); } pp = ptr; while (1) { switch (ecx->state) { case 0: ecx->p = ObjGetattr(req->wrk, req->objcore, OA_ESIDATA, &l); AN(ecx->p); assert(l > 0); ecx->e = ecx->p + l; if (*ecx->p == VEC_GZ) { ecx->isgzip = 1; ecx->p++; } if (req->esi_level == 0) { /* * Only the top level document gets to * decide this. */ if (ecx->isgzip) { assert(sizeof gzip_hdr == 10); /* Send out the gzip header */ retval = VDP_bytes(req, VDP_NULL, gzip_hdr, 10); ecx->l_crc = 0; ecx->crc = crc32(0L, Z_NULL, 0); } } ecx->state = 1; break; case 1: if (ecx->p >= ecx->e) { ecx->state = 2; break; } switch (*ecx->p) { case VEC_V1: case VEC_V2: case VEC_V8: ecx->l = ved_decode_len(req, &ecx->p); if (ecx->l < 0) return (-1); if (ecx->isgzip) { assert(*ecx->p == VEC_C1 || *ecx->p == VEC_C2 || *ecx->p == VEC_C8); l = ved_decode_len(req, &ecx->p); if (l < 0) return (-1); icrc = vbe32dec(ecx->p); ecx->p += 4; if (ecx->isgzip) { ecx->crc = crc32_combine( ecx->crc, icrc, l); ecx->l_crc += l; } } ecx->state = 3; break; case VEC_S1: case VEC_S2: case VEC_S8: ecx->l = ved_decode_len(req, &ecx->p); if (ecx->l < 0) return (-1); Debug("SKIP1(%d)\n", (int)ecx->l); ecx->state = 4; break; case VEC_INCL: ecx->p++; q = (void*)strchr((const char*)ecx->p, '\0'); AN(q); q++; r = (void*)strchr((const char*)q, '\0'); AN(r); if (VDP_bytes(req, VDP_FLUSH, NULL, 0)) { ecx->p = ecx->e; break; } Debug("INCL [%s][%s] BEGIN\n", q, ecx->p); ved_include(req, (const char*)q, (const char*)ecx->p, ecx); Debug("INCL [%s][%s] END\n", q, ecx->p); ecx->p = r + 1; break; default: VSLb(req->vsl, SLT_Error, "ESI 
corruption line %d 0x%02x [%s]\n", __LINE__, *ecx->p, ecx->p); WRONG("ESI-codes: Illegal code"); } break; case 2: if (ecx->isgzip && req->esi_level == 0) { /* * We are bytealigned here, so simply emit * a gzip literal block with finish bit set. */ tailbuf[0] = 0x01; tailbuf[1] = 0x00; tailbuf[2] = 0x00; tailbuf[3] = 0xff; tailbuf[4] = 0xff; /* Emit CRC32 */ vle32enc(tailbuf + 5, ecx->crc); /* MOD(2^32) length */ vle32enc(tailbuf + 9, ecx->l_crc); (void)VDP_bytes(req, VDP_NULL, tailbuf, 13); } if (req->transport->deliver == VED_Deliver) { CAST_OBJ_NOTNULL(pecx, req->transport_priv, ECX_MAGIC); pecx->crc = crc32_combine(pecx->crc, ecx->crc, ecx->l_crc); pecx->l_crc += ecx->l_crc; } retval = VDP_bytes(req, VDP_FLUSH, NULL, 0); ecx->state = 99; return (retval); case 3: case 4: /* * There is no guarantee that the 'l' bytes are all * in the same storage segment, so loop over storage * until we have processed them all. */ if (ecx->l <= len) { if (ecx->state == 3) retval = VDP_bytes(req, act, pp, ecx->l); len -= ecx->l; pp += ecx->l; ecx->state = 1; break; } if (ecx->state == 3 && len > 0) retval = VDP_bytes(req, act, pp, len); ecx->l -= len; return (retval); case 99: /* * VEP does not account for the PAD+CRC+LEN * so we can see up to approx 15 bytes here. */ return (retval); default: WRONG("FOO"); break; } if (retval) return (retval); } }
/*
 * Kick off a fetch for objcore oc on behalf of req.
 *
 * Creates a busyobj, transfers the vary matching state and references
 * (oc, optional stale oldoc, session), and schedules vbf_fetch_thread
 * on the worker pool.  On scheduling failure the fetch is failed
 * inline and all references rolled back.  For VBF_BACKGROUND the
 * caller only waits for BOS_REQ_DONE; otherwise it waits until the
 * object is streamable (or failed).
 */
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
	struct boc *boc;
	struct busyobj *bo;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	boc = HSH_RefBoc(oc);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	switch (mode) {
	case VBF_PASS:
		how = "pass";
		bo->do_pass = 1;
		break;
	case VBF_NORMAL:
		how = "fetch";
		break;
	case VBF_BACKGROUND:
		how = "bgfetch";
		bo->is_bgfetch = 1;
		break;
	default:
		WRONG("Wrong fetch mode");
	}

	/* Cross-link request and bereq in the log. */
	VSLb(bo->vsl, SLT_Begin, "bereq %u %s", VXID(req->vsl->wid), how);
	VSLb(req->vsl, SLT_Link, "bereq %u %s", VXID(bo->vsl->wid), how);
	THR_SetBusyobj(bo);
	bo->sp = req->sp;
	SES_Ref(bo->sp);

	AN(bo->vcl);

	/* Ownership of the vary match buffer moves to the boc. */
	oc->boc->vary = req->vary_b;
	req->vary_b = NULL;

	HSH_Ref(oc);
	AZ(bo->fetch_objcore);
	bo->fetch_objcore = oc;

	AZ(bo->stale_oc);
	if (oldoc != NULL) {
		assert(oldoc->refcnt > 0);
		HSH_Ref(oldoc);
		bo->stale_oc = oldoc;
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task.priv = bo;
	bo->fetch_task.func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, &bo->fetch_task, TASK_QUEUE_BO)) {
		/* No thread available: fail the fetch inline and undo
		 * all the references taken above. */
		wrk->stats->fetch_no_thread++;
		(void)vbf_stp_fail(req->wrk, bo);
		if (bo->stale_oc != NULL)
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
		HSH_DerefBoc(wrk, oc);
		SES_Rel(bo->sp);
		VBO_ReleaseBusyObj(wrk, &bo);
	} else {
		bo = NULL; /* ref transferred to fetch thread */
		if (mode == VBF_BACKGROUND) {
			ObjWaitState(oc, BOS_REQ_DONE);
			(void)VRB_Ignore(req);
		} else {
			ObjWaitState(oc, BOS_STREAM);
			if (oc->boc->state == BOS_FAILED) {
				AN((oc->flags & OC_F_FAILED));
			} else {
				AZ(oc->flags & OC_F_BUSY);
			}
		}
	}
	AZ(bo);
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	assert(oc->boc == boc);
	HSH_DerefBoc(wrk, oc);
	if (mode == VBF_BACKGROUND)
		(void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
	THR_SetBusyobj(NULL);
}