void
VBO_DerefBusyObj(struct worker *wrk, struct busyobj **pbo)
{
    struct busyobj *bo;
    struct objcore *oc = NULL;
    unsigned r;

    CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);
    AN(pbo);
    bo = *pbo;
    *pbo = NULL;
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
    CHECK_OBJ_ORNULL(bo->fetch_objcore, OBJCORE_MAGIC);
    CHECK_OBJ_ORNULL(bo->fetch_obj, OBJECT_MAGIC);

    if (bo->fetch_objcore != NULL) {
        oc = bo->fetch_objcore;
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
        Lck_Lock(&oc->objhead->mtx);
        assert(bo->refcount > 0);
        r = --bo->refcount;
        Lck_Unlock(&oc->objhead->mtx);
    } else {
        Lck_Lock(&bo->mtx);
        assert(bo->refcount > 0);
        r = --bo->refcount;
        Lck_Unlock(&bo->mtx);
    }

    if (r)
        return;

    VSLb(bo->vsl, SLT_BereqAcct, "%ju %ju %ju %ju %ju %ju",
        (uintmax_t)bo->acct.bereq_hdrbytes,
        (uintmax_t)bo->acct.bereq_bodybytes,
        (uintmax_t)(bo->acct.bereq_hdrbytes + bo->acct.bereq_bodybytes),
        (uintmax_t)bo->acct.beresp_hdrbytes,
        (uintmax_t)bo->acct.beresp_bodybytes,
        (uintmax_t)(bo->acct.beresp_hdrbytes + bo->acct.beresp_bodybytes));
    VSL_End(bo->vsl);

    if (bo->fetch_objcore != NULL) {
        AN(wrk);
        (void)HSH_DerefObjCore(&wrk->stats, &bo->fetch_objcore);
    }

    VCL_Rel(&bo->vcl);

    if (bo->vary != NULL)
        free(bo->vary);

    /* Zero the tail of the struct, from refcount onwards */
    memset(&bo->refcount, 0,
        sizeof *bo - offsetof(struct busyobj, refcount));

    if (cache_param->bo_cache && wrk != NULL && wrk->nbo == NULL)
        wrk->nbo = bo;
    else
        VBO_Free(&bo);
}
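/*
 * A minimal sketch of the assertion discipline used throughout these
 * functions, in the spirit of Varnish's miniobj.h: every struct carries a
 * magic number assigned at allocation, CHECK_OBJ_NOTNULL() insists on a
 * valid object, and CHECK_OBJ_ORNULL() tolerates NULL for optional
 * pointers.  The exact upstream macro bodies may differ; AN()/AZ() assert
 * non-zero/zero respectively.
 */
#define CHECK_OBJ(ptr, type_magic)                      \
    do {                                                \
        assert((ptr)->magic == (type_magic));           \
    } while (0)

#define CHECK_OBJ_NOTNULL(ptr, type_magic)              \
    do {                                                \
        AN(ptr);                                        \
        CHECK_OBJ(ptr, type_magic);                     \
    } while (0)

#define CHECK_OBJ_ORNULL(ptr, type_magic)               \
    do {                                                \
        if ((ptr) != NULL)                              \
            CHECK_OBJ(ptr, type_magic);                 \
    } while (0)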
enum req_fsm_nxt
CNT_Request(struct worker *wrk, struct req *req)
{
    enum req_fsm_nxt nxt;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

    /*
     * Possible entrance states
     */
    assert(
        req->req_step == R_STP_LOOKUP ||
        req->req_step == R_STP_RECV);

    AN(req->vsl->wid & VSL_CLIENTMARKER);

    req->wrk = wrk;
    wrk->vsl = req->vsl;

    for (nxt = REQ_FSM_MORE; nxt == REQ_FSM_MORE; ) {
        /*
         * This is a good place to be paranoid about the various
         * pointers still pointing to the things we expect.
         */
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        /*
         * We don't want the thread workspace to be used for
         * anything of long duration, so mandate that it be
         * empty on state-transitions.
         */
        WS_Assert(wrk->aws);
        assert(wrk->aws->s == wrk->aws->f);

        switch (req->req_step) {
#define REQ_STEP(l, u, arg) \
        case R_STP_##u: \
            if (DO_DEBUG(DBG_REQ_STATE)) \
                cnt_diag(req, #u); \
            nxt = cnt_##l arg; \
            break;
#include "tbl/steps.h"
#undef REQ_STEP
        default:
            WRONG("State engine misfire");
        }
        WS_Assert(wrk->aws);
        CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
    }
    wrk->vsl = NULL;
    if (nxt == REQ_FSM_DONE) {
        AN(req->vsl->wid);
        VRB_Free(req);
        req->wrk = NULL;
    }
    return (nxt);
}
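/*
 * The switch above is driven by an X-macro table: "tbl/steps.h" is a list
 * of REQ_STEP(lower, UPPER, (args)) entries, one per FSM state, so each
 * inclusion stamps out one case label per state.  Illustrative excerpt
 * (the exact upstream step list may differ):
 *
 *     REQ_STEP(recv,    RECV,    (wrk, req))
 *     REQ_STEP(lookup,  LOOKUP,  (wrk, req))
 *     REQ_STEP(deliver, DELIVER, (wrk, req))
 *
 * which, with the #define above, expands to e.g.:
 *
 *     case R_STP_RECV:
 *         if (DO_DEBUG(DBG_REQ_STATE))
 *             cnt_diag(req, "RECV");
 *         nxt = cnt_recv(wrk, req);
 *         break;
 */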
void
CNT_Session(struct sess *sp)
{
    int done;
    struct worker *w;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    w = sp->wrk;
    CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);

    /*
     * Possible entrance states
     */
    assert(
        sp->step == STP_FIRST ||
        sp->step == STP_START ||
        sp->step == STP_LOOKUP ||
        sp->step == STP_RECV);

    /*
     * Whenever we come in from the acceptor we need to set blocking
     * mode, but there is no point in setting it when we come from
     * ESI or when a parked session returns.
     * It would be simpler to do this in the acceptor, but we'd rather
     * do the syscall in the worker thread.
     */
    if (sp->step == STP_FIRST || sp->step == STP_START)
        TCP_blocking(sp->fd);

    for (done = 0; !done; ) {
        assert(sp->wrk == w);
        /*
         * This is a good place to be paranoid about the various
         * pointers still pointing to the things we expect.
         */
        CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
        CHECK_OBJ_ORNULL(sp->obj, OBJECT_MAGIC);
        CHECK_OBJ_NOTNULL(sp->wrk, WORKER_MAGIC);
        CHECK_OBJ_ORNULL(w->nobjhead, OBJHEAD_MAGIC);
        WS_Assert(w->ws);

        switch (sp->step) {
#define STEP(l, u) \
        case STP_##u: \
            if (params->diag_bitmap & 0x01) \
                cnt_diag(sp, #u); \
            done = cnt_##l(sp); \
            break;
#include "steps.h"
#undef STEP
        default:
            WRONG("State engine misfire");
        }
        WS_Assert(w->ws);
        CHECK_OBJ_ORNULL(w->nobjhead, OBJHEAD_MAGIC);
    }
    WSL_Flush(w, 0);
    AZ(w->wfd);
}
VCL_BOOL
vmod_match_acl(VRT_CTX, VCL_ACL acl, VCL_IP ip)
{
    CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
    CHECK_OBJ_ORNULL(acl, VRT_ACL_MAGIC);
    assert(VSA_Sane(ip));

    return (VRT_acl_match(ctx, acl, ip));
}
VCL_BACKEND
vmod_hash_backend(VRT_CTX, struct vmod_directors_hash *rr,
    const char *arg, ...)
{
    struct SHA256Context sha_ctx;
    va_list ap;
    const char *p;
    unsigned char sha256[SHA256_LEN];
    VCL_BACKEND be;
    double r;

    CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
    CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
    CHECK_OBJ_NOTNULL(rr, VMOD_DIRECTORS_HASH_MAGIC);
    SHA256_Init(&sha_ctx);
    va_start(ap, arg);
    p = arg;
    while (p != vrt_magic_string_end) {
        if (p != NULL && *p != '\0')
            SHA256_Update(&sha_ctx, p, strlen(p));
        p = va_arg(ap, const char *);
    }
    va_end(ap);
    SHA256_Final(sha256, &sha_ctx);

    /* Scale the first 32 bits of the digest onto the unit interval */
    r = vbe32dec(sha256);
    r = scalbn(r, -32);
    assert(r >= 0 && r <= 1.0);
    be = vdir_pick_be(rr->vd, r, ctx->bo);
    return (be);
}
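/*
 * Worked example of the normalization step above: vbe32dec() reads the
 * first four digest bytes big-endian, and scalbn(v, -32) divides by 2^32,
 * mapping the hash onto [0,1) for vdir_pick_be().  Self-contained sketch;
 * hash_to_unit() is a name invented for illustration.
 */
#include <math.h>
#include <stdint.h>

static double
hash_to_unit(const unsigned char *d)
{
    uint32_t v;

    /* big-endian decode, as vbe32dec() does */
    v = ((uint32_t)d[0] << 24) | ((uint32_t)d[1] << 16) |
        ((uint32_t)d[2] << 8) | (uint32_t)d[3];
    return (scalbn((double)v, -32));    /* v / 2^32, in [0,1) */
}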
unsigned
vdir_any_healthy(struct vdir *vd, const struct busyobj *bo, double *changed)
{
    unsigned retval = 0;
    VCL_BACKEND be;
    unsigned u;
    double c;

    CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
    CHECK_OBJ_ORNULL(bo, BUSYOBJ_MAGIC);
    vdir_lock(vd);
    if (changed != NULL)
        *changed = 0;
    for (u = 0; u < vd->n_backend; u++) {
        be = vd->backend[u];
        CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);
        retval = be->healthy(be, bo, &c);
        if (changed != NULL && c > *changed)
            *changed = c;
        if (retval)
            break;
    }
    vdir_unlock(vd);
    return (retval);
}
void
VED_Deliver(struct req *req, struct busyobj *bo, int wantbody)
{
    int i;
    struct ecx *ecx;

    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_ORNULL(bo, BUSYOBJ_MAGIC);
    CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

    CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

    if (wantbody == 0)
        return;

    req->res_mode |= RES_ESI_CHILD;
    i = ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED);
    if (ecx->isgzip && i && !(req->res_mode & RES_ESI)) {
        ved_stripgzip(req, bo);
    } else {
        if (ecx->isgzip && !i)
            VDP_push(req, ved_pretend_gzip, ecx, 1);
        else
            VDP_push(req, ved_vdp_bytes, ecx->preq, 1);
        (void)VDP_DeliverObj(req);
    }
    VDP_close(req);
}
void
V1D_Deliver(struct req *req, struct boc *boc, int sendbody)
{
    int err = 0;

    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
    CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

    if (sendbody) {
        if (http_GetHdr(req->resp, H_Content_Length, NULL))
            req->res_mode |= RES_LEN;
        else if (req->http->protover == 11) {
            req->res_mode |= RES_CHUNKED;
            http_SetHeader(req->resp, "Transfer-Encoding: chunked");
        } else {
            req->res_mode |= RES_EOF;
            req->doclose = SC_TX_EOF;
        }
    }

    VSLb(req->vsl, SLT_Debug, "RES_MODE %x", req->res_mode);

    if (req->doclose) {
        if (!http_HdrIs(req->resp, H_Connection, "close")) {
            http_Unset(req->resp, H_Connection);
            http_SetHeader(req->resp, "Connection: close");
        }
    } else if (!http_GetHdr(req->resp, H_Connection, NULL))
        http_SetHeader(req->resp, "Connection: keep-alive");

    if (sendbody && req->resp_len != 0)
        VDP_push(req, v1d_bytes, NULL, 1);

    AZ(req->wrk->v1l);
    V1L_Reserve(req->wrk, req->ws, &req->sp->fd, req->vsl, req->t_prev);

    if (WS_Overflowed(req->ws)) {
        v1d_error(req, "workspace_client overflow");
        AZ(req->wrk->v1l);
        return;
    }

    req->acct.resp_hdrbytes += HTTP1_Write(req->wrk, req->resp, HTTP1_Resp);
    if (DO_DEBUG(DBG_FLUSH_HEAD))
        (void)V1L_Flush(req->wrk);

    if (sendbody && req->resp_len != 0) {
        if (req->res_mode & RES_CHUNKED)
            V1L_Chunked(req->wrk);
        err = VDP_DeliverObj(req);
        if (!err && (req->res_mode & RES_CHUNKED))
            V1L_EndChunk(req->wrk);
    }

    if ((V1L_FlushRelease(req->wrk) || err) && req->sp->fd >= 0)
        SES_Close(req->sp, SC_REM_CLOSE);
    AZ(req->wrk->v1l);
    VDP_close(req);
}
int
V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr,
    int onlycached)
{
    struct http *hp;
    int j;
    ssize_t i;
    struct http_conn *htc;
    int do_chunked = 0;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
    CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
    CHECK_OBJ_ORNULL(bo->req, REQ_MAGIC);

    htc = bo->htc;
    hp = bo->bereq;

    if (bo->req != NULL &&
        bo->req->req_body_status == REQ_BODY_WITHOUT_LEN) {
        http_PrintfHeader(hp, "Transfer-Encoding: chunked");
        do_chunked = 1;
    }

    (void)VTCP_blocking(htc->fd);    /* XXX: we should timeout instead */
    V1L_Reserve(wrk, wrk->aws, &htc->fd, bo->vsl, bo->t_prev);
    *ctr += HTTP1_Write(wrk, hp, HTTP1_Req);

    /* Deal with any message-body the request might (still) have */
    i = 0;

    if (bo->req != NULL &&
        (bo->req->req_body_status == REQ_BODY_CACHED || !onlycached)) {
        if (do_chunked)
            V1L_Chunked(wrk);
        i = VRB_Iterate(bo->req, vbf_iter_req_body, bo);

        if (bo->req->req_body_status == REQ_BODY_FAIL) {
            assert(i < 0);
            VSLb(bo->vsl, SLT_FetchError,
                "req.body read error: %d (%s)",
                errno, strerror(errno));
            bo->req->doclose = SC_RX_BODY;
        }
        if (do_chunked)
            V1L_EndChunk(wrk);
    }

    j = V1L_FlushRelease(wrk);
    if (j != 0 || i < 0) {
        VSLb(bo->vsl, SLT_FetchError, "backend write error: %d (%s)",
            errno, strerror(errno));
        VSLb_ts_busyobj(bo, "Bereq", W_TIM_real(wrk));
        htc->doclose = SC_TX_ERROR;
        return (-1);
    }
    VSLb_ts_busyobj(bo, "Bereq", W_TIM_real(wrk));
    return (0);
}
VCL_VOID
vmod_test_probe(VRT_CTX, VCL_PROBE probe, VCL_PROBE same)
{
    CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
    CHECK_OBJ_NOTNULL(probe, VRT_BACKEND_PROBE_MAGIC);
    CHECK_OBJ_ORNULL(same, VRT_BACKEND_PROBE_MAGIC);
    /* "same" must either be omitted or be the very same probe object */
    AN(same == NULL || probe == same);
}
static unsigned
vbe_dir_healthy(const struct director *d, const struct busyobj *bo,
    double *changed)
{
    struct backend *be;

    CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
    CHECK_OBJ_ORNULL(bo, BUSYOBJ_MAGIC);
    CAST_OBJ_NOTNULL(be, d->priv, BACKEND_MAGIC);
    return (VBE_Healthy(be, changed));
}
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct object *oldobj, enum vbf_fetch_mode_e mode)
{
    struct busyobj *bo;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_ORNULL(oldobj, OBJECT_MAGIC);

    bo = VBO_GetBusyObj(wrk, req);
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

    bo->refcount = 2;    /* one ref for us, one for the fetch thread */

    oc->busyobj = bo;

    CHECK_OBJ_NOTNULL(bo->vcl, VCL_CONF_MAGIC);

    if (mode == VBF_PASS)
        bo->do_pass = 1;

    bo->vary = req->vary_b;
    req->vary_b = NULL;

    HSH_Ref(oc);
    bo->fetch_objcore = oc;

    if (oldobj != NULL) {
        /* Only keep the stale object if it can be revalidated */
        if (http_GetHdr(oldobj->http, H_Last_Modified, NULL) ||
            http_GetHdr(oldobj->http, H_ETag, NULL)) {
            HSH_Ref(oldobj->objcore);
            bo->ims_obj = oldobj;
        }
    }

    AZ(bo->req);
    bo->req = req;

    bo->fetch_task.priv = bo;
    bo->fetch_task.func = vbf_fetch_thread;

    if (Pool_Task(wrk->pool, &bo->fetch_task, POOL_QUEUE_FRONT))
        vbf_fetch_thread(wrk, bo);
    if (mode == VBF_BACKGROUND) {
        VBO_waitstate(bo, BOS_REQ_DONE);
    } else {
        VBO_waitstate(bo, BOS_FETCHING);
        if (!bo->do_stream)
            VBO_waitstate(bo, BOS_FINISHED);
        assert(bo->state != BOS_FAILED || (oc->flags & OC_F_FAILED));
    }
    VBO_DerefBusyObj(wrk, &bo);
}
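/*
 * Hedged sketch of the VBO_waitstate() helper the mode handling above
 * relies on: block until the busyobj has reached at least the wanted
 * state.  The cond/mtx field names and the Lck_CondWait() timeout
 * argument are assumptions based on the surrounding code, not the
 * upstream source.
 */
static void
vbo_waitstate_sketch(struct busyobj *bo, enum busyobj_state_e want)
{
    Lck_Lock(&bo->mtx);
    while (bo->state < want)
        (void)Lck_CondWait(&bo->cond, &bo->mtx, 0);  /* 0: no timeout */
    Lck_Unlock(&bo->mtx);
}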
int
STV__iter(struct stevedore ** const pp)
{

    AN(pp);
    CHECK_OBJ_ORNULL(*pp, STEVEDORE_MAGIC);
    if (*pp != NULL)
        *pp = VTAILQ_NEXT(*pp, list);
    else
        *pp = VTAILQ_FIRST(&stevedores);
    return (*pp != NULL);
}
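/*
 * Usage sketch for the resumable iterator above: seed the cursor with
 * NULL and call until it returns 0.  stv_list_sketch() and the "name"
 * member are invented for the example.
 */
static void
stv_list_sketch(void)
{
    struct stevedore *stv;

    for (stv = NULL; STV__iter(&stv); )
        printf("storage: %s\n", stv->name);
}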
static void *
exp_timer(struct sess *sp, void *priv)
{
    struct objcore *oc;
    struct object *o;
    double t;
    struct objcore_head *lru;

    (void)priv;
    AZ(sleep(10));        /* XXX: Takes time for VCL to arrive */
    VCL_Get(&sp->vcl);
    t = TIM_real();
    while (1) {
        Lck_Lock(&exp_mtx);
        oc = binheap_root(exp_heap);
        CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
        if (oc == NULL || oc->timer_when > t) {    /* XXX: > or >= ? */
            Lck_Unlock(&exp_mtx);
            WSL_Flush(sp->wrk, 0);
            WRK_SumStat(sp->wrk);
            AZ(sleep(1));
            VCL_Refresh(&sp->vcl);
            t = TIM_real();
            continue;
        }

        o = oc->obj;
        CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
        CHECK_OBJ_NOTNULL(o->objhead, OBJHEAD_MAGIC);
        assert(oc->flags & OC_F_ONLRU);
        assert(oc->timer_idx != BINHEAP_NOIDX);
        binheap_delete(exp_heap, oc->timer_idx);
        assert(oc->timer_idx == BINHEAP_NOIDX);
        lru = STV_lru(o->objstore);
        AN(lru);
        VTAILQ_REMOVE(lru, o->objcore, lru_list);
        oc->flags &= ~OC_F_ONLRU;

        {    /* Sanity checking */
            struct objcore *oc2 = binheap_root(exp_heap);
            if (oc2 != NULL) {
                assert(oc2->timer_idx != BINHEAP_NOIDX);
                assert(oc2->timer_when >= oc->timer_when);
            }
        }

        VSL_stats->n_expired++;
        Lck_Unlock(&exp_mtx);
        WSL(sp->wrk, SLT_ExpKill, 0, "%u %d",
            o->xid, (int)(o->ttl - t));
        HSH_Deref(sp->wrk, &o);
    }
}
void
current_vmod(struct vmod_priv *priv)
{
    struct vmod_disco *vd = priv->priv;

    CHECK_OBJ_ORNULL(vd, VMOD_DISCO_MAGIC);
    if (!vd) {
        AZ(pthread_mutex_lock(&global_mtx));
        vd = default_mod ? default_mod : warmed_mod;
        CHECK_OBJ_NOTNULL(vd, VMOD_DISCO_MAGIC);
        /*
         * Only default_mod is cached on priv; when we fell back to
         * warmed_mod (default_mod == NULL), the lookup repeats on the
         * next call.
         */
        priv->priv = default_mod;
        AZ(pthread_mutex_unlock(&global_mtx));
    }
}
double
Wait_HeapDue(const struct waiter *w, struct waited **wpp)
{
    struct waited *wp;

    wp = binheap_root(w->heap);
    CHECK_OBJ_ORNULL(wp, WAITED_MAGIC);
    if (wp == NULL) {
        if (wpp != NULL)
            *wpp = NULL;
        return (0);
    }
    if (wpp != NULL)
        *wpp = wp;
    return (Wait_When(wp));
}
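/*
 * Usage sketch: a waiter loop typically turns Wait_HeapDue() into a wait
 * timeout, with the 0 return ("heap empty") mapped to an idle interval.
 * next_timeout_sketch() and the 1.0s idle default are illustrative, not
 * upstream names.
 */
static double
next_timeout_sketch(const struct waiter *w, double now)
{
    double due;

    due = Wait_HeapDue(w, NULL);
    if (due == 0)
        return (1.0);    /* nothing queued: sleep a default interval */
    return (due > now ? due - now : 0.0);
}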
void
SES_Delete(struct sess *sp, const char *reason, double now)
{
    struct acct *b;
    struct worker *wrk;
    struct sesspool *pp;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    pp = sp->sesspool;
    CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
    AN(pp->pool);
    wrk = sp->wrk;
    CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);

    if (reason != NULL)
        SES_Close(sp, reason);
    if (isnan(now))
        now = VTIM_real();
    assert(!isnan(sp->t_open));
    assert(sp->fd < 0);

    if (sp->req != NULL) {
        AZ(sp->req->vcl);
        SES_ReleaseReq(sp);
    }

    if (*sp->addr == '\0')
        strcpy(sp->addr, "-");
    if (*sp->port == '\0')
        strcpy(sp->port, "-");

    b = &sp->acct_ses;

    VSL(SLT_StatSess, sp->vsl_id, "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju",
        sp->addr, sp->port, now - sp->t_open, b->sess, b->req,
        b->pipe, b->pass, b->fetch, b->hdrbytes, b->bodybytes);

    MPL_Free(pp->mpl_sess, sp);
}
static void *
exp_thread(struct worker *wrk, void *priv)
{
    struct objcore *oc;
    double t = 0, tnext = 0;
    struct exp_priv *ep;
    unsigned flags = 0;

    CAST_OBJ_NOTNULL(ep, priv, EXP_PRIV_MAGIC);
    ep->wrk = wrk;
    VSL_Setup(&ep->vsl, NULL, 0);
    ep->heap = binheap_new(NULL, object_cmp, object_update);
    AN(ep->heap);
    while (1) {

        Lck_Lock(&ep->mtx);
        oc = VSTAILQ_FIRST(&ep->inbox);
        CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
        if (oc != NULL) {
            assert(oc->refcnt >= 1);
            VSTAILQ_REMOVE(&ep->inbox, oc, objcore, exp_list);
            VSC_C_main->exp_received++;
            tnext = 0;
            flags = oc->exp_flags;
            if (flags & OC_EF_REMOVE)
                oc->exp_flags = 0;
            else
                oc->exp_flags &= OC_EF_REFD;
        } else if (tnext > t) {
            VSL_Flush(&ep->vsl, 0);
            Pool_Sumstat(wrk);
            (void)Lck_CondWait(&ep->condvar, &ep->mtx, tnext);
        }
        Lck_Unlock(&ep->mtx);

        t = VTIM_real();

        if (oc != NULL)
            exp_inbox(ep, oc, flags);
        else
            tnext = exp_expire(ep, t);
    }
    NEEDLESS(return NULL);
}
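/*
 * Hedged sketch of the two callbacks handed to binheap_new() above: the
 * comparator orders objcores by expiry time and the updater records the
 * current heap index so entries can later be removed with
 * binheap_delete().  The objcore field names are assumptions based on the
 * surrounding code.
 */
static int
object_cmp(void *priv, void *a, void *b)
{
    const struct objcore *aa = a, *bb = b;

    (void)priv;
    return (aa->timer_when < bb->timer_when);
}

static void
object_update(void *priv, void *p, unsigned u)
{
    struct objcore *oc = p;

    (void)priv;
    oc->timer_idx = u;    /* remembered for binheap_delete() */
}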
static int
cnt_deliver(struct worker *wrk, struct req *req)
{
    struct busyobj *bo;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_NOTNULL(req->obj, OBJECT_MAGIC);

    bo = req->busyobj;
    CHECK_OBJ_ORNULL(bo, BUSYOBJ_MAGIC);
    if (bo != NULL) {
        /* Poll until the fetch reaches a final state */
        while (bo->state < BOS_FAILED)
            (void)usleep(10000);
        assert(bo->state >= BOS_FAILED);
        if (bo->state == BOS_FAILED) {
            HSH_Deref(&wrk->stats, NULL, &req->obj);
            VBO_DerefBusyObj(wrk, &req->busyobj);
            req->err_code = 503;
            req->req_step = R_STP_ERROR;
            return (0);
        }
        VBO_DerefBusyObj(wrk, &req->busyobj);
    }
    AZ(req->busyobj);
    req->director = NULL;
    req->restarts = 0;

    RES_WriteObj(req);

    /* No point in saving the body if it is hit-for-pass */
    if (req->obj->objcore->flags & OC_F_PASS)
        STV_Freestore(req->obj);

    assert(WRW_IsReleased(wrk));
    (void)HSH_Deref(&wrk->stats, NULL, &req->obj);
    http_Teardown(req->resp);
    return (1);
}
static const struct director *
vdi_resolve(struct worker *wrk, struct busyobj *bo, const struct director *d)
{

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

    if (d == NULL) {
        VSLb(bo->vsl, SLT_FetchError, "No backend");
        return (NULL);
    }

    while (d != NULL && d->resolve != NULL) {
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        d = d->resolve(d, wrk, bo);
    }
    CHECK_OBJ_ORNULL(d, DIRECTOR_MAGIC);
    if (d == NULL)
        VSLb(bo->vsl, SLT_FetchError, "Backend selection failed");
    bo->director_resp = d;
    return (d);
}
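/*
 * Sketch of the resolve() contract vdi_resolve() walks: a layered director
 * returns the next hop (possibly another layered director), a leaf has
 * resolve == NULL and is used as the final backend.  Everything named
 * rr_* / RR_* below is hypothetical, for illustration only.
 */
struct rr_dir {
    unsigned                magic;
#define RR_DIR_MAGIC        0x0badcafe
    unsigned                next;
    unsigned                n_backend;
    const struct director   **backend;
};

static const struct director *
rr_resolve(const struct director *d, struct worker *wrk, struct busyobj *bo)
{
    struct rr_dir *rr;

    (void)wrk;
    (void)bo;
    CAST_OBJ_NOTNULL(rr, d->priv, RR_DIR_MAGIC);
    rr->next = (rr->next + 1) % rr->n_backend;
    return (rr->backend[rr->next]);    /* may itself be layered */
}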
int
CNT_Request(struct worker *wrk, struct req *req)
{
    int done;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

    /*
     * Possible entrance states
     */
    assert(
        req->req_step == R_STP_LOOKUP ||
        req->req_step == R_STP_RECV);

    AN(req->vsl->wid & VSL_CLIENTMARKER);

    req->wrk = wrk;

    for (done = 0; !done; ) {
        /*
         * This is a good place to be paranoid about the various
         * pointers still pointing to the things we expect.
         */
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        /*
         * We don't want the thread workspace to be used for
         * anything of long duration, so mandate that it be
         * empty on state-transitions.
         */
        WS_Assert(wrk->aws);
        assert(wrk->aws->s == wrk->aws->f);

        switch (req->req_step) {
#define REQ_STEP(l, u, arg) \
        case R_STP_##u: \
            if (DO_DEBUG(DBG_REQ_STATE)) \
                cnt_diag(req, #u); \
            done = cnt_##l arg; \
            break;
#include "tbl/steps.h"
#undef REQ_STEP
        default:
            WRONG("State engine misfire");
        }
        WS_Assert(wrk->aws);
        CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
    }

    if (done == 1) {
        /* XXX: Workaround for pipe */
        if (req->sp->fd >= 0) {
            VSLb(req->vsl, SLT_Length, "%ju",
                (uintmax_t)req->req_bodybytes);
        }
        VSLb(req->vsl, SLT_ReqEnd, "%.9f %.9f %.9f %.9f %.9f",
            req->t_req, req->sp->t_idle, req->sp->t_idle - req->t_resp,
            req->t_resp - req->t_req, req->sp->t_idle - req->t_resp);

        /* done == 2 was charged by cache_hash.c */
        SES_Charge(wrk, req);

        /*
         * Nuke the VXID, cache_http1_fsm.c::http1_dissect() will
         * allocate a new one when necessary.
         */
        req->vsl->wid = 0;
    }

    req->wrk = NULL;

    assert(WRW_IsReleased(wrk));
    return (done);
}
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
    struct boc *boc;
    struct busyobj *bo;
    const char *how;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    AN(oc->flags & OC_F_BUSY);
    CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);

    bo = VBO_GetBusyObj(wrk, req);
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

    boc = HSH_RefBoc(oc);
    CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

    switch (mode) {
    case VBF_PASS:
        how = "pass";
        bo->do_pass = 1;
        break;
    case VBF_NORMAL:
        how = "fetch";
        break;
    case VBF_BACKGROUND:
        how = "bgfetch";
        bo->is_bgfetch = 1;
        break;
    default:
        WRONG("Wrong fetch mode");
    }

    VSLb(bo->vsl, SLT_Begin, "bereq %u %s", VXID(req->vsl->wid), how);
    VSLb(req->vsl, SLT_Link, "bereq %u %s", VXID(bo->vsl->wid), how);

    THR_SetBusyobj(bo);

    bo->sp = req->sp;
    SES_Ref(bo->sp);

    AN(bo->vcl);

    oc->boc->vary = req->vary_b;
    req->vary_b = NULL;

    HSH_Ref(oc);
    AZ(bo->fetch_objcore);
    bo->fetch_objcore = oc;

    AZ(bo->stale_oc);
    if (oldoc != NULL) {
        assert(oldoc->refcnt > 0);
        HSH_Ref(oldoc);
        bo->stale_oc = oldoc;
    }

    AZ(bo->req);
    bo->req = req;

    bo->fetch_task.priv = bo;
    bo->fetch_task.func = vbf_fetch_thread;

    if (Pool_Task(wrk->pool, &bo->fetch_task, TASK_QUEUE_BO)) {
        wrk->stats->fetch_no_thread++;
        (void)vbf_stp_fail(req->wrk, bo);
        if (bo->stale_oc != NULL)
            (void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
        HSH_DerefBoc(wrk, oc);
        SES_Rel(bo->sp);
        VBO_ReleaseBusyObj(wrk, &bo);
    } else {
        bo = NULL;    /* ref transferred to fetch thread */
        if (mode == VBF_BACKGROUND) {
            ObjWaitState(oc, BOS_REQ_DONE);
            (void)VRB_Ignore(req);
        } else {
            ObjWaitState(oc, BOS_STREAM);
            if (oc->boc->state == BOS_FAILED) {
                AN((oc->flags & OC_F_FAILED));
            } else {
                AZ(oc->flags & OC_F_BUSY);
            }
        }
    }

    AZ(bo);
    VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
    assert(oc->boc == boc);
    HSH_DerefBoc(wrk, oc);
    if (mode == VBF_BACKGROUND)
        (void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
    THR_SetBusyobj(NULL);
}
VCL_VOID
vmod_sleep(VRT_CTX, VCL_DURATION t)
{

    CHECK_OBJ_ORNULL(ctx, VRT_CTX_MAGIC);
    VTIM_sleep(t);
}
void
SES_Delete(struct sess *sp, const char *reason, double now)
{
    struct acct *b;
    struct sessmem *sm;
    struct worker *wrk;
    struct sesspool *pp;

    pp = ses_getpool(sp);
    sm = sp->mem;
    CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC);
    wrk = sp->wrk;
    CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);

    if (reason != NULL)
        SES_Close(sp, reason);
    if (isnan(now))
        now = VTIM_real();
    assert(!isnan(sp->t_open));
    assert(sp->fd < 0);

    if (sp->req != NULL) {
        AZ(sp->req->vcl);
        SES_ReleaseReq(sp);
    }

    if (*sp->addr == '\0')
        strcpy(sp->addr, "-");
    if (*sp->port == '\0')
        strcpy(sp->port, "-");

    b = &sp->acct_ses;

    VSL(SLT_StatSess, sp->vsl_id, "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju",
        sp->addr, sp->port, now - sp->t_open, b->sess, b->req,
        b->pipe, b->pass, b->fetch, b->hdrbytes, b->bodybytes);

    if (sm->workspace != cache_param->sess_workspace ||
        sm->nhttp != (uint16_t)cache_param->http_max_hdr ||
        pp->nsess > cache_param->max_sess) {
        free(sm);
        Lck_Lock(&pp->mtx);
        if (wrk != NULL)
            wrk->stats.sessmem_free++;
        else
            pp->dly_free_cnt++;
        pp->nsess--;
        Lck_Unlock(&pp->mtx);
    } else {
        /* Clean and prepare for reuse */
        ses_setup(sm);
        Lck_Lock(&pp->mtx);
        if (wrk != NULL) {
            wrk->stats.sessmem_free += pp->dly_free_cnt;
            pp->dly_free_cnt = 0;
        }
        VTAILQ_INSERT_HEAD(&pp->freelist, sm, list);
        Lck_Unlock(&pp->mtx);
    }
}
int
V1F_FetchRespHdr(struct busyobj *bo)
{
    struct http *hp;
    int i;
    double t;
    struct http_conn *htc;
    enum htc_status_e hs;

    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
    CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
    CHECK_OBJ_ORNULL(bo->req, REQ_MAGIC);

    htc = bo->htc;
    assert(*htc->rfd > 0);

    VSC_C_main->backend_req++;

    /* Receive response */

    HTC_RxInit(htc, bo->ws);
    CHECK_OBJ_NOTNULL(htc, HTTP_CONN_MAGIC);
    CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);

    t = VTIM_real() + htc->first_byte_timeout;
    hs = HTC_RxStuff(htc, HTTP1_Complete, NULL, NULL,
        t, t + htc->between_bytes_timeout, cache_param->http_resp_size);
    if (hs != HTC_S_COMPLETE) {
        bo->acct.beresp_hdrbytes += htc->rxbuf_e - htc->rxbuf_b;
        switch (hs) {
        case HTC_S_JUNK:
            VSLb(bo->vsl, SLT_FetchError, "Received junk");
            htc->doclose = SC_RX_JUNK;
            break;
        case HTC_S_CLOSE:
            VSLb(bo->vsl, SLT_FetchError, "backend closed");
            htc->doclose = SC_RESP_CLOSE;
            break;
        case HTC_S_TIMEOUT:
            VSLb(bo->vsl, SLT_FetchError, "timeout");
            htc->doclose = SC_RX_TIMEOUT;
            break;
        case HTC_S_OVERFLOW:
            VSLb(bo->vsl, SLT_FetchError, "overflow");
            htc->doclose = SC_RX_OVERFLOW;
            break;
        default:
            VSLb(bo->vsl, SLT_FetchError, "HTC %s (%d)",
                HTC_Status(hs), hs);
            htc->doclose = SC_RX_BAD;
            break;
        }
        return (htc->rxbuf_e == htc->rxbuf_b ? 1 : -1);
    }
    VTCP_set_read_timeout(*htc->rfd, htc->between_bytes_timeout);

    hp = bo->beresp;

    i = HTTP1_DissectResponse(htc, hp, bo->bereq);
    bo->acct.beresp_hdrbytes += htc->rxbuf_e - htc->rxbuf_b;
    if (i) {
        VSLb(bo->vsl, SLT_FetchError, "http format error");
        htc->doclose = SC_RX_JUNK;
        return (-1);
    }

    htc->doclose = http_DoConnection(hp);
    RFC2616_Response_Body(bo->wrk, bo);

    assert(bo->vfc->resp == bo->beresp);
    if (bo->htc->body_status != BS_NONE &&
        bo->htc->body_status != BS_ERROR)
        if (V1F_Setup_Fetch(bo->vfc, bo->htc)) {
            VSLb(bo->vsl, SLT_FetchError, "overflow");
            htc->doclose = SC_RX_OVERFLOW;
            return (-1);
        }
    return (0);
}
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct object *oldobj, enum vbf_fetch_mode_e mode)
{
    struct busyobj *bo;
    const char *how;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_ORNULL(oldobj, OBJECT_MAGIC);

    switch (mode) {
    case VBF_PASS:
        how = "pass";
        break;
    case VBF_NORMAL:
        how = "fetch";
        break;
    case VBF_BACKGROUND:
        how = "bgfetch";
        break;
    default:
        WRONG("Wrong fetch mode");
    }

    bo = VBO_GetBusyObj(wrk, req);
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

    VSLb(bo->vsl, SLT_Begin, "bereq %u %s", VXID(req->vsl->wid), how);
    VSLb(req->vsl, SLT_Link, "bereq %u %s", VXID(bo->vsl->wid), how);

    THR_SetBusyobj(bo);

    bo->refcount = 2;    /* one ref for us, one for the fetch thread */

    oc->busyobj = bo;

    CHECK_OBJ_NOTNULL(bo->vcl, VCL_CONF_MAGIC);

    if (mode == VBF_PASS)
        bo->do_pass = 1;

    bo->vary = req->vary_b;
    req->vary_b = NULL;

    if (mode != VBF_BACKGROUND)
        HSH_Ref(oc);
    bo->fetch_objcore = oc;

    AZ(bo->ims_obj);
    if (oldobj != NULL) {
        /* Only keep the stale object if it can be revalidated */
        if (http_GetHdr(oldobj->http, H_Last_Modified, NULL) ||
            http_GetHdr(oldobj->http, H_ETag, NULL)) {
            assert(oldobj->objcore->refcnt > 0);
            HSH_Ref(oldobj->objcore);
            bo->ims_obj = oldobj;
        }
    }

    AZ(bo->req);
    bo->req = req;

    bo->fetch_task.priv = bo;
    bo->fetch_task.func = vbf_fetch_thread;

    if (Pool_Task(wrk->pool, &bo->fetch_task, POOL_QUEUE_FRONT))
        vbf_fetch_thread(wrk, bo);
    if (mode == VBF_BACKGROUND) {
        VBO_waitstate(bo, BOS_REQ_DONE);
    } else {
        VBO_waitstate(bo, BOS_STREAM);
        if (bo->state == BOS_FAILED) {
            AN((oc->flags & OC_F_FAILED));
        } else {
            AZ(bo->fetch_objcore->flags & OC_F_BUSY);
        }
    }
    VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
    THR_SetBusyobj(NULL);
    VBO_DerefBusyObj(wrk, &bo);
}
int
V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes,
    uint64_t *ctr_bodybytes, int onlycached, char *abuf, char *pbuf)
{
    struct http *hp;
    int j;
    ssize_t i;
    uint64_t bytes, hdrbytes;
    struct http_conn *htc;
    int do_chunked = 0;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
    CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
    CHECK_OBJ_ORNULL(bo->req, REQ_MAGIC);
    AN(ctr_hdrbytes);
    AN(ctr_bodybytes);

    htc = bo->htc;
    assert(*htc->rfd > 0);
    hp = bo->bereq;

    if (bo->req != NULL &&
        bo->req->req_body_status == REQ_BODY_WITHOUT_LEN) {
        http_PrintfHeader(hp, "Transfer-Encoding: chunked");
        do_chunked = 1;
    }

    VSLb(bo->vsl, SLT_BackendStart, "%s %s", abuf, pbuf);

    (void)VTCP_blocking(*htc->rfd);    /* XXX: we should timeout instead */
    V1L_Open(wrk, wrk->aws, htc->rfd, bo->vsl, bo->t_prev, 0);
    hdrbytes = HTTP1_Write(wrk, hp, HTTP1_Req);

    /* Deal with any message-body the request might (still) have */
    i = 0;

    if (bo->req != NULL &&
        (bo->req->req_body_status == REQ_BODY_CACHED || !onlycached)) {
        if (do_chunked)
            V1L_Chunked(wrk);
        i = VRB_Iterate(bo->req, vbf_iter_req_body, bo);

        if (bo->req->req_body_status == REQ_BODY_FAIL) {
            assert(i < 0);
            VSLb(bo->vsl, SLT_FetchError,
                "req.body read error: %d (%s)",
                errno, vstrerror(errno));
            bo->req->doclose = SC_RX_BODY;
        }
        if (do_chunked)
            V1L_EndChunk(wrk);
    }

    j = V1L_Close(wrk, &bytes);

    /* Bytes accounting */
    if (bytes < hdrbytes)
        *ctr_hdrbytes += bytes;
    else {
        *ctr_hdrbytes += hdrbytes;
        *ctr_bodybytes += bytes - hdrbytes;
    }

    if (j != 0 || i < 0) {
        VSLb(bo->vsl, SLT_FetchError, "backend write error: %d (%s)",
            errno, vstrerror(errno));
        VSLb_ts_busyobj(bo, "Bereq", W_TIM_real(wrk));
        htc->doclose = SC_TX_ERROR;
        return (-1);
    }
    VSLb_ts_busyobj(bo, "Bereq", W_TIM_real(wrk));
    return (0);
}
static void
h2_deliver(struct req *req, struct boc *boc, int sendbody)
{
    ssize_t sz, sz1;
    uint8_t *p;
    unsigned u;
    const char *r;
    struct http *hp;
    struct sess *sp;
    struct h2_req *r2;
    int i, err;
    const struct hpack_static *hps;

    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
    CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
    CAST_OBJ_NOTNULL(r2, req->transport_priv, H2_REQ_MAGIC);
    sp = req->sp;
    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

    (void)sendbody;

    VSLb(req->vsl, SLT_Debug, "H2: Deliver");

    (void)WS_Reserve(req->ws, 0);
    p = (void *)req->ws->f;

    switch (req->resp->status) {
    case 200: *p++ = 0x80 | 8; break;
    case 204: *p++ = 0x80 | 9; break;
    case 206: *p++ = 0x80 | 10; break;
    case 304: *p++ = 0x80 | 11; break;
    case 400: *p++ = 0x80 | 12; break;
    case 404: *p++ = 0x80 | 13; break;
    case 500: *p++ = 0x80 | 14; break;
    default:
        /* Literal ":status" (static name index 8), 3-byte value */
        *p++ = 0x18;
        *p++ = 0x03;
        (void)sprintf((char *)p, "%03d", req->resp->status);
        p += 3;
        break;
    }

    hp = req->resp;
    for (u = HTTP_HDR_FIRST; u < hp->nhd; u++) {
        assert((char *)p < req->ws->e);

        r = strchr(hp->hd[u].b, ':');
        AN(r);

        hps = hp_idx[tolower(*hp->hd[u].b)];
        sz = 1 + r - hp->hd[u].b;
        assert(sz > 0);
        while (hps != NULL && hps->idx > 0) {
            i = strncasecmp(hps->name, hp->hd[u].b, sz);
            if (i < 0) {
                hps++;
                continue;
            }
            if (i > 0)
                hps = NULL;
            break;
        }
        if (hps != NULL) {
            VSLb(req->vsl, SLT_Debug,
                "HP {%d, \"%s\", \"%s\"} <%s>",
                hps->idx, hps->name, hps->val, hp->hd[u].b);
            if (hps->idx < 15) {
                *p++ = 0x10 | hps->idx;
            } else {
                *p++ = 0x1f;
                *p++ = hps->idx - 0x0f;
            }
        } else {
            *p++ = 0x10;
            sz--;
            if (sz < 127) {
                *p++ = (uint8_t)sz;
            } else {
                *p++ = 0x7f;
                *p++ = (uint8_t)sz - 0x7f;
            }
            for (sz1 = 0; sz1 < sz; sz1++)
                *p++ = (uint8_t)tolower(hp->hd[u].b[sz1]);
        }

        while (vct_islws(*++r))
            continue;
        sz = hp->hd[u].e - r;
        assert(sz <= 254);
        if (sz < 127) {
            *p++ = (uint8_t)sz;
        } else if (sz < 127 * 2) {
            *p++ = 0x7f;
            *p++ = (uint8_t)sz - 0x7f;
        }
        memcpy(p, r, sz);
        p += sz;
        assert((char *)p < req->ws->e);
    }
    sz = (char *)p - req->ws->f;

    /* XXX: Optimize !sendbody case */
    H2_Send(req->wrk, r2, 1, H2_FRAME_HEADERS, H2FF_HEADERS_END_HEADERS,
        sz, req->ws->f);
    WS_Release(req->ws, 0);

    if (sendbody && req->resp_len != 0)
        VDP_push(req, h2_bytes, NULL, 1, "H2");

    AZ(req->wrk->v1l);

    if (sendbody && req->resp_len != 0)
        err = VDP_DeliverObj(req);
    /* XXX */ (void)err;

    H2_Send(req->wrk, r2, 1, H2_FRAME_DATA, H2FF_DATA_END_STREAM, 0, NULL);

    AZ(req->wrk->v1l);
    VDP_close(req);
}
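/*
 * The hand-rolled length encoding above is a bounded special case of the
 * HPACK integer representation (RFC 7541, section 5.1): values that fit in
 * the N-bit prefix are stored directly, larger ones saturate the prefix
 * and spill into 7-bit continuation bytes.  Self-contained sketch;
 * hpack_int() is a name invented for illustration.
 */
#include <stdint.h>

static uint8_t *
hpack_int(uint8_t *p, unsigned v, unsigned nbits, uint8_t first)
{
    unsigned max = (1u << nbits) - 1;

    if (v < max) {
        *p++ = first | (uint8_t)v;      /* fits in the prefix */
        return (p);
    }
    *p++ = first | (uint8_t)max;        /* prefix saturated */
    v -= max;
    while (v >= 128) {                  /* 7-bit continuation bytes */
        *p++ = (uint8_t)(v & 0x7f) | 0x80;
        v >>= 7;
    }
    *p++ = (uint8_t)v;
    return (p);
}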
static int
cnt_prepresp(struct worker *wrk, struct req *req)
{
    struct busyobj *bo;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    bo = req->busyobj;
    CHECK_OBJ_ORNULL(bo, BUSYOBJ_MAGIC);

    CHECK_OBJ_NOTNULL(req->obj, OBJECT_MAGIC);
    CHECK_OBJ_NOTNULL(req->vcl, VCL_CONF_MAGIC);

    req->res_mode = 0;

    if (bo == NULL) {
        if (!req->disable_esi && req->obj->esidata != NULL) {
            /* In ESI mode, we can't know the aggregate length */
            req->res_mode &= ~RES_LEN;
            req->res_mode |= RES_ESI;
        } else {
            req->res_mode |= RES_LEN;
        }
    } else {
        AZ(bo->do_esi);
    }

    if (req->esi_level > 0) {
        /* Included ESI object, always CHUNKED or EOF */
        req->res_mode &= ~RES_LEN;
        req->res_mode |= RES_ESI_CHILD;
    }

    if (cache_param->http_gzip_support && req->obj->gziped &&
        !RFC2616_Req_Gzip(req->http)) {
        /*
         * We don't know what it uncompresses to
         * XXX: we could cache that
         */
        req->res_mode &= ~RES_LEN;
        req->res_mode |= RES_GUNZIP;
    }

    if (!(req->res_mode & (RES_LEN|RES_CHUNKED|RES_EOF))) {
        /* We haven't chosen yet, do so */
        if (!req->wantbody) {
            /* Nothing */
        } else if (req->http->protover >= 11) {
            req->res_mode |= RES_CHUNKED;
        } else {
            req->res_mode |= RES_EOF;
            req->doclose = SC_TX_EOF;
        }
    }

    req->t_resp = W_TIM_real(wrk);
    if (req->obj->objcore->objhead != NULL) {
        if ((req->t_resp - req->obj->last_lru) >
            cache_param->lru_timeout &&
            EXP_Touch(req->obj->objcore))
            req->obj->last_lru = req->t_resp;
        if (!cache_param->obj_readonly)
            req->obj->last_use = req->t_resp;    /* XXX: locking ? */
    }
    HTTP_Setup(req->resp, req->ws, req->vsl, HTTP_Resp);
    RES_BuildHttp(req);
    VCL_deliver_method(req);
    switch (req->handling) {
    case VCL_RET_DELIVER:
        break;
    case VCL_RET_RESTART:
        if (req->restarts >= cache_param->max_restarts)
            break;
        if (bo != NULL) {
            AN(bo->do_stream);
            (void)HSH_Deref(&wrk->stats, NULL, &req->obj);
            VBO_DerefBusyObj(wrk, &req->busyobj);
        } else {
            (void)HSH_Deref(&wrk->stats, NULL, &req->obj);
        }
        AZ(req->obj);
        http_Teardown(req->resp);
        req->req_step = R_STP_RESTART;
        return (0);
    default:
        WRONG("Illegal action in vcl_deliver{}");
    }
    req->req_step = R_STP_DELIVER;
    return (0);
}
enum req_fsm_nxt
CNT_Request(struct worker *wrk, struct req *req)
{
    enum req_fsm_nxt nxt;
    struct storage *st;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

    /*
     * Possible entrance states
     */
    assert(
        req->req_step == R_STP_LOOKUP ||
        req->req_step == R_STP_RECV);

    AN(req->vsl->wid & VSL_CLIENTMARKER);

    req->wrk = wrk;

    for (nxt = REQ_FSM_MORE; nxt == REQ_FSM_MORE; ) {
        /*
         * This is a good place to be paranoid about the various
         * pointers still pointing to the things we expect.
         */
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        /*
         * We don't want the thread workspace to be used for
         * anything of long duration, so mandate that it be
         * empty on state-transitions.
         */
        WS_Assert(wrk->aws);
        assert(wrk->aws->s == wrk->aws->f);

        switch (req->req_step) {
#define REQ_STEP(l, u, arg) \
        case R_STP_##u: \
            if (DO_DEBUG(DBG_REQ_STATE)) \
                cnt_diag(req, #u); \
            nxt = cnt_##l arg; \
            break;
#include "tbl/steps.h"
#undef REQ_STEP
        default:
            WRONG("State engine misfire");
        }
        WS_Assert(wrk->aws);
        CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
    }

    if (nxt == REQ_FSM_DONE) {
        AN(req->vsl->wid);
        if (req->res_mode & (RES_ESI|RES_ESI_CHILD))
            VSLb(req->vsl, SLT_ESI_BodyBytes, "%ju",
                (uintmax_t)req->resp_bodybytes);

        /* Release any buffered request body */
        while (!VTAILQ_EMPTY(&req->body->list)) {
            st = VTAILQ_FIRST(&req->body->list);
            VTAILQ_REMOVE(&req->body->list, st, list);
            STV_free(st);
        }
        req->wrk = NULL;
    }
    assert(WRW_IsReleased(wrk));
    return (nxt);
}