/*
 * Dispose of an objhead once the final reference has been dropped.
 *
 * The caller guarantees that the refcount is zero and that no objcores
 * or waiting requests remain attached; anything else is a panic.
 */
void
HSH_DeleteObjHead(struct worker *wrk, struct objhead *oh)
{

	/* Must be completely unreferenced and empty before teardown. */
	AZ(oh->refcnt);
	assert(VTAILQ_EMPTY(&oh->objcs));
	assert(VTAILQ_EMPTY(&oh->waitinglist));

	/* Account the removal on this worker's counters. */
	wrk->stats->n_objecthead--;

	Lck_Delete(&oh->mtx);
	FREE_OBJ(oh);
}
/*
 * Release a VCL reference previously handed out to a VMOD.
 *
 * Consumes *refp (it is set to NULL before any checks, so the caller's
 * pointer is dead even if an assertion fires) and removes the reference
 * from the VCL's ref_list under vcl_mtx.
 */
void
VRT_rel_vcl(VRT_CTX, struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	AN(refp);
	ref = *refp;
	*refp = NULL;	/* consume the caller's handle */

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ref, VCLREF_MAGIC);

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(vcl == ref->vcl);	/* the ref must belong to this VCL */

	/* NB: A VCL may be released by a VMOD at any time, but it must happen
	 * after a warmup and before the end of a cooldown. The release may or
	 * may not happen while the same thread holds the temperature lock, so
	 * instead we check that all references are gone in VCL_Nuke.
	 */

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	vcl->nrefs--;
	/* No garbage collection here, for the same reasons as in VCL_Rel. */
	Lck_Unlock(&vcl_mtx);

	FREE_OBJ(ref);
}
/*
 * Close (if needed), log and release a session.
 *
 * 'reason' == SC_NULL means the session was already closed and the
 * close reason was stashed as the negated fd.  'now' may be NAN, in
 * which case the current wall clock is used.  Backwards wall-clock
 * steps smaller than cache_param->clock_step are tolerated by clamping
 * 'now' to t_open; larger steps panic.
 */
void
SES_Delete(struct sess *sp, enum sess_close reason, double now)
{

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	if (reason != SC_NULL)
		SES_Close(sp, reason);
	assert(sp->fd < 0);

	if (isnan(now))
		now = VTIM_real();
	AZ(isnan(sp->t_open));
	if (now < sp->t_open) {
		/* The wall clock stepped backwards since the session was
		 * opened; tolerate small steps, panic on large ones. */
		VSL(SLT_Debug, sp->vxid,
		    "Clock step (now=%f < t_open=%f)", now, sp->t_open);
		if (now + cache_param->clock_step < sp->t_open)
			WRONG("Clock step detected");
		now = sp->t_open; /* Do not log negatives */
	}

	if (reason == SC_NULL)
		reason = (enum sess_close)-sp->fd;	/* stashed at close time */

	assert(VTAILQ_EMPTY(&sp->privs->privs));
	VSL(SLT_SessClose, sp->vxid, "%s %.3f",
	    sess_close_2str(reason, 0), now - sp->t_open);
	VSL(SLT_End, sp->vxid, "%s", "");
	SES_Rel(sp);
}
/*
 * Create and start a new worker thread pool.
 *
 * Returns NULL only when the pool structure itself cannot be allocated;
 * every other allocation failure asserts.  Blocks until the herder
 * thread has placed at least one worker on the idle queue.
 */
static struct pool *
pool_mkpool(unsigned pool_no)
{
	struct pool *pp;

	ALLOC_OBJ(pp, POOL_MAGIC);
	if (pp == NULL)
		return (NULL);
	pp->a_stat = calloc(1, sizeof *pp->a_stat);
	AN(pp->a_stat);
	pp->b_stat = calloc(1, sizeof *pp->b_stat);
	AN(pp->b_stat);
	Lck_New(&pp->mtx, lck_wq);

	VTAILQ_INIT(&pp->idle_queue);
	VTAILQ_INIT(&pp->front_queue);
	VTAILQ_INIT(&pp->back_queue);
	AZ(pthread_cond_init(&pp->herder_cond, NULL));
	AZ(pthread_create(&pp->herder_thr, NULL, pool_herder, pp));

	/* Wait for the herder to spin up the first worker thread.
	 * NOTE(review): idle_queue is polled without holding pp->mtx;
	 * presumably a stale empty-read only costs one extra sleep
	 * iteration here — confirm. */
	while (VTAILQ_EMPTY(&pp->idle_queue))
		(void)usleep(10000);

	pp->sesspool = SES_NewPool(pp, pool_no);
	AN(pp->sesspool);

	return (pp);
}
/*
 * Close (if needed), log and free a session back to its pool.
 *
 * 'reason' == SC_NULL means the session was already closed and the
 * close reason was stashed as the negated fd.  'now' may be NAN, in
 * which case the current wall clock is used.
 */
void
SES_Delete(struct sess *sp, enum sess_close reason, double now)
{
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);

	if (reason != SC_NULL)
		SES_Close(sp, reason);
	assert(sp->fd < 0);

	if (isnan(now))
		now = VTIM_real();
	AZ(isnan(sp->t_open));

	if (reason == SC_NULL)
		reason = (enum sess_close)-sp->fd;	/* stashed at close time */

	/* NOTE(review): a backwards wall-clock step between t_open and
	 * now will trip this assert — consider tolerating small steps
	 * by clamping 'now' to t_open; confirm against NTP behavior. */
	assert(now >= sp->t_open);
	assert(VTAILQ_EMPTY(&sp->privs->privs));
	VSL(SLT_SessClose, sp->vxid, "%s %.3f",
	    sess_close_2str(reason, 0), now - sp->t_open);
	VSL(SLT_End, sp->vxid, "%s", "");
	Lck_Delete(&sp->mtx);
	MPL_Free(pp->mpl_sess, sp);
}
/*
 * Tear down the request's delivery-processor stack by popping every
 * remaining VDP entry, front first, until the list is empty.
 */
void
VDP_close(struct req *req)
{

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	for (;;) {
		if (VTAILQ_EMPTY(&req->vdp))
			break;
		vdp_pop(req, VTAILQ_FIRST(&req->vdp)->func);
	}
}
/*
 * Dispose of an objhead once the final reference has been dropped.
 *
 * The caller guarantees a zero refcount and an empty objcore list;
 * anything else is a panic.
 */
void
HSH_DeleteObjHead(struct dstat *ds, struct objhead *oh)
{

	/* Must be completely unreferenced and empty before teardown. */
	AZ(oh->refcnt);
	assert(VTAILQ_EMPTY(&oh->objcs));

	/* Account the removal on the provided counter set. */
	ds->n_objecthead--;

	Lck_Delete(&oh->mtx);
	FREE_OBJ(oh);
}
/*
 * Panic-dump the state of a busyobj into 'vsb'.
 *
 * Best-effort: sub-structures are only dumped when their magic checks
 * out or their workspace pointer is non-NULL, since a panic may catch
 * the busyobj in any state of construction or teardown.
 */
static void
pan_busyobj(struct vsb *vsb, const struct busyobj *bo)
{
	struct vfp_entry *vfe;
	const char *p;

	VSB_printf(vsb, "busyobj = %p {\n", bo);
	VSB_indent(vsb, 2);
	pan_ws(vsb, bo->ws);
	VSB_printf(vsb, "refcnt = %u,\n", bo->refcount);
	VSB_printf(vsb, "retries = %d, ", bo->retries);
	VSB_printf(vsb, "failed = %d, ", bo->vfc->failed);
	VSB_printf(vsb, "state = %d,\n", (int)bo->state);
	VSB_printf(vsb, "flags = {");
	p = "";
	/* Expand one printf per flag set, via the flag table header. */
	/*lint -save -esym(438,p) */
#define BO_FLAG(l, r, w, d) \
	if(bo->l) { VSB_printf(vsb, "%s" #l, p); p = ", "; }
#include "tbl/bo_flags.h"
#undef BO_FLAG
	/*lint -restore */
	VSB_printf(vsb, "},\n");

	if (VALID_OBJ(bo->htc, HTTP_CONN_MAGIC))
		pan_htc(vsb, bo->htc);

	if (!VTAILQ_EMPTY(&bo->vfc->vfp)) {
		VSB_printf(vsb, "filters =");
		VTAILQ_FOREACH(vfe, &bo->vfc->vfp, list)
			VSB_printf(vsb, " %s=%d",
			    vfe->vfp->name, (int)vfe->closed);
		VSB_printf(vsb, "\n");
	}

	VDI_Panic(bo->director_req, vsb, "director_req");
	if (bo->director_resp == bo->director_req)
		VSB_printf(vsb, "director_resp = director_req,\n");
	else
		VDI_Panic(bo->director_resp, vsb, "director_resp");

	if (bo->bereq != NULL && bo->bereq->ws != NULL)
		pan_http(vsb, "bereq", bo->bereq);
	if (bo->beresp != NULL && bo->beresp->ws != NULL)
		pan_http(vsb, "beresp", bo->beresp);
	if (bo->fetch_objcore)
		pan_objcore(vsb, "fetch", bo->fetch_objcore);
	if (bo->stale_oc)
		pan_objcore(vsb, "ims", bo->stale_oc);
	VCL_Panic(vsb, bo->vcl);
	VSB_indent(vsb, -2);
	VSB_printf(vsb, "},\n");
}
/*
 * Empty a list of VSL include/exclude filters, freeing the compiled
 * regex held by each entry.
 */
static void
vsl_IX_free(vslf_list *list)
{
	struct vslf *vslf;

	while ((vslf = VTAILQ_FIRST(list)) != NULL) {
		CHECK_OBJ_NOTNULL(vslf, VSLF_MAGIC);
		VTAILQ_REMOVE(list, vslf, list);
		/* Each entry carries a compiled regex; release it. */
		AN(vslf->vre);
		VRE_free(&vslf->vre);
		AZ(vslf->vre);
	}
}
/*
 * Empty a list of VSL include/exclude filters, freeing each entry's
 * optional tag bitmap and its compiled regex.
 */
static void
vsl_IX_free(vslf_list *filters)
{
	struct vslf *vslf;

	while ((vslf = VTAILQ_FIRST(filters)) != NULL) {
		CHECK_OBJ_NOTNULL(vslf, VSLF_MAGIC);
		VTAILQ_REMOVE(filters, vslf, list);
		/* The tag bitmap is optional, the regex is not. */
		if (vslf->tags)
			vbit_destroy(vslf->tags);
		AN(vslf->vre);
		VRE_free(&vslf->vre);
		AZ(vslf->vre);
	}
}
/*
 * Destroy a VXP parser instance, releasing every tracked memory bit
 * (pointer payload plus carrier node) and then the parser itself.
 * Consumes and NULLs *pvxp.
 */
static void
vxp_Delete(struct vxp **pvxp)
{
	struct vxp *vxp;
	struct membit *mb;

	TAKE_OBJ_NOTNULL(vxp, pvxp, VXP_MAGIC);
	while ((mb = VTAILQ_FIRST(&vxp->membits)) != NULL) {
		VTAILQ_REMOVE(&vxp->membits, mb, list);
		free(mb->ptr);
		free(mb);
	}
	FREE_OBJ(vxp);
}
void SES_DeletePool(struct sesspool *sp, struct worker *wrk) { struct sessmem *sm; CHECK_OBJ_NOTNULL(sp, SESSPOOL_MAGIC); CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); Lck_Lock(&sp->mtx); while (!VTAILQ_EMPTY(&sp->freelist)) { sm = VTAILQ_FIRST(&sp->freelist); CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC); VTAILQ_REMOVE(&sp->freelist, sm, list); FREE_OBJ(sm); wrk->stats.sessmem_free++; sp->nsess--; } AZ(sp->nsess); Lck_Unlock(&sp->mtx); Lck_Delete(&sp->mtx); FREE_OBJ(sp); }
static void pan_busyobj(const struct busyobj *bo) { struct vfp_entry *vfe; VSB_printf(pan_vsp, " busyobj = %p {\n", bo); pan_ws(bo->ws, 4); VSB_printf(pan_vsp, " refcnt = %u\n", bo->refcount); VSB_printf(pan_vsp, " retries = %d\n", bo->retries); VSB_printf(pan_vsp, " failed = %d\n", bo->vfc->failed); VSB_printf(pan_vsp, " state = %d\n", (int)bo->state); #define BO_FLAG(l, r, w, d) if(bo->l) VSB_printf(pan_vsp, " is_" #l "\n"); #include "tbl/bo_flags.h" #undef BO_FLAG VSB_printf(pan_vsp, " bodystatus = %d (%s),\n", bo->htc.body_status, body_status_2str(bo->htc.body_status)); if (!VTAILQ_EMPTY(&bo->vfc->vfp)) { VSB_printf(pan_vsp, " filters ="); VTAILQ_FOREACH(vfe, &bo->vfc->vfp, list) VSB_printf(pan_vsp, " %s=%d", vfe->vfp->name, (int)vfe->closed); VSB_printf(pan_vsp, "\n"); } VSB_printf(pan_vsp, " },\n"); if (VALID_OBJ(bo->vbc, BACKEND_MAGIC)) pan_vbc(bo->vbc); if (bo->bereq->ws != NULL) pan_http("bereq", bo->bereq, 4); if (bo->beresp->ws != NULL) pan_http("beresp", bo->beresp, 4); pan_ws(bo->ws_o, 4); if (bo->fetch_objcore) pan_objcore("FETCH", bo->fetch_objcore); if (bo->fetch_obj) pan_object("FETCH", bo->fetch_obj); if (bo->ims_obj) pan_object("IMS", bo->ims_obj); VSB_printf(pan_vsp, " }\n"); }
void mgt_cli_master(const char *M_arg) { const char *err; int error; AN(M_arg); error = VSS_resolver(M_arg, NULL, marg_cb, NULL, &err); if (err != NULL) ARGV_ERR("Could resolve -M argument to address\n\t%s\n", err); AZ(error); if (VTAILQ_EMPTY(&m_addr_list)) ARGV_ERR("Could not resolve -M argument to address\n"); AZ(M_poker); M_poker = vev_new(); AN(M_poker); M_poker->timeout = M_poll; M_poker->callback = Marg_poker; M_poker->name = "-M poker"; AZ(vev_add(mgt_evb, M_poker)); }
/*
 * Run the client request state machine until it yields something other
 * than REQ_FSM_MORE.  On REQ_FSM_DONE, log ESI body bytes (if any),
 * release the buffered request body storage and detach the worker.
 */
enum req_fsm_nxt
CNT_Request(struct worker *wrk, struct req *req)
{
	enum req_fsm_nxt nxt;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	/*
	 * Possible entrance states
	 */
	assert(
	    req->req_step == R_STP_LOOKUP ||
	    req->req_step == R_STP_RECV);

	AN(req->vsl->wid & VSL_CLIENTMARKER);

	req->wrk = wrk;

	for (nxt = REQ_FSM_MORE; nxt == REQ_FSM_MORE; ) {
		/*
		 * This is a good place to be paranoid about the various
		 * pointers still pointing to the things we expect.
		 */
		CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
		CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
		CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

		/*
		 * We don't want the thread workspace to be used for
		 * anything of long duration, so mandate that it be
		 * empty on state-transitions.
		 */
		WS_Assert(wrk->aws);
		assert(wrk->aws->s == wrk->aws->f);

		/* Dispatch to the handler for the current step; the
		 * case labels are generated from the step table. */
		switch (req->req_step) {
#define REQ_STEP(l,u,arg) \
		case R_STP_##u: \
			if (DO_DEBUG(DBG_REQ_STATE)) \
				cnt_diag(req, #u); \
			nxt = cnt_##l arg; \
			break;
#include "tbl/steps.h"
#undef REQ_STEP
		default:
			WRONG("State engine misfire");
		}
		WS_Assert(wrk->aws);
		CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
	}

	if (nxt == REQ_FSM_DONE) {
		AN(req->vsl->wid);
		if (req->res_mode & (RES_ESI|RES_ESI_CHILD))
			VSLb(req->vsl, SLT_ESI_BodyBytes, "%ju",
			    (uintmax_t)req->resp_bodybytes);

		/* Release any buffered request body storage. */
		while (!VTAILQ_EMPTY(&req->body->list)) {
			st = VTAILQ_FIRST(&req->body->list);
			VTAILQ_REMOVE(&req->body->list, st, list);
			STV_free(st);
		}
		req->wrk = NULL;
	}
	assert(WRW_IsReleased(wrk));
	return (nxt);
}
/*
 * Fetch the response body from the backend according to its
 * body-status, pushing the bytes through the configured VFP into the
 * object's storage list.
 *
 * Returns 0 on success; on failure returns the (non-zero) source line
 * number for diagnostics, after freeing any partial storage and
 * closing the backend connection.
 */
int
FetchBody(struct worker *wrk, struct object *obj)
{
	int cls;
	struct storage *st;
	int mklen;
	ssize_t cl;
	struct http_conn *htc;
	struct busyobj *bo;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	bo = wrk->busyobj;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	AZ(bo->fetch_obj);
	CHECK_OBJ_NOTNULL(bo->vbc, VBC_MAGIC);
	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(obj->http, HTTP_MAGIC);

	htc = &bo->htc;

	if (bo->vfp == NULL)
		bo->vfp = &vfp_nop;	/* default: pass-through filter */

	AssertObjCorePassOrBusy(obj->objcore);

	AZ(bo->vgz_rx);
	AZ(VTAILQ_FIRST(&obj->store));

	bo->fetch_obj = obj;
	bo->fetch_failed = 0;

	/* XXX: pick up estimate from objdr ? */
	cl = 0;
	/* cls < 0 signals fetch failure, cls > 0 means close the
	 * backend connection afterwards; mklen decides whether a
	 * Content-Length header gets (re)written below. */
	switch (bo->body_status) {
	case BS_NONE:
		cls = 0;
		mklen = 0;
		break;
	case BS_ZERO:
		cls = 0;
		mklen = 1;
		break;
	case BS_LENGTH:
		cl = fetch_number(bo->h_content_length, 10);
		bo->vfp->begin(wrk, cl > 0 ? cl : 0);
		cls = fetch_straight(wrk, htc, cl);
		mklen = 1;
		if (bo->vfp->end(wrk))
			cls = -1;
		break;
	case BS_CHUNKED:
		bo->vfp->begin(wrk, cl);
		cls = fetch_chunked(wrk, htc);
		mklen = 1;
		if (bo->vfp->end(wrk))
			cls = -1;
		break;
	case BS_EOF:
		bo->vfp->begin(wrk, cl);
		cls = fetch_eof(wrk, htc);
		mklen = 1;
		if (bo->vfp->end(wrk))
			cls = -1;
		break;
	case BS_ERROR:
		cls = 1;
		mklen = 0;
		break;
	default:
		cls = 0;
		mklen = 0;
		INCOMPL();
	}
	AZ(bo->vgz_rx);

	/*
	 * It is OK for ->end to just leave the last storage segment
	 * sitting on wrk->storage, we will always call vfp_nop_end()
	 * to get it trimmed or thrown out if empty.
	 */
	AZ(vfp_nop_end(wrk));

	bo->fetch_obj = NULL;

	WSLB(wrk, SLT_Fetch_Body, "%u(%s) cls %d mklen %d",
	    bo->body_status, body_status(bo->body_status), cls, mklen);

	if (bo->body_status == BS_ERROR) {
		VDI_CloseFd(wrk, &bo->vbc);
		return (__LINE__);
	}

	if (cls < 0) {
		wrk->stats.fetch_failed++;
		/* XXX: Wouldn't this store automatically be released ? */
		while (!VTAILQ_EMPTY(&obj->store)) {
			st = VTAILQ_FIRST(&obj->store);
			VTAILQ_REMOVE(&obj->store, st, list);
			STV_free(st);
		}
		VDI_CloseFd(wrk, &bo->vbc);
		obj->len = 0;
		return (__LINE__);
	}
	AZ(bo->fetch_failed);

	if (cls == 0 && bo->should_close)
		cls = 1;

	WSLB(wrk, SLT_Length, "%zd", obj->len);

	{
	/* Sanity check fetch methods accounting */
		ssize_t uu;

		uu = 0;
		VTAILQ_FOREACH(st, &obj->store, list)
			uu += st->len;
		if (bo->do_stream)
			/* Streaming might have started freeing stuff */
			assert (uu <= obj->len);
		else
			assert(uu == obj->len);
	}

	if (mklen > 0) {
		http_Unset(obj->http, H_Content_Length);
		http_PrintfHeader(wrk, bo->vbc->vsl_id, obj->http,
		    "Content-Length: %zd", obj->len);
	}

	if (cls)
		VDI_CloseFd(wrk, &bo->vbc);
	else
		VDI_RecycleFd(wrk, &bo->vbc);

	return (0);
}
/*
 * Pull the body through the VFP pipeline into the fetch object's
 * storage list, allocating segments as needed ('est' is only a size
 * hint for the first one).  On any pipeline error, or when delivery of
 * a pass object is abandoned, mark the backend connection for closing.
 * Unless streaming, the last segment is trimmed or discarded at the end.
 */
void
VFP_Fetch_Body(struct busyobj *bo, ssize_t est)
{
	ssize_t l;
	enum vfp_status vfps = VFP_ERROR;
	struct storage *st = NULL;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	AN(bo->vfp_nxt);

	if (est < 0)
		est = 0;

	if (vfp_suck_init(bo) != VFP_OK) {
		(void)VFP_Error(bo, "Fetch Pipeline failed to initialize");
		bo->should_close = 1;
		return;
	}

	do {
		if (bo->abandon) {
			/*
			 * A pass object and delivery was terminated
			 * We don't fail the fetch, in order for hit-for-pass
			 * objects to be created.
			 */
			AN(bo->fetch_objcore->flags & OC_F_PASS);
			VSLb(bo->vsl, SLT_FetchError,
			    "Pass delivery abandoned");
			vfps = VFP_END;
			bo->should_close = 1;
			break;
		}
		AZ(bo->failed);
		if (st == NULL) {
			st = VFP_GetStorage(bo, est);
			est = 0;	/* hint only applies to the first segment */
		}
		if (st == NULL) {
			bo->should_close = 1;
			(void)VFP_Error(bo, "Out of storage");
			break;
		}

		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
		assert(st == VTAILQ_LAST(&bo->fetch_obj->store, storagehead));
		l = st->space - st->len;
		AZ(bo->failed);
		vfps = VFP_Suck(bo, st->ptr + st->len, &l);
		if (l > 0 && vfps != VFP_ERROR) {
			AZ(VTAILQ_EMPTY(&bo->fetch_obj->store));
			VBO_extend(bo, l);
		}
		if (st->len == st->space)
			st = NULL;	/* segment full, get a fresh one */
	} while (vfps == VFP_OK);

	if (vfps == VFP_ERROR) {
		AN(bo->failed);
		(void)VFP_Error(bo, "Fetch Pipeline failed to process");
		bo->should_close = 1;
	}

	vfp_suck_fini(bo);

	/*
	 * Trim or delete the last segment, if any
	 */

	st = VTAILQ_LAST(&bo->fetch_obj->store, storagehead);
	/* XXX: Temporary: Only trim if we are not streaming */
	if (st != NULL && !bo->do_stream) {
		/* None of this is safe under streaming */
		if (st->len == 0) {
			VTAILQ_REMOVE(&bo->fetch_obj->store, st, list);
			STV_free(st);
		} else if (st->len < st->space) {
			STV_trim(st, st->len, 1);
		}
	}
}
/*
 * Run the client request state machine until it yields something other
 * than REQ_FSM_MORE.  On REQ_FSM_DONE, log length/timing records,
 * release buffered request body storage, charge session accounting
 * and clear the VXID for reuse.
 */
enum req_fsm_nxt
CNT_Request(struct worker *wrk, struct req *req)
{
	enum req_fsm_nxt nxt;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	/*
	 * Possible entrance states
	 */
	assert(
	    req->req_step == R_STP_LOOKUP ||
	    req->req_step == R_STP_RECV);

	AN(req->vsl->wid & VSL_CLIENTMARKER);

	req->wrk = wrk;

	for (nxt = REQ_FSM_MORE; nxt == REQ_FSM_MORE; ) {
		/*
		 * This is a good place to be paranoid about the various
		 * pointers still pointing to the things we expect.
		 */
		CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
		CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
		CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

		/*
		 * We don't want the thread workspace to be used for
		 * anything of long duration, so mandate that it be
		 * empty on state-transitions.
		 */
		WS_Assert(wrk->aws);
		assert(wrk->aws->s == wrk->aws->f);

		/* Dispatch to the handler for the current step; the
		 * case labels are generated from the step table. */
		switch (req->req_step) {
#define REQ_STEP(l,u,arg) \
		case R_STP_##u: \
			if (DO_DEBUG(DBG_REQ_STATE)) \
				cnt_diag(req, #u); \
			nxt = cnt_##l arg; \
			break;
#include "tbl/steps.h"
#undef REQ_STEP
		default:
			WRONG("State engine misfire");
		}
		WS_Assert(wrk->aws);
		CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
	}

	if (nxt == REQ_FSM_DONE) {
		/* XXX: Workaround for pipe */
		if (req->sp->fd >= 0) {
			VSLb(req->vsl, SLT_Length, "%ju",
			    (uintmax_t)req->resp_bodybytes);
		}
		VSLb(req->vsl, SLT_ReqEnd, "%.9f %.9f %.9f %.9f %.9f",
		    req->t_req,
		    req->sp->t_idle,
		    req->sp->t_idle - req->t_resp,
		    req->t_resp - req->t_req,
		    req->sp->t_idle - req->t_resp);

		/* Release any buffered request body storage. */
		while (!VTAILQ_EMPTY(&req->body)) {
			st = VTAILQ_FIRST(&req->body);
			VTAILQ_REMOVE(&req->body, st, list);
			STV_free(st);
		}

		/* done == 2 was charged by cache_hash.c */
		SES_Charge(wrk, req);

		/*
		 * Nuke the VXID, cache_http1_fsm.c::http1_dissect() will
		 * allocate a new one when necessary.
		 */
		VSLb(req->vsl, SLT_End, "%s", "");
		req->vsl->wid = 0;
	}

	req->wrk = NULL;

	assert(WRW_IsReleased(wrk));
	return (nxt);
}
/*
 * Guard thread for one memory pool.
 *
 * Periodically wakes up and, with at most one item in hand ('mi'),
 * grows the pool toward min_pool, shrinks it below max_pool, drains
 * the surplus list, times out stale items, and finally tears the
 * whole pool down when self_destruct is set.  Allocation and freeing
 * happen outside the pool lock; only list manipulation is locked.
 */
static void *
mpl_guard(void *priv)
{
	struct mempool *mpl;
	struct memitem *mi = NULL;
	double mpl_slp __state_variable__(mpl_slp);
	double last = 0;

	CAST_OBJ_NOTNULL(mpl, priv, MEMPOOL_MAGIC);
	mpl_slp = 0.15;	// random
	while (1) {
		VTIM_sleep(mpl_slp);
		mpl_slp = 0.814;	// random
		mpl->t_now = VTIM_real();

		/* Discard an in-hand item that is no longer wanted or
		 * is too small for the current size parameter. */
		if (mi != NULL && (mpl->n_pool > mpl->param->max_pool ||
		    mi->size < *mpl->cur_size)) {
			FREE_OBJ(mi);
			mi = NULL;
		}

		/* Pre-allocate outside the lock if the pool is short. */
		if (mi == NULL && mpl->n_pool < mpl->param->min_pool)
			mi = mpl_alloc(mpl);

		/* Decide whether there is anything worth taking the
		 * lock for this round. */
		if (mpl->n_pool < mpl->param->min_pool && mi != NULL) {
			/* can do */
		} else if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			/* can do */
		} else if (!VTAILQ_EMPTY(&mpl->surplus)) {
			/* can do */
		} else if (last + .1 * mpl->param->max_age < mpl->t_now) {
			/* should do */
		} else if (mpl->self_destruct) {
			/* can do */
		} else {
			continue;	/* nothing to do */
		}

		mpl_slp = 0.314;	// random

		if (Lck_Trylock(&mpl->mtx))
			continue;	/* try again next wakeup */

		if (mpl->self_destruct) {
			/* Drain both lists completely, then destroy
			 * the pool and exit the thread. */
			AZ(mpl->live);
			while (1) {
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->list);
					if (mi != NULL) {
						mpl->vsc->pool = --mpl->n_pool;
						VTAILQ_REMOVE(&mpl->list,
						    mi, list);
					}
				}
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->surplus);
					if (mi != NULL)
						VTAILQ_REMOVE(&mpl->surplus,
						    mi, list);
				}
				if (mi == NULL)
					break;
				FREE_OBJ(mi);
				mi = NULL;
			}
			VSM_Free(mpl->vsc);
			Lck_Unlock(&mpl->mtx);
			Lck_Delete(&mpl->mtx);
			FREE_OBJ(mpl);
			break;
		}

		/* Insert the pre-allocated item if still needed and
		 * still big enough. */
		if (mpl->n_pool < mpl->param->min_pool &&
		    mi != NULL && mi->size >= *mpl->cur_size) {
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = ++mpl->n_pool;
			mi->touched = mpl->t_now;
			VTAILQ_INSERT_HEAD(&mpl->list, mi, list);
			mi = NULL;
			mpl_slp = .01;	// random
		}

		/* Pool too large: take one item out for freeing. */
		if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->list);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = --mpl->n_pool;
			mpl->vsc->surplus++;
			VTAILQ_REMOVE(&mpl->list, mi, list);
			mpl_slp = .01;	// random
		}

		/* Otherwise pick up a surplus item for freeing. */
		if (mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->surplus);
			if (mi != NULL) {
				CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
				VTAILQ_REMOVE(&mpl->surplus, mi, list);
				mpl_slp = .01;	// random
			}
		}

		/* Time out the oldest (tail) item if it has gone
		 * untouched past max_age and the pool can spare it. */
		if (mi == NULL && mpl->n_pool > mpl->param->min_pool) {
			mi = VTAILQ_LAST(&mpl->list, memhead_s);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			if (mi->touched + mpl->param->max_age < mpl->t_now) {
				mpl->vsc->pool = --mpl->n_pool;
				mpl->vsc->timeout++;
				VTAILQ_REMOVE(&mpl->list, mi, list);
				mpl_slp = .01;	// random
			} else {
				mi = NULL;
				last = mpl->t_now;
			}
		} else if (mpl->n_pool <= mpl->param->min_pool) {
			last = mpl->t_now;
		}

		Lck_Unlock(&mpl->mtx);

		/* Free whatever was removed, outside the lock. */
		if (mi != NULL) {
			FREE_OBJ(mi);
			mi = NULL;
		}
	}
	return (NULL);
}
/*
 * Fetch the response body over HTTP/1 according to the connection's
 * body-status, pushing the bytes through the configured VFP into the
 * fetch object's storage list.
 *
 * On exit the bereq/beresp workspaces are torn down, the backend
 * connection is closed or recycled, and bo->state is either
 * BOS_FETCHING (success, with accounting sanity-checked) or
 * BOS_FAILED.
 */
void
V1F_fetch_body(struct worker *wrk, struct busyobj *bo)
{
	int cls;
	struct storage *st;
	ssize_t cl;
	struct http_conn *htc;
	struct object *obj;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	htc = &bo->htc;
	CHECK_OBJ_ORNULL(bo->vbc, VBC_MAGIC);
	obj = bo->fetch_obj;
	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(obj->http, HTTP_MAGIC);

	assert(bo->state == BOS_FETCHING);

	/*
	 * XXX: The busyobj needs a dstat, but it is not obvious which one
	 * XXX: it should be (own/borrowed). For now borrow the wrk's.
	 */
	AZ(bo->stats);
	bo->stats = &wrk->stats;

	AN(bo->vfp);
	AZ(bo->vgz_rx);
	assert(VTAILQ_EMPTY(&obj->store));

	/* XXX: pick up estimate from objdr ? */
	cl = 0;
	/* Non-zero cls means the backend connection must be closed
	 * afterwards rather than recycled. */
	cls = bo->should_close;
	switch (htc->body_status) {
	case BS_NONE:
		break;
	case BS_ZERO:
		break;
	case BS_LENGTH:
		cl = vbf_fetch_number(bo->h_content_length, 10);
		bo->vfp->begin(bo, cl);
		if (bo->state == BOS_FETCHING && cl > 0)
			cls |= vbf_fetch_straight(bo, htc, cl);
		if (bo->vfp->end(bo))
			assert(bo->state == BOS_FAILED);
		break;
	case BS_CHUNKED:
		bo->vfp->begin(bo, cl > 0 ? cl : 0);
		if (bo->state == BOS_FETCHING)
			cls |= vbf_fetch_chunked(bo, htc);
		if (bo->vfp->end(bo))
			assert(bo->state == BOS_FAILED);
		break;
	case BS_EOF:
		bo->vfp->begin(bo, cl > 0 ? cl : 0);
		if (bo->state == BOS_FETCHING)
			vbf_fetch_eof(bo, htc);
		cls = 1;	/* EOF framing always closes the connection */
		if (bo->vfp->end(bo))
			assert(bo->state == BOS_FAILED);
		break;
	case BS_ERROR:
		cls |= VFP_Error(bo, "error incompatible Transfer-Encoding");
		break;
	default:
		INCOMPL();
	}
	bo->t_body = VTIM_mono();

	AZ(bo->vgz_rx);

	/*
	 * Trim or delete the last segment, if any
	 */
	st = VTAILQ_LAST(&bo->fetch_obj->store, storagehead);
	/* XXX: Temporary: Only trim if we are not streaming */
	if (st != NULL && !bo->do_stream) {
		/* XXX: is any of this safe under streaming ? */
		if (st->len == 0) {
			VTAILQ_REMOVE(&bo->fetch_obj->store, st, list);
			STV_free(st);
		} else if (st->len < st->space) {
			STV_trim(st, st->len, 1);
		}
	}

	bo->vfp = NULL;

	VSLb(bo->vsl, SLT_Fetch_Body, "%u(%s) cls %d",
	    htc->body_status, body_status_2str(htc->body_status), cls);

	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);

	if (bo->vbc != NULL) {
		if (cls)
			VDI_CloseFd(&bo->vbc);
		else
			VDI_RecycleFd(&bo->vbc);
	}
	AZ(bo->vbc);

	if (bo->state == BOS_FAILED) {
		wrk->stats.fetch_failed++;
	} else {
		assert(bo->state == BOS_FETCHING);

		VSLb(bo->vsl, SLT_Length, "%zd", obj->len);

		{
		/* Sanity check fetch methods accounting */
			ssize_t uu;

			uu = 0;
			VTAILQ_FOREACH(st, &obj->store, list)
				uu += st->len;
			if (bo->do_stream)
				/* Streaming might have started freeing stuff */
				assert(uu <= obj->len);
			else
				assert(uu == obj->len);
		}
	}
	bo->stats = NULL;	/* return the borrowed dstat */
}
/*
 * Fetch state: the backend fetch failed.
 *
 * Builds a synthetic 503 "Backend fetch failed" response, runs the
 * vcl_backend_error method, and depending on its verdict either fails,
 * retries (up to max_retries), or stores the synthetic body and
 * finishes the object.
 */
static enum fetch_step
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	double now;
	uint8_t *ptr;
	struct vsb *synth_body;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	AN(bo->fetch_objcore->flags & OC_F_BUSY);
	assert(bo->director_state == DIR_S_NULL);

	wrk->stats->fetch_failed++;

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	/* Throw away any partially fetched object body. */
	if (bo->fetch_objcore->stobj->stevedore != NULL)
		ObjFreeObj(bo->wrk, bo->fetch_objcore);

	if (bo->storage == NULL)
		bo->storage = STV_next();

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	http_PutResponse(bo->beresp, "HTTP/1.1", 503, "Backend fetch failed");
	http_TimeHeader(bo->beresp, "Date: ", now);
	http_SetHeader(bo->beresp, "Server: Varnish");

	bo->fetch_objcore->t_origin = now;
	if (!VTAILQ_EMPTY(&bo->fetch_objcore->objhead->waitinglist)) {
		/*
		 * If there is a waitinglist, it means that there is no
		 * grace-able object, so cache the error return for a
		 * short time, so the waiting list can drain, rather than
		 * each objcore on the waiting list sequentially attempt
		 * to fetch from the backend.
		 */
		bo->fetch_objcore->ttl = 1;
		bo->fetch_objcore->grace = 5;
		bo->fetch_objcore->keep = 5;
	} else {
		bo->fetch_objcore->ttl = 0;
		bo->fetch_objcore->grace = 0;
		bo->fetch_objcore->keep = 0;
	}

	synth_body = VSB_new_auto();
	AN(synth_body);

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, synth_body);

	AZ(VSB_finish(synth_body));

	if (wrk->handling == VCL_RET_ABANDON || wrk->handling == VCL_RET_FAIL) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	if (wrk->handling == VCL_RET_RETRY) {
		VSB_destroy(&synth_body);
		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);
		VSLb(bo->vsl, SLT_VCL_Error, "Too many retries, failing");
		return (F_STP_FAIL);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	assert(bo->vfc->wrk == bo->wrk);
	assert(bo->vfc->oc == bo->fetch_objcore);
	assert(bo->vfc->resp == bo->beresp);
	assert(bo->vfc->req == bo->bereq);

	if (vbf_beresp2obj(bo)) {
		(void)VFP_Error(bo->vfc, "Could not get storage");
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	/* Copy the synthetic body into object storage, segment by
	 * segment; a storage failure truncates the body silently. */
	ll = VSB_len(synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
			break;
		if (l > ll)
			l = ll;	/* don't copy past the synthetic body */
		memcpy(ptr, VSB_data(synth_body) + o, l);
		VFP_Extend(bo->vfc, l);
		ll -= l;
		o += l;
	}
	AZ(ObjSetU64(wrk, bo->fetch_objcore, OA_LEN, o));
	VSB_destroy(&synth_body);
	HSH_Unbusy(wrk, bo->fetch_objcore);
	ObjSetState(wrk, bo->fetch_objcore, BOS_FINISHED);
	return (F_STP_DONE);
}
/*
 * Fetch the response body from the backend according to the worker's
 * body-status, pushing the bytes through the configured VFP into the
 * object's storage list.
 *
 * Returns 0 on success; on failure returns the (non-zero) source line
 * number for diagnostics, after freeing any partial storage and
 * closing the backend connection.
 */
int
FetchBody(struct sess *sp)
{
	int cls;
	struct storage *st;
	struct worker *w;
	int mklen;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(sp->wrk, WORKER_MAGIC);
	w = sp->wrk;
	CHECK_OBJ_NOTNULL(sp->obj, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(sp->obj->http, HTTP_MAGIC);

	if (w->vfp == NULL)
		w->vfp = &vfp_nop;	/* default: pass-through filter */

	AN(sp->director);
	AssertObjCorePassOrBusy(sp->obj->objcore);

	AZ(w->vgz_rx);
	AZ(VTAILQ_FIRST(&sp->obj->store));
	/* cls < 0 signals fetch failure, cls > 0 means close the
	 * backend connection afterwards; mklen decides whether a
	 * Content-Length header gets (re)written below. */
	switch (w->body_status) {
	case BS_NONE:
		cls = 0;
		mklen = 0;
		break;
	case BS_ZERO:
		cls = 0;
		mklen = 1;
		break;
	case BS_LENGTH:
		cls = fetch_straight(sp, w->htc, w->h_content_length);
		mklen = 1;
		XXXAZ(w->vfp->end(sp));
		break;
	case BS_CHUNKED:
		cls = fetch_chunked(sp, w->htc);
		mklen = 1;
		XXXAZ(w->vfp->end(sp));
		break;
	case BS_EOF:
		cls = fetch_eof(sp, w->htc);
		mklen = 1;
		XXXAZ(w->vfp->end(sp));
		break;
	case BS_ERROR:
		cls = 1;
		mklen = 0;
		break;
	default:
		cls = 0;
		mklen = 0;
		INCOMPL();
	}
	AZ(w->vgz_rx);

	/*
	 * It is OK for ->end to just leave the last storage segment
	 * sitting on w->storage, we will always call vfp_nop_end()
	 * to get it trimmed or thrown out if empty.
	 */
	AZ(vfp_nop_end(sp));

	WSL(w, SLT_Fetch_Body, sp->vbc->vsl_id, "%u(%s) cls %d mklen %u",
	    w->body_status, body_status(w->body_status), cls, mklen);

	if (w->body_status == BS_ERROR) {
		VDI_CloseFd(sp);
		return (__LINE__);
	}

	if (cls < 0) {
		w->stats.fetch_failed++;
		/* XXX: Wouldn't this store automatically be released ? */
		while (!VTAILQ_EMPTY(&sp->obj->store)) {
			st = VTAILQ_FIRST(&sp->obj->store);
			VTAILQ_REMOVE(&sp->obj->store, st, list);
			STV_free(st);
		}
		VDI_CloseFd(sp);
		sp->obj->len = 0;
		return (__LINE__);
	}

	if (cls == 0 && w->do_close)
		cls = 1;

	WSL(w, SLT_Length, sp->vbc->vsl_id, "%u", sp->obj->len);

	{
	/* Sanity check fetch methods accounting */
		ssize_t uu;

		uu = 0;
		VTAILQ_FOREACH(st, &sp->obj->store, list)
			uu += st->len;
		if (sp->objcore == NULL ||
		    (sp->objcore->flags & OC_F_PASS))
			/* Streaming might have started freeing stuff */
			assert (uu <= sp->obj->len);
		else
			assert(uu == sp->obj->len);
	}

	if (mklen > 0) {
		http_Unset(sp->obj->http, H_Content_Length);
		http_PrintfHeader(w, sp->vsl_id, sp->obj->http,
		    "Content-Length: %jd", (intmax_t)sp->obj->len);
	}

	if (cls)
		VDI_CloseFd(sp);
	else
		VDI_RecycleFd(sp);

	return (0);
}