int SES_Schedule(struct sess *sp) { struct sesspool *pp; CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); AZ(sp->wrk); pp = sp->sesspool; CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC); AN(pp->pool); AZ(sp->wrk); sp->task.func = ses_pool_task; sp->task.priv = sp; if (Pool_Task(pp->pool, &sp->task, POOL_QUEUE_FRONT)) { VSC_C_main->client_drop_late++; sp->t_idle = VTIM_real(); if (sp->req != NULL && sp->req->vcl != NULL) { /* * A session parked on a busy object can come here * after it wakes up. Loose the VCL reference. */ VCL_Rel(&sp->req->vcl); } SES_Delete(sp, "dropped", sp->t_idle); return (1); } return (0); }
int SES_Schedule(struct sess *sp) { struct sesspool *pp; pp = ses_getpool(sp); AZ(sp->wrk); AN(pp->pool); if (Pool_Schedule(pp->pool, sp)) { VSC_C_main->client_drop_late++; sp->t_idle = VTIM_real(); if (sp->req->vcl != NULL) { /* * A session parked on a busy object can come here * after it wakes up. Loose the VCL reference. */ VCL_Rel(&sp->req->vcl); } SES_Delete(sp, "dropped", sp->t_idle); return (1); } return (0); }
/*
 * Worker-pool task body for a session: attach the worker to the
 * session, run the request state machine, then verify the worker is
 * clean before it is returned to the pool.
 */
static void
ses_pool_task(struct worker *wrk, void *arg)
{
	struct sess *sp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(sp, arg, SESS_MAGIC);

	AZ(wrk->aws->r);
	wrk->lastused = NAN;
	THR_SetSession(sp);
	if (wrk->sp == NULL)
		wrk->sp = sp;
	else
		assert(wrk->sp == sp);
	AZ(sp->wrk);
	sp->wrk = wrk;
	/* CNT_Session() takes over the session; it may be handed to
	 * another thread or deleted inside, hence the NULL below. */
	CNT_Session(sp);
	sp = NULL;
	/* Cannot access sp now */
	THR_SetSession(NULL);
	wrk->sp = NULL;
	WS_Assert(wrk->aws);
	AZ(wrk->busyobj);
	AZ(wrk->wrw);
	/* Log buffer must have been flushed by the state machine */
	assert(wrk->vsl->wlp == wrk->vsl->wlb);
	if (cache_param->diag_bitmap & 0x00040000) {
		/* Debug bit: drop the cached VCL ref after every task
		 * (presumably to exercise VCL discard paths — confirm) */
		if (wrk->vcl != NULL)
			VCL_Rel(&wrk->vcl);
	}
}
/*
 * Drop one reference to a busyobj; on the last reference, log the
 * backend byte accounting, end its VSL transaction, release its
 * objcore/VCL/vary resources, and recycle or free the busyobj.
 * *pbo is always cleared for the caller.
 */
void
VBO_DerefBusyObj(struct worker *wrk, struct busyobj **pbo)
{
	struct busyobj *bo;
	struct objcore *oc = NULL;
	unsigned r;

	CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);
	AN(pbo);
	bo = *pbo;
	*pbo = NULL;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_obj, OBJECT_MAGIC);

	/*
	 * The refcount is protected by the objhead mutex when a fetch
	 * objcore is attached, otherwise by the busyobj's own mutex.
	 */
	if (bo->fetch_objcore != NULL) {
		oc = bo->fetch_objcore;
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
		CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
		Lck_Lock(&oc->objhead->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&oc->objhead->mtx);
	} else {
		Lck_Lock(&bo->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&bo->mtx);
	}

	if (r)
		return;

	/* Last reference: emit the bereq/beresp header+body byte counts */
	VSLb(bo->vsl, SLT_BereqAcct, "%ju %ju %ju %ju %ju %ju",
	    (uintmax_t)bo->acct.bereq_hdrbytes,
	    (uintmax_t)bo->acct.bereq_bodybytes,
	    (uintmax_t)(bo->acct.bereq_hdrbytes + bo->acct.bereq_bodybytes),
	    (uintmax_t)bo->acct.beresp_hdrbytes,
	    (uintmax_t)bo->acct.beresp_bodybytes,
	    (uintmax_t)(bo->acct.beresp_hdrbytes + bo->acct.beresp_bodybytes));
	VSL_End(bo->vsl);

	if (bo->fetch_objcore != NULL) {
		AN(wrk);
		(void)HSH_DerefObjCore(&wrk->stats, &bo->fetch_objcore);
	}

	VCL_Rel(&bo->vcl);

	if (bo->vary != NULL)
		free(bo->vary);

	/*
	 * Wipe the struct from 'refcount' to the end; fields laid out
	 * before it survive for reuse (layout defined elsewhere).
	 */
	memset(&bo->refcount, 0,
	    sizeof *bo - offsetof(struct busyobj, refcount));

	/* Park the busyobj on this worker for reuse, if enabled */
	if (cache_param->bo_cache && wrk != NULL && wrk->nbo == NULL)
		wrk->nbo = bo;
	else
		VBO_Free(&bo);
}
static void * wrk_thread_real(void *priv, unsigned thread_workspace) { struct worker *w, ww; unsigned char ws[thread_workspace]; THR_SetName("cache-worker"); w = &ww; memset(w, 0, sizeof *w); w->magic = WORKER_MAGIC; w->lastused = NAN; AZ(pthread_cond_init(&w->cond, NULL)); WS_Init(w->aws, "wrk", ws, thread_workspace); VSL(SLT_WorkThread, 0, "%p start", w); Pool_Work_Thread(priv, w); AZ(w->pool); VSL(SLT_WorkThread, 0, "%p end", w); if (w->vcl != NULL) VCL_Rel(&w->vcl); AZ(pthread_cond_destroy(&w->cond)); if (w->nvbo != NULL) VBO_Free(&w->nvbo); HSH_Cleanup(w); WRK_SumStat(w); return (NULL); }
/*
 * Switch the request over to another (labelled) VCL: leave the task
 * scope of the current VCL, swap the reference, and enter the task
 * scope of the new one.
 */
void
VRT_vcl_select(VRT_CTX, VCL_VCL vcl)
{
	struct req *req = ctx->req;

	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	VCL_TaskLeave(req->vcl, req->privs);
	VCL_Rel(&req->vcl);
	vcl_get(&req->vcl, vcl);
	/* XXX: better logging */
	VSLb(req->vsl, SLT_Debug, "Now using %s VCL", vcl->loaded_name);
	VCL_TaskEnter(req->vcl, req->privs);
}
/*
 * Make *vcc reference the currently active VCL.  A stale reference is
 * dropped first; if no VCL is active yet, poll until one appears.
 */
void
VCL_Refresh(struct vcl **vcc)
{

	if (*vcc == vcl_active)
		return;

	if (*vcc != NULL)
		VCL_Rel(vcc);

	/* XXX: optimize locking */
	while (vcl_active == NULL)
		(void)usleep(100000);

	vcl_get(vcc, NULL);
}
/*
 * Worker thread body (VLA variant): allocate all per-thread storage on
 * this thread's stack — shm log buffer, workspace, three HTTP header
 * blocks, writev iovecs, SHA256 context — wire it into the worker
 * struct, and run the pool work loop until told to stop.
 */
static void *
wrk_thread_real(void *priv, unsigned shm_workspace, unsigned sess_workspace,
    uint16_t nhttp, unsigned http_space, unsigned siov)
{
	struct worker *w, ww;
	uint32_t wlog[shm_workspace / 4];
	/* XXX: can we trust these to be properly aligned ? */
	unsigned char ws[sess_workspace];
	unsigned char http0[http_space];
	unsigned char http1[http_space];
	unsigned char http2[http_space];
	struct iovec iov[siov];
	struct SHA256Context sha256;

	THR_SetName("cache-worker");
	w = &ww;
	memset(w, 0, sizeof *w);
	w->magic = WORKER_MAGIC;
	w->lastused = NAN;
	/* shm log: begin/next at buffer start, end one past the last slot */
	w->wlb = w->wlp = wlog;
	w->wle = wlog + (sizeof wlog) / 4;
	w->sha256ctx = &sha256;
	/* bereq/beresp/resp header workspaces, nhttp headers each */
	w->bereq = HTTP_create(http0, nhttp);
	w->beresp = HTTP_create(http1, nhttp);
	w->resp = HTTP_create(http2, nhttp);
	w->wrw.iov = iov;
	w->wrw.siov = siov;
	w->wrw.ciov = siov;
	AZ(pthread_cond_init(&w->cond, NULL));
	WS_Init(w->ws, "wrk", ws, sess_workspace);
	VSL(SLT_WorkThread, 0, "%p start", w);
	Pool_Work_Thread(priv, w);
	AZ(w->pool);
	VSL(SLT_WorkThread, 0, "%p end", w);
	if (w->vcl != NULL)
		VCL_Rel(&w->vcl);
	AZ(pthread_cond_destroy(&w->cond));
	HSH_Cleanup(w);
	WRK_SumStat(w);
	return (NULL);
}
static void ses_req_pool_task(struct worker *wrk, void *arg) { struct req *req; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC); THR_SetRequest(req); AZ(wrk->aws->r); wrk->lastused = NAN; HTTP1_Session(wrk, req); WS_Assert(wrk->aws); AZ(wrk->wrw); if (DO_DEBUG(DBG_VCLREL) && wrk->vcl != NULL) VCL_Rel(&wrk->vcl); THR_SetRequest(NULL); }
/*
 * Pool task entry point for a request: dispatch on the session step
 * (only HTTP/1 steps are valid here) and run the HTTP/1 session.
 * NB: the return type is declared on a preceding line outside this
 * chunk; the definition here is kept as-is.
 */
SES_Proto_Req(struct worker *wrk, void *arg)
{
	struct req *req;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC);

	THR_SetRequest(req);
	AZ(wrk->aws->r);
	if (req->sp->sess_step < S_STP_H1_LAST) {
		HTTP1_Session(wrk, req);
		AZ(wrk->v1l);
	} else {
		WRONG("Wrong session step");
	}
	WS_Assert(wrk->aws);
	/* Debug mode: release the cached VCL ref after every task */
	if (DO_DEBUG(DBG_VCLREL) && wrk->vcl != NULL)
		VCL_Rel(&wrk->vcl);
	THR_SetRequest(NULL);
}
void WRK_Thread(struct pool *qp, size_t stacksize, unsigned thread_workspace) { struct worker *w, ww; unsigned char ws[thread_workspace]; uintptr_t u; AN(qp); AN(stacksize); AN(thread_workspace); THR_SetName("cache-worker"); w = &ww; INIT_OBJ(w, WORKER_MAGIC); w->lastused = NAN; AZ(pthread_cond_init(&w->cond, NULL)); WS_Init(w->aws, "wrk", ws, thread_workspace); u = getpagesize(); AN(u); u -= 1U; w->stack_start = (((uintptr_t)&qp) + u) & ~u; /* XXX: assuming stack grows down. */ w->stack_end = w->stack_start - stacksize; VSL(SLT_WorkThread, 0, "%p start", w); Pool_Work_Thread(qp, w); AZ(w->pool); VSL(SLT_WorkThread, 0, "%p end", w); if (w->vcl != NULL) VCL_Rel(&w->vcl); AZ(pthread_cond_destroy(&w->cond)); if (w->nbo != NULL) VBO_Free(&w->nbo); HSH_Cleanup(w); Pool_Sumstat(w); }
http1_reembark(struct worker *wrk, struct req *req) { struct sess *sp; sp = req->sp; CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); http1_setstate(sp, H1BUSY); if (!SES_Reschedule_Req(req)) return; /* Couldn't schedule, ditch */ wrk->stats->busy_wakeup--; wrk->stats->busy_killed++; AN (req->vcl); VCL_Rel(&req->vcl); CNT_AcctLogCharge(wrk->stats, req); Req_Release(req); SES_Delete(sp, SC_OVERLOAD, NAN); DSL(DBG_WAITINGLIST, req->vsl->wid, "kill from waiting list"); usleep(10000); }
int SES_ScheduleReq(struct req *req) { struct sess *sp; struct sesspool *pp; CHECK_OBJ_NOTNULL(req, REQ_MAGIC); sp = req->sp; CHECK_OBJ_NOTNULL(sp, SESS_MAGIC); pp = sp->sesspool; CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC); AN(pp->pool); sp->task.func = ses_req_pool_task; sp->task.priv = req; if (Pool_Task(pp->pool, &sp->task, POOL_QUEUE_FRONT)) { AN (req->vcl); VCL_Rel(&req->vcl); SES_Delete(sp, SC_OVERLOAD, NAN); return (1); } return (0); }
/*
 * End-of-request housekeeping for an HTTP/1 session: park the VCL
 * reference on the worker, charge/log accounting, reset per-request
 * state, and decide whether the session starts a new request
 * immediately, waits for more data, or is gone.
 */
static enum http1_cleanup_ret
http1_cleanup(struct sess *sp, struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(req->vcl, VCL_CONF_MAGIC);

	req->director_hint = NULL;
	req->restarts = 0;

	AZ(req->esi_level);

	/* Cache the request's VCL reference on the worker for reuse */
	if (req->vcl != NULL) {
		if (wrk->vcl != NULL)
			VCL_Rel(&wrk->vcl);
		wrk->vcl = req->vcl;
		req->vcl = NULL;
	}

	/* Charge and log byte counters */
	AN(req->vsl->wid);
	CNT_AcctLogCharge(wrk->stats, req);
	req->req_bodybytes = 0;
	req->resp_hdrbytes = 0;
	req->resp_bodybytes = 0;

	VSL_End(req->vsl);

	/* Session went idle at the last request timestamp, if we have one */
	if (!isnan(req->t_prev) && req->t_prev > 0.)
		sp->t_idle = req->t_prev;
	else
		sp->t_idle = W_TIM_real(wrk);

	req->t_first = NAN;
	req->t_prev = NAN;
	req->t_req = NAN;
	req->req_body_status = REQ_BODY_INIT;

	req->hash_always_miss = 0;
	req->hash_ignore_busy = 0;

	if (sp->fd >= 0 && req->doclose != SC_NULL)
		SES_Close(sp, req->doclose);

	/* Connection closed: release request and session, we are done */
	if (sp->fd < 0) {
		wrk->stats->sess_closed++;
		AZ(req->vcl);
		SES_ReleaseReq(req);
		SES_Delete(sp, SC_NULL, NAN);
		return (SESS_DONE_RET_GONE);
	}

	WS_Reset(req->ws, NULL);
	WS_Reset(wrk->aws, NULL);

	/* A complete pipelined request is already buffered: start now */
	if (HTTP1_Reinit(req->htc) == HTTP1_COMPLETE) {
		AZ(req->vsl->wid);
		req->t_first = req->t_req = sp->t_idle;
		wrk->stats->sess_pipeline++;
		req->acct.req_hdrbytes += Tlen(req->htc->rxbuf);
		return (SESS_DONE_RET_START);
	} else {
		if (Tlen(req->htc->rxbuf))
			wrk->stats->sess_readahead++;
		return (SESS_DONE_RET_WAIT);
	}
}
/*
 * Final request-completion step of the central state machine: charge
 * the session, log timing, reset per-request state, and decide what
 * the session does next (pipeline, wait, linger, or go away).
 * Returns non-zero when this thread is done with the session.
 */
static int
cnt_done(struct sess *sp)
{
	double dh, dp, da;
	int i;
	struct worker *wrk;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	wrk = sp->wrk;
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_ORNULL(sp->vcl, VCL_CONF_MAGIC);

	AZ(wrk->obj);
	AZ(wrk->busyobj);
	sp->director = NULL;
	sp->restarts = 0;

	wrk->busyobj = NULL;

	SES_Charge(sp);

	/* If we did an ESI include, don't mess up our state */
	if (sp->esi_level > 0)
		return (1);

	/* Park the session's VCL reference on the worker for reuse */
	if (sp->vcl != NULL) {
		if (wrk->vcl != NULL)
			VCL_Rel(&wrk->vcl);
		wrk->vcl = sp->vcl;
		sp->vcl = NULL;
	}

	sp->t_end = W_TIM_real(wrk);
	WSP(sp, SLT_Debug, "PHK req %.9f resp %.9f end %.9f open %.9f",
	    sp->t_req, sp->t_resp, sp->t_end, sp->t_open);
	/* xid == 0: no request was processed, so no ReqEnd record */
	if (sp->xid == 0) {
		// sp->t_req = sp->t_end;
		sp->t_resp = sp->t_end;
	} else {
		/* dh = header read, dp = processing, da = delivery */
		dp = sp->t_resp - sp->t_req;
		da = sp->t_end - sp->t_resp;
		dh = sp->t_req - sp->t_open;
		/* XXX: Add StatReq == StatSess */
		/* XXX: Workaround for pipe */
		if (sp->fd >= 0) {
			WSP(sp, SLT_Length, "%ju",
			    (uintmax_t)sp->req_bodybytes);
		}
		WSP(sp, SLT_ReqEnd, "%u %.9f %.9f %.9f %.9f %.9f", sp->xid,
		    sp->t_req, sp->t_end, dh, dp, da);
	}
	sp->xid = 0;
	WSL_Flush(wrk, 0);

	sp->t_open = sp->t_end;
	sp->t_resp = NAN;

	sp->req_bodybytes = 0;

	sp->t_req = NAN;
	sp->hash_always_miss = 0;
	sp->hash_ignore_busy = 0;

	if (sp->fd >= 0 && sp->doclose != NULL) {
		/*
		 * This is an orderly close of the connection; ditch nolinger
		 * before we close, to get queued data transmitted.
		 */
		// XXX: not yet (void)VTCP_linger(sp->fd, 0);
		SES_Close(sp, sp->doclose);
	}

	/* Connection closed: delete the session, we are done with it */
	if (sp->fd < 0) {
		wrk->stats.sess_closed++;
		SES_Delete(sp, NULL);
		return (1);
	}

	/* Periodically push this worker's stats into the global counters */
	if (wrk->stats.client_req >= cache_param->wthread_stats_rate)
		WRK_SumStat(wrk);

	/* Reset the workspace to the session-watermark */
	WS_Reset(sp->ws, sp->ws_ses);
	WS_Reset(wrk->ws, NULL);

	i = HTC_Reinit(sp->htc);
	if (i == 1) {
		/* A complete pipelined request is already buffered */
		wrk->stats.sess_pipeline++;
		sp->step = STP_START;
		return (0);
	}
	if (Tlen(sp->htc->rxbuf)) {
		/* Partial next request read ahead: wait for the rest */
		wrk->stats.sess_readahead++;
		sp->step = STP_WAIT;
		return (0);
	}
	if (cache_param->session_linger > 0) {
		/* Keep the thread on the session for a short while */
		wrk->stats.sess_linger++;
		sp->step = STP_WAIT;
		return (0);
	}
	/* Hand the idle session back to the pool's waiter */
	wrk->stats.sess_herd++;
	Pool_Wait(sp);
	return (1);
}
/*
 * Request-completion step (older variant): log timing, reset
 * per-request state, and decide whether the session pipelines,
 * waits, lingers, or goes away.  Returns non-zero when this thread
 * releases the session.
 */
static int
cnt_done(struct sess *sp)
{
	double dh, dp, da;
	int i;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_ORNULL(sp->vcl, VCL_CONF_MAGIC);
	AZ(sp->obj);
	AZ(sp->vbe);
	sp->director = NULL;
	sp->restarts = 0;

	/* Park the VCL reference on the worker (not while inside ESI) */
	if (sp->vcl != NULL && sp->esis == 0) {
		if (sp->wrk->vcl != NULL)
			VCL_Rel(&sp->wrk->vcl);
		sp->wrk->vcl = sp->vcl;
		sp->vcl = NULL;
	}

	sp->t_end = TIM_real();
	sp->wrk->lastused = sp->t_end;
	/* xid == 0: no request was processed; synthesize timestamps */
	if (sp->xid == 0) {
		sp->t_req = sp->t_end;
		sp->t_resp = sp->t_end;
	}
	/* dh = header read, dp = processing, da = delivery */
	dp = sp->t_resp - sp->t_req;
	da = sp->t_end - sp->t_resp;
	dh = sp->t_req - sp->t_open;
	WSL(sp->wrk, SLT_ReqEnd, sp->id, "%u %.9f %.9f %.9f %.9f %.9f",
	    sp->xid, sp->t_req, sp->t_end, dh, dp, da);
	sp->xid = 0;
	sp->t_open = sp->t_end;
	sp->t_resp = NAN;
	WSL_Flush(sp->wrk, 0);

	/* If we did an ESI include, don't mess up our state */
	if (sp->esis > 0)
		return (1);

	sp->t_req = NAN;

	if (sp->fd >= 0 && sp->doclose != NULL) {
		/*
		 * This is an orderly close of the connection; ditch nolinger
		 * before we close, to get queued data transmitted.
		 */
		TCP_linger(sp->fd, 0);
		vca_close_session(sp, sp->doclose);
	}
	/* Connection closed: charge, detach worker, delete session */
	if (sp->fd < 0) {
		SES_Charge(sp);
		VSL_stats->sess_closed++;
		sp->wrk = NULL;
		SES_Delete(sp);
		return (1);
	}

	/* Reset the workspace to the session-watermark */
	WS_Reset(sp->ws, sp->ws_ses);

	i = HTC_Reinit(sp->htc);
	if (i == 1) {
		/* A complete pipelined request is already buffered */
		VSL_stats->sess_pipeline++;
		sp->step = STP_START;
		return (0);
	}
	if (Tlen(sp->htc->rxbuf)) {
		/* Partial next request read ahead: wait for the rest */
		VSL_stats->sess_readahead++;
		sp->step = STP_WAIT;
		return (0);
	}
	if (params->session_linger > 0) {
		/* Keep the thread on the session for a short while */
		VSL_stats->sess_linger++;
		sp->step = STP_WAIT;
		return (0);
	}
	/* Hand the idle session back to the acceptor */
	VSL_stats->sess_herd++;
	SES_Charge(sp);
	sp->wrk = NULL;
	vca_return_session(sp);
	return (1);
}
/*
 * The work loop run by every worker thread: pull tasks off the pool's
 * queues (front queue first), run them, periodically flush stats, and
 * sleep on the idle queue when there is nothing to do.  Exits when the
 * pool_kiss_of_death task is received.
 */
void
Pool_Work_Thread(struct pool *pp, struct worker *wrk)
{
	struct pool_task *tp;
	struct pool_task tpx, tps;
	int i;

	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	wrk->pool = pp;
	while (1) {
		Lck_Lock(&pp->mtx);
		CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

		WS_Reset(wrk->aws, NULL);
		AZ(wrk->vsl);

		/* The front queue has priority over the back queue */
		tp = VTAILQ_FIRST(&pp->front_queue);
		if (tp != NULL) {
			pp->lqueue--;
			VTAILQ_REMOVE(&pp->front_queue, tp, list);
		} else {
			tp = VTAILQ_FIRST(&pp->back_queue);
			if (tp != NULL)
				VTAILQ_REMOVE(&pp->back_queue, tp, list);
		}

		/* Fold this worker's stats into the pool when idle or due */
		if ((tp == NULL && wrk->stats->summs > 0) ||
		    (wrk->stats->summs >= cache_param->wthread_stats_rate))
			pool_addstat(pp->a_stat, wrk->stats);

		if (tp != NULL) {
			wrk->stats->summs++;
		} else if (pp->b_stat != NULL && pp->a_stat->summs) {
			/* Nothing to do, push pool stats into global pool */
			tps.func = pool_stat_summ;
			tps.priv = pp->a_stat;
			pp->a_stat = pp->b_stat;
			pp->b_stat = NULL;
			tp = &tps;
		} else {
			/* Nothing to do: To sleep, perchance to dream ... */
			if (isnan(wrk->lastused))
				wrk->lastused = VTIM_real();
			wrk->task.func = NULL;
			wrk->task.priv = wrk;
			VTAILQ_INSERT_HEAD(&pp->idle_queue, &wrk->task, list);
			do {
				/* Time out only while holding a VCL ref,
				 * so the idle thread eventually drops it */
				i = Lck_CondWait(&wrk->cond, &pp->mtx,
				    wrk->vcl == NULL ? 0 : wrk->lastused+60.);
				if (i == ETIMEDOUT)
					VCL_Rel(&wrk->vcl);
			} while (wrk->task.func == NULL);
			/* Copy out: the embedded task is reused below */
			tpx = wrk->task;
			tp = &tpx;
			wrk->stats->summs++;
		}
		Lck_Unlock(&pp->mtx);

		if (tp->func == pool_kiss_of_death)
			break;

		do {
			/* A task may queue a follow-up in wrk->task */
			memset(&wrk->task, 0, sizeof wrk->task);
			assert(wrk->pool == pp);
			tp->func(wrk, tp->priv);
			tpx = wrk->task;
			tp = &tpx;
		} while (tp->func != NULL);

		/* cleanup for next task */
		wrk->seen_methods = 0;
	}
	wrk->pool = NULL;
}