static void
vbp_has_poked(struct vbp_target *vt)
{
    unsigned i, j;
    uint64_t u;
    const char *logmsg;
    char bits[10];

    CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC);

    /* Calculate exponential average */
    if (vt->happy & 1) {
        if (vt->rate < AVG_RATE)
            vt->rate += 1.0;
        vt->avg += (vt->last - vt->avg) / vt->rate;
    }

    i = 0;
#define BITMAP(n, c, t, b)	bits[i++] = (vt->n & 1) ? c : '-';
#include "tbl/backend_poll.h"
#undef BITMAP
    bits[i] = '\0';

    u = vt->happy;
    for (i = j = 0; i < vt->probe.window; i++) {
        if (u & 1)
            j++;
        u >>= 1;
    }
    vt->good = j;

    Lck_Lock(&vt->mtx);
    if (vt->backend != NULL) {
        if (vt->good >= vt->probe.threshold) {
            if (vt->backend->healthy)
                logmsg = "Still healthy";
            else {
                logmsg = "Back healthy";
                vt->backend->health_changed = VTIM_real();
            }
            vt->backend->healthy = 1;
        } else {
            if (vt->backend->healthy) {
                logmsg = "Went sick";
                vt->backend->health_changed = VTIM_real();
            } else
                logmsg = "Still sick";
            vt->backend->healthy = 0;
        }
        VSL(SLT_Backend_health, 0, "%s %s %s %u %u %u %.6f %.6f %s",
            vt->backend->display_name, logmsg, bits,
            vt->good, vt->probe.threshold, vt->probe.window,
            vt->last, vt->avg, vt->resp_buf);
        if (!vt->disable) {
            AN(vt->backend->vsc);
            vt->backend->vsc->happy = vt->happy;
        }
    }
    Lck_Unlock(&vt->mtx);
}
static void *
vwp_main(void *priv)
{
    int v;
    struct vwp *vwp;
    struct waiter *w;
    struct waited *wp;
    double now, then;
    int i;

    THR_SetName("cache-poll");
    CAST_OBJ_NOTNULL(vwp, priv, VWP_MAGIC);
    w = vwp->waiter;

    while (1) {
        then = Wait_HeapDue(w, &wp);
        if (wp == NULL)
            i = -1;
        else
            i = (int)floor(1e3 * (then - VTIM_real()));
        assert(vwp->hpoll > 0);
        AN(vwp->pollfd);
        v = poll(vwp->pollfd, vwp->hpoll, i);
        assert(v >= 0);
        now = VTIM_real();
        if (vwp->pollfd[0].revents)
            v--;
        for (i = 1; i < vwp->hpoll;) {
            VSL(SLT_Debug, vwp->pollfd[i].fd,
                "POLL loop i=%d revents=0x%x",
                i, vwp->pollfd[i].revents);
            assert(vwp->pollfd[i].fd != vwp->pipes[0]);
            wp = vwp->idx[i];
            CHECK_OBJ_NOTNULL(wp, WAITED_MAGIC);

            if (v == 0 && Wait_HeapDue(w, NULL) > now)
                break;
            if (vwp->pollfd[i].revents)
                v--;
            then = Wait_When(wp);
            if (then <= now) {
                Wait_HeapDelete(w, wp);
                Wait_Call(w, wp, WAITER_TIMEOUT, now);
                vwp_del(vwp, i);
            } else if (vwp->pollfd[i].revents & POLLIN) {
                assert(wp->fd > 0);
                assert(wp->fd == vwp->pollfd[i].fd);
                Wait_HeapDelete(w, wp);
                Wait_Call(w, wp, WAITER_ACTION, now);
                vwp_del(vwp, i);
            } else {
                i++;
            }
        }
        if (vwp->pollfd[0].revents)
            vwp_dopipe(vwp);
    }
    NEEDLESS_RETURN(NULL);
}
static void
vbf_fetch_thread(struct worker *wrk, void *priv)
{
    struct busyobj *bo;
    enum fetch_step stp;
    double t_hdr, t_body;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
    CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);

    THR_SetBusyobj(bo);
    stp = F_STP_MKBEREQ;
    bo->t_start = VTIM_real();
    bo->t_send = NAN;
    bo->t_sent = NAN;
    bo->t_hdr = NAN;
    bo->t_body = NAN;

    while (stp != F_STP_DONE) {
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
        bo->step = stp;
        switch (stp) {
#define FETCH_STEP(l, U, arg)						\
        case F_STP_##U:							\
            stp = vbf_stp_##l arg;					\
            break;
#include "tbl/steps.h"
#undef FETCH_STEP
        default:
            WRONG("Illegal fetch_step");
        }
        VSLb(bo->vsl, SLT_Debug, "%s -> %s",
            vbf_step_name(bo->step), vbf_step_name(stp));
    }
    assert(WRW_IsReleased(wrk));

    if (bo->state == BOS_FAILED)
        assert(bo->fetch_objcore->flags & OC_F_FAILED);

    if (bo->ims_obj != NULL)
        (void)HSH_DerefObj(&wrk->stats, &bo->ims_obj);

    t_hdr = bo->t_hdr - bo->t_sent;
    t_body = bo->t_body - bo->t_hdr;
    VSLb(bo->vsl, SLT_BereqEnd, "%.9f %.9f %.9f %.9f %.9f %.9f",
        bo->t_start, VTIM_real(), bo->t_sent - bo->t_send,
        t_hdr, t_body, t_hdr + t_body);

    VBO_DerefBusyObj(wrk, &bo);
    THR_SetBusyobj(NULL);
}
static char *
macro_get(const char *b, const char *e)
{
    struct macro *m;
    int l;
    char *retval = NULL;

    l = e - b;

    if (l == 4 && !memcmp(b, "date", l)) {
        double t = VTIM_real();
        retval = malloc(64);
        AN(retval);
        VTIM_format(t, retval);
        return (retval);
    }

    AZ(pthread_mutex_lock(&macro_mtx));
    VTAILQ_FOREACH(m, &macro_list, list)
        if (!memcmp(b, m->name, l) && m->name[l] == '\0')
            break;
    if (m != NULL)
        retval = strdup(m->val);
    AZ(pthread_mutex_unlock(&macro_mtx));
    return (retval);
}
void
SES_Delete(struct sess *sp, enum sess_close reason, double now)
{
    struct acct *b;
    struct sesspool *pp;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    pp = sp->sesspool;
    CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
    AN(pp->pool);

    if (reason != SC_NULL)
        SES_Close(sp, reason);
    assert(sp->fd < 0);

    if (isnan(now))
        now = VTIM_real();
    assert(!isnan(sp->t_open));

    b = &sp->acct_ses;

    VSL(SLT_SessClose, sp->vxid, "%s %.3f %ju %ju %ju %ju %ju %ju",
        sess_close_2str(sp->reason, 0), now - sp->t_open,
        b->req, b->pipe, b->pass, b->fetch, b->hdrbytes, b->bodybytes);
    VSL(SLT_End, sp->vxid, "%s", "");

    Lck_Delete(&sp->mtx);
    MPL_Free(pp->mpl_sess, sp);
}
double
VRT_r_now(VRT_CTX)
{

    (void)ctx;
    return (VTIM_real());
}
void
SES_Delete(struct sess *sp, enum sess_close reason, double now)
{

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

    if (reason != SC_NULL)
        SES_Close(sp, reason);
    assert(sp->fd < 0);

    if (isnan(now))
        now = VTIM_real();
    AZ(isnan(sp->t_open));
    if (now < sp->t_open) {
        VSL(SLT_Debug, sp->vxid,
            "Clock step (now=%f < t_open=%f)", now, sp->t_open);
        if (now + cache_param->clock_step < sp->t_open)
            WRONG("Clock step detected");
        now = sp->t_open; /* Do not log negatives */
    }

    if (reason == SC_NULL)
        reason = (enum sess_close)-sp->fd;

    VSL(SLT_SessClose, sp->vxid, "%s %.3f",
        sess_close_2str(reason, 0), now - sp->t_open);
    VSL(SLT_End, sp->vxid, "%s", "");

    SES_Rel(sp);
}
double
VRT_r_now(const struct sess *sp)
{

    (void)sp;
    return (VTIM_real());
}
static void
vbp_task(struct worker *wrk, void *priv)
{
    struct vbp_target *vt;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CAST_OBJ_NOTNULL(vt, priv, VBP_TARGET_MAGIC);

    AN(vt->running);
    AN(vt->req);
    assert(vt->req_len > 0);

    vbp_start_poke(vt);
    vbp_poke(vt);
    vbp_has_poked(vt);
    VBP_Update_Backend(vt);

    Lck_Lock(&vbp_mtx);
    if (vt->running < 0) {
        assert(vt->heap_idx == BINHEAP_NOIDX);
        vbp_delete(vt);
    } else {
        vt->running = 0;
        if (vt->heap_idx != BINHEAP_NOIDX) {
            vt->due = VTIM_real() + vt->interval;
            binheap_delete(vbp_heap, vt->heap_idx);
            binheap_insert(vbp_heap, vt);
        }
    }
    Lck_Unlock(&vbp_mtx);
}
void
SES_pool_accept_task(struct worker *wrk, void *arg)
{
    struct sesspool *pp;
    struct sess *sp;
    const char *lsockname;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CAST_OBJ_NOTNULL(pp, arg, SESSPOOL_MAGIC);

    /* Turn accepted socket into a session */
    AN(wrk->aws->r);
    sp = ses_new(pp);
    if (sp == NULL) {
        VCA_FailSess(wrk);
        return;
    }
    wrk->stats->s_sess++;

    sp->t_open = VTIM_real();
    sp->t_idle = sp->t_open;
    sp->vxid = VXID_Get(wrk, VSL_CLIENTMARKER);

    lsockname = VCA_SetupSess(wrk, sp);
    ses_vsl_socket(sp, lsockname);

    ses_sess_pool_task(wrk, sp);
}
static void *
vbp_thread(struct worker *wrk, void *priv)
{
    vtim_real now, nxt;
    struct vbp_target *vt;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    AZ(priv);
    Lck_Lock(&vbp_mtx);
    while (1) {
        now = VTIM_real();
        vt = binheap_root(vbp_heap);
        if (vt == NULL) {
            nxt = 8.192 + now;
            (void)Lck_CondWait(&vbp_cond, &vbp_mtx, nxt);
        } else if (vt->due > now) {
            nxt = vt->due;
            vt = NULL;
            (void)Lck_CondWait(&vbp_cond, &vbp_mtx, nxt);
        } else {
            binheap_delete(vbp_heap, vt->heap_idx);
            vt->due = now + vt->interval;
            if (!vt->running) {
                vt->running = 1;
                vt->task.func = vbp_task;
                vt->task.priv = vt;
                if (Pool_Task_Any(&vt->task, TASK_QUEUE_REQ))
                    vt->running = 0;
            }
            binheap_insert(vbp_heap, vt);
        }
    }
    NEEDLESS(Lck_Unlock(&vbp_mtx));
    NEEDLESS(return NULL);
}
double
VRT_r_now(const struct vrt_ctx *ctx)
{

    (void)ctx;
    return (VTIM_real());
}
int
SES_Schedule(struct sess *sp)
{
    struct sesspool *pp;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    AZ(sp->wrk);
    pp = sp->sesspool;
    CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
    AN(pp->pool);
    AZ(sp->wrk);

    sp->task.func = ses_pool_task;
    sp->task.priv = sp;

    if (Pool_Task(pp->pool, &sp->task, POOL_QUEUE_FRONT)) {
        VSC_C_main->client_drop_late++;
        sp->t_idle = VTIM_real();
        if (sp->req != NULL && sp->req->vcl != NULL) {
            /*
             * A session parked on a busy object can come here
             * after it wakes up.  Lose the VCL reference.
             */
            VCL_Rel(&sp->req->vcl);
        }
        SES_Delete(sp, "dropped", sp->t_idle);
        return (1);
    }
    return (0);
}
static void *
ban_lurker(struct worker *wrk, void *priv)
{
    struct vsl_log vsl;
    volatile double d;
    unsigned gen = ban_generation + 1;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    AZ(priv);

    VSL_Setup(&vsl, NULL, 0);

    while (!ban_shutdown) {
        d = ban_lurker_work(wrk, &vsl);
        ban_cleantail();
        if (DO_DEBUG(DBG_LURKER))
            VSLb(&vsl, SLT_Debug, "lurker: sleep = %lf", d);
        d += VTIM_real();
        Lck_Lock(&ban_mtx);
        if (gen == ban_generation) {
            (void)Lck_CondWait(&ban_lurker_cond, &ban_mtx, d);
            ban_batch = 0;
        }
        gen = ban_generation;
        Lck_Unlock(&ban_mtx);
    }
    pthread_exit(0);
    NEEDLESS_RETURN(NULL);
}
int
SES_Schedule(struct sess *sp)
{
    struct sesspool *pp;

    pp = ses_getpool(sp);
    AZ(sp->wrk);
    AN(pp->pool);

    if (Pool_Schedule(pp->pool, sp)) {
        VSC_C_main->client_drop_late++;
        sp->t_idle = VTIM_real();
        if (sp->req->vcl != NULL) {
            /*
             * A session parked on a busy object can come here
             * after it wakes up.  Lose the VCL reference.
             */
            VCL_Rel(&sp->req->vcl);
        }
        SES_Delete(sp, "dropped", sp->t_idle);
        return (1);
    }
    return (0);
}
void
SES_Delete(struct sess *sp, enum sess_close reason, double now)
{
    struct sesspool *pp;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    pp = sp->sesspool;
    CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
    AN(pp->pool);

    if (reason != SC_NULL)
        SES_Close(sp, reason);
    assert(sp->fd < 0);

    if (isnan(now))
        now = VTIM_real();
    AZ(isnan(sp->t_open));

    VSL(SLT_SessClose, sp->vxid, "%s %.3f",
        sess_close_2str(sp->reason, 0), now - sp->t_open);
    VSL(SLT_End, sp->vxid, "%s", "");

    Lck_Delete(&sp->mtx);
    MPL_Free(pp->mpl_sess, sp);
}
static void *
vwk_thread(void *priv)
{
    struct vwk *vwk;
    struct kevent ke[NKEV], *kp;
    int j, n, dotimer;
    double deadline;
    struct sess *sp;

    CAST_OBJ_NOTNULL(vwk, priv, VWK_MAGIC);
    THR_SetName("cache-kqueue");

    vwk->kq = kqueue();
    assert(vwk->kq >= 0);

    j = 0;
    EV_SET(&ke[j], 0, EVFILT_TIMER, EV_ADD, 0, 100, NULL);
    j++;
    EV_SET(&ke[j], vwk->pipes[0], EVFILT_READ, EV_ADD, 0, 0, vwk->pipes);
    j++;
    AZ(kevent(vwk->kq, ke, j, NULL, 0, NULL));

    vwk->nki = 0;
    while (1) {
        dotimer = 0;
        n = kevent(vwk->kq, vwk->ki, vwk->nki, ke, NKEV, NULL);
        assert(n >= 1 && n <= NKEV);
        vwk->nki = 0;
        for (kp = ke, j = 0; j < n; j++, kp++) {
            if (kp->filter == EVFILT_TIMER) {
                dotimer = 1;
                continue;
            }
            assert(kp->filter == EVFILT_READ);
            vwk_kev(vwk, kp);
        }
        if (!dotimer)
            continue;

        /*
         * Make sure we have no pending changes for the fd's
         * we are about to close, in case the accept(2) in the
         * other thread creates new fd's between our close and
         * the kevent(2) at the top of this loop, the kernel
         * would not know we meant "the old fd of this number".
         */
        vwk_kq_flush(vwk);

        deadline = VTIM_real() - params->sess_timeout;
        for (;;) {
            sp = VTAILQ_FIRST(&vwk->sesshead);
            if (sp == NULL)
                break;
            if (sp->t_open > deadline)
                break;
            VTAILQ_REMOVE(&vwk->sesshead, sp, list);
            // XXX: not yet (void)VTCP_linger(sp->fd, 0);
            SES_Delete(sp, "timeout");
        }
    }
}
void
MGT_Run(void)
{
    struct sigaction sac;
    struct vev *e;
    int i;

    mgt_uptime_t0 = VTIM_real();

    e = vev_new();
    XXXAN(e);
    e->callback = mgt_uptime;
    e->timeout = 1.0;
    e->name = "mgt_uptime";
    AZ(vev_add(mgt_evb, e));

    e = vev_new();
    XXXAN(e);
    e->sig = SIGTERM;
    e->callback = mgt_sigint;
    e->name = "mgt_sigterm";
    AZ(vev_add(mgt_evb, e));

    e = vev_new();
    XXXAN(e);
    e->sig = SIGINT;
    e->callback = mgt_sigint;
    e->name = "mgt_sigint";
    AZ(vev_add(mgt_evb, e));

#ifdef HAVE_SETPROCTITLE
    setproctitle("Varnish-Mgr %s", heritage.name);
#endif

    memset(&sac, 0, sizeof sac);
    sac.sa_handler = SIG_IGN;
    sac.sa_flags = SA_RESTART;

    AZ(sigaction(SIGPIPE, &sac, NULL));
    AZ(sigaction(SIGHUP, &sac, NULL));

    if (!d_flag && !mgt_has_vcl())
        MGT_complain(C_ERR, "No VCL loaded yet");
    else if (!d_flag) {
        mgt_launch_child(NULL);
        if (child_state != CH_RUNNING) {
            // XXX correct? or 0?
            exit_status = 2;
            return;
        }
    }

    mgt_SHM_Commit();

    i = vev_schedule(mgt_evb);
    if (i != 0)
        MGT_complain(C_ERR, "vev_schedule() = %d", i);

    MGT_complain(C_INFO, "manager dies");
}
VCL_VOID
vmod_timestamp(const struct vrt_ctx *ctx, VCL_STRING label)
{

    CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

    if (label == NULL)
        return;
    if (*label == '\0')
        return;

    if (ctx->bo != NULL && ctx->req == NULL) {
        /* Called from backend vcl methods */
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        VSLb_ts_busyobj(ctx->bo, label, VTIM_real());
    } else if (ctx->req != NULL) {
        /* Called from request vcl methods */
        CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);
        VSLb_ts_req(ctx->req, label, VTIM_real());
    }
}
VCL_DURATION
vmod_elapsed(VRT_CTX)
{

    /* Thread check: background (fetch) thread vs. client thread */
    if (ctx->req == NULL || ctx->req->magic != REQ_MAGIC) {
        /* Background thread */
        if (isnan(ctx->bo->t_first) || ctx->bo->t_first == 0.)
            return (0.);
        return (VTIM_real() - ctx->bo->t_first);
    } else {
        /* Client thread */
        if (isnan(ctx->req->t_first) || ctx->req->t_first == 0.)
            return (0.);
        return (VTIM_real() - ctx->req->t_first);
    }
}
static void
vbp_update_backend(struct vbp_target *vt)
{
    unsigned i;
    char bits[10];
    const char *logmsg;

    CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC);

    Lck_Lock(&vbp_mtx);
    if (vt->backend != NULL) {
        i = 0;
#define BITMAP(n, c, t, b)						\
        bits[i++] = (vt->n & 1) ? c : '-';
#include "tbl/backend_poll.h"
#undef BITMAP
        bits[i] = '\0';

        if (vt->good >= vt->threshold) {
            if (vt->backend->healthy)
                logmsg = "Still healthy";
            else {
                logmsg = "Back healthy";
                vt->backend->health_changed = VTIM_real();
            }
            vt->backend->healthy = 1;
        } else {
            if (vt->backend->healthy) {
                logmsg = "Went sick";
                vt->backend->health_changed = VTIM_real();
            } else
                logmsg = "Still sick";
            vt->backend->healthy = 0;
        }
        VSL(SLT_Backend_health, 0, "%s %s %s %u %u %u %.6f %.6f %s",
            vt->backend->display_name, logmsg, bits,
            vt->good, vt->threshold, vt->window,
            vt->last, vt->avg, vt->resp_buf);
        if (vt->backend != NULL && vt->backend->vsc != NULL)
            vt->backend->vsc->happy = vt->happy;
    }
    Lck_Unlock(&vbp_mtx);
}
static int
mgt_uptime(const struct vev *e, int what)
{

    (void)e;
    (void)what;
    AN(VSC_C_mgt);
    VSC_C_mgt->uptime = static_VSC_C_mgt.uptime =
        (uint64_t)(VTIM_real() - mgt_uptime_t0);
    if (heritage.vsm != NULL)
        VSM_common_ageupdate(heritage.vsm);
    return (0);
}
static void
tst_delta()
{
    double m_begin, m_end;
    double r_begin, r_end;
    const double ref = 1;
    int err = 0;

    r_begin = VTIM_real();
    m_begin = VTIM_mono();
    VTIM_sleep(ref);
    r_end = VTIM_real();
    m_end = VTIM_mono();

    err += tst_delta_check("VTIM_mono", m_begin, m_end, ref);
    err += tst_delta_check("VTIM_real", r_begin, r_end, ref);

    if (err) {
        printf("%d time delta test errors\n", err);
        exit(4);
    }
}
static void
bench()
{
    double s, e, t;
    int i;

    t = 0;
    s = VTIM_real();
    for (i = 0; i < 100000; i++)
        t += VTIM_real();
    e = VTIM_real();
    printf("real: %fs / %d = %fns - tst val %f\n",
        e - s, i, 1e9 * (e - s) / i, t);

    t = 0;
    s = VTIM_real();
    for (i = 0; i < 100000; i++)
        t += VTIM_mono();
    e = VTIM_real();
    printf("mono: %fs / %d = %fns - tst val %f\n",
        e - s, i, 1e9 * (e - s) / i, t);
}
void
VRT_DisableDirector(VCL_BACKEND d)
{
    struct vcldir *vdir;

    CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
    vdir = d->vdir;
    CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

    vdir->admin_health = VDI_AH_DELETED;
    vdir->dir->sick |= 0x04;
    vdir->health_changed = VTIM_real();
}
static void *
vwe_thread(void *priv)
{
    struct epoll_event ev[NEEV], *ep;
    struct sess *sp;
    char junk;
    double now, deadline;
    int dotimer, i, n;
    struct vwe *vwe;

    CAST_OBJ_NOTNULL(vwe, priv, VWE_MAGIC);
    THR_SetName("cache-epoll");

    vwe->epfd = epoll_create(1);
    assert(vwe->epfd >= 0);

    vwe_modadd(vwe, vwe->pipes[0], vwe->pipes, EPOLL_CTL_ADD);
    vwe_modadd(vwe, vwe->timer_pipes[0], vwe->timer_pipes, EPOLL_CTL_ADD);

    while (1) {
        dotimer = 0;
        n = epoll_wait(vwe->epfd, ev, NEEV, -1);
        now = VTIM_real();
        for (ep = ev, i = 0; i < n; i++, ep++) {
            if (ep->data.ptr == vwe->timer_pipes &&
                (ep->events == EPOLLIN || ep->events == EPOLLPRI)) {
                assert(read(vwe->timer_pipes[0], &junk, 1));
                dotimer = 1;
            } else
                vwe_eev(vwe, ep, now);
        }
        if (!dotimer)
            continue;

        /* check for timeouts */
        deadline = now - cache_param->timeout_idle;
        for (;;) {
            sp = VTAILQ_FIRST(&vwe->sesshead);
            if (sp == NULL)
                break;
            if (sp->t_idle > deadline)
                break;
            VTAILQ_REMOVE(&vwe->sesshead, sp, list);
            // XXX: not yet VTCP_linger(sp->fd, 0);
            SES_Delete(sp, SC_RX_TIMEOUT, now);
        }
    }
    return (NULL);
}
void
VRT_SetHealth(VCL_BACKEND d, int health)
{
    struct vcldir *vdir;

    CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
    vdir = d->vdir;
    CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

    if (health)
        vdir->dir->sick &= ~0x01;
    else
        vdir->dir->sick |= 0x01;
    vdir->health_changed = VTIM_real();
}
static void
mgt_panic_record(pid_t r)
{
    char time_str[30];

    if (child_panic != NULL)
        VSB_delete(child_panic);
    child_panic = VSB_new_auto();
    AN(child_panic);
    VTIM_format(VTIM_real(), time_str);
    VSB_printf(child_panic, "Last panic at: %s\n", time_str);
    VSB_quote(child_panic, heritage.panic_str,
        strnlen(heritage.panic_str, heritage.panic_str_len),
        VSB_QUOTE_NONL);
    AZ(VSB_finish(child_panic));
    MGT_complain(C_ERR, "Child (%jd) %s",
        (intmax_t)r, VSB_data(child_panic));
}
void
SES_Delete(struct sess *sp, const char *reason, double now)
{
    struct acct *b;
    struct worker *wrk;
    struct sesspool *pp;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    pp = sp->sesspool;
    CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
    AN(pp->pool);
    wrk = sp->wrk;
    CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);

    if (reason != NULL)
        SES_Close(sp, reason);
    if (isnan(now))
        now = VTIM_real();
    assert(!isnan(sp->t_open));
    assert(sp->fd < 0);

    if (sp->req != NULL) {
        AZ(sp->req->vcl);
        SES_ReleaseReq(sp);
    }

    if (*sp->addr == '\0')
        strcpy(sp->addr, "-");
    if (*sp->port == '\0')
        strcpy(sp->port, "-");

    b = &sp->acct_ses;

    VSL(SLT_StatSess, sp->vsl_id, "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju",
        sp->addr, sp->port, now - sp->t_open, b->sess, b->req,
        b->pipe, b->pass, b->fetch, b->hdrbytes, b->bodybytes);

    MPL_Free(pp->mpl_sess, sp);
}
static void *
exp_thread(struct worker *wrk, void *priv)
{
    struct objcore *oc;
    double t = 0, tnext = 0;
    struct exp_priv *ep;
    unsigned flags = 0;

    CAST_OBJ_NOTNULL(ep, priv, EXP_PRIV_MAGIC);
    ep->wrk = wrk;
    VSL_Setup(&ep->vsl, NULL, 0);
    ep->heap = binheap_new(NULL, object_cmp, object_update);
    AN(ep->heap);

    while (1) {

        Lck_Lock(&ep->mtx);
        oc = VSTAILQ_FIRST(&ep->inbox);
        CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
        if (oc != NULL) {
            assert(oc->refcnt >= 1);
            VSTAILQ_REMOVE(&ep->inbox, oc, objcore, exp_list);
            VSC_C_main->exp_received++;
            tnext = 0;
            flags = oc->exp_flags;
            if (flags & OC_EF_REMOVE)
                oc->exp_flags = 0;
            else
                oc->exp_flags &= OC_EF_REFD;
        } else if (tnext > t) {
            VSL_Flush(&ep->vsl, 0);
            Pool_Sumstat(wrk);
            (void)Lck_CondWait(&ep->condvar, &ep->mtx, tnext);
        }
        Lck_Unlock(&ep->mtx);

        t = VTIM_real();

        if (oc != NULL)
            exp_inbox(ep, oc, flags);
        else
            tnext = exp_expire(ep, t);
    }
    NEEDLESS(return NULL);
}