/*
 * Backend-probe scheduler: one thread drives all probes off a binary
 * heap ordered by due time, sleeping on vbp_cond until the earliest
 * deadline (or 8.192 s when the heap is empty).  The return type was
 * missing from the flattened listing; NEEDLESS(return NULL) implies
 * a bgthread returning void *.
 */
static void *
vbp_thread(struct worker *wrk, void *priv)
{
    vtim_real now, nxt;
    struct vbp_target *vt;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    AZ(priv);
    Lck_Lock(&vbp_mtx);
    while (1) {
        now = VTIM_real();
        vt = binheap_root(vbp_heap);
        if (vt == NULL) {
            nxt = 8.192 + now;
            (void)Lck_CondWait(&vbp_cond, &vbp_mtx, nxt);
        } else if (vt->due > now) {
            nxt = vt->due;
            vt = NULL;
            (void)Lck_CondWait(&vbp_cond, &vbp_mtx, nxt);
        } else {
            binheap_delete(vbp_heap, vt->heap_idx);
            vt->due = now + vt->interval;
            if (!vt->running) {
                vt->running = 1;
                vt->task.func = vbp_task;
                vt->task.priv = vt;
                /* if no pool thread takes it, retry next round */
                if (Pool_Task_Any(&vt->task, TASK_QUEUE_REQ))
                    vt->running = 0;
            }
            binheap_insert(vbp_heap, vt);
        }
    }
    NEEDLESS(Lck_Unlock(&vbp_mtx));
    NEEDLESS(return NULL);
}
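/*
 * A minimal, self-contained sketch of the same timer pattern in plain
 * pthreads, under the simplifying assumption of a single periodic task
 * instead of a heap of probe targets.  All names below (timer_thread,
 * due) are illustrative, not Varnish API.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct timespec due;        /* earliest deadline, guarded by mtx */

static void *
timer_thread(void *priv)
{
    struct timespec now;

    (void)priv;
    pthread_mutex_lock(&mtx);
    for (;;) {
        clock_gettime(CLOCK_REALTIME, &now);
        if (now.tv_sec < due.tv_sec ||
            (now.tv_sec == due.tv_sec && now.tv_nsec < due.tv_nsec)) {
            /* Not due yet: sleep until the deadline.  Timeouts and
             * signals both just cause a re-check, so a rescheduled
             * deadline is picked up here. */
            (void)pthread_cond_timedwait(&cond, &mtx, &due);
            continue;
        }
        /* Due: vbp_thread hands the work to a pool task at this point;
         * the sketch runs it inline, then pushes the deadline forward,
         * like vt->due = now + vt->interval. */
        printf("tick\n");
        due.tv_sec += 1;        /* illustrative 1 s interval */
    }
    /* not reached */
    pthread_mutex_unlock(&mtx);
    return (NULL);
}

int
main(void)
{
    pthread_t thr;

    clock_gettime(CLOCK_REALTIME, &due);    /* first tick due now */
    pthread_create(&thr, NULL, timer_thread, NULL);
    pthread_join(thr, NULL);                /* loops forever */
    return (0);
}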
/*
 * Ban lurker: works in unlocked batches, then naps.  The return type
 * was missing from the flattened listing; the NEEDLESS_RETURN(NULL)
 * implies a bgthread returning void *.
 */
static void *
ban_lurker(struct worker *wrk, void *priv)
{
    struct vsl_log vsl;
    volatile double d;
    unsigned gen = ban_generation + 1;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    AZ(priv);

    VSL_Setup(&vsl, NULL, 0);

    while (!ban_shutdown) {
        d = ban_lurker_work(wrk, &vsl);
        ban_cleantail();
        if (DO_DEBUG(DBG_LURKER))
            VSLb(&vsl, SLT_Debug, "lurker: sleep = %lf", d);
        d += VTIM_real();
        Lck_Lock(&ban_mtx);
        /* sleep only if no new ban arrived while we worked */
        if (gen == ban_generation) {
            (void)Lck_CondWait(&ban_lurker_cond, &ban_mtx, d);
            ban_batch = 0;
        }
        gen = ban_generation;
        Lck_Unlock(&ban_mtx);
    }

    pthread_exit(0);
    NEEDLESS_RETURN(NULL);
}
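/*
 * Sketch of the generation-counter guard around the lurker's sleep,
 * reduced to plain pthreads: the counter is compared under the mutex
 * before waiting, so a signal that raced with the unlocked work phase
 * is never lost.  All names below are illustrative.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned generation;    /* bumped (and cond signalled) on new work */

static void
lurker_loop(void)
{
    unsigned gen = generation + 1;
    struct timespec ts;

    for (;;) {
        /* ... one batch of work, done without the mutex ... */
        pthread_mutex_lock(&mtx);
        if (gen == generation) {
            /* Nothing new arrived during the work phase: nap,
             * with a deadline as in ban_lurker. */
            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += 1;
            (void)pthread_cond_timedwait(&cond, &mtx, &ts);
        }
        gen = generation;
        pthread_mutex_unlock(&mtx);
    }
}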
void
VBO_waitstate(struct busyobj *bo, enum busyobj_state_e want)
{
    Lck_Lock(&bo->mtx);
    while (1) {
        if (bo->state >= want)
            break;
        /* third argument 0: wait with no deadline */
        (void)Lck_CondWait(&bo->cond, &bo->mtx, 0);
    }
    Lck_Unlock(&bo->mtx);
}
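/*
 * A minimal sketch, in plain pthreads rather than the Lck_* wrappers,
 * of the predicate-wait idiom VBO_waitstate implements: hold the lock,
 * loop until the observed state reaches the wanted one, and re-check
 * after every wakeup so spurious wakeups are harmless.  The struct and
 * function names below are illustrative, not Varnish API.
 */
#include <pthread.h>

struct state_box {
    pthread_mutex_t mtx;
    pthread_cond_t cond;
    int state;        /* only ever increases */
};

static void
wait_for_state(struct state_box *sb, int want)
{
    pthread_mutex_lock(&sb->mtx);
    while (sb->state < want)    /* re-check on every wakeup */
        pthread_cond_wait(&sb->cond, &sb->mtx);
    pthread_mutex_unlock(&sb->mtx);
}

static void
advance_state(struct state_box *sb, int st)
{
    pthread_mutex_lock(&sb->mtx);
    if (st > sb->state) {
        sb->state = st;
        /* broadcast: several threads may wait for different states,
         * as several requests may wait on one busyobj */
        pthread_cond_broadcast(&sb->cond);
    }
    pthread_mutex_unlock(&sb->mtx);
}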
ssize_t
VBO_waitlen(struct busyobj *bo, ssize_t l)
{
    Lck_Lock(&bo->mtx);
    assert(l <= bo->fetch_obj->len || bo->state == BOS_FAILED);
    while (1) {
        if (bo->fetch_obj->len > l || bo->state >= BOS_FINISHED)
            break;
        (void)Lck_CondWait(&bo->cond, &bo->mtx, 0);
    }
    l = bo->fetch_obj->len;
    Lck_Unlock(&bo->mtx);
    return (l);
}
ssize_t
VBO_waitlen(struct busyobj *bo, ssize_t l)
{
    Lck_Lock(&bo->mtx);
    if (bo->state <= BOS_FINISHED)
        assert(l <= bo->fetch_obj->len);
    while (1) {
        if (bo->fetch_obj->len > l || bo->state >= BOS_FINISHED) {
            l = bo->fetch_obj->len;    /* sample while holding the lock */
            break;
        }
        /* the deadline argument is a double; 0 (not NULL) means no timeout */
        (void)Lck_CondWait(&bo->cond, &bo->mtx, 0);
    }
    Lck_Unlock(&bo->mtx);
    return (l);
}
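/*
 * The two VBO_waitlen variants above differ in where the returned
 * length is sampled: the second takes it inside the locked region, at
 * the moment the wait predicate fires.  A reduced pthread sketch of
 * that shape, with illustrative names (struct buf, wait_longer_than):
 */
#include <pthread.h>
#include <sys/types.h>

struct buf {
    pthread_mutex_t mtx;
    pthread_cond_t cond;
    ssize_t len;        /* grows as the body arrives */
    int done;           /* no further growth after this */
};

static ssize_t
wait_longer_than(struct buf *b, ssize_t l)
{
    pthread_mutex_lock(&b->mtx);
    while (b->len <= l && !b->done)
        pthread_cond_wait(&b->cond, &b->mtx);
    l = b->len;        /* sample inside the critical section */
    pthread_mutex_unlock(&b->mtx);
    return (l);
}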
/*
 * Object expiry thread: drains an inbox of objcores first; only when
 * the inbox is empty does it sleep until the next heap deadline.  The
 * return type was missing from the flattened listing; NEEDLESS(return
 * NULL) implies a bgthread returning void *.
 */
static void *
exp_thread(struct worker *wrk, void *priv)
{
    struct objcore *oc;
    double t = 0, tnext = 0;
    struct exp_priv *ep;
    unsigned flags = 0;

    CAST_OBJ_NOTNULL(ep, priv, EXP_PRIV_MAGIC);
    ep->wrk = wrk;
    VSL_Setup(&ep->vsl, NULL, 0);
    ep->heap = binheap_new(NULL, object_cmp, object_update);
    AN(ep->heap);
    while (1) {

        Lck_Lock(&ep->mtx);
        oc = VSTAILQ_FIRST(&ep->inbox);
        CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
        if (oc != NULL) {
            assert(oc->refcnt >= 1);
            VSTAILQ_REMOVE(&ep->inbox, oc, objcore, exp_list);
            VSC_C_main->exp_received++;
            tnext = 0;        /* heap may change: recompute deadline */
            flags = oc->exp_flags;
            if (flags & OC_EF_REMOVE)
                oc->exp_flags = 0;
            else
                oc->exp_flags &= OC_EF_REFD;
        } else if (tnext > t) {
            VSL_Flush(&ep->vsl, 0);
            Pool_Sumstat(wrk);
            (void)Lck_CondWait(&ep->condvar, &ep->mtx, tnext);
        }
        Lck_Unlock(&ep->mtx);

        t = VTIM_real();

        if (oc != NULL)
            exp_inbox(ep, oc, flags);
        else
            tnext = exp_expire(ep, t);
    }
    NEEDLESS(return NULL);
}
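/*
 * Sketch of exp_thread's two-source loop in plain pthreads: pop from
 * an inbox first, and only when it is empty sleep until the next heap
 * deadline.  handle_inbox() and expire_due() are hypothetical stubs
 * standing in for exp_inbox()/exp_expire(); all names illustrative.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

struct item { struct item *next; };
static struct item *inbox;    /* guarded by mtx; cond signalled on insert */

static void handle_inbox(struct item *it) { (void)it; /* hypothetical */ }
static time_t expire_due(void) { return (time(NULL) + 1); /* hypothetical */ }

static void
expiry_loop(void)
{
    struct item *it;
    time_t tnext = 0;    /* next heap deadline; 0 forces a recompute */
    struct timespec ts;

    for (;;) {
        pthread_mutex_lock(&mtx);
        it = inbox;
        if (it != NULL) {
            /* Inbox first: newly touched objects may move the
             * earliest deadline, so force a recompute. */
            inbox = it->next;
            tnext = 0;
        } else if (tnext > time(NULL)) {
            /* Nothing due: sleep until tnext; producers signal
             * cond when they insert into the inbox. */
            ts.tv_sec = tnext;
            ts.tv_nsec = 0;
            (void)pthread_cond_timedwait(&cond, &mtx, &ts);
        }
        pthread_mutex_unlock(&mtx);
        if (it != NULL)
            handle_inbox(it);
        else
            tnext = expire_due();
    }
}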
struct busyobj *
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc, int pass)
{
    struct busyobj *bo;

    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

    bo = VBO_GetBusyObj(wrk, req);
    CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
    bo->refcount = 2;
    oc->busyobj = bo;

    CHECK_OBJ_NOTNULL(bo->vcl, VCL_CONF_MAGIC);

    bo->do_pass = pass;

    bo->vary = req->vary_b;
    req->vary_b = NULL;

    AZ(bo->fetch_objcore);
    bo->fetch_objcore = oc;

    AZ(bo->req);
    bo->req = req;

    bo->fetch_task.priv = bo;
    bo->fetch_task.func = vbf_fetch_thread;

    /* if no pool thread is available, do the fetch in this thread */
    if (Pool_Task(wrk->pool, &bo->fetch_task, POOL_QUEUE_FRONT))
        vbf_fetch_thread(wrk, bo);

    /* wait until the fetch thread has released the request */
    Lck_Lock(&bo->mtx);
    while (1) {
        if (bo->req == NULL)
            break;
        /* the deadline argument is a double; 0 (not NULL) means no timeout */
        (void)Lck_CondWait(&bo->cond, &bo->mtx, 0);
    }
    Lck_Unlock(&bo->mtx);
    return (bo);
}
static void *
pool_herder(void *priv)
{
    struct pool *pp;
    struct pool_task *pt;
    double t_idle;
    struct worker *wrk;

    CAST_OBJ_NOTNULL(pp, priv, POOL_MAGIC);

    while (1) {
        /* Make more threads if needed and allowed */
        if (pp->nthr < cache_param->wthread_min ||
            (pp->dry && pp->nthr < cache_param->wthread_max)) {
            pool_breed(pp);
            continue;
        }
        assert(pp->nthr >= cache_param->wthread_min);

        if (pp->nthr > cache_param->wthread_min) {

            t_idle = VTIM_real() - cache_param->wthread_timeout;

            Lck_Lock(&pp->mtx);
            /* XXX: unsafe counters */
            VSC_C_main->sess_queued += pp->nqueued;
            VSC_C_main->sess_dropped += pp->ndropped;
            pp->nqueued = pp->ndropped = 0;

            wrk = NULL;
            pt = VTAILQ_LAST(&pp->idle_queue, taskhead);
            if (pt != NULL) {
                AZ(pt->func);
                CAST_OBJ_NOTNULL(wrk, pt->priv, WORKER_MAGIC);

                if (wrk->lastused < t_idle ||
                    pp->nthr > cache_param->wthread_max) {
                    /* Give it a kiss on the cheek... */
                    VTAILQ_REMOVE(&pp->idle_queue,
                        &wrk->task, list);
                    wrk->task.func = pool_kiss_of_death;
                    AZ(pthread_cond_signal(&wrk->cond));
                } else
                    wrk = NULL;
            }
            Lck_Unlock(&pp->mtx);

            if (wrk != NULL) {
                pp->nthr--;
                Lck_Lock(&pool_mtx);
                VSC_C_main->threads--;
                VSC_C_main->threads_destroyed++;
                Lck_Unlock(&pool_mtx);
                VTIM_sleep(cache_param->wthread_destroy_delay);
                continue;
            }
        }
        Lck_Lock(&pp->mtx);
        if (!pp->dry) {
            (void)Lck_CondWait(&pp->herder_cond, &pp->mtx,
                VTIM_real() + 5);
        } else {
            /* XXX: unsafe counters */
            VSC_C_main->threads_limited++;
            pp->dry = 0;
        }
        Lck_Unlock(&pp->mtx);
    }
    NEEDLESS_RETURN(NULL);
}
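/*
 * Sketch of the herder's sleep in plain pthreads: nap for at most a
 * fixed interval, but skip the wait entirely when demand (pp->dry in
 * the original) was flagged while the herder was busy.  Illustrative
 * names only.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t herder_cond = PTHREAD_COND_INITIALIZER;
static int dry;        /* set by workers that found no idle peer */

static void
herder_wait(void)
{
    struct timespec ts;

    pthread_mutex_lock(&mtx);
    if (!dry) {
        /* No demand: nap for at most 5 s, mirroring the
         * VTIM_real() + 5 deadline in pool_herder. */
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 5;
        (void)pthread_cond_timedwait(&herder_cond, &mtx, &ts);
    } else {
        /* Demand was flagged while we were busy: clear it and
         * return immediately so the caller can breed threads. */
        dry = 0;
    }
    pthread_mutex_unlock(&mtx);
}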
void
Pool_Work_Thread(struct pool *pp, struct worker *wrk)
{
    struct pool_task *tp;
    struct pool_task tpx, tps;
    int i;

    CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
    wrk->pool = pp;
    while (1) {
        Lck_Lock(&pp->mtx);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

        WS_Reset(wrk->aws, NULL);
        AZ(wrk->vsl);

        tp = VTAILQ_FIRST(&pp->front_queue);
        if (tp != NULL) {
            pp->lqueue--;
            VTAILQ_REMOVE(&pp->front_queue, tp, list);
        } else {
            tp = VTAILQ_FIRST(&pp->back_queue);
            if (tp != NULL)
                VTAILQ_REMOVE(&pp->back_queue, tp, list);
        }

        if ((tp == NULL && wrk->stats->summs > 0) ||
            (wrk->stats->summs >= cache_param->wthread_stats_rate))
            pool_addstat(pp->a_stat, wrk->stats);

        if (tp != NULL) {
            wrk->stats->summs++;
        } else if (pp->b_stat != NULL && pp->a_stat->summs) {
            /* Nothing to do, push pool stats into global pool */
            tps.func = pool_stat_summ;
            tps.priv = pp->a_stat;
            pp->a_stat = pp->b_stat;
            pp->b_stat = NULL;
            tp = &tps;
        } else {
            /* Nothing to do: To sleep, perchance to dream ... */
            if (isnan(wrk->lastused))
                wrk->lastused = VTIM_real();
            wrk->task.func = NULL;
            wrk->task.priv = wrk;
            VTAILQ_INSERT_HEAD(&pp->idle_queue, &wrk->task, list);
            do {
                i = Lck_CondWait(&wrk->cond, &pp->mtx,
                    wrk->vcl == NULL ? 0 : wrk->lastused + 60.);
                if (i == ETIMEDOUT)
                    VCL_Rel(&wrk->vcl);
            } while (wrk->task.func == NULL);
            tpx = wrk->task;
            tp = &tpx;
            wrk->stats->summs++;
        }
        Lck_Unlock(&pp->mtx);

        if (tp->func == pool_kiss_of_death)
            break;

        do {
            memset(&wrk->task, 0, sizeof wrk->task);
            assert(wrk->pool == pp);
            tp->func(wrk, tp->priv);
            tpx = wrk->task;
            tp = &tpx;
        } while (tp->func != NULL);

        /* cleanup for next task */
        wrk->seen_methods = 0;
    }
    wrk->pool = NULL;
}
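/*
 * Sketch of the "park on a private condvar" idiom from
 * Pool_Work_Thread: each idle worker sleeps on its own condition
 * variable, and a dispatcher writes the task into the worker before
 * signalling exactly that worker, so wakeups are targeted and the task
 * is copied out under the lock (cf. tpx above).  Illustrative names,
 * not Varnish API.
 */
#include <pthread.h>
#include <stddef.h>

struct worker_slot {
    pthread_cond_t cond;    /* private to this worker */
    void (*func)(void *);
    void *priv;
};

/* one pool mutex guards the idle list and every slot's func/priv */
static pthread_mutex_t pool_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
worker_park(struct worker_slot *ws)
{
    void (*f)(void *);
    void *p;

    pthread_mutex_lock(&pool_mtx);
    ws->func = NULL;
    /* ...insert ws at the head of an idle list here (LIFO keeps
     * recently used, cache-warm threads busiest)... */
    while (ws->func == NULL)
        pthread_cond_wait(&ws->cond, &pool_mtx);
    f = ws->func;        /* copy out under the lock */
    p = ws->priv;
    pthread_mutex_unlock(&pool_mtx);
    f(p);                /* run the assigned task */
}

static void
dispatch_to(struct worker_slot *ws, void (*f)(void *), void *p)
{
    pthread_mutex_lock(&pool_mtx);
    ws->func = f;
    ws->priv = p;
    pthread_cond_signal(&ws->cond);    /* wake only this worker */
    pthread_mutex_unlock(&pool_mtx);
}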