void MPL_Free(struct mempool *mpl, void *item) { struct memitem *mi; CHECK_OBJ_NOTNULL(mpl, MEMPOOL_MAGIC); AN(item); mi = (void*)((uintptr_t)item - sizeof(*mi)); CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC); memset(item, 0, mi->size); Lck_Lock(&mpl->mtx); mpl->vsc->frees++; mpl->vsc->live = --mpl->live; if (mi->size < *mpl->cur_size) { mpl->vsc->toosmall++; VTAILQ_INSERT_HEAD(&mpl->surplus, mi, list); } else { mpl->vsc->pool = ++mpl->n_pool; mi->touched = mpl->t_now; VTAILQ_INSERT_HEAD(&mpl->list, mi, list); } Lck_Unlock(&mpl->mtx); }
/*
 * Tear down a session: emit its accounting log record, then either free
 * the sessmem (if the workspace parameter changed since allocation) or
 * clean and recycle it, update global session statistics, and try to
 * pre-create spare sessmem so the acceptor thread does not have to.
 */
void
SES_Delete(struct sess *sp)
{
	struct acct *b = &sp->acct_ses;
	struct sessmem *sm;
	static char noaddr[] = "-";

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	sm = sp->mem;
	CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC);
	AZ(sp->obj);
	AZ(sp->vcl);
	assert(!isnan(b->first));
	assert(!isnan(sp->t_end));

	/* Substitute "-" so the log line always has both fields. */
	if (sp->addr == NULL)
		sp->addr = noaddr;
	if (sp->port == NULL)
		sp->port = noaddr;
	VSL(SLT_StatSess, sp->id, "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju",
	    sp->addr, sp->port, sp->t_end - b->first,
	    b->sess, b->req, b->pipe, b->pass,
	    b->fetch, b->hdrbytes, b->bodybytes);

	if (sm->workspace != params->sess_workspace) {
		/* Workspace size changed since allocation: discard. */
		free(sm);
		sm = NULL;
	} else {
		/* Clean and prepare for reuse */
		ses_setup(sm);
		Lck_Lock(&ses_mem_mtx);
		/* 1 - ses_qp selects the "filling" side of the two lists. */
		VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list);
		Lck_Unlock(&ses_mem_mtx);
	}
	SES_ClearReqBodyCache(sp);

	/* Update statistics */
	Lck_Lock(&stat_mtx);
	if (sm == NULL)
		VSC_C_main->n_sess_mem--;	/* was freed above */
	n_sess_rel++;
	VSC_C_main->n_sess = n_sess_grab - n_sess_rel;
	Lck_Unlock(&stat_mtx);

	/* Try to precreate some ses-mem so the acceptor will not have to */
	if (VSC_C_main->n_sess_mem < VSC_C_main->n_sess + 10) {
		sm = ses_sm_alloc();
		if (sm != NULL) {
			ses_setup(sm);
			Lck_Lock(&ses_mem_mtx);
			VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp],
			    sm, list);
			Lck_Unlock(&ses_mem_mtx);
		}
	}
}
void VDI_RecycleFd(struct vbc **vbp) { struct backend *bp; struct vbc *vc; AN(vbp); vc = *vbp; *vbp = NULL; CHECK_OBJ_NOTNULL(vc, VBC_MAGIC); CHECK_OBJ_NOTNULL(vc->backend, BACKEND_MAGIC); assert(vc->fd >= 0); bp = vc->backend; VSLb(vc->vsl, SLT_BackendReuse, "%s", bp->display_name); /* XXX: revisit this hack */ VSL_Flush(vc->vsl, 0); vc->vsl = NULL; Lck_Lock(&bp->mtx); VSC_C_main->backend_recycle++; VTAILQ_INSERT_HEAD(&bp->connlist, vc, list); VBE_DropRefLocked(bp); }
void VDI_RecycleFd(struct worker *wrk, struct vbc **vbp) { struct backend *bp; struct vbc *vc; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); AN(vbp); vc = *vbp; *vbp = NULL; CHECK_OBJ_NOTNULL(vc, VBC_MAGIC); CHECK_OBJ_NOTNULL(vc->backend, BACKEND_MAGIC); assert(vc->fd >= 0); bp = vc->backend; WSL(wrk, SLT_BackendReuse, vc->vsl_id, "%s", bp->display_name); /* * Flush the shmlog, so that another session reusing this backend * will log chronologically later than our use of it. */ WSL_Flush(wrk, 0); Lck_Lock(&bp->mtx); VSC_C_main->backend_recycle++; VTAILQ_INSERT_HEAD(&bp->connlist, vc, list); VBE_DropRefLocked(bp); }
void VBE_RecycleFd(struct sess *sp) { struct backend *bp; CHECK_OBJ_NOTNULL(sp->vbe, VBE_CONN_MAGIC); CHECK_OBJ_NOTNULL(sp->vbe->backend, BACKEND_MAGIC); assert(sp->vbe->fd >= 0); bp = sp->vbe->backend; WSL(sp->wrk, SLT_BackendReuse, sp->vbe->fd, "%s", bp->vcl_name); Lck_Lock(&bp->mtx); VSL_stats->backend_recycle++; VTAILQ_INSERT_HEAD(&bp->connlist, sp->vbe, list); sp->vbe = NULL; VBE_DropRefLocked(bp); }
/*
 * Dispose of an idle, already-closed vbe_conn: either cache the struct
 * on the shared free list (when cache_vbe_conns is set) or free it.
 * The connection must already be detached (no backend, fd closed).
 */
void
VBE_ReleaseConn(struct vbe_conn *vc)
{

	CHECK_OBJ_NOTNULL(vc, VBE_CONN_MAGIC);
	assert(vc->backend == NULL);
	assert(vc->fd < 0);

	if (!params->cache_vbe_conns) {
		/* Caching disabled: hand the memory straight back. */
		VSL_stats->n_vbe_conn--;
		free(vc);
		return;
	}

	/* Stash the idle struct on the shared free list. */
	Lck_Lock(&VBE_mtx);
	VTAILQ_INSERT_HEAD(&vbe_conns, vc, list);
	VSL_stats->backend_unused++;
	Lck_Unlock(&VBE_mtx);
}
void * MPL_Get(struct mempool *mpl, unsigned *size) { struct memitem *mi; CHECK_OBJ_NOTNULL(mpl, MEMPOOL_MAGIC); Lck_Lock(&mpl->mtx); mpl->vsc->allocs++; mpl->vsc->live = ++mpl->live; do { mi = VTAILQ_FIRST(&mpl->list); if (mi == NULL) { mpl->vsc->randry++; break; } mpl->vsc->pool = --mpl->n_pool; CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC); VTAILQ_REMOVE(&mpl->list, mi, list); if (mi->size < *mpl->cur_size) { mpl->vsc->toosmall++; VTAILQ_INSERT_HEAD(&mpl->surplus, mi, list); mi = NULL; } else { mpl->vsc->recycle++; } } while (mi == NULL); Lck_Unlock(&mpl->mtx); if (mi == NULL) mi = mpl_alloc(mpl); if (size != NULL) *size = mi->size; CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC); /* Throw away sizeof info for FlexeLint: */ return ((void*)(uintptr_t)(mi+1)); }
/*
 * Add a delivery-processor entry to the request's VDP chain — at the
 * tail when "bottom" is set, otherwise at the head — refresh the
 * next-entry pointer, and give the processor its VDP_INIT call.
 */
void
VDP_push(struct req *req, vdp_bytes *func, void *priv, int bottom)
{
	struct vdp_entry *ent;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(func);

	/* Entry lives on the request workspace; freed with the request. */
	ent = WS_Alloc(req->ws, sizeof *ent);
	AN(ent);
	INIT_OBJ(ent, VDP_ENTRY_MAGIC);
	ent->func = func;
	ent->priv = priv;

	if (bottom)
		VTAILQ_INSERT_TAIL(&req->vdp, ent, list);
	else
		VTAILQ_INSERT_HEAD(&req->vdp, ent, list);
	req->vdp_nxt = VTAILQ_FIRST(&req->vdp);

	AZ(ent->func(req, VDP_INIT, &ent->priv, NULL, 0));
}
static void vbe_RecycleFd(struct vbc **vbp, const struct acct_bereq *acct_bereq) { struct backend *bp; struct vbc *vc; AN(vbp); vc = *vbp; *vbp = NULL; CHECK_OBJ_NOTNULL(vc, VBC_MAGIC); CHECK_OBJ_NOTNULL(vc->backend, BACKEND_MAGIC); assert(vc->fd >= 0); bp = vc->backend; VSLb(vc->vsl, SLT_BackendReuse, "%d %s", vc->fd, bp->display_name); vc->vsl = NULL; Lck_Lock(&bp->mtx); VSC_C_main->backend_recycle++; VTAILQ_INSERT_HEAD(&bp->connlist, vc, list); VBE_DropRefLocked(bp, acct_bereq); }
/*
 * Per-pool housekeeping thread: tops the pool up toward min_pool, trims
 * it down toward max_pool, drains the surplus list, expires items not
 * touched within max_age, and tears the whole pool down when
 * self_destruct is set.  Allocation and freeing happen outside the
 * pool mutex; "mi" carries at most one item between steps/rounds.
 * The sleep interval shrinks whenever a round did real work.
 */
static void *
mpl_guard(void *priv)
{
	struct mempool *mpl;
	struct memitem *mi = NULL;
	double mpl_slp __state_variable__(mpl_slp);
	double last = 0;

	CAST_OBJ_NOTNULL(mpl, priv, MEMPOOL_MAGIC);
	mpl_slp = 0.15;	// random
	while (1) {
		VTIM_sleep(mpl_slp);
		mpl_slp = 0.814;	// random
		mpl->t_now = VTIM_real();

		/* Drop a carried-over item that is no longer wanted. */
		if (mi != NULL && (mpl->n_pool > mpl->param->max_pool ||
		    mi->size < *mpl->cur_size)) {
			FREE_OBJ(mi);
			mi = NULL;
		}

		/* Pre-allocate outside the lock if the pool looks low. */
		if (mi == NULL && mpl->n_pool < mpl->param->min_pool)
			mi = mpl_alloc(mpl);

		/* Decide, without the lock, whether this round has work. */
		if (mpl->n_pool < mpl->param->min_pool && mi != NULL) {
			/* can do */
		} else if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			/* can do */
		} else if (!VTAILQ_EMPTY(&mpl->surplus)) {
			/* can do */
		} else if (last + .1 * mpl->param->max_age < mpl->t_now) {
			/* should do */
		} else if (mpl->self_destruct) {
			/* can do */
		} else {
			continue;	/* nothing to do */
		}

		mpl_slp = 0.314;	// random
		if (Lck_Trylock(&mpl->mtx))
			continue;	/* contended; try again next round */

		if (mpl->self_destruct) {
			/* Drain both lists, free everything, kill pool. */
			AZ(mpl->live);
			while (1) {
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->list);
					if (mi != NULL) {
						mpl->vsc->pool =
						    --mpl->n_pool;
						VTAILQ_REMOVE(&mpl->list,
						    mi, list);
					}
				}
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->surplus);
					if (mi != NULL)
						VTAILQ_REMOVE(&mpl->surplus,
						    mi, list);
				}
				if (mi == NULL)
					break;
				FREE_OBJ(mi);
				mi = NULL;
			}
			VSM_Free(mpl->vsc);
			Lck_Unlock(&mpl->mtx);
			Lck_Delete(&mpl->mtx);
			FREE_OBJ(mpl);
			break;
		}

		/* Insert the pre-allocated item if still needed/usable. */
		if (mpl->n_pool < mpl->param->min_pool && mi != NULL &&
		    mi->size >= *mpl->cur_size) {
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = ++mpl->n_pool;
			mi->touched = mpl->t_now;
			VTAILQ_INSERT_HEAD(&mpl->list, mi, list);
			mi = NULL;
			mpl_slp = .01;	// random
		}

		/* Pool over max: pull one item to free after unlock. */
		if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->list);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = --mpl->n_pool;
			mpl->vsc->surplus++;
			VTAILQ_REMOVE(&mpl->list, mi, list);
			mpl_slp = .01;	// random
		}

		/* Otherwise drain one surplus (undersized) item. */
		if (mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->surplus);
			if (mi != NULL) {
				CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
				VTAILQ_REMOVE(&mpl->surplus, mi, list);
				mpl_slp = .01;	// random
			}
		}

		/* Expire the oldest item if it has outlived max_age. */
		if (mi == NULL && mpl->n_pool > mpl->param->min_pool) {
			mi = VTAILQ_LAST(&mpl->list, memhead_s);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			if (mi->touched + mpl->param->max_age < mpl->t_now) {
				mpl->vsc->pool = --mpl->n_pool;
				mpl->vsc->timeout++;
				VTAILQ_REMOVE(&mpl->list, mi, list);
				mpl_slp = .01;	// random
			} else {
				mi = NULL;
				last = mpl->t_now;
			}
		} else if (mpl->n_pool <= mpl->param->min_pool) {
			last = mpl->t_now;
		}

		Lck_Unlock(&mpl->mtx);

		/* Free the evicted item outside the lock. */
		if (mi != NULL) {
			FREE_OBJ(mi);
			mi = NULL;
		}
	}
	return (NULL);
}
void SES_Delete(struct sess *sp, const char *reason, double now) { struct acct *b; struct sessmem *sm; struct worker *wrk; struct sesspool *pp; pp = ses_getpool(sp); sm = sp->mem; CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC); wrk = sp->wrk; CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC); if (reason != NULL) SES_Close(sp, reason); if (isnan(now)) now = VTIM_real(); assert(!isnan(sp->t_open)); assert(sp->fd < 0); if (sp->req != NULL) { AZ(sp->req->vcl); SES_ReleaseReq(sp); } if (*sp->addr == '\0') strcpy(sp->addr, "-"); if (*sp->port == '\0') strcpy(sp->addr, "-"); b = &sp->acct_ses; VSL(SLT_StatSess, sp->vsl_id, "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju", sp->addr, sp->port, now - sp->t_open, b->sess, b->req, b->pipe, b->pass, b->fetch, b->hdrbytes, b->bodybytes); if (sm->workspace != cache_param->sess_workspace || sm->nhttp != (uint16_t)cache_param->http_max_hdr || pp->nsess > cache_param->max_sess) { free(sm); Lck_Lock(&pp->mtx); if (wrk != NULL) wrk->stats.sessmem_free++; else pp->dly_free_cnt++; pp->nsess--; Lck_Unlock(&pp->mtx); } else { /* Clean and prepare for reuse */ ses_setup(sm); Lck_Lock(&pp->mtx); if (wrk != NULL) { wrk->stats.sessmem_free += pp->dly_free_cnt; pp->dly_free_cnt = 0; } VTAILQ_INSERT_HEAD(&pp->freelist, sm, list); Lck_Unlock(&pp->mtx); } }
/*
 * Main loop of a worker thread: repeatedly pull a task from the pool's
 * queues (front queue has priority over the back queue), flush
 * per-thread statistics into the pool accumulator at intervals, and
 * sleep on the idle queue when there is nothing to do.  Exits when
 * handed the pool_kiss_of_death task.
 */
void
Pool_Work_Thread(struct pool *pp, struct worker *wrk)
{
	struct pool_task *tp;
	struct pool_task tpx, tps;
	int i;

	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	wrk->pool = pp;
	while (1) {
		Lck_Lock(&pp->mtx);
		CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

		WS_Reset(wrk->aws, NULL);
		AZ(wrk->vsl);

		/* Front queue has priority over the back queue. */
		tp = VTAILQ_FIRST(&pp->front_queue);
		if (tp != NULL) {
			pp->lqueue--;
			VTAILQ_REMOVE(&pp->front_queue, tp, list);
		} else {
			tp = VTAILQ_FIRST(&pp->back_queue);
			if (tp != NULL)
				VTAILQ_REMOVE(&pp->back_queue, tp, list);
		}

		/* Flush thread stats when idle or past the rate limit. */
		if ((tp == NULL && wrk->stats->summs > 0) ||
		    (wrk->stats->summs >= cache_param->wthread_stats_rate))
			pool_addstat(pp->a_stat, wrk->stats);

		if (tp != NULL) {
			wrk->stats->summs++;
		} else if (pp->b_stat != NULL && pp->a_stat->summs) {
			/* Nothing to do, push pool stats into global pool */
			tps.func = pool_stat_summ;
			tps.priv = pp->a_stat;
			pp->a_stat = pp->b_stat;
			pp->b_stat = NULL;
			tp = &tps;
		} else {
			/* Nothing to do: To sleep, perchance to dream ... */
			if (isnan(wrk->lastused))
				wrk->lastused = VTIM_real();
			wrk->task.func = NULL;
			wrk->task.priv = wrk;
			VTAILQ_INSERT_HEAD(&pp->idle_queue,
			    &wrk->task, list);
			do {
				/* Timed wait while holding a VCL ref, so
				 * it can be released after inactivity. */
				i = Lck_CondWait(&wrk->cond, &pp->mtx,
				    wrk->vcl == NULL ? 0 : wrk->lastused+60.);
				if (i == ETIMEDOUT)
					VCL_Rel(&wrk->vcl);
			} while (wrk->task.func == NULL);
			/* Copy out: wrk->task is reused immediately. */
			tpx = wrk->task;
			tp = &tpx;
			wrk->stats->summs++;
		}
		Lck_Unlock(&pp->mtx);

		if (tp->func == pool_kiss_of_death)
			break;

		/* Run the task, then any follow-up it left in wrk->task. */
		do {
			memset(&wrk->task, 0, sizeof wrk->task);
			assert(wrk->pool == pp);
			tp->func(wrk, tp->priv);
			tpx = wrk->task;
			tp = &tpx;
		} while (tp->func != NULL);

		/* cleanup for next task */
		wrk->seen_methods = 0;
	}
	wrk->pool = NULL;
}
/*
 * Finalize a ban under construction: serialize its spec (fixed header
 * followed by the test list), insert it at the head of the global ban
 * list, update statistics, notify the stevedores, optionally mark
 * duplicate bans completed, and wake the ban lurker.  Returns NULL on
 * success or an error string from ban_error().
 */
const char *
BAN_Commit(struct ban_proto *bp)
{
	struct ban *b, *bi;
	ssize_t ln;
	double t0;

	CHECK_OBJ_NOTNULL(bp, BAN_PROTO_MAGIC);
	AN(bp->vsb);

	if (ban_shutdown)
		return (ban_error(bp, "Shutting down"));

	AZ(VSB_finish(bp->vsb));
	ln = VSB_len(bp->vsb);
	assert(ln >= 0);

	ALLOC_OBJ(b, BAN_MAGIC);
	if (b == NULL)
		return (ban_error(bp, ban_build_err_no_mem));
	VTAILQ_INIT(&b->objcore);

	/* Spec = BANS_HEAD_LEN header + serialized test list. */
	b->spec = malloc(ln + BANS_HEAD_LEN);
	if (b->spec == NULL) {
		free(b);
		return (ban_error(bp, ban_build_err_no_mem));
	}

	b->flags = bp->flags;

	memset(b->spec, 0, BANS_HEAD_LEN);
	t0 = VTIM_real();
	memcpy(b->spec + BANS_TIMESTAMP, &t0, sizeof t0);
	b->spec[BANS_FLAGS] = b->flags & 0xff;
	memcpy(b->spec + BANS_HEAD_LEN, VSB_data(bp->vsb), ln);
	ln += BANS_HEAD_LEN;
	vbe32enc(b->spec + BANS_LENGTH, ln);

	Lck_Lock(&ban_mtx);
	if (ban_shutdown) {
		/* We could have raced a shutdown */
		Lck_Unlock(&ban_mtx);
		BAN_Free(b);
		return (ban_error(bp, "Shutting down"));
	}
	/* Remember the previous head: NULL means this is the first ban. */
	bi = VTAILQ_FIRST(&ban_head);
	VTAILQ_INSERT_HEAD(&ban_head, b, list);
	ban_start = b;

	VSC_C_main->bans++;
	VSC_C_main->bans_added++;
	VSC_C_main->bans_persisted_bytes += ln;
	if (b->flags & BANS_FLAG_OBJ)
		VSC_C_main->bans_obj++;
	if (b->flags & BANS_FLAG_REQ)
		VSC_C_main->bans_req++;

	if (bi != NULL)
		ban_info_new(b->spec, ln);	/* Notify stevedores */

	if (cache_param->ban_dups) {
		/* Hunt down duplicates, and mark them as completed */
		for (bi = VTAILQ_NEXT(b, list); bi != NULL;
		    bi = VTAILQ_NEXT(bi, list)) {
			if (!(bi->flags & BANS_FLAG_COMPLETED) &&
			    ban_equal(b->spec, bi->spec)) {
				ban_mark_completed(bi);
				VSC_C_main->bans_dups++;
			}
		}
	}
	/* Request-only bans are not for the lurker. */
	if (!(b->flags & BANS_FLAG_REQ))
		ban_kick_lurker();
	Lck_Unlock(&ban_mtx);

	BAN_Abandon(bp);
	return (NULL);
}
/*
 * Close (when a reason is given), account and dispose of a session.
 * The sessmem is either freed outright (when sizing parameters have
 * changed, or the pool is over its session budget) or cleaned and
 * recycled onto the pool's freelist.  Per-worker stats are used when a
 * worker is attached; otherwise frees are counted on the pool and
 * reconciled later.
 */
void
SES_Delete(struct sess *sp, const char *reason)
{
	struct acct *b;
	struct sessmem *sm;
	static char noaddr[] = "-";
	struct worker *wrk;
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	sm = sp->mem;
	CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC);
	pp = sm->pool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	wrk = sp->wrk;
	CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);

	if (reason != NULL)
		SES_Close(sp, reason);
	assert(sp->fd < 0);
	AZ(sp->obj);
	AZ(sp->vcl);

	/* Substitute "-" so the log line always has both fields. */
	if (sp->addr == NULL)
		sp->addr = noaddr;
	if (sp->port == NULL)
		sp->port = noaddr;

	b = &sp->acct_ses;
	assert(!isnan(b->first));
	assert(!isnan(sp->t_end));
	VSL(SLT_StatSess, sp->vsl_id,
	    "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju",
	    sp->addr, sp->port, sp->t_end - b->first,
	    b->sess, b->req, b->pipe, b->pass,
	    b->fetch, b->hdrbytes, b->bodybytes);

	if (sm->workspace != cache_param->sess_workspace ||
	    sm->nhttp != (uint16_t)cache_param->http_max_hdr ||
	    pp->nsess > cache_param->max_sess) {
		/* Stale sizing or pool over budget: free, don't recycle. */
		free(sm);
		Lck_Lock(&pp->mtx);
		if (wrk != NULL)
			wrk->stats.sessmem_free++;
		else
			pp->dly_free_cnt++;	/* no worker: count later */
		pp->nsess--;
		Lck_Unlock(&pp->mtx);
	} else {
		/* Clean and prepare for reuse */
		ses_setup(sm);
		Lck_Lock(&pp->mtx);
		if (wrk != NULL) {
			/* Reconcile frees deferred while workerless. */
			wrk->stats.sessmem_free += pp->dly_free_cnt;
			pp->dly_free_cnt = 0;
		}
		VTAILQ_INSERT_HEAD(&pp->freelist, sm, list);
		Lck_Unlock(&pp->mtx);
	}
}