struct storage * VFP_GetStorage(struct busyobj *bo, ssize_t sz) { ssize_t l; struct storage *st; struct object *obj; CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); obj = bo->fetch_obj; CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC); st = VTAILQ_LAST(&obj->store, storagehead); if (st != NULL && st->len < st->space) return (st); AN(bo->stats); l = fetchfrag; if (l == 0) l = sz; if (l == 0) l = cache_param->fetch_chunksize; st = STV_alloc(bo, l); if (st == NULL) { (void)VFP_Error(bo, "Could not get storage"); } else { AZ(st->len); Lck_Lock(&bo->mtx); VTAILQ_INSERT_TAIL(&obj->store, st, list); Lck_Unlock(&bo->mtx); } return (st); }
static void ban_cleantail(void) { struct ban *b; do { Lck_Lock(&ban_mtx); b = VTAILQ_LAST(&ban_head, banhead_s); if (b != VTAILQ_FIRST(&ban_head) && b->refcount == 0) { if (b->flags & BANS_FLAG_COMPLETED) VSC_C_main->bans_completed--; if (b->flags & BANS_FLAG_OBJ) VSC_C_main->bans_obj--; if (b->flags & BANS_FLAG_REQ) VSC_C_main->bans_req--; VSC_C_main->bans--; VSC_C_main->bans_deleted++; VTAILQ_REMOVE(&ban_head, b, list); VSC_C_main->bans_persisted_fragmentation += ban_len(b->spec); ban_info(BI_DROP, b->spec, ban_len(b->spec)); } else { b = NULL; } Lck_Unlock(&ban_mtx); if (b != NULL) BAN_Free(b); } while (b != NULL); }
struct storage * FetchStorage(struct worker *wrk, ssize_t sz) { ssize_t l; struct storage *st; struct object *obj; obj = wrk->busyobj->fetch_obj; CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC); st = VTAILQ_LAST(&obj->store, storagehead); if (st != NULL && st->len < st->space) return (st); l = fetchfrag; if (l == 0) l = sz; if (l == 0) l = cache_param->fetch_chunksize; st = STV_alloc(wrk, l); if (st == NULL) { (void)FetchError(wrk, "Could not get storage"); return (NULL); } AZ(st->len); VTAILQ_INSERT_TAIL(&obj->store, st, list); return (st); }
vfp_nop_end(struct worker *wrk) { struct storage *st; st = VTAILQ_LAST(&wrk->busyobj->fetch_obj->store, storagehead); if (st == NULL) return (0); if (st->len == 0) { VTAILQ_REMOVE(&wrk->busyobj->fetch_obj->store, st, list); STV_free(st); return (0); } if (st->len < st->space) STV_trim(st, st->len); return (0); }
vfp_nop_end(struct sess *sp) { struct storage *st; st = VTAILQ_LAST(&sp->obj->store, storagehead); if (st == NULL) return (0); if (st->len == 0) { VTAILQ_REMOVE(&sp->obj->store, st, list); STV_free(st); return (0); } if (st->len < st->space) STV_trim(st, st->len); return (0); }
void VBO_extend(struct busyobj *bo, ssize_t l) { struct storage *st; CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); CHECK_OBJ_NOTNULL(bo->fetch_obj, OBJECT_MAGIC); if (l == 0) return; assert(l > 0); Lck_Lock(&bo->mtx); st = VTAILQ_LAST(&bo->fetch_obj->body->list, storagehead); CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC); st->len += l; bo->fetch_obj->len += l; AZ(pthread_cond_broadcast(&bo->cond)); Lck_Unlock(&bo->mtx); }
struct storage * FetchStorage(const struct sess *sp, ssize_t sz) { ssize_t l; struct storage *st; st = VTAILQ_LAST(&sp->obj->store, storagehead); if (st != NULL && st->len < st->space) return (st); l = fetchfrag; if (l == 0) l = sz; if (l == 0) l = params->fetch_chunksize * 1024LL; st = STV_alloc(sp, l); if (st == NULL) { errno = ENOMEM; return (NULL); } AZ(st->len); VTAILQ_INSERT_TAIL(&sp->obj->store, st, list); return (st); }
/*
 * Guard thread for one memory pool: periodically tops the pool up to
 * min_pool, drains it down toward max_pool, frees returned surplus
 * items, expires items older than max_age, and finally tears the pool
 * down when self_destruct is set.  Allocation and freeing happen
 * outside mpl->mtx; only list manipulation is done under the lock.
 *
 * priv is the struct mempool (MEMPOOL_MAGIC).  Returns NULL when the
 * pool self-destructs.
 */
static void *
mpl_guard(void *priv)
{
	struct mempool *mpl;
	struct memitem *mi = NULL;
	double mpl_slp __state_variable__(mpl_slp);
	double last = 0;

	CAST_OBJ_NOTNULL(mpl, priv, MEMPOOL_MAGIC);
	mpl_slp = 0.15;	// random
	while (1) {
		VTIM_sleep(mpl_slp);
		mpl_slp = 0.814;	// random
		mpl->t_now = VTIM_real();

		/*
		 * Discard a pre-allocated item that is no longer wanted:
		 * either the pool is over its max, or the item was sized
		 * for an older (smaller) *cur_size.
		 */
		if (mi != NULL && (mpl->n_pool > mpl->param->max_pool ||
		    mi->size < *mpl->cur_size)) {
			FREE_OBJ(mi);
			mi = NULL;
		}

		/* Pre-allocate outside the lock if the pool runs low */
		if (mi == NULL && mpl->n_pool < mpl->param->min_pool)
			mi = mpl_alloc(mpl);

		/* Decide whether there is any work worth taking the lock for */
		if (mpl->n_pool < mpl->param->min_pool && mi != NULL) {
			/* can do */
		} else if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			/* can do */
		} else if (!VTAILQ_EMPTY(&mpl->surplus)) {
			/* can do */
		} else if (last + .1 * mpl->param->max_age < mpl->t_now) {
			/* should do */
		} else if (mpl->self_destruct) {
			/* can do */
		} else {
			continue;	/* nothing to do */
		}

		mpl_slp = 0.314;	// random

		/* Never block on the pool lock; retry next wakeup instead */
		if (Lck_Trylock(&mpl->mtx))
			continue;

		if (mpl->self_destruct) {
			/* Pool teardown: no live items may remain */
			AZ(mpl->live);
			while (1) {
				/* Drain the free list first ... */
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->list);
					if (mi != NULL) {
						mpl->vsc->pool = --mpl->n_pool;
						VTAILQ_REMOVE(&mpl->list,
						    mi, list);
					}
				}
				/* ... then the surplus list */
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->surplus);
					if (mi != NULL)
						VTAILQ_REMOVE(&mpl->surplus,
						    mi, list);
				}
				if (mi == NULL)
					break;
				FREE_OBJ(mi);
				mi = NULL;
			}
			VSM_Free(mpl->vsc);
			Lck_Unlock(&mpl->mtx);
			Lck_Delete(&mpl->mtx);
			FREE_OBJ(mpl);
			break;
		}

		/* Insert the pre-allocated item, if it is still the right size */
		if (mpl->n_pool < mpl->param->min_pool &&
		    mi != NULL && mi->size >= *mpl->cur_size) {
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = ++mpl->n_pool;
			mi->touched = mpl->t_now;
			VTAILQ_INSERT_HEAD(&mpl->list, mi, list);
			mi = NULL;
			mpl_slp = .01;	// random
		}
		/* Pool too large: pull one item for freeing after unlock */
		if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->list);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = --mpl->n_pool;
			mpl->vsc->surplus++;
			VTAILQ_REMOVE(&mpl->list, mi, list);
			mpl_slp = .01;	// random
		}
		/* Take one returned surplus item for freeing after unlock */
		if (mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->surplus);
			if (mi != NULL) {
				CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
				VTAILQ_REMOVE(&mpl->surplus, mi, list);
				mpl_slp = .01;	// random
			}
		}
		/*
		 * Expire the oldest item (list tail) if it has not been
		 * touched within max_age; otherwise note the time so the
		 * age check backs off.
		 */
		if (mi == NULL && mpl->n_pool > mpl->param->min_pool) {
			mi = VTAILQ_LAST(&mpl->list, memhead_s);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			if (mi->touched + mpl->param->max_age < mpl->t_now) {
				mpl->vsc->pool = --mpl->n_pool;
				mpl->vsc->timeout++;
				VTAILQ_REMOVE(&mpl->list, mi, list);
				mpl_slp = .01;	// random
			} else {
				mi = NULL;
				last = mpl->t_now;
			}
		} else if (mpl->n_pool <= mpl->param->min_pool) {
			last = mpl->t_now;
		}
		Lck_Unlock(&mpl->mtx);

		/* Free outside the lock */
		if (mi != NULL) {
			FREE_OBJ(mi);
			mi = NULL;
		}
	}
	return (NULL);
}
/*
 * Herder thread for one worker pool: breeds new worker threads while
 * the pool is below wthread_min or is "dry" and below wthread_max, and
 * culls the longest-idle worker (tail of idle_queue) once it has been
 * idle past wthread_timeout or the pool exceeds wthread_max.  Between
 * actions it waits on herder_cond with a 5 second timeout.
 *
 * priv is the struct pool (POOL_MAGIC).  The loop never exits normally,
 * hence NEEDLESS_RETURN.
 */
static void*
pool_herder(void *priv)
{
	struct pool *pp;
	struct pool_task *pt;
	double t_idle;
	struct worker *wrk;

	CAST_OBJ_NOTNULL(pp, priv, POOL_MAGIC);

	while (1) {
		/* Make more threads if needed and allowed */
		if (pp->nthr < cache_param->wthread_min ||
		    (pp->dry && pp->nthr < cache_param->wthread_max)) {
			pool_breed(pp);
			continue;
		}
		assert(pp->nthr >= cache_param->wthread_min);

		if (pp->nthr > cache_param->wthread_min) {
			/* Workers idle since before t_idle are candidates */
			t_idle = VTIM_real() - cache_param->wthread_timeout;

			Lck_Lock(&pp->mtx);
			/* XXX: unsafe counters */
			VSC_C_main->sess_queued += pp->nqueued;
			VSC_C_main->sess_dropped += pp->ndropped;
			pp->nqueued = pp->ndropped = 0;

			wrk = NULL;
			/* Tail of idle_queue = longest-idle worker */
			pt = VTAILQ_LAST(&pp->idle_queue, taskhead);
			if (pt != NULL) {
				AZ(pt->func);
				CAST_OBJ_NOTNULL(wrk, pt->priv, WORKER_MAGIC);
				if (wrk->lastused < t_idle ||
				    pp->nthr > cache_param->wthread_max) {
					/* Give it a kiss on the cheek... */
					VTAILQ_REMOVE(&pp->idle_queue,
					    &wrk->task, list);
					wrk->task.func = pool_kiss_of_death;
					AZ(pthread_cond_signal(&wrk->cond));
				} else
					wrk = NULL;
			}
			Lck_Unlock(&pp->mtx);

			if (wrk != NULL) {
				/* One worker dispatched to die: account it
				 * and pace further destruction */
				pp->nthr--;
				Lck_Lock(&pool_mtx);
				VSC_C_main->threads--;
				VSC_C_main->threads_destroyed++;
				Lck_Unlock(&pool_mtx);
				VTIM_sleep(
				    cache_param->wthread_destroy_delay);
				continue;
			}
		}

		Lck_Lock(&pp->mtx);
		if (!pp->dry) {
			/* Nothing urgent: wait up to 5s for a wakeup */
			(void)Lck_CondWait(&pp->herder_cond, &pp->mtx,
			    VTIM_real() + 5);
		} else {
			/* XXX: unsafe counters */
			VSC_C_main->threads_limited++;
			pp->dry = 0;
		}
		Lck_Unlock(&pp->mtx);
	}
	NEEDLESS_RETURN(NULL);
}
/*
 * Fetch the response body from the backend according to htc->body_status
 * (none/zero, Content-Length, chunked, EOF, or error), running it through
 * the configured fetch processor (bo->vfp).  Afterwards the last storage
 * segment is trimmed or dropped (unless streaming), the backend
 * connection is recycled or closed, and the stored length is
 * sanity-checked against the accounting in obj->len.
 */
void
V1F_fetch_body(struct worker *wrk, struct busyobj *bo)
{
	int cls;
	struct storage *st;
	ssize_t cl;
	struct http_conn *htc;
	struct object *obj;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	htc = &bo->htc;
	CHECK_OBJ_ORNULL(bo->vbc, VBC_MAGIC);
	obj = bo->fetch_obj;
	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(obj->http, HTTP_MAGIC);

	assert(bo->state == BOS_FETCHING);

	/*
	 * XXX: The busyobj needs a dstat, but it is not obvious which one
	 * XXX: it should be (own/borrowed).  For now borrow the wrk's.
	 */
	AZ(bo->stats);
	bo->stats = &wrk->stats;

	AN(bo->vfp);
	AZ(bo->vgz_rx);
	assert(VTAILQ_EMPTY(&obj->store));

	/* XXX: pick up estimate from objdr ? */
	cl = 0;
	/* cls accumulates "must close backend connection" */
	cls = bo->should_close;
	switch (htc->body_status) {
	case BS_NONE:
		break;
	case BS_ZERO:
		break;
	case BS_LENGTH:
		/* Content-Length: read exactly cl bytes */
		cl = vbf_fetch_number(bo->h_content_length, 10);
		bo->vfp->begin(bo, cl);
		if (bo->state == BOS_FETCHING && cl > 0)
			cls |= vbf_fetch_straight(bo, htc, cl);
		if (bo->vfp->end(bo))
			assert(bo->state == BOS_FAILED);
		break;
	case BS_CHUNKED:
		bo->vfp->begin(bo, cl > 0 ? cl : 0);
		if (bo->state == BOS_FETCHING)
			cls |= vbf_fetch_chunked(bo, htc);
		if (bo->vfp->end(bo))
			assert(bo->state == BOS_FAILED);
		break;
	case BS_EOF:
		/* Body delimited by connection close: always close after */
		bo->vfp->begin(bo, cl > 0 ? cl : 0);
		if (bo->state == BOS_FETCHING)
			vbf_fetch_eof(bo, htc);
		cls = 1;
		if (bo->vfp->end(bo))
			assert(bo->state == BOS_FAILED);
		break;
	case BS_ERROR:
		cls |= VFP_Error(bo, "error incompatible Transfer-Encoding");
		break;
	default:
		INCOMPL();
	}
	bo->t_body = VTIM_mono();

	AZ(bo->vgz_rx);

	/*
	 * Trim or delete the last segment, if any
	 */
	st = VTAILQ_LAST(&bo->fetch_obj->store, storagehead);
	/* XXX: Temporary: Only trim if we are not streaming */
	if (st != NULL && !bo->do_stream) {
		/* XXX: is any of this safe under streaming ? */
		if (st->len == 0) {
			VTAILQ_REMOVE(&bo->fetch_obj->store, st, list);
			STV_free(st);
		} else if (st->len < st->space) {
			STV_trim(st, st->len, 1);
		}
	}

	bo->vfp = NULL;

	VSLb(bo->vsl, SLT_Fetch_Body, "%u(%s) cls %d",
	    htc->body_status, body_status_2str(htc->body_status), cls);

	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);

	/* Close or recycle the backend connection depending on cls */
	if (bo->vbc != NULL) {
		if (cls)
			VDI_CloseFd(&bo->vbc);
		else
			VDI_RecycleFd(&bo->vbc);
	}
	AZ(bo->vbc);

	if (bo->state == BOS_FAILED) {
		wrk->stats.fetch_failed++;
	} else {
		assert(bo->state == BOS_FETCHING);
		VSLb(bo->vsl, SLT_Length, "%zd", obj->len);
		{
			/* Sanity check fetch methods accounting */
			ssize_t uu;

			uu = 0;
			VTAILQ_FOREACH(st, &obj->store, list)
				uu += st->len;
			if (bo->do_stream)
				/* Streaming might have started freeing
				 * stuff */
				assert(uu <= obj->len);
			else
				assert(uu == obj->len);
		}
	}
	/* Return the borrowed dstat */
	bo->stats = NULL;
}
/*
 * Drive the fetch processor pipeline: repeatedly obtain a storage
 * segment (sized by the est hint on the first pass) and suck body data
 * into it via VFP_Suck until the pipeline reports VFP_END or VFP_ERROR,
 * or delivery of a pass object is abandoned.  Each filled stretch is
 * accounted through VBO_extend; the last segment is trimmed or dropped
 * afterwards unless streaming.  Errors set bo->should_close.
 */
void
VFP_Fetch_Body(struct busyobj *bo, ssize_t est)
{
	ssize_t l;
	enum vfp_status vfps = VFP_ERROR;
	struct storage *st = NULL;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	AN(bo->vfp_nxt);

	/* Treat a negative size estimate as "unknown" */
	if (est < 0)
		est = 0;

	if (vfp_suck_init(bo) != VFP_OK) {
		(void)VFP_Error(bo, "Fetch Pipeline failed to initialize");
		bo->should_close = 1;
		return;
	}

	do {
		if (bo->abandon) {
			/*
			 * A pass object and delivery was terminated.
			 * We don't fail the fetch, in order for
			 * hit-for-pass objects to be created.
			 */
			AN(bo->fetch_objcore->flags & OC_F_PASS);
			VSLb(bo->vsl, SLT_FetchError,
			    "Pass delivery abandoned");
			vfps = VFP_END;
			bo->should_close = 1;
			break;
		}
		AZ(bo->failed);
		/* Get a fresh segment; est only guides the first one */
		if (st == NULL) {
			st = VFP_GetStorage(bo, est);
			est = 0;
		}
		if (st == NULL) {
			bo->should_close = 1;
			(void)VFP_Error(bo, "Out of storage");
			break;
		}
		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
		assert(st == VTAILQ_LAST(&bo->fetch_obj->store, storagehead));
		/* Suck into the unused tail of the segment */
		l = st->space - st->len;
		AZ(bo->failed);
		vfps = VFP_Suck(bo, st->ptr + st->len, &l);
		if (l > 0 && vfps != VFP_ERROR) {
			AZ(VTAILQ_EMPTY(&bo->fetch_obj->store));
			VBO_extend(bo, l);
		}
		/* Segment full: get a new one next iteration */
		if (st->len == st->space)
			st = NULL;
	} while (vfps == VFP_OK);

	if (vfps == VFP_ERROR) {
		AN(bo->failed);
		(void)VFP_Error(bo, "Fetch Pipeline failed to process");
		bo->should_close = 1;
	}

	vfp_suck_fini(bo);

	/*
	 * Trim or delete the last segment, if any
	 */
	st = VTAILQ_LAST(&bo->fetch_obj->store, storagehead);
	/* XXX: Temporary: Only trim if we are not streaming */
	if (st != NULL && !bo->do_stream) {
		/* None of this is safe under streaming */
		if (st->len == 0) {
			VTAILQ_REMOVE(&bo->fetch_obj->store, st, list);
			STV_free(st);
		} else if (st->len < st->space) {
			STV_trim(st, st->len, 1);
		}
	}
}