Example #1
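VBO_DerefBusyObj(), apparently from Varnish Cache's busyobj handling, drops one reference on a busyobj. The count is decremented under the objhead lock when a fetch objcore is attached and under the busyobj's own lock otherwise; when the last reference goes, the function logs backend byte accounting, ends the VSL transaction, releases the objcore and VCL references, frees the Vary buffer, and either parks the busyobj on the worker for reuse or frees it.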
void
VBO_DerefBusyObj(struct worker *wrk, struct busyobj **pbo)
{
	struct busyobj *bo;
	struct objcore *oc = NULL;
	unsigned r;

	CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);
	AN(pbo);
	bo = *pbo;
	*pbo = NULL;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_obj, OBJECT_MAGIC);
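	/*
	 * Drop one reference; take the objhead lock when a fetch objcore
	 * is attached, otherwise the busyobj's own lock.
	 */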
	if (bo->fetch_objcore != NULL) {
		oc = bo->fetch_objcore;
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
		CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
		Lck_Lock(&oc->objhead->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&oc->objhead->mtx);
	} else {
		Lck_Lock(&bo->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&bo->mtx);
	}

	if (r)
		return;

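	/* Last reference gone: log backend request/response byte counts. */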
	VSLb(bo->vsl, SLT_BereqAcct, "%ju %ju %ju %ju %ju %ju",
	    (uintmax_t)bo->acct.bereq_hdrbytes,
	    (uintmax_t)bo->acct.bereq_bodybytes,
	    (uintmax_t)(bo->acct.bereq_hdrbytes + bo->acct.bereq_bodybytes),
	    (uintmax_t)bo->acct.beresp_hdrbytes,
	    (uintmax_t)bo->acct.beresp_bodybytes,
	    (uintmax_t)(bo->acct.beresp_hdrbytes + bo->acct.beresp_bodybytes));

	VSL_End(bo->vsl);

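	/* Release the objcore reference held via bo->fetch_objcore. */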
	if (bo->fetch_objcore != NULL) {
		AN(wrk);
		(void)HSH_DerefObjCore(&wrk->stats, &bo->fetch_objcore);
	}

	VCL_Rel(&bo->vcl);

	if (bo->vary != NULL)
		free(bo->vary);

	memset(&bo->refcount, 0,
	    sizeof *bo - offsetof(struct busyobj, refcount));

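	/* Park the busyobj on the worker if bo_cache is set, else free it. */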
	if (cache_param->bo_cache && wrk != NULL && wrk->nbo == NULL)
		wrk->nbo = bo;
	else
		VBO_Free(&bo);
}
Example #2
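SES_ReleaseReq(), also from the Varnish session code, returns a finished req to its session pool: it asserts that every per-request accounting counter has been zeroed and that no VCL reference remains, ends and flushes any open VSL transaction, detaches the req from its session, and hands the memory back to the pool's req mempool.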
void
SES_ReleaseReq(struct req *req)
{
	struct sess *sp;
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	/* Make sure the request counters have all been zeroed */
#define ACCT(foo) \
	AZ(req->acct.foo);
#include "tbl/acct_fields_req.h"
#undef ACCT

	AZ(req->vcl);
	if (req->vsl->wid)
		VSL_End(req->vsl);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);
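	/* Detach the req from the session and return it to the mempool. */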
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	MPL_AssertSane(req);
	VSL_Flush(req->vsl, 0);
	req->sp = NULL;
	MPL_Free(pp->mpl_req, req);
}
Example #3
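This example collects three request-teardown paths from what appears to be the same codebase: Req_Cleanup() resets a req at the end of a transaction on its session, ved_include() builds and runs an ESI subrequest on behalf of a parent request, and http1_cleanup() finishes an HTTP/1 request and decides whether the session is gone, can start a pipelined request, or should wait for more input.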
void
Req_Cleanup(struct sess *sp, struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->topreq, REQ_MAGIC);
	assert(sp == req->sp);
	AZ(req->vcl0);

	req->director_hint = NULL;
	req->restarts = 0;

	AZ(req->privs->magic);

	if (req->vcl != NULL)
		VCL_Recache(wrk, &req->vcl);

	/* Charge and log byte counters */
	req_AcctLogCharge(wrk->stats, req);
	if (req->vsl->wid)
		VSL_End(req->vsl);

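	/* Use the request's last timestamp as session idle time, if valid. */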
	if (!isnan(req->t_prev) && req->t_prev > 0. && req->t_prev > sp->t_idle)
		sp->t_idle = req->t_prev;
	else
		sp->t_idle = W_TIM_real(wrk);

	req->t_first = NAN;
	req->t_prev = NAN;
	req->t_req = NAN;
	req->req_body_status = REQ_BODY_INIT;

	req->hash_always_miss = 0;
	req->hash_ignore_busy = 0;
	req->esi_level = 0;
	req->is_hit = 0;

	if (WS_Overflowed(req->ws))
		wrk->stats->ws_client_overflow++;

	WS_Reset(req->ws, 0);
}
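
/*
 * Create and run a subrequest for an ESI include: the child copies the
 * parent's HTTP state, is forced to a bodyless GET, borrows the parent's
 * VCL reference, and is driven through the request state machine here.
 */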
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct req *req;
	enum req_fsm_nxt s;
	struct transport xp;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	if (preq->esi_level >= cache_param->max_esi_depth)
		return;

	req = Req_New(wrk, preq->sp);
	req->req_body_status = REQ_BODY_NONE;
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);
	VSLb(req->vsl, SLT_Begin, "req %u esi", VXID(preq->vsl->wid));
	VSLb(preq->vsl, SLT_Link, "req %u esi", VXID(req->vsl->wid));
	req->esi_level = preq->esi_level + 1;

	if (preq->esi_level == 0)
		assert(preq->top == preq);
	else
		CHECK_OBJ_NOTNULL(preq->top, REQ_MAGIC);

	req->top = preq->top;

	HTTP_Copy(req->http0, preq->http0);

	req->http0->ws = req->ws;
	req->http0->vsl = req->vsl;
	req->http0->logtag = SLT_ReqMethod;
	req->http0->conds = 0;

	http_SetH(req->http0, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0') {
		http_Unset(req->http0, H_Host);
		http_SetHeader(req->http0, host);
	}

	http_ForceField(req->http0, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http0, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http0, H_If_Modified_Since);
	http_Unset(req->http0, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http0, H_Range);

	/* Set Accept-Encoding according to what we want */
	http_Unset(req->http0, H_Accept_Encoding);
	if (ecx->isgzip)
		http_ForceHeader(req->http0, H_Accept_Encoding, "gzip");

	/* Client content already taken care of */
	http_Unset(req->http0, H_Content_Length);

	/* Reset request to status before we started messing with it */
	HTTP_Copy(req->http, req->http0);

	req->vcl = preq->vcl;
	preq->vcl = NULL;
	req->wrk = preq->wrk;

	/*
	 * XXX: We should decide if we should cache the director
	 * XXX: or not (for session/backend coupling).  Until then
	 * XXX: make sure we don't trip up the check in vcl_recv.
	 */
	req->req_step = R_STP_RECV;
	req->t_req = preq->t_req;
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));

	INIT_OBJ(&xp, TRANSPORT_MAGIC);
	xp.deliver = VED_Deliver;
	req->transport = &xp;
	req->transport_priv = ecx;

	THR_SetRequest(req);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	req->ws_req = WS_Snapshot(req->ws);

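	/* Run the request until done, polling while it is disembarked. */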
	while (1) {
		req->wrk = wrk;
		s = CNT_Request(wrk, req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "loop waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		AZ(req->wrk);
		(void)usleep(10000);
	}

	VRTPRIV_dynamic_kill(req->sp->privs, (uintptr_t)req);
	CNT_AcctLogCharge(wrk->stats, req);
	VSL_End(req->vsl);

	preq->vcl = req->vcl;
	req->vcl = NULL;

	req->wrk = NULL;

	THR_SetRequest(preq);
	Req_Release(req);
}
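
/*
 * Finish an HTTP/1 request: hand the VCL reference back to the worker,
 * charge accounting, close or delete the session as needed, and report
 * whether to start a pipelined request, wait for input, or drop the
 * session entirely.
 */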
static enum http1_cleanup_ret
http1_cleanup(struct sess *sp, struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(req->vcl, VCL_CONF_MAGIC);

	req->director_hint = NULL;
	req->restarts = 0;

	AZ(req->esi_level);

	if (req->vcl != NULL) {
		if (wrk->vcl != NULL)
			VCL_Rel(&wrk->vcl);
		wrk->vcl = req->vcl;
		req->vcl = NULL;
	}

	/* Charge and log byte counters */
	AN(req->vsl->wid);
	CNT_AcctLogCharge(wrk->stats, req);
	req->req_bodybytes = 0;
	req->resp_hdrbytes = 0;
	req->resp_bodybytes = 0;

	VSL_End(req->vsl);

	if (!isnan(req->t_prev) && req->t_prev > 0.)
		sp->t_idle = req->t_prev;
	else
		sp->t_idle = W_TIM_real(wrk);

	req->t_first = NAN;
	req->t_prev = NAN;
	req->t_req = NAN;
	req->req_body_status = REQ_BODY_INIT;

	req->hash_always_miss = 0;
	req->hash_ignore_busy = 0;

	if (sp->fd >= 0 && req->doclose != SC_NULL)
		SES_Close(sp, req->doclose);

	if (sp->fd < 0) {
		wrk->stats->sess_closed++;
		AZ(req->vcl);
		SES_ReleaseReq(req);
		SES_Delete(sp, SC_NULL, NAN);
		return (SESS_DONE_RET_GONE);
	}

	WS_Reset(req->ws, NULL);
	WS_Reset(wrk->aws, NULL);

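	/* If a full pipelined request is already buffered, start it now. */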
	if (HTTP1_Reinit(req->htc) == HTTP1_COMPLETE) {
		AZ(req->vsl->wid);
		req->t_first = req->t_req = sp->t_idle;
		wrk->stats->sess_pipeline++;
		req->acct.req_hdrbytes += Tlen(req->htc->rxbuf);
		return (SESS_DONE_RET_START);
	} else {
		if (Tlen(req->htc->rxbuf))
			wrk->stats->sess_readahead++;
		return (SESS_DONE_RET_WAIT);
	}
}