Example #1
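/*
 * Client FSM step: build a synthetic response.  Sets up resp with Date,
 * Server and X-Varnish headers, runs vcl_synth{}, then either restarts
 * or delivers the VSB-collected body via V1D_Deliver_Synth().
 */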
static enum req_fsm_nxt
cnt_synth(struct worker *wrk, struct req *req)
{
	char date[40];
	struct http *h;
	double now;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	wrk->stats.s_synth++;


	now = W_TIM_real(wrk);
	VSLb_ts_req(req, "Process", now);

	if (req->err_code < 100 || req->err_code > 999)
		req->err_code = 501;

	HTTP_Setup(req->resp, req->ws, req->vsl, SLT_RespMethod);
	h = req->resp;
	VTIM_format(now, date);
	http_PrintfHeader(h, "Date: %s", date);
	http_SetHeader(h, "Server: Varnish");
	http_PrintfHeader(req->resp, "X-Varnish: %u", VXID(req->vsl->wid));
	http_PutResponse(h, "HTTP/1.1", req->err_code, req->err_reason);

	AZ(req->synth_body);
	req->synth_body = VSB_new_auto();
	AN(req->synth_body);

	VCL_synth_method(req->vcl, wrk, req, NULL, req->http->ws);

	http_Unset(h, H_Content_Length);

	AZ(VSB_finish(req->synth_body));

	if (wrk->handling == VCL_RET_RESTART) {
		HTTP_Setup(h, req->ws, req->vsl, SLT_RespMethod);
		VSB_delete(req->synth_body);
		req->synth_body = NULL;
		req->req_step = R_STP_RESTART;
		return (REQ_FSM_MORE);
	}
	assert(wrk->handling == VCL_RET_DELIVER);

	if (http_HdrIs(req->resp, H_Connection, "close"))
		req->doclose = SC_RESP_CLOSE;

	V1D_Deliver_Synth(req);

	VSLb_ts_req(req, "Resp", W_TIM_real(wrk));

	VSB_delete(req->synth_body);
	req->synth_body = NULL;

	req->err_code = 0;
	req->err_reason = NULL;
	return (REQ_FSM_DONE);
}
Example #2
int
V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr,
    int onlycached)
{
	struct http *hp;
	int j;
	ssize_t i;
	struct http_conn *htc;
	int do_chunked = 0;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_ORNULL(bo->req, REQ_MAGIC);

	htc = bo->htc;
	hp = bo->bereq;

	if (bo->req != NULL &&
	    bo->req->req_body_status == REQ_BODY_WITHOUT_LEN) {
		http_PrintfHeader(hp, "Transfer-Encoding: chunked");
		do_chunked = 1;
	}

	(void)VTCP_blocking(htc->fd);	/* XXX: we should timeout instead */
	V1L_Reserve(wrk, wrk->aws, &htc->fd, bo->vsl, bo->t_prev);
	*ctr += HTTP1_Write(wrk, hp, HTTP1_Req);

	/* Deal with any message-body the request might (still) have */
	i = 0;

	if (bo->req != NULL &&
	    (bo->req->req_body_status == REQ_BODY_CACHED || !onlycached)) {
		if (do_chunked)
			V1L_Chunked(wrk);
		i = VRB_Iterate(bo->req, vbf_iter_req_body, bo);

		if (bo->req->req_body_status == REQ_BODY_FAIL) {
			assert(i < 0);
			VSLb(bo->vsl, SLT_FetchError,
			    "req.body read error: %d (%s)",
			    errno, strerror(errno));
			bo->req->doclose = SC_RX_BODY;
		}
		if (do_chunked)
			V1L_EndChunk(wrk);
	}

	j = V1L_FlushRelease(wrk);
	if (j != 0 || i < 0) {
		VSLb(bo->vsl, SLT_FetchError, "backend write error: %d (%s)",
		    errno, strerror(errno));
		VSLb_ts_busyobj(bo, "Bereq", W_TIM_real(wrk));
		htc->doclose = SC_TX_ERROR;
		return (-1);
	}
	VSLb_ts_busyobj(bo, "Bereq", W_TIM_real(wrk));
	return (0);
}
Example #3
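/*
 * Client FSM step: handle a restart.  Gives up with a 503 synth once
 * max_restarts is exceeded; otherwise it closes the current VSL
 * transaction, switches to a fresh VXID and goes back to vcl_recv{}.
 */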
static enum req_fsm_nxt
cnt_restart(struct worker *wrk, struct req *req)
{
	unsigned wid, owid;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	req->director_hint = NULL;
	if (++req->restarts >= cache_param->max_restarts) {
		VSLb(req->vsl, SLT_VCL_Error, "Too many restarts");
		req->err_code = 503;
		req->req_step = R_STP_SYNTH;
	} else {
		wid = VXID_Get(&wrk->vxid_pool);
		// XXX: ReqEnd + ReqAcct ?
		VSLb_ts_req(req, "Restart", W_TIM_real(wrk));
		VSLb(req->vsl, SLT_Link, "req %u restart", wid);
		VSLb(req->vsl, SLT_End, "%s", "");
		VSL_Flush(req->vsl, 0);
		owid = req->vsl->wid & VSL_IDENTMASK;
		req->vsl->wid = wid | VSL_CLIENTMARKER;
		VSLb(req->vsl, SLT_Begin, "req %u restart", owid);
		VSLb_ts_req(req, "Start", req->t_prev);
		req->err_code = 0;
		req->req_step = R_STP_RECV;
	}
	return (REQ_FSM_MORE);
}
Example #4
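/*
 * Backend fetch step: retry after VCL returned retry.  Resets per-fetch
 * attributes, moves the log to a new backend VXID via VSL_ChgId() and
 * starts over at F_STP_STARTFETCH.
 */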
static enum fetch_step
vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
{
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);

	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));

	/* VDI_Finish must have been called before */
	assert(bo->director_state == DIR_S_NULL);

	/* reset other bo attributes - See VBO_GetBusyObj */
	bo->storage = NULL;
	bo->do_esi = 0;
	bo->do_stream = 1;

	// XXX: BereqEnd + BereqAcct ?
	VSL_ChgId(bo->vsl, "bereq", "retry", VXID_Get(wrk, VSL_BACKENDMARKER));
	VSLb_ts_busyobj(bo, "Start", bo->t_prev);
	http_VSL_log(bo->bereq);

	return (F_STP_STARTFETCH);
}
Example #5
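/*
 * Backend fetch step: finalize the object.  Closes the fetch pipeline,
 * records the object length, unbusies non-streaming objects, recycles
 * the backend connection and marks the busyobj BOS_FINISHED.
 */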
static enum fetch_step
vbf_stp_fetchend(struct worker *wrk, struct busyobj *bo)
{

	AZ(bo->vfc->failed);
	VFP_Close(bo->vfc);

	AZ(ObjSetU64(wrk, bo->fetch_objcore, OA_LEN,
	    bo->fetch_objcore->boc->len_so_far));

	if (bo->do_stream)
		assert(bo->fetch_objcore->boc->state == BOS_STREAM);
	else {
		assert(bo->fetch_objcore->boc->state == BOS_REQ_DONE);
		HSH_Unbusy(wrk, bo->fetch_objcore);
	}

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	VDI_Finish(bo->wrk, bo);

	ObjSetState(wrk, bo->fetch_objcore, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	if (bo->stale_oc != NULL)
		HSH_Kill(bo->stale_oc);
	return (F_STP_DONE);
}
Example #6
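/*
 * Worker entry point for a backend fetch task: runs the fetch state
 * machine (F_STP_* steps from tbl/steps.h) until F_STP_DONE, then tears
 * down bereq/beresp and releases the busyobj.
 */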
static void
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct busyobj *bo;
	enum fetch_step stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;
	assert(bo->doclose == SC_NULL);
	assert(isnan(bo->t_first));
	assert(isnan(bo->t_prev));
	VSLb_ts_busyobj(bo, "Start", W_TIM_real(wrk));

	bo->wrk = wrk;
	wrk->vsl = bo->vsl;

	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		assert(bo->refcount >= 1);
		switch(stp) {
#define FETCH_STEP(l, U, arg)						\
		case F_STP_##U:						\
			stp = vbf_stp_##l arg;				\
			break;
#include "tbl/steps.h"
#undef FETCH_STEP
		default:
			WRONG("Illegal fetch_step");
		}
	}
	assert(WRW_IsReleased(wrk));

	assert(bo->director_state == DIR_S_NULL);

	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);

	if (bo->state == BOS_FINISHED) {
		AZ(bo->fetch_objcore->flags & OC_F_FAILED);
		HSH_Complete(bo->fetch_objcore);
		VSLb(bo->vsl, SLT_Length, "%ju",
		    (uintmax_t)ObjGetLen(bo->wrk, bo->fetch_objcore));
	}
	AZ(bo->fetch_objcore->busyobj);

	if (bo->ims_oc != NULL)
		(void)HSH_DerefObjCore(wrk, &bo->ims_oc);


	wrk->vsl = NULL;
	VBO_DerefBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}
Example #7
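/*
 * Director pipe method: grab a backend connection, send the client's
 * request with V1F_SendReq() and then shovel bytes both ways with
 * V1P_Process(), charging the traffic to the backend's counters.
 */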
static enum sess_close
vbe_dir_http1pipe(const struct director *d, struct req *req, struct busyobj *bo)
{
	int i;
	enum sess_close retval;
	struct backend *bp;
	struct v1p_acct v1a;
	struct vbc *vbc;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	memset(&v1a, 0, sizeof v1a);

	/* This is hackish... */
	v1a.req = req->acct.req_hdrbytes;
	req->acct.req_hdrbytes = 0;

	req->res_mode = RES_PIPE;

	vbc = vbe_dir_getfd(req->wrk, bp, bo);

	if (vbc == NULL) {
		VSLb(bo->vsl, SLT_FetchError, "no backend connection");
		retval = SC_TX_ERROR;
	} else {
		i = V1F_SendReq(req->wrk, bo, &v1a.bereq, 1);
		VSLb_ts_req(req, "Pipe", W_TIM_real(req->wrk));
		if (vbc->state == VBC_STATE_STOLEN)
			VBT_Wait(req->wrk, vbc);
		if (i == 0)
			V1P_Process(req, vbc->fd, &v1a);
		VSLb_ts_req(req, "PipeSess", W_TIM_real(req->wrk));
		bo->htc->doclose = SC_TX_PIPE;
		vbe_dir_finish(d, req->wrk, bo);
		retval = SC_TX_PIPE;
	}
	V1P_Charge(req, &v1a, bp->vsc);
	return (retval);
}
Example #8
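/* A leaner variant of the retry step in Example #4, without the
   per-fetch attribute resets. */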
static enum fetch_step
vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));

	// XXX: BereqEnd + BereqAcct ?
	VSL_ChgId(bo->vsl, "bereq", "retry", VXID_Get(wrk, VSL_BACKENDMARKER));
	VSLb_ts_busyobj(bo, "Start", bo->t_prev);

	return (F_STP_STARTFETCH);
}
Example #9
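/*
 * Backend fetch step for a conditional (304) fetch: build the new object
 * from beresp, copy ESI data, flags and gzip bits from the stale object,
 * then copy its body across with ObjIterate() before killing the stale
 * objcore.
 */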
static enum fetch_step
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	AZ(vbf_beresp2obj(bo));

	if (ObjHasAttr(bo->wrk, bo->stale_oc, OA_ESIDATA))
		AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc,
		    OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_FLAGS));
	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		ObjSetState(wrk, bo->fetch_objcore, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, bo->fetch_objcore);
		ObjSetState(wrk, bo->fetch_objcore, BOS_STREAM);
	}

	if (ObjIterate(wrk, bo->stale_oc, bo, vbf_objiterator, 0))
		(void)VFP_Error(bo->vfc, "Template object failed");

	if (bo->stale_oc->flags & OC_F_FAILED)
		(void)VFP_Error(bo->vfc, "Template object failed");
	if (bo->vfc->failed) {
		VDI_Finish(bo->wrk, bo);
		return (F_STP_FAIL);
	}

	AZ(ObjSetU64(wrk, bo->fetch_objcore, OA_LEN,
	    bo->fetch_objcore->boc->len_so_far));

	if (!bo->do_stream)
		HSH_Unbusy(wrk, bo->fetch_objcore);

	HSH_Kill(bo->stale_oc);

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	VDI_Finish(bo->wrk, bo);

	ObjSetState(wrk, bo->fetch_objcore, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	return (F_STP_DONE);
}
Example #10
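/*
 * Reset a struct req between uses on the same session: charge and log
 * byte counters, close the VSL transaction, update the session idle
 * timestamp and clear per-request state and workspace.
 */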
void
Req_Cleanup(struct sess *sp, struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->topreq, REQ_MAGIC);
	assert(sp == req->sp);
	AZ(req->vcl0);

	req->director_hint = NULL;
	req->restarts = 0;

	AZ(req->privs->magic);

	if (req->vcl != NULL)
		VCL_Recache(wrk, &req->vcl);

	/* Charge and log byte counters */
	req_AcctLogCharge(wrk->stats, req);
	if (req->vsl->wid)
		VSL_End(req->vsl);

	if (!isnan(req->t_prev) && req->t_prev > 0. && req->t_prev > sp->t_idle)
		sp->t_idle = req->t_prev;
	else
		sp->t_idle = W_TIM_real(wrk);

	req->t_first = NAN;
	req->t_prev = NAN;
	req->t_req = NAN;
	req->req_body_status = REQ_BODY_INIT;

	req->hash_always_miss = 0;
	req->hash_ignore_busy = 0;
	req->esi_level = 0;
	req->is_hit = 0;

	if (WS_Overflowed(req->ws))
		wrk->stats->ws_client_overflow++;

	WS_Reset(req->ws, 0);
}
Example #11
static enum fetch_step
vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
{
	unsigned owid, wid;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));

	// XXX: BereqEnd + BereqAcct ?
	wid = VXID_Get(&wrk->vxid_pool);
	VSLb(bo->vsl, SLT_Link, "bereq %u retry", wid);
	VSLb(bo->vsl, SLT_End, "%s", "");
	VSL_Flush(bo->vsl, 0);
	owid = bo->vsl->wid & VSL_IDENTMASK;
	bo->vsl->wid = wid | VSL_BACKENDMARKER;
	VSLb(bo->vsl, SLT_Begin, "bereq %u retry", owid);
	VSLb_ts_busyobj(bo, "Start", bo->t_prev);

	return (F_STP_STARTFETCH);
}
Example #12
static enum req_fsm_nxt
cnt_restart(struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	req->director_hint = NULL;
	if (++req->restarts >= cache_param->max_restarts) {
		VSLb(req->vsl, SLT_VCL_Error, "Too many restarts");
		req->err_code = 503;
		req->req_step = R_STP_SYNTH;
	} else {
		// XXX: ReqEnd + ReqAcct ?
		VSLb_ts_req(req, "Restart", W_TIM_real(wrk));
		VSL_ChgId(req->vsl, "req", "restart",
		    VXID_Get(wrk, VSL_CLIENTMARKER));
		VSLb_ts_req(req, "Start", req->t_prev);
		req->err_code = 0;
		req->req_step = R_STP_RECV;
	}
	return (REQ_FSM_MORE);
}
Example #13
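/*
 * Backend fetch step: read the backend response headers with
 * V1F_fetch_hdr(), retrying once if a recycled connection turned out to
 * be dead.  On failure a 503 beresp is synthesized; either way TTL is
 * worked out and vcl_backend_response{} decides what happens next.
 */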
static enum fetch_step
vbf_stp_fetchhdr(struct worker *wrk, struct busyobj *bo)
{
	int i;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	xxxassert (wrk->handling == VCL_RET_FETCH);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, HTTP_Beresp);

	if (!bo->do_pass)
		vbf_release_req(bo); /* XXX: retry ?? */

	assert(bo->state == BOS_INVALID);

	i = V1F_fetch_hdr(wrk, bo, bo->req);
	/*
	 * If we recycle a backend connection, there is a finite chance
	 * that the backend closed it before we get a request to it.
	 * Do a single retry in that case.
	 */
	if (i == 1) {
		VSC_C_main->backend_retry++;
		i = V1F_fetch_hdr(wrk, bo, bo->req);
	}

	if (bo->do_pass)
		vbf_release_req(bo); /* XXX : retry ?? */

	AZ(bo->req);

	if (i) {
		AZ(bo->vbc);
		bo->err_code = 503;
		http_SetH(bo->beresp, HTTP_HDR_PROTO, "HTTP/1.1");
		http_SetResp(bo->beresp,
		    "HTTP/1.1", 503, "Backend fetch failed");
		http_SetHeader(bo->beresp, "Content-Length: 0");
		http_SetHeader(bo->beresp, "Connection: close");
	} else {
		AN(bo->vbc);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	/*
	 * Figure out how the fetch is supposed to happen, before the
	 * headers are adultered by VCL
	 * NB: Also sets other wrk variables
	 */
	bo->htc.body_status = RFC2616_Body(bo, &wrk->stats);

	bo->err_code = http_GetStatus(bo->beresp);

	/*
	 * What does RFC2616 think about TTL ?
	 */
	EXP_Clr(&bo->exp);
	bo->exp.entered = W_TIM_real(wrk);
	RFC2616_Ttl(bo);

	/* pass from vcl_recv{} has negative TTL */
	if (bo->fetch_objcore->objhead == NULL)
		bo->exp.ttl = -1.;

	AZ(bo->do_esi);

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, bo->beresp->ws);

	if (bo->do_esi)
		bo->do_stream = 0;

	if (wrk->handling == VCL_RET_DELIVER)
		return (F_STP_FETCH);
	if (wrk->handling == VCL_RET_RETRY) {
		assert(bo->state == BOS_INVALID);
		bo->retries++;
		if (bo->retries <= cache_param->max_retries) {
			VDI_CloseFd(&bo->vbc);
			return (F_STP_STARTFETCH);
		}
		// XXX: wrk->handling = VCL_RET_SYNTH;
	}

	return (F_STP_NOTYET);
}
Example #14
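/*
 * Client FSM step: turn the looked-up object into a response.  Decodes
 * the stored headers, adds X-Varnish, Age and Via, runs vcl_deliver{}
 * and then moves on to transmit (or to synth/restart if VCL asks).
 */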
static enum req_fsm_nxt
cnt_deliver(struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore->objhead, OBJHEAD_MAGIC);
	AN(req->vcl);

	assert(req->objcore->refcnt > 0);

	ObjTouch(req->wrk, req->objcore, req->t_prev);

	HTTP_Setup(req->resp, req->ws, req->vsl, SLT_RespMethod);
	if (HTTP_Decode(req->resp,
	    ObjGetAttr(req->wrk, req->objcore, OA_HEADERS, NULL))) {
		req->err_code = 500;
		req->req_step = R_STP_SYNTH;
		return (REQ_FSM_MORE);
	}
	http_ForceField(req->resp, HTTP_HDR_PROTO, "HTTP/1.1");

	if (req->is_hit)
		http_PrintfHeader(req->resp,
		    "X-Varnish: %u %u", VXID(req->vsl->wid),
		    ObjGetXID(wrk, req->objcore));
	else
		http_PrintfHeader(req->resp,
		    "X-Varnish: %u", VXID(req->vsl->wid));

	/*
	 * We base Age calculation upon the last timestamp taken during
	 * client request processing. This gives some inaccuracy, but
	 * since Age is only full second resolution that shouldn't
	 * matter. (Last request timestamp could be a Start timestamp
	 * taken before the object entered into cache leading to negative
	 * age. Truncate to zero in that case).
	 */
	http_PrintfHeader(req->resp, "Age: %.0f",
	    fmax(0., req->t_prev - req->objcore->t_origin));

	http_SetHeader(req->resp, "Via: 1.1 varnish-v4");

	if (cache_param->http_gzip_support &&
	    ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED) &&
	    !RFC2616_Req_Gzip(req->http))
		RFC2616_Weaken_Etag(req->resp);

	VCL_deliver_method(req->vcl, wrk, req, NULL, NULL);
	VSLb_ts_req(req, "Process", W_TIM_real(wrk));

	/* Stop the insanity before it turns "Hotel California" on us */
	if (req->restarts >= cache_param->max_restarts)
		wrk->handling = VCL_RET_DELIVER;

	if (wrk->handling != VCL_RET_DELIVER) {
		(void)HSH_DerefObjCore(wrk, &req->objcore);
		http_Teardown(req->resp);

		switch (wrk->handling) {
		case VCL_RET_RESTART:
			req->req_step = R_STP_RESTART;
			break;
		case VCL_RET_SYNTH:
			req->req_step = R_STP_SYNTH;
			break;
		default:
			WRONG("Illegal return from vcl_deliver{}");
		}

		return (REQ_FSM_MORE);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	if (!(req->objcore->flags & OC_F_PASS)
	    && req->esi_level == 0
	    && http_IsStatus(req->resp, 200)
	    && req->http->conds && RFC2616_Do_Cond(req))
		http_PutResponse(req->resp, "HTTP/1.1", 304, NULL);

	req->req_step = R_STP_TRANSMIT;
	return (REQ_FSM_MORE);
}
Example #15
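/*
 * Client FSM step: cache lookup.  Handles waiting-list disembark, misses,
 * hit-for-pass and plain hits, runs vcl_hit{} and dispatches to the
 * appropriate next step, dropping object references it no longer needs.
 */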
static enum req_fsm_nxt
cnt_lookup(struct worker *wrk, struct req *req)
{
	struct objcore *oc, *busy;
	enum lookup_e lr;
	int had_objhead = 0;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AZ(req->objcore);

	AN(req->vcl);

	VRY_Prep(req);

	AZ(req->objcore);
	if (req->hash_objhead)
		had_objhead = 1;
	lr = HSH_Lookup(req, &oc, &busy,
	    req->esi_level == 0 ? 1 : 0,
	    req->hash_always_miss ? 1 : 0
	);
	if (lr == HSH_BUSY) {
		/*
		 * We lost the session to a busy object, disembark the
		 * worker thread.   We return to STP_LOOKUP when the busy
		 * object has been unbusied, and still have the objhead
		 * around to restart the lookup with.
		 */
		return (REQ_FSM_DISEMBARK);
	}
	if (had_objhead)
		VSLb_ts_req(req, "Waitinglist", W_TIM_real(wrk));

	if (busy == NULL) {
		VRY_Finish(req, DISCARD);
	} else {
		AN(busy->flags & OC_F_BUSY);
		VRY_Finish(req, KEEP);
	}

	AZ(req->objcore);
	if (lr == HSH_MISS) {
		/* Found nothing */
		AZ(oc);
		AN(busy);
		AN(busy->flags & OC_F_BUSY);
		req->objcore = busy;
		req->req_step = R_STP_MISS;
		return (REQ_FSM_MORE);
	}

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AZ(oc->flags & OC_F_BUSY);
	req->objcore = oc;

	if ((oc->flags & OC_F_PASS) && busy != NULL) {
		/* Treat a graced Hit-For-Pass as a miss */
		req->objcore = busy;
		req->stale_oc = oc;
		req->req_step = R_STP_MISS;
		return (REQ_FSM_MORE);
	} else if (oc->flags & OC_F_PASS) {
		/* Found a hit-for-pass */
		VSLb(req->vsl, SLT_Debug, "XXXX HIT-FOR-PASS");
		VSLb(req->vsl, SLT_HitPass, "%u",
		    ObjGetXID(wrk, req->objcore));
		(void)HSH_DerefObjCore(wrk, &req->objcore);
		wrk->stats->cache_hitpass++;
		req->req_step = R_STP_PASS;
		return (REQ_FSM_MORE);
	}

	VSLb(req->vsl, SLT_Hit, "%u", ObjGetXID(wrk, req->objcore));

	VCL_hit_method(req->vcl, wrk, req, NULL, NULL);

	switch (wrk->handling) {
	case VCL_RET_DELIVER:
		if (busy != NULL) {
			AZ(oc->flags & OC_F_PASS);
			CHECK_OBJ_NOTNULL(busy->boc, BOC_MAGIC);
			VBF_Fetch(wrk, req, busy, oc, VBF_BACKGROUND);
		} else {
			(void)VRB_Ignore(req);// XXX: handle err
		}
		wrk->stats->cache_hit++;
		req->is_hit = 1;
		req->req_step = R_STP_DELIVER;
		return (REQ_FSM_MORE);
	case VCL_RET_FETCH:
		VSLb(req->vsl, SLT_VCL_Error,
		    "change return(fetch) to return(miss) in vcl_hit{}");
		/* FALL-THROUGH */
	case VCL_RET_MISS:
		if (busy != NULL) {
			req->objcore = busy;
			req->stale_oc = oc;
			req->req_step = R_STP_MISS;
		} else {
			(void)HSH_DerefObjCore(wrk, &req->objcore);
			/*
			 * We don't have a busy object, so treat this
			 * like a pass
			 */
			VSLb(req->vsl, SLT_VCL_Error,
			    "vcl_hit{} returns miss without busy object."
			    "  Doing pass.");
			req->req_step = R_STP_PASS;
		}
		return (REQ_FSM_MORE);
	case VCL_RET_RESTART:
		req->req_step = R_STP_RESTART;
		break;
	case VCL_RET_SYNTH:
		req->req_step = R_STP_SYNTH;
		break;
	case VCL_RET_PASS:
		wrk->stats->cache_hit++;
		req->is_hit = 1;
		req->req_step = R_STP_PASS;
		break;
	default:
		WRONG("Illegal return from vcl_hit{}");
	}

	/* Drop our object, we won't need it */
	(void)HSH_DerefObjCore(wrk, &req->objcore);

	if (busy != NULL) {
		(void)HSH_DerefObjCore(wrk, &busy);
		VRY_Clear(req);
	}

	return (REQ_FSM_MORE);
}
Example #16
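/*
 * Client FSM step: deliver the response.  Decides per RFC 7230 whether a
 * body is sent, stacks the ESI/gunzip/range delivery processors, fixes up
 * Content-Length and hands the response to the transport's deliver method.
 */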
static enum req_fsm_nxt
cnt_transmit(struct worker *wrk, struct req *req)
{
	struct boc *boc;
	const char *r;
	uint16_t status;
	int sendbody;
	intmax_t clval;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->transport, TRANSPORT_MAGIC);

	/* Grab a ref to the bo if there is one */
	boc = HSH_RefBoc(req->objcore);

	clval = http_GetContentLength(req->resp);
	if (boc != NULL)
		req->resp_len = clval;
	else
		req->resp_len = ObjGetLen(req->wrk, req->objcore);

	req->res_mode = 0;

	/* RFC 7230, 3.3.3 */
	status = http_GetStatus(req->resp);
	if (!strcmp(req->http0->hd[HTTP_HDR_METHOD].b, "HEAD")) {
		if (req->objcore->flags & OC_F_PASS)
			sendbody = -1;
		else
			sendbody = 0;
	} else if (status < 200 || status == 204 || status == 304) {
		req->resp_len = -1;
		sendbody = 0;
	} else
		sendbody = 1;

	if (sendbody >= 0) {
		if (!req->disable_esi && req->resp_len != 0 &&
		    ObjHasAttr(wrk, req->objcore, OA_ESIDATA))
			VDP_push(req, VDP_ESI, NULL, 0, "ESI");

		if (cache_param->http_gzip_support &&
		    ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED) &&
		    !RFC2616_Req_Gzip(req->http))
			VDP_push(req, VDP_gunzip, NULL, 1, "GUZ");

		if (cache_param->http_range_support &&
		    http_IsStatus(req->resp, 200)) {
			http_SetHeader(req->resp, "Accept-Ranges: bytes");
			if (sendbody && http_GetHdr(req->http, H_Range, &r))
				VRG_dorange(req, r);
		}
	}

	if (sendbody < 0) {
		/* Don't touch pass+HEAD C-L */
		sendbody = 0;
	} else if (clval >= 0 && clval == req->resp_len) {
		/* Reuse C-L header */
	} else {
		http_Unset(req->resp, H_Content_Length);
		if (req->resp_len >= 0 && sendbody)
			http_PrintfHeader(req->resp,
			    "Content-Length: %jd", req->resp_len);
	}

	req->transport->deliver(req, boc, sendbody);

	VSLb_ts_req(req, "Resp", W_TIM_real(wrk));

	if (req->objcore->flags & (OC_F_PRIVATE | OC_F_PASS)) {
		if (boc != NULL) {
			HSH_Abandon(req->objcore);
			ObjWaitState(req->objcore, BOS_FINISHED);
		}
		ObjSlim(wrk, req->objcore);
	}

	if (boc != NULL)
		HSH_DerefBoc(wrk, req->objcore);

	(void)HSH_DerefObjCore(wrk, &req->objcore);
	http_Teardown(req->resp);

	return (REQ_FSM_DONE);
}
Example #17
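/* Another take on the synth step from Example #1: the synthetic body is
   stored in a private transient object and delivered through the normal
   transmit step. */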
static enum req_fsm_nxt
cnt_synth(struct worker *wrk, struct req *req)
{
	struct http *h;
	double now;
	struct vsb *synth_body;
	ssize_t sz, szl;
	uint8_t *ptr;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	wrk->stats->s_synth++;

	now = W_TIM_real(wrk);
	VSLb_ts_req(req, "Process", now);

	if (req->err_code < 100 || req->err_code > 999)
		req->err_code = 501;

	HTTP_Setup(req->resp, req->ws, req->vsl, SLT_RespMethod);
	h = req->resp;
	http_TimeHeader(h, "Date: ", now);
	http_SetHeader(h, "Server: Varnish");
	http_PrintfHeader(req->resp, "X-Varnish: %u", VXID(req->vsl->wid));
	http_PutResponse(h, "HTTP/1.1", req->err_code, req->err_reason);

	synth_body = VSB_new_auto();
	AN(synth_body);

	VCL_synth_method(req->vcl, wrk, req, NULL, synth_body);

	AZ(VSB_finish(synth_body));

	http_Unset(h, H_Content_Length);
	http_PrintfHeader(req->resp, "Content-Length: %zd",
	    VSB_len(synth_body));

	/* Discard any lingering request body before delivery */
	(void)VRB_Ignore(req);

	if (wrk->handling == VCL_RET_RESTART) {
		HTTP_Setup(h, req->ws, req->vsl, SLT_RespMethod);
		VSB_destroy(&synth_body);
		req->req_step = R_STP_RESTART;
		return (REQ_FSM_MORE);
	}
	assert(wrk->handling == VCL_RET_DELIVER);

	req->objcore = HSH_Private(wrk);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
	szl = -1;
	if (STV_NewObject(wrk, req->objcore, TRANSIENT_STORAGE, 1024)) {
		szl = VSB_len(synth_body);
		assert(szl >= 0);
		sz = szl;
		if (sz > 0 &&
		    ObjGetSpace(wrk, req->objcore, &sz, &ptr) && sz >= szl) {
			memcpy(ptr, VSB_data(synth_body), szl);
			ObjExtend(wrk, req->objcore, szl);
		} else if (sz > 0) {
			szl = -1;
		}
	}

	if (szl >= 0)
		AZ(ObjSetU64(wrk, req->objcore, OA_LEN, szl));
	HSH_DerefBoc(wrk, req->objcore);
	VSB_destroy(&synth_body);

	if (szl < 0) {
		VSLb(req->vsl, SLT_Error, "Could not get storage");
		req->doclose = SC_OVERLOAD;
		VSLb_ts_req(req, "Resp", W_TIM_real(wrk));
		(void)HSH_DerefObjCore(wrk, &req->objcore);
		http_Teardown(req->resp);
		return (REQ_FSM_DONE);
	}

	req->req_step = R_STP_TRANSMIT;
	return (REQ_FSM_MORE);
}
Example #18
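/*
 * Backend fetch step: the fetch failed.  Builds a synthetic 503 beresp,
 * runs vcl_backend_error{}, and either retries, fails, or stores the
 * synthetic body as the object to deliver.
 */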
static enum fetch_step
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	double now;
	uint8_t *ptr;
	struct vsb *synth_body;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	assert(bo->director_state == DIR_S_NULL);

	if (bo->fetch_objcore->stobj->stevedore != NULL)
		ObjFreeObj(bo->wrk, bo->fetch_objcore);

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	AN(bo->fetch_objcore->flags & OC_F_BUSY);

	synth_body = VSB_new_auto();
	AN(synth_body);

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	http_PutResponse(bo->beresp, "HTTP/1.1", 503, "Backend fetch failed");
	http_TimeHeader(bo->beresp, "Date: ", now);
	http_SetHeader(bo->beresp, "Server: Varnish");

	EXP_Clr(&bo->fetch_objcore->exp);
	bo->fetch_objcore->exp.t_origin = bo->t_prev;

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, synth_body);

	AZ(VSB_finish(synth_body));

	if (wrk->handling == VCL_RET_RETRY ||
	    wrk->handling == VCL_RET_ABANDON) {
		VSB_delete(synth_body);

		if (bo->director_state != DIR_S_NULL) {
			bo->htc->doclose = SC_RESP_CLOSE;
			VDI_Finish(bo->wrk, bo);
		}

		if (wrk->handling == VCL_RET_RETRY) {
			if (bo->retries++ < cache_param->max_retries)
				return (F_STP_RETRY);
			VSLb(bo->vsl, SLT_VCL_Error,
			    "Too many retries, delivering 503");
		}

		return (F_STP_FAIL);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	bo->vfc->bo = bo;
	bo->vfc->wrk = bo->wrk;
	bo->vfc->oc = bo->fetch_objcore;
	bo->vfc->http = bo->beresp;
	bo->vfc->esi_req = bo->bereq;

	if (vbf_beresp2obj(bo)) {
		VSB_delete(synth_body);
		return (F_STP_FAIL);
	}

	ll = VSB_len(synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
			break;
		memcpy(ptr, VSB_data(synth_body) + o, l);
		VBO_extend(bo, l);
		ll -= l;
		o += l;
	}
	VSB_delete(synth_body);

	HSH_Unbusy(wrk, bo->fetch_objcore);
	VBO_setstate(bo, BOS_FINISHED);
	return (F_STP_DONE);
}
Example #19
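/*
 * Backend fetch step: run vcl_backend_fetch{}, send the request and read
 * the response headers, apply RFC 2616 TTL rules, handle 304 revalidation
 * against a stale object, then run vcl_backend_response{} and dispatch to
 * the body fetch, a conditional fetch, a retry or an error.
 */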
static enum fetch_step
vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
{
	int i;
	double now;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	AZ(bo->storage);
	bo->storage = bo->do_pass ? stv_transient : STV_next();

	if (bo->retries > 0)
		http_Unset(bo->bereq, "\012X-Varnish:");

	http_PrintfHeader(bo->bereq, "X-Varnish: %u", VXID(bo->vsl->wid));

	VCL_backend_fetch_method(bo->vcl, wrk, NULL, bo, NULL);

	bo->uncacheable = bo->do_pass;
	if (wrk->handling == VCL_RET_ABANDON || wrk->handling == VCL_RET_FAIL)
		return (F_STP_FAIL);

	assert (wrk->handling == VCL_RET_FETCH);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);

	AZ(bo->htc);

	VFP_Setup(bo->vfc, wrk);
	bo->vfc->oc = bo->fetch_objcore;
	bo->vfc->resp = bo->beresp;
	bo->vfc->req = bo->bereq;

	i = VDI_GetHdr(wrk, bo);

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Beresp", now);

	if (i) {
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	http_VSL_log(bo->beresp);

	if (bo->htc->body_status == BS_ERROR) {
		bo->htc->doclose = SC_RX_BODY;
		VDI_Finish(bo->wrk, bo);
		VSLb(bo->vsl, SLT_Error, "Body cannot be fetched");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (!http_GetHdr(bo->beresp, H_Date, NULL)) {
		/*
		 * RFC 2616 14.18 Date: The Date general-header field
		 * represents the date and time at which the message was
		 * originated, having the same semantics as orig-date in
		 * RFC 822. ... A received message that does not have a
		 * Date header field MUST be assigned one by the recipient
		 * if the message will be cached by that recipient or
		 * gatewayed via a protocol which requires a Date.
		 *
		 * If we didn't get a Date header, we assign one here.
		 */
		http_TimeHeader(bo->beresp, "Date: ", now);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	if (bo->fetch_objcore->flags & OC_F_PRIVATE) {
		/* private objects have negative TTL */
		bo->fetch_objcore->t_origin = now;
		bo->fetch_objcore->ttl = -1.;
		bo->fetch_objcore->grace = 0;
		bo->fetch_objcore->keep = 0;
	} else {
		/* What does RFC2616 think about TTL ? */
		RFC2616_Ttl(bo, now,
		    &bo->fetch_objcore->t_origin,
		    &bo->fetch_objcore->ttl,
		    &bo->fetch_objcore->grace,
		    &bo->fetch_objcore->keep
		    );
	}

	AZ(bo->do_esi);
	AZ(bo->was_304);

	if (http_IsStatus(bo->beresp, 304)) {
		if (bo->stale_oc != NULL &&
		    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND)) {
			if (ObjCheckFlag(bo->wrk, bo->stale_oc, OF_CHGGZIP)) {
				/*
				 * If we changed the gzip status of the object
				 * the stored Content_Encoding controls we
				 * must weaken any new ETag we get.
				 */
				http_Unset(bo->beresp, H_Content_Encoding);
				RFC2616_Weaken_Etag(bo->beresp);
			}
			http_Unset(bo->beresp, H_Content_Length);
			HTTP_Merge(bo->wrk, bo->stale_oc, bo->beresp);
			assert(http_IsStatus(bo->beresp, 200));
			bo->was_304 = 1;
		} else if (!bo->do_pass) {
			/*
			 * Backend sent unallowed 304
			 */
			VSLb(bo->vsl, SLT_Error,
			    "304 response but not conditional fetch");
			bo->htc->doclose = SC_RX_BAD;
			VDI_Finish(bo->wrk, bo);
			return (F_STP_ERROR);
		}
	}

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, NULL);

	if (wrk->handling == VCL_RET_ABANDON || wrk->handling == VCL_RET_FAIL) {
		bo->htc->doclose = SC_RESP_CLOSE;
		VDI_Finish(bo->wrk, bo);
		return (F_STP_FAIL);
	}

	if (wrk->handling == VCL_RET_RETRY) {
		if (bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
		if (bo->director_state != DIR_S_NULL)
			VDI_Finish(bo->wrk, bo);

		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);

		VSLb(bo->vsl, SLT_VCL_Error,
		    "Too many retries, delivering 503");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);
	if (bo->fetch_objcore->boc->state != BOS_REQ_DONE) {
		bo->req = NULL;
		ObjSetState(wrk, bo->fetch_objcore, BOS_REQ_DONE);
	}

	if (bo->do_esi)
		bo->do_stream = 0;
	if (wrk->handling == VCL_RET_PASS) {
		bo->fetch_objcore->flags |= OC_F_HFP;
		bo->uncacheable = 1;
		wrk->handling = VCL_RET_DELIVER;
	}
	if (bo->do_pass || bo->uncacheable)
		bo->fetch_objcore->flags |= OC_F_PASS;

	assert(wrk->handling == VCL_RET_DELIVER);

	return (bo->was_304 ? F_STP_CONDFETCH : F_STP_FETCH);
}
Example #20
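/*
 * Kick off a backend fetch: set up a busyobj for the given objcore (pass,
 * normal or background mode), link it to the request's VSL, hand it to a
 * worker task and, depending on the mode, wait for BOS_REQ_DONE or
 * BOS_STREAM before returning to the client side.
 */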
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
	struct boc *boc;
	struct busyobj *bo;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	boc = HSH_RefBoc(oc);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	switch (mode) {
	case VBF_PASS:
		how = "pass";
		bo->do_pass = 1;
		break;
	case VBF_NORMAL:
		how = "fetch";
		break;
	case VBF_BACKGROUND:
		how = "bgfetch";
		bo->is_bgfetch = 1;
		break;
	default:
		WRONG("Wrong fetch mode");
	}

	VSLb(bo->vsl, SLT_Begin, "bereq %u %s", VXID(req->vsl->wid), how);
	VSLb(req->vsl, SLT_Link, "bereq %u %s", VXID(bo->vsl->wid), how);

	THR_SetBusyobj(bo);

	bo->sp = req->sp;
	SES_Ref(bo->sp);

	AN(bo->vcl);

	oc->boc->vary = req->vary_b;
	req->vary_b = NULL;

	HSH_Ref(oc);
	AZ(bo->fetch_objcore);
	bo->fetch_objcore = oc;

	AZ(bo->stale_oc);
	if (oldoc != NULL) {
		assert(oldoc->refcnt > 0);
		HSH_Ref(oldoc);
		bo->stale_oc = oldoc;
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task.priv = bo;
	bo->fetch_task.func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, &bo->fetch_task, TASK_QUEUE_BO)) {
		wrk->stats->fetch_no_thread++;
		(void)vbf_stp_fail(req->wrk, bo);
		if (bo->stale_oc != NULL)
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
		HSH_DerefBoc(wrk, oc);
		SES_Rel(bo->sp);
		VBO_ReleaseBusyObj(wrk, &bo);
	} else {
		bo = NULL; /* ref transferred to fetch thread */
		if (mode == VBF_BACKGROUND) {
			ObjWaitState(oc, BOS_REQ_DONE);
			(void)VRB_Ignore(req);
		} else {
			ObjWaitState(oc, BOS_STREAM);
			if (oc->boc->state == BOS_FAILED) {
				AN((oc->flags & OC_F_FAILED));
			} else {
				AZ(oc->flags & OC_F_BUSY);
			}
		}
	}
	AZ(bo);
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	assert(oc->boc == boc);
	HSH_DerefBoc(wrk, oc);
	if (mode == VBF_BACKGROUND)
		(void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
	THR_SetBusyobj(NULL);
}
Example #21
static enum fetch_step
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	double now;
	uint8_t *ptr;
	struct vsb *synth_body;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	AN(bo->fetch_objcore->flags & OC_F_BUSY);
	assert(bo->director_state == DIR_S_NULL);

	wrk->stats->fetch_failed++;

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	if (bo->fetch_objcore->stobj->stevedore != NULL)
		ObjFreeObj(bo->wrk, bo->fetch_objcore);

	if (bo->storage == NULL)
		bo->storage = STV_next();

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	http_PutResponse(bo->beresp, "HTTP/1.1", 503, "Backend fetch failed");
	http_TimeHeader(bo->beresp, "Date: ", now);
	http_SetHeader(bo->beresp, "Server: Varnish");

	bo->fetch_objcore->t_origin = now;
	if (!VTAILQ_EMPTY(&bo->fetch_objcore->objhead->waitinglist)) {
		/*
		 * If there is a waitinglist, it means that there is no
		 * grace-able object, so cache the error return for a
		 * short time, so the waiting list can drain, rather than
		 * each objcore on the waiting list sequentially attempt
		 * to fetch from the backend.
		 */
		bo->fetch_objcore->ttl = 1;
		bo->fetch_objcore->grace = 5;
		bo->fetch_objcore->keep = 5;
	} else {
		bo->fetch_objcore->ttl = 0;
		bo->fetch_objcore->grace = 0;
		bo->fetch_objcore->keep = 0;
	}

	synth_body = VSB_new_auto();
	AN(synth_body);

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, synth_body);

	AZ(VSB_finish(synth_body));

	if (wrk->handling == VCL_RET_ABANDON || wrk->handling == VCL_RET_FAIL) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	if (wrk->handling == VCL_RET_RETRY) {
		VSB_destroy(&synth_body);
		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);
		VSLb(bo->vsl, SLT_VCL_Error, "Too many retries, failing");
		return (F_STP_FAIL);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	assert(bo->vfc->wrk == bo->wrk);
	assert(bo->vfc->oc == bo->fetch_objcore);
	assert(bo->vfc->resp == bo->beresp);
	assert(bo->vfc->req == bo->bereq);

	if (vbf_beresp2obj(bo)) {
		(void)VFP_Error(bo->vfc, "Could not get storage");
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	ll = VSB_len(synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
			break;
		if (l > ll)
			l = ll;
		memcpy(ptr, VSB_data(synth_body) + o, l);
		VFP_Extend(bo->vfc, l);
		ll -= l;
		o += l;
	}
	AZ(ObjSetU64(wrk, bo->fetch_objcore, OA_LEN, o));
	VSB_destroy(&synth_body);
	HSH_Unbusy(wrk, bo->fetch_objcore);
	ObjSetState(wrk, bo->fetch_objcore, BOS_FINISHED);
	return (F_STP_DONE);
}
Example #22
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct object *oldobj, enum vbf_fetch_mode_e mode)
{
	struct busyobj *bo;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(oldobj, OBJECT_MAGIC);


	switch(mode) {
	case VBF_PASS:		how = "pass"; break;
	case VBF_NORMAL:	how = "fetch"; break;
	case VBF_BACKGROUND:	how = "bgfetch"; break;
	default:		WRONG("Wrong fetch mode");
	}

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	VSLb(bo->vsl, SLT_Begin, "bereq %u %s ", VXID(req->vsl->wid), how);
	VSLb(req->vsl, SLT_Link, "bereq %u %s ", VXID(bo->vsl->wid), how);

	THR_SetBusyobj(bo);

	bo->refcount = 2;

	oc->busyobj = bo;

	CHECK_OBJ_NOTNULL(bo->vcl, VCL_CONF_MAGIC);

	if (mode == VBF_PASS)
		bo->do_pass = 1;

	bo->vary = req->vary_b;
	req->vary_b = NULL;

	if (mode != VBF_BACKGROUND)
		HSH_Ref(oc);
	bo->fetch_objcore = oc;

	AZ(bo->ims_obj);
	if (oldobj != NULL) {
		if (http_GetHdr(oldobj->http, H_Last_Modified, NULL) ||
		   http_GetHdr(oldobj->http, H_ETag, NULL)) {
			assert(oldobj->objcore->refcnt > 0);
			HSH_Ref(oldobj->objcore);
			bo->ims_obj = oldobj;
		}
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task.priv = bo;
	bo->fetch_task.func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, &bo->fetch_task, POOL_QUEUE_FRONT))
		vbf_fetch_thread(wrk, bo);
	if (mode == VBF_BACKGROUND) {
		VBO_waitstate(bo, BOS_REQ_DONE);
	} else {
		VBO_waitstate(bo, BOS_STREAM);
		if (bo->state == BOS_FAILED) {
			AN((oc->flags & OC_F_FAILED));
		} else {
			AZ(bo->fetch_objcore->flags & OC_F_BUSY);
		}
	}
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	THR_SetBusyobj(NULL);
	VBO_DerefBusyObj(wrk, &bo);
}
Example #23
static void
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct busyobj *bo;
	enum fetch_step stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;
	assert(bo->doclose == SC_NULL);
	assert(isnan(bo->t_first));
	assert(isnan(bo->t_prev));
	VSLb_ts_busyobj(bo, "Start", W_TIM_real(wrk));

	bo->stats = &wrk->stats;

	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		assert(bo->refcount >= 1);
		switch(stp) {
#define FETCH_STEP(l, U, arg)						\
		case F_STP_##U:						\
			stp = vbf_stp_##l arg;				\
			break;
#include "tbl/steps.h"
#undef FETCH_STEP
		default:
			WRONG("Illegal fetch_step");
		}
	}
	assert(WRW_IsReleased(wrk));

	bo->stats = NULL;

	if (bo->vbc != NULL) {
		if (bo->doclose != SC_NULL)
			VDI_CloseFd(&bo->vbc, &bo->acct);
		else
			VDI_RecycleFd(&bo->vbc, &bo->acct);
		AZ(bo->vbc);
	}

	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);

	if (bo->state == BOS_FINISHED) {
		AZ(bo->fetch_objcore->flags & OC_F_FAILED);
		HSH_Complete(bo->fetch_objcore);
		VSLb(bo->vsl, SLT_Length, "%zd", bo->fetch_obj->len);
		{
		/* Sanity check fetch methods accounting */
			ssize_t uu;
			struct storage *st;

			uu = 0;
			VTAILQ_FOREACH(st, &bo->fetch_obj->body->list, list)
				uu += st->len;
			if (bo->do_stream)
				/* Streaming might have started freeing stuff */
				assert(uu <= bo->fetch_obj->len);

			else
				assert(uu == bo->fetch_obj->len);
		}
	}
	AZ(bo->fetch_objcore->busyobj);

	if (bo->ims_obj != NULL)
		(void)HSH_DerefObj(&wrk->stats, &bo->ims_obj);

	VBO_DerefBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}
Example #24
static enum fetch_step
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	struct storage *st;
	ssize_t l;
	double now;
	char time_str[VTIM_FORMAT_SIZE];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	AN(bo->fetch_objcore->flags & OC_F_BUSY);

	AZ(bo->synth_body);
	bo->synth_body = VSB_new_auto();
	AN(bo->synth_body);

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	http_PutResponse(bo->beresp, "HTTP/1.1", 503, "Backend fetch failed");
	VTIM_format(now, time_str);
	http_PrintfHeader(bo->beresp, "Date: %s", time_str);
	http_SetHeader(bo->beresp, "Server: Varnish");

	bo->fetch_objcore->exp.t_origin = bo->t_prev;
	bo->fetch_objcore->exp.ttl = 0;
	bo->fetch_objcore->exp.grace = 0;
	bo->fetch_objcore->exp.keep = 0;

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, bo->bereq->ws);

	AZ(VSB_finish(bo->synth_body));

	if (wrk->handling == VCL_RET_RETRY) {
		VSB_delete(bo->synth_body);
		bo->synth_body = NULL;
		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);
		bo->synth_body = NULL;
		return (F_STP_FAIL);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	VFP_Setup(bo->vfc);
	bo->vfc->bo = bo;
	bo->vfc->http = bo->beresp;
	bo->vfc->vsl = bo->vsl;

	if (vbf_beresp2obj(bo))
		return (F_STP_FAIL);

	bo->vfc->body = bo->fetch_obj->body;

	l = VSB_len(bo->synth_body);
	if (l > 0) {
		st = VFP_GetStorage(bo->vfc, l);
		if (st != NULL) {
			if (st->space < l) {
				VSLb(bo->vsl, SLT_Error,
				    "No space for %zd bytes of synth body", l);
			} else {
				memcpy(st->ptr, VSB_data(bo->synth_body), l);
				VBO_extend(bo, l);
			}
		}
	}
	VSB_delete(bo->synth_body);
	bo->synth_body = NULL;

	HSH_Unbusy(&wrk->stats, bo->fetch_objcore);
	VBO_setstate(bo, BOS_FINISHED);
	return (F_STP_DONE);
}
Example #25
static enum req_fsm_nxt
cnt_deliver(struct worker *wrk, struct req *req)
{
	struct busyobj *bo;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore->objhead, OBJHEAD_MAGIC);
	AN(req->vcl);

	assert(req->objcore->refcnt > 0);

	if (req->objcore->exp_flags & OC_EF_EXP)
		EXP_Touch(req->objcore, req->t_prev);

	HTTP_Setup(req->resp, req->ws, req->vsl, SLT_RespMethod);
	AZ(HTTP_Decode(req->resp,
	    ObjGetattr(req->wrk, req->objcore, OA_HEADERS, NULL)));
	http_ForceField(req->resp, HTTP_HDR_PROTO, "HTTP/1.1");

	if (req->is_hit)
		http_PrintfHeader(req->resp,
		    "X-Varnish: %u %u", VXID(req->vsl->wid),
		    ObjGetXID(wrk, req->objcore));
	else
		http_PrintfHeader(req->resp,
		    "X-Varnish: %u", VXID(req->vsl->wid));

	/* We base Age calculation upon the last timestamp taken during
	   client request processing. This gives some inaccuracy, but
	   since Age is only full second resolution that shouldn't
	   matter. (Last request timestamp could be a Start timestamp
	   taken before the object entered into cache leading to negative
	   age. Truncate to zero in that case).
	*/
	http_PrintfHeader(req->resp, "Age: %.0f",
	    fmax(0., req->t_prev - req->objcore->exp.t_origin));

	http_SetHeader(req->resp, "Via: 1.1 varnish-v4");

	if (cache_param->http_gzip_support &&
	    ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED) &&
	    !RFC2616_Req_Gzip(req->http))
		RFC2616_Weaken_Etag(req->resp);

	VCL_deliver_method(req->vcl, wrk, req, NULL, NULL);
	VSLb_ts_req(req, "Process", W_TIM_real(wrk));

	/* Stop the insanity before it turns "Hotel California" on us */
	if (req->restarts >= cache_param->max_restarts)
		wrk->handling = VCL_RET_DELIVER;

	if (wrk->handling != VCL_RET_DELIVER) {
		(void)HSH_DerefObjCore(wrk, &req->objcore);
		http_Teardown(req->resp);

		switch (wrk->handling) {
		case VCL_RET_RESTART:
			req->req_step = R_STP_RESTART;
			break;
		case VCL_RET_SYNTH:
			req->req_step = R_STP_SYNTH;
			break;
		default:
			INCOMPL();
		}

		return (REQ_FSM_MORE);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	if (!(req->objcore->flags & OC_F_PASS)
	    && req->esi_level == 0
	    && http_IsStatus(req->resp, 200)
	    && req->http->conds && RFC2616_Do_Cond(req))
		http_PutResponse(req->resp, "HTTP/1.1", 304, NULL);

	/* Grab a ref to the bo if there is one, and hand it down */
	bo = HSH_RefBusy(req->objcore);
	if (bo != NULL) {
		if (req->esi_level == 0 && bo->state == BOS_FINISHED) {
			VBO_DerefBusyObj(wrk, &bo);
		} else if (!bo->do_stream) {
			VBO_waitstate(bo, BOS_FINISHED);
			VBO_DerefBusyObj(wrk, &bo);
		}
	}

	cnt_vdp(req, bo);

	if (bo != NULL)
		VBO_DerefBusyObj(wrk, &bo);

	VSLb_ts_req(req, "Resp", W_TIM_real(wrk));

	if (http_HdrIs(req->resp, H_Connection, "close"))
		req->doclose = SC_RESP_CLOSE;

	if ((req->objcore->flags & OC_F_PASS) && bo != NULL) {
		VBO_waitstate(bo, BOS_FINISHED);
		ObjSlim(wrk, req->objcore);
	}

	(void)HSH_DerefObjCore(wrk, &req->objcore);
	http_Teardown(req->resp);
	return (REQ_FSM_DONE);
}
Example #26
static enum fetch_step
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	void *oi;
	void *sp;
	ssize_t sl, al, l;
	uint8_t *ptr;
	enum objiter_status ois;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	AZ(vbf_beresp2obj(bo));

	if (ObjGetattr(bo->wrk, bo->stale_oc, OA_ESIDATA, NULL) != NULL)
		AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc,
		    OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_FLAGS));
	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		HSH_Unbusy(wrk, bo->fetch_objcore);
		VBO_setstate(bo, BOS_STREAM);
	}

	al = 0;
	oi = ObjIterBegin(wrk, bo->stale_oc);
	do {
		ois = ObjIter(bo->stale_oc, oi, &sp, &sl);
		if (ois == OIS_ERROR)
			(void)VFP_Error(bo->vfc, "Template object failed");
		while (sl > 0) {
			l = ObjGetLen(bo->wrk, bo->stale_oc) - al;
			assert(l > 0);
			if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
				break;
			if (sl < l)
				l = sl;
			memcpy(ptr, sp, l);
			VBO_extend(bo, l);
			al += l;
			sp = (char *)sp + l;
			sl -= l;
		}
	} while (!bo->vfc->failed && (ois == OIS_DATA || ois == OIS_STREAM));
	ObjIterEnd(bo->stale_oc, &oi);
	if (bo->stale_oc->flags & OC_F_FAILED)
		(void)VFP_Error(bo->vfc, "Template object failed");
	if (bo->vfc->failed) {
		VDI_Finish(bo->wrk, bo);
		return (F_STP_FAIL);
	}

	if (!bo->do_stream)
		HSH_Unbusy(wrk, bo->fetch_objcore);

	assert(ObjGetLen(bo->wrk, bo->fetch_objcore) == al);
	EXP_Rearm(bo->stale_oc, bo->stale_oc->exp.t_origin, 0, 0, 0);

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	VDI_Finish(bo->wrk, bo);

	VBO_setstate(bo, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	return (F_STP_DONE);
}
Example #27
static void
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct busyobj *bo;
	enum fetch_step stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;
	assert(isnan(bo->t_first));
	assert(isnan(bo->t_prev));
	VSLb_ts_busyobj(bo, "Start", W_TIM_real(wrk));

	bo->wrk = wrk;
	wrk->vsl = bo->vsl;

#if 0
	if (bo->stale_oc != NULL) {
		CHECK_OBJ_NOTNULL(bo->stale_oc, OBJCORE_MAGIC);
		/* We don't want the oc/stevedore ops in fetching thread */
		if (!ObjCheckFlag(wrk, bo->stale_oc, OF_IMSCAND))
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
	}
#endif

	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		assert(bo->fetch_objcore->boc->refcount >= 1);
		switch (stp) {
#define FETCH_STEP(l, U, arg)						\
		case F_STP_##U:						\
			stp = vbf_stp_##l arg;				\
			break;
#include "tbl/steps.h"
		default:
			WRONG("Illegal fetch_step");
		}
	}

	assert(bo->director_state == DIR_S_NULL);

	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);

	if (bo->fetch_objcore->boc->state == BOS_FINISHED) {
		AZ(bo->fetch_objcore->flags & OC_F_FAILED);
		VSLb(bo->vsl, SLT_Length, "%ju",
		    (uintmax_t)ObjGetLen(bo->wrk, bo->fetch_objcore));
	}
	// AZ(bo->fetch_objcore->boc);	// XXX

	if (bo->stale_oc != NULL)
		(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);

	wrk->vsl = NULL;
	HSH_DerefBoc(wrk, bo->fetch_objcore);
	SES_Rel(bo->sp);
	VBO_ReleaseBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}
Example #28
static enum fetch_step
vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
{
	int i;
	double now;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	AZ(bo->storage_hint);

	if (bo->do_pass)
		AN(bo->req);
	else
		AZ(bo->req);

	if (bo->retries > 0)
		http_Unset(bo->bereq, "\012X-Varnish:");

	http_PrintfHeader(bo->bereq, "X-Varnish: %u", VXID(bo->vsl->wid));

	VCL_backend_fetch_method(bo->vcl, wrk, NULL, bo, NULL);

	bo->uncacheable = bo->do_pass;
	if (wrk->handling == VCL_RET_ABANDON)
		return (F_STP_FAIL);

	assert (wrk->handling == VCL_RET_FETCH);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);

	assert(bo->state <= BOS_REQ_DONE);

	AZ(bo->htc);
	i = VDI_GetHdr(wrk, bo);

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Beresp", now);

	if (i) {
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	http_VSL_log(bo->beresp);

	if (!http_GetHdr(bo->beresp, H_Date, NULL)) {
		/*
		 * RFC 2616 14.18 Date: The Date general-header field
		 * represents the date and time at which the message was
		 * originated, having the same semantics as orig-date in
		 * RFC 822. ... A received message that does not have a
		 * Date header field MUST be assigned one by the recipient
		 * if the message will be cached by that recipient or
		 * gatewayed via a protocol which requires a Date.
		 *
		 * If we didn't get a Date header, we assign one here.
		 */
		http_TimeHeader(bo->beresp, "Date: ", now);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	/*
	 * Figure out how the fetch is supposed to happen, before the
	 * headers are adultered by VCL
	 */
	if (!strcasecmp(http_GetMethod(bo->bereq), "head")) {
		/*
		 * A HEAD request can never have a body in the reply,
		 * no matter what the headers might say.
		 * [RFC2616 4.3 p33]
		 */
		wrk->stats->fetch_head++;
		bo->htc->body_status = BS_NONE;
	} else if (http_GetStatus(bo->beresp) <= 199) {
		/*
		 * 1xx responses never have a body.
		 * [RFC2616 4.3 p33]
		 * ... but we should never see them.
		 */
		wrk->stats->fetch_1xx++;
		bo->htc->body_status = BS_ERROR;
	} else if (http_IsStatus(bo->beresp, 204)) {
		/*
		 * 204 is "No Content", obviously don't expect a body.
		 * [RFC7230 3.3.1 p28 and 3.3.2 p30]
		 */
		wrk->stats->fetch_204++;
		if (http_GetHdr(bo->beresp, H_Content_Length, NULL) ||
		    http_GetHdr(bo->beresp, H_Transfer_Encoding, NULL))
			bo->htc->body_status = BS_ERROR;
		else
			bo->htc->body_status = BS_NONE;
	} else if (http_IsStatus(bo->beresp, 304)) {
		/*
		 * 304 is "Not Modified" it has no body.
		 * [RFC2616 10.3.5 p63]
		 */
		wrk->stats->fetch_304++;
		bo->htc->body_status = BS_NONE;
	} else if (bo->htc->body_status == BS_CHUNKED) {
		wrk->stats->fetch_chunked++;
	} else if (bo->htc->body_status == BS_LENGTH) {
		assert(bo->htc->content_length > 0);
		wrk->stats->fetch_length++;
	} else if (bo->htc->body_status == BS_EOF) {
		wrk->stats->fetch_eof++;
	} else if (bo->htc->body_status == BS_ERROR) {
		wrk->stats->fetch_bad++;
	} else if (bo->htc->body_status == BS_NONE) {
		wrk->stats->fetch_none++;
	} else {
		WRONG("wrong bodystatus");
	}

	if (bo->htc->body_status == BS_ERROR) {
		bo->htc->doclose = SC_RX_BODY;
		VDI_Finish(bo->wrk, bo);
		VSLb(bo->vsl, SLT_Error, "Body cannot be fetched");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	/*
	 * What does RFC2616 think about TTL ?
	 */
	EXP_Clr(&bo->fetch_objcore->exp);
	RFC2616_Ttl(bo, now);

	/* private objects have negative TTL */
	if (bo->fetch_objcore->flags & OC_F_PRIVATE)
		bo->fetch_objcore->exp.ttl = -1.;

	AZ(bo->do_esi);
	AZ(bo->was_304);

	if (http_IsStatus(bo->beresp, 304)) {
		if (bo->stale_oc != NULL &&
		    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND)) {
			if (ObjCheckFlag(bo->wrk, bo->stale_oc, OF_CHGGZIP)) {
				/*
				 * If we changed the gzip status of the object
				 * the stored Content_Encoding controls we
				 * must weaken any new ETag we get.
				 */
				http_Unset(bo->beresp, H_Content_Encoding);
				RFC2616_Weaken_Etag(bo->beresp);
			}
			http_Unset(bo->beresp, H_Content_Length);
			HTTP_Merge(bo->wrk, bo->stale_oc, bo->beresp);
			assert(http_IsStatus(bo->beresp, 200));
			bo->was_304 = 1;
		} else if (!bo->do_pass) {
			/*
			 * Backend sent unallowed 304
			 */
			VSLb(bo->vsl, SLT_Error,
			    "304 response but not conditional fetch");
			bo->htc->doclose = SC_RX_BAD;
			VDI_Finish(bo->wrk, bo);
			return (F_STP_FAIL);
		}
	}

	bo->vfc->bo = bo;
	bo->vfc->oc = bo->fetch_objcore;
	bo->vfc->wrk = bo->wrk;
	bo->vfc->http = bo->beresp;
	bo->vfc->esi_req = bo->bereq;

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, NULL);

	if (wrk->handling == VCL_RET_ABANDON) {
		bo->htc->doclose = SC_RESP_CLOSE;
		VDI_Finish(bo->wrk, bo);
		return (F_STP_FAIL);
	}

	if (wrk->handling == VCL_RET_RETRY) {
		if (bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
		if (bo->director_state != DIR_S_NULL)
			VDI_Finish(bo->wrk, bo);

		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);

		VSLb(bo->vsl, SLT_VCL_Error,
		    "Too many retries, delivering 503");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	assert(bo->state == BOS_REQ_DONE);

	if (bo->do_esi)
		bo->do_stream = 0;
	if (bo->do_pass || bo->uncacheable)
		bo->fetch_objcore->flags |= OC_F_PASS;

	assert(wrk->handling == VCL_RET_DELIVER);

	return (bo->was_304 ? F_STP_CONDFETCH : F_STP_FETCH);
}
Example #29
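/*
 * ESI include: clone the parent request into a fresh sub-request (new
 * VXID, GET, no conditionals or ranges), point it at the include source
 * and run it through CNT_Request() with a private deliver transport,
 * busy-waiting while it is parked on a waiting list.
 */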
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct req *req;
	enum req_fsm_nxt s;
	struct transport xp;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	if (preq->esi_level >= cache_param->max_esi_depth)
		return;

	req = Req_New(wrk, preq->sp);
	req->req_body_status = REQ_BODY_NONE;
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);
	VSLb(req->vsl, SLT_Begin, "req %u esi", VXID(preq->vsl->wid));
	VSLb(preq->vsl, SLT_Link, "req %u esi", VXID(req->vsl->wid));
	req->esi_level = preq->esi_level + 1;

	if (preq->esi_level == 0)
		assert(preq->top == preq);
	else
		CHECK_OBJ_NOTNULL(preq->top, REQ_MAGIC);

	req->top = preq->top;

	HTTP_Copy(req->http0, preq->http0);

	req->http0->ws = req->ws;
	req->http0->vsl = req->vsl;
	req->http0->logtag = SLT_ReqMethod;
	req->http0->conds = 0;

	http_SetH(req->http0, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0')  {
		http_Unset(req->http0, H_Host);
		http_SetHeader(req->http0, host);
	}

	http_ForceField(req->http0, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http0, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http0, H_If_Modified_Since);
	http_Unset(req->http0, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http0, H_Range);

	/* Set Accept-Encoding according to what we want */
	http_Unset(req->http0, H_Accept_Encoding);
	if (ecx->isgzip)
		http_ForceHeader(req->http0, H_Accept_Encoding, "gzip");

	/* Client content already taken care of */
	http_Unset(req->http0, H_Content_Length);

	/* Reset request to status before we started messing with it */
	HTTP_Copy(req->http, req->http0);

	req->vcl = preq->vcl;
	preq->vcl = NULL;
	req->wrk = preq->wrk;

	/*
	 * XXX: We should decide if we should cache the director
	 * XXX: or not (for session/backend coupling).  Until then
	 * XXX: make sure we don't trip up the check in vcl_recv.
	 */
	req->req_step = R_STP_RECV;
	req->t_req = preq->t_req;
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));

	INIT_OBJ(&xp, TRANSPORT_MAGIC);
	xp.deliver = VED_Deliver;
	req->transport = &xp;
	req->transport_priv = ecx;

	THR_SetRequest(req);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	req->ws_req = WS_Snapshot(req->ws);

	while (1) {
		req->wrk = wrk;
		s = CNT_Request(wrk, req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "loop waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		AZ(req->wrk);
		(void)usleep(10000);
	}

	VRTPRIV_dynamic_kill(req->sp->privs, (uintptr_t)req);
	CNT_AcctLogCharge(wrk->stats, req);
	VSL_End(req->vsl);

	preq->vcl = req->vcl;
	req->vcl = NULL;

	req->wrk = NULL;

	THR_SetRequest(preq);
	Req_Release(req);
}
Example #30
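/*
 * Backend fetch step: decide the gzip/gunzip/ESI processing for the body,
 * push the matching VFP filters, create the object from beresp and fetch
 * the body, streaming it to clients if beresp.do_stream allows.
 */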
static enum fetch_step
vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
{
	const char *p;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);

	assert(wrk->handling == VCL_RET_DELIVER);

	/*
	 * The VCL variables beresp.do_g[un]zip tells us how we want the
	 * object processed before it is stored.
	 *
	 * The backend Content-Encoding header tells us what we are going
	 * to receive, which we classify in the following three classes:
	 *
	 *	"Content-Encoding: gzip"	--> object is gzip'ed.
	 *	no Content-Encoding		--> object is not gzip'ed.
	 *	anything else			--> do nothing wrt gzip
	 *
	 */

	/* We do nothing unless the param is set */
	if (!cache_param->http_gzip_support)
		bo->do_gzip = bo->do_gunzip = 0;

	if (bo->htc->content_length == 0)
		http_Unset(bo->beresp, H_Content_Encoding);

	if (bo->htc->body_status != BS_NONE) {
		bo->is_gzip =
		    http_HdrIs(bo->beresp, H_Content_Encoding, "gzip");
		bo->is_gunzip =
		    !http_GetHdr(bo->beresp, H_Content_Encoding, NULL);
		assert(bo->is_gzip == 0 || bo->is_gunzip == 0);
	}

	/* We won't gunzip unless it is non-empty and gzip'ed */
	if (bo->htc->body_status == BS_NONE ||
	    bo->htc->content_length == 0 ||
	    (bo->do_gunzip && !bo->is_gzip))
		bo->do_gunzip = 0;

	/* We won't gzip unless it is non-empty and ungzip'ed */
	if (bo->htc->body_status == BS_NONE ||
	    bo->htc->content_length == 0 ||
	    (bo->do_gzip && !bo->is_gunzip))
		bo->do_gzip = 0;

	/* But we can't do both at the same time */
	assert(bo->do_gzip == 0 || bo->do_gunzip == 0);

	if (bo->do_gunzip || (bo->is_gzip && bo->do_esi))
		(void)VFP_Push(bo->vfc, &vfp_gunzip, 1);

	if (bo->htc->content_length != 0) {
		if (bo->do_esi && bo->do_gzip) {
			(void)VFP_Push(bo->vfc, &vfp_esi_gzip, 1);
		} else if (bo->do_esi && bo->is_gzip && !bo->do_gunzip) {
			(void)VFP_Push(bo->vfc, &vfp_esi_gzip, 1);
		} else if (bo->do_esi) {
			(void)VFP_Push(bo->vfc, &vfp_esi, 1);
		} else if (bo->do_gzip) {
			(void)VFP_Push(bo->vfc, &vfp_gzip, 1);
		} else if (bo->is_gzip && !bo->do_gunzip) {
			(void)VFP_Push(bo->vfc, &vfp_testgunzip, 1);
		}
	}

	if (bo->fetch_objcore->flags & OC_F_PRIVATE)
		AN(bo->uncacheable);

	/* No reason to try streaming a non-existing body */
	if (bo->htc->body_status == BS_NONE)
		bo->do_stream = 0;

	if (VFP_Open(bo->vfc)) {
		(void)VFP_Error(bo->vfc, "Fetch pipeline failed to open");
		bo->htc->doclose = SC_RX_BODY;
		VDI_Finish(bo->wrk, bo);
		return (F_STP_ERROR);
	}

	if (vbf_beresp2obj(bo)) {
		(void)VFP_Error(bo->vfc, "Could not get storage");
		bo->htc->doclose = SC_RX_BODY;
		VDI_Finish(bo->wrk, bo);
		return (F_STP_ERROR);
	}

	if (bo->do_esi)
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_ESIPROC, 1);

	if (bo->do_gzip || (bo->is_gzip && !bo->do_gunzip))
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_GZIPED, 1);

	if (bo->do_gzip || bo->do_gunzip)
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_CHGGZIP, 1);

	if (http_IsStatus(bo->beresp, 200) && (
	    http_GetHdr(bo->beresp, H_Last_Modified, &p) ||
	    http_GetHdr(bo->beresp, H_ETag, &p)))
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_IMSCAND, 1);

	if (bo->htc->body_status != BS_NONE)
		AZ(VDI_GetBody(bo->wrk, bo));

	assert(bo->refcount >= 1);

	assert(bo->state == BOS_REQ_DONE);

	if (bo->do_stream) {
		HSH_Unbusy(wrk, bo->fetch_objcore);
		VBO_setstate(bo, BOS_STREAM);
	}

	VSLb(bo->vsl, SLT_Fetch_Body, "%u %s %s",
	    bo->htc->body_status, body_status_2str(bo->htc->body_status),
	    bo->do_stream ? "stream" : "-");

	if (bo->htc->body_status != BS_NONE) {
		assert(bo->htc->body_status != BS_ERROR);
		vbf_fetch_body_helper(bo);
	}

	if (bo->vfc->failed) {
		VDI_Finish(bo->wrk, bo);
		if (!bo->do_stream) {
			assert(bo->state < BOS_STREAM);
			// XXX: doclose = ?
			return (F_STP_ERROR);
		} else {
			return (F_STP_FAIL);
		}
	}

	if (bo->do_stream)
		assert(bo->state == BOS_STREAM);
	else {
		assert(bo->state == BOS_REQ_DONE);
		HSH_Unbusy(wrk, bo->fetch_objcore);
	}

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	VDI_Finish(bo->wrk, bo);

	VBO_setstate(bo, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	if (bo->stale_oc != NULL)
		EXP_Rearm(bo->stale_oc, bo->stale_oc->exp.t_origin, 0, 0, 0);
	return (F_STP_DONE);
}