Example no. 1
0
const char *
vmod_filtersep(struct sess *sp)
{
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	return NULL;
}
Example no. 2
0
int
VRY_Create(struct busyobj *bo, struct vsb **psb)
{
	const char *v, *p, *q, *h, *e;
	struct vsb *sb, *sbh;
	unsigned l;
	int error = 0;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->bereq, HTTP_MAGIC);
	CHECK_OBJ_NOTNULL(bo->beresp, HTTP_MAGIC);
	AN(psb);
	AZ(*psb);

	/* No Vary: header, no worries */
	if (!http_GetHdr(bo->beresp, H_Vary, &v))
		return (0);

	/* For vary matching string */
	sb = VSB_new_auto();
	AN(sb);

	/* For header matching strings */
	sbh = VSB_new_auto();
	AN(sbh);

	for (p = v; *p; p++) {

		/* Find next header-name */
		if (vct_issp(*p))
			continue;
		for (q = p; *q && !vct_issp(*q) && *q != ','; q++)
			continue;

		if (q - p > INT8_MAX) {
			VSLb(bo->vsl, SLT_Error,
			    "Vary header name length exceeded");
			error = 1;
			break;
		}

		/* Build a header-matching string out of it */
		VSB_clear(sbh);
		VSB_printf(sbh, "%c%.*s:%c",
		    (char)(1 + (q - p)), (int)(q - p), p, 0);
		AZ(VSB_finish(sbh));

		if (http_GetHdr(bo->bereq, VSB_data(sbh), &h)) {
			AZ(vct_issp(*h));
			/* Trim trailing space */
			e = strchr(h, '\0');
			while (e > h && vct_issp(e[-1]))
				e--;
			/* Encode two byte length and contents */
			l = e - h;
			if (l > 0xffff - 1) {
				VSLb(bo->vsl, SLT_Error,
				    "Vary header maximum length exceeded");
				error = 1;
				break;
			}
		} else {
			e = h;
			l = 0xffff;
		}
		VSB_printf(sb, "%c%c", (int)(l >> 8), (int)(l & 0xff));
		/* Append to vary matching string */
		VSB_bcat(sb, VSB_data(sbh), VSB_len(sbh));
		if (e != h)
			VSB_bcat(sb, h, e - h);

		while (vct_issp(*q))
			q++;
		if (*q == '\0')
			break;
		if (*q != ',') {
			VSLb(bo->vsl, SLT_Error, "Malformed Vary header");
			error = 1;
			break;
		}
		p = q;
	}

	if (error) {
		VSB_delete(sbh);
		VSB_delete(sb);
		return (-1);
	}

	/* Terminate vary matching string */
	VSB_printf(sb, "%c%c%c", 0xff, 0xff, 0);

	VSB_delete(sbh);
	AZ(VSB_finish(sb));
	*psb = sb;
	return (VSB_len(sb));
}
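Editor's note: the matching string assembled by VRY_Create() above uses one compact binary record per Vary'd header: a two-byte big-endian value length (0xffff when the request did not carry the header), one byte holding the length of "name:", the header name followed by ':' and a NUL, and then the trimmed value bytes; the list ends with the sentinel 0xff 0xff 0x00. The sketch below is a hypothetical stand-alone walker written only to illustrate that layout; it is not part of the Varnish sources.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: print each record of a vary matching string. */
static void
vary_walk(const uint8_t *p)
{
	unsigned l;

	for (;;) {
		l = (p[0] << 8) | p[1];	/* value length, 0xffff = header absent */
		if (l == 0xffff && p[2] == 0)
			break;		/* 0xff 0xff 0x00 sentinel */
		printf("header %.*s, value length %u\n",
		    p[2] - 1, (const char *)(p + 3), l == 0xffff ? 0 : l);
		p += 2 + p[2] + 2;	/* length bytes + "name:" + NUL */
		if (l != 0xffff)
			p += l;		/* skip the stored value bytes */
	}
}

int
main(void)
{
	/* "Accept-Encoding: gzip" was present, "Cookie" was absent. */
	static const uint8_t vary[] = {
		0x00, 0x04, 16, 'A', 'c', 'c', 'e', 'p', 't', '-',
		'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', ':', 0,
		'g', 'z', 'i', 'p',
		0xff, 0xff, 7, 'C', 'o', 'o', 'k', 'i', 'e', ':', 0,
		0xff, 0xff, 0x00
	};

	vary_walk(vary);
	return (0);
}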
Example no. 3
0
static int
cnt_lookup(struct sess *sp)
{
    struct objcore *oc;
    struct object *o;
    struct objhead *oh;
    struct worker *wrk;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    wrk = sp->wrk;
    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

    CHECK_OBJ_NOTNULL(sp->vcl, VCL_CONF_MAGIC);
    AZ(wrk->busyobj);

    if (sp->hash_objhead == NULL) {
        /* Not a waiting list return */
        AZ(sp->vary_b);
        AZ(sp->vary_l);
        AZ(sp->vary_e);
        (void)WS_Reserve(sp->ws, 0);
    } else {
        AN(sp->ws->r);
    }
    sp->vary_b = (void*)sp->ws->f;
    sp->vary_e = (void*)sp->ws->r;
    sp->vary_b[2] = '\0';

    oc = HSH_Lookup(sp, &oh);

    if (oc == NULL) {
        /*
         * We lost the session to a busy object, disembark the
         * worker thread.  The hash code will restart the session,
         * still in STP_LOOKUP, once the object is no longer busy.
         * NB:  Do not access sp any more !
         */
        return (1);
    }


    CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
    CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

    /* If we inserted a new object it's a miss */
    if (oc->flags & OC_F_BUSY) {
        wrk->stats.cache_miss++;

        if (sp->vary_l != NULL) {
            assert(oc->busyobj->vary == sp->vary_b);
            VRY_Validate(oc->busyobj->vary);
            WS_ReleaseP(sp->ws, (void*)sp->vary_l);
        } else {
            AZ(oc->busyobj->vary);
            WS_Release(sp->ws, 0);
        }
        sp->vary_b = NULL;
        sp->vary_l = NULL;
        sp->vary_e = NULL;

        wrk->objcore = oc;
        CHECK_OBJ_NOTNULL(wrk->busyobj, BUSYOBJ_MAGIC);
        sp->step = STP_MISS;
        return (0);
    }

    o = oc_getobj(wrk, oc);
    CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
    wrk->obj = o;

    WS_Release(sp->ws, 0);
    sp->vary_b = NULL;
    sp->vary_l = NULL;
    sp->vary_e = NULL;

    if (oc->flags & OC_F_PASS) {
        wrk->stats.cache_hitpass++;
        WSP(sp, SLT_HitPass, "%u", wrk->obj->xid);
        (void)HSH_Deref(wrk, NULL, &wrk->obj);
        wrk->objcore = NULL;
        sp->step = STP_PASS;
        return (0);
    }

    wrk->stats.cache_hit++;
    WSP(sp, SLT_Hit, "%u", wrk->obj->xid);
    sp->step = STP_HIT;
    return (0);
}
Example no. 4
0
static int
vbe_dir_gethdrs(const struct director *d, struct worker *wrk,
    struct busyobj *bo)
{
	int i, extrachance = 1;
	struct backend *bp;
	struct vbc *vbc;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	/*
	 * Now that we know our backend, we can set a default Host:
	 * header if one is necessary.  This cannot be done in the VCL
	 * because the backend may be chosen by a director.
	 */
	if (!http_GetHdr(bo->bereq, H_Host, NULL) && bp->hosthdr != NULL)
		http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);

	do {
		vbc = vbe_dir_getfd(wrk, bp, bo);
		if (vbc == NULL) {
			VSLb(bo->vsl, SLT_FetchError, "no backend connection");
			return (-1);
		}
		AN(bo->htc);
		if (vbc->state != VBC_STATE_STOLEN)
			extrachance = 0;

		i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes, 0);

		if (vbc->state != VBC_STATE_USED)
			VBT_Wait(wrk, vbc);

		assert(vbc->state == VBC_STATE_USED);

		if (i == 0)
			i = V1F_FetchRespHdr(bo);
		if (i == 0) {
			AN(bo->htc->priv);
			return (0);
		}

		/*
		 * If we recycled a backend connection, there is a finite chance
		 * that the backend closed it before we got the bereq to it.
		 * In that case do a single automatic retry if req.body allows.
		 */
		vbe_dir_finish(d, wrk, bo);
		AZ(bo->htc);
		if (i < 0)
			break;
		if (bo->req != NULL &&
		    bo->req->req_body_status != REQ_BODY_NONE &&
		    bo->req->req_body_status != REQ_BODY_CACHED)
			break;
		VSC_C_main->backend_retry++;
	} while (extrachance);
	return (-1);
}
static int
http1_dissect(struct worker *wrk, struct req *req)
{
	const char *r_100 = "HTTP/1.1 100 Continue\r\n\r\n";
	const char *r_400 = "HTTP/1.1 400 Bad Request\r\n\r\n";
	const char *r_417 = "HTTP/1.1 417 Expectation Failed\r\n\r\n";
	const char *p;
	ssize_t r;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	/* Allocate a new vxid now that we know we'll need it. */
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	VSLb(req->vsl, SLT_Begin, "req %u rxreq", VXID(req->sp->vxid));
	VSL(SLT_Link, req->sp->vxid, "req %u rxreq", VXID(req->vsl->wid));
	AZ(isnan(req->t_first)); /* First byte timestamp set by http1_wait */
	AZ(isnan(req->t_req));	 /* Complete req rcvd set by http1_wait */
	req->t_prev = req->t_first;
	VSLb_ts_req(req, "Start", req->t_first);
	VSLb_ts_req(req, "Req", req->t_req);

	/* Borrow VCL reference from worker thread */
	VCL_Refresh(&wrk->vcl);
	req->vcl = wrk->vcl;
	wrk->vcl = NULL;

	HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod);
	req->err_code = HTTP1_DissectRequest(req->htc, req->http);

	/* If we could not even parse the request, just close */
	if (req->err_code != 0) {
		VSLb(req->vsl, SLT_HttpGarbage, "%.*s",
		    (int)(req->htc->rxbuf_e - req->htc->rxbuf_b),
		    req->htc->rxbuf_b);
		wrk->stats->client_req_400++;
		r = write(req->sp->fd, r_400, strlen(r_400));
		if (r > 0)
			req->acct.resp_hdrbytes += r;
		req->doclose = SC_RX_JUNK;
		return (-1);
	}

	assert (req->req_body_status == REQ_BODY_INIT);

	if (req->htc->body_status == BS_CHUNKED) {
		req->req_body_status = REQ_BODY_WITHOUT_LEN;
	} else if (req->htc->body_status == BS_LENGTH) {
		req->req_body_status = REQ_BODY_WITH_LEN;
	} else if (req->htc->body_status == BS_NONE) {
		req->req_body_status = REQ_BODY_NONE;
	} else if (req->htc->body_status == BS_EOF) {
		req->req_body_status = REQ_BODY_WITHOUT_LEN;
	} else {
		WRONG("Unknown req.body_length situation");
	}

	if (http_GetHdr(req->http, H_Expect, &p)) {
		if (strcasecmp(p, "100-continue")) {
			wrk->stats->client_req_417++;
			req->err_code = 417;
			r = write(req->sp->fd, r_417, strlen(r_417));
			if (r > 0)
				req->acct.resp_hdrbytes += r;
			req->doclose = SC_RX_JUNK;
			return (-1);
		}
		r = write(req->sp->fd, r_100, strlen(r_100));
		if (r > 0)
			req->acct.resp_hdrbytes += r;
		if (r != strlen(r_100)) {
			req->doclose = SC_REM_CLOSE;
			return (-1);
		}
		http_Unset(req->http, H_Expect);
	}

	wrk->stats->client_req++;
	wrk->stats->s_req++;

	AZ(req->err_code);
	req->ws_req = WS_Snapshot(req->ws);

	req->doclose = http_DoConnection(req->http);
	if (req->doclose == SC_RX_BAD) {
		r = write(req->sp->fd, r_400, strlen(r_400));
		if (r > 0)
			req->acct.resp_hdrbytes += r;
		return (-1);
	}

	assert(req->req_body_status != REQ_BODY_INIT);

	HTTP_Copy(req->http0, req->http);	// For ESI & restart

	return (0);
}
Example no. 6
0
void
vdir_rdlock(struct vdir *vd)
{
	CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
	AZ(pthread_rwlock_rdlock(&vd->mtx));
}
Example no. 7
0
void
VJ_master(enum jail_master_e jme)
{
	CHECK_OBJ_NOTNULL(vjt, JAIL_TECH_MAGIC);
	vjt->master(jme);
}
Example no. 8
0
static enum fetch_step
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	double now;
	uint8_t *ptr;
	char time_str[VTIM_FORMAT_SIZE];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	assert(bo->director_state == DIR_S_NULL);

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	AN(bo->fetch_objcore->flags & OC_F_BUSY);

	AZ(bo->synth_body);
	bo->synth_body = VSB_new_auto();
	AN(bo->synth_body);

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	http_PutResponse(bo->beresp, "HTTP/1.1", 503, "Backend fetch failed");
	VTIM_format(now, time_str);
	http_PrintfHeader(bo->beresp, "Date: %s", time_str);
	http_SetHeader(bo->beresp, "Server: Varnish");

	bo->fetch_objcore->exp.t_origin = bo->t_prev;
	bo->fetch_objcore->exp.ttl = 0;
	bo->fetch_objcore->exp.grace = 0;
	bo->fetch_objcore->exp.keep = 0;

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, bo->bereq->ws);

	AZ(VSB_finish(bo->synth_body));

	if (wrk->handling == VCL_RET_RETRY) {
		VSB_delete(bo->synth_body);
		bo->synth_body = NULL;
		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);
		return (F_STP_FAIL);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	VFP_Setup(bo->vfc);
	bo->vfc->bo = bo;
	bo->vfc->wrk = bo->wrk;
	bo->vfc->oc = bo->fetch_objcore;
	bo->vfc->http = bo->beresp;
	bo->vfc->esi_req = bo->bereq;

	if (vbf_beresp2obj(bo))
		return (F_STP_FAIL);

	ll = VSB_len(bo->synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
			break;
		memcpy(ptr, VSB_data(bo->synth_body) + o, l);
		VBO_extend(bo, l);
		ll -= l;
		o += l;
	}
	VSB_delete(bo->synth_body);
	bo->synth_body = NULL;

	HSH_Unbusy(wrk, bo->fetch_objcore);
	VBO_setstate(bo, BOS_FINISHED);
	return (F_STP_DONE);
}
Example no. 9
0
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
	struct busyobj *bo;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);


	switch(mode) {
	case VBF_PASS:		how = "pass"; break;
	case VBF_NORMAL:	how = "fetch"; break;
	case VBF_BACKGROUND:	how = "bgfetch"; break;
	default:		WRONG("Wrong fetch mode");
	}

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	VSLb(bo->vsl, SLT_Begin, "bereq %u %s", VXID(req->vsl->wid), how);
	VSLb(req->vsl, SLT_Link, "bereq %u %s", VXID(bo->vsl->wid), how);

	THR_SetBusyobj(bo);

	bo->refcount = 2;

	oc->busyobj = bo;

	CHECK_OBJ_NOTNULL(bo->vcl, VCL_CONF_MAGIC);

	if (mode == VBF_PASS)
		bo->do_pass = 1;

	bo->vary = req->vary_b;
	req->vary_b = NULL;

	if (mode != VBF_BACKGROUND)
		HSH_Ref(oc);
	bo->fetch_objcore = oc;

	AZ(bo->ims_oc);
	if (oldoc != NULL && ObjCheckFlag(req->wrk, oldoc, OF_IMSCAND)) {
		assert(oldoc->refcnt > 0);
		HSH_Ref(oldoc);
		bo->ims_oc = oldoc;
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task.priv = bo;
	bo->fetch_task.func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, &bo->fetch_task, POOL_QUEUE_FRONT))
		vbf_fetch_thread(wrk, bo);
	if (mode == VBF_BACKGROUND) {
		VBO_waitstate(bo, BOS_REQ_DONE);
	} else {
		VBO_waitstate(bo, BOS_STREAM);
		if (bo->state == BOS_FAILED) {
			AN((oc->flags & OC_F_FAILED));
		} else {
			AZ(bo->fetch_objcore->flags & OC_F_BUSY);
		}
	}
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	THR_SetBusyobj(NULL);
	VBO_DerefBusyObj(wrk, &bo);
}
Example no. 10
0
static enum fetch_step
vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
{
	char *p;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);

	assert(wrk->handling == VCL_RET_DELIVER);

	/*
	 * The VCL variables beresp.do_g[un]zip tells us how we want the
	 * object processed before it is stored.
	 *
	 * The backend Content-Encoding header tells us what we are going
	 * to receive, which we classify in the following three classes:
	 *
	 *	"Content-Encoding: gzip"	--> object is gzip'ed.
	 *	no Content-Encoding		--> object is not gzip'ed.
	 *	anything else			--> do nothing wrt gzip
	 *
	 */

	/* We do nothing unless the param is set */
	if (!cache_param->http_gzip_support)
		bo->do_gzip = bo->do_gunzip = 0;

	bo->is_gzip = http_HdrIs(bo->beresp, H_Content_Encoding, "gzip");

	bo->is_gunzip = !http_GetHdr(bo->beresp, H_Content_Encoding, NULL);

	/* It can't be both */
	assert(bo->is_gzip == 0 || bo->is_gunzip == 0);

	/* We won't gunzip unless it is gzip'ed */
	if (bo->do_gunzip && !bo->is_gzip)
		bo->do_gunzip = 0;

	/* We won't gzip unless it is not already gzip'ed */
	if (bo->do_gzip && !bo->is_gunzip)
		bo->do_gzip = 0;

	/* But we can't do both at the same time */
	assert(bo->do_gzip == 0 || bo->do_gunzip == 0);

	if (bo->do_gunzip || (bo->is_gzip && bo->do_esi))
		(void)VFP_Push(bo->vfc, &vfp_gunzip, 1);

	if (bo->do_esi && bo->do_gzip) {
		(void)VFP_Push(bo->vfc, &vfp_esi_gzip, 1);
	} else if (bo->do_esi && bo->is_gzip && !bo->do_gunzip) {
		(void)VFP_Push(bo->vfc, &vfp_esi_gzip, 1);
	} else if (bo->do_esi) {
		(void)VFP_Push(bo->vfc, &vfp_esi, 1);
	} else if (bo->do_gzip) {
		(void)VFP_Push(bo->vfc, &vfp_gzip, 1);
	} else if (bo->is_gzip && !bo->do_gunzip) {
		(void)VFP_Push(bo->vfc, &vfp_testgunzip, 1);
	}

	if (bo->fetch_objcore->flags & OC_F_PRIVATE)
		AN(bo->uncacheable);

	/* No reason to try streaming a non-existing body */
	if (bo->htc->body_status == BS_NONE)
		bo->do_stream = 0;

	if (VFP_Open(bo->vfc)) {
		(void)VFP_Error(bo->vfc, "Fetch Pipeline failed to open");
		bo->doclose = SC_RX_BODY;
		VDI_Finish(bo->director_resp, bo->wrk, bo);
		return (F_STP_ERROR);
	}

	if (vbf_beresp2obj(bo)) {
		(void)VFP_Error(bo->vfc, "Could not get storage");
		bo->doclose = SC_RX_BODY;
		VDI_Finish(bo->director_resp, bo->wrk, bo);
		return (F_STP_ERROR);
	}

	assert(WRW_IsReleased(wrk));


	if (bo->do_gzip || (bo->is_gzip && !bo->do_gunzip))
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_GZIPED, 1);

	if (bo->do_gzip || bo->do_gunzip)
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_CHGGZIP, 1);

	if (http_IsStatus(bo->beresp, 200) && (
	    http_GetHdr(bo->beresp, H_Last_Modified, &p) ||
	    http_GetHdr(bo->beresp, H_ETag, &p)))
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_IMSCAND, 1);

	if (bo->htc->body_status != BS_NONE)
		AZ(VDI_GetBody(bo->director_resp, bo->wrk, bo));

	/*
	 * Ready to fetch the body
	 */

	assert(bo->refcount >= 1);

	assert (bo->state == BOS_REQ_DONE);

	if (bo->do_stream) {
		HSH_Unbusy(wrk, bo->fetch_objcore);
		VBO_setstate(bo, BOS_STREAM);
	}

	VSLb(bo->vsl, SLT_Fetch_Body, "%u %s %s",
	    bo->htc->body_status, body_status_2str(bo->htc->body_status),
	    bo->do_stream ? "stream" : "-");

	if (bo->htc->body_status != BS_NONE) {
		assert(bo->htc->body_status != BS_ERROR);
		vbf_fetch_body_helper(bo);
	}

	if (bo->vfc->failed && !bo->do_stream) {
		assert(bo->state < BOS_STREAM);
		ObjFreeObj(bo->wrk, bo->fetch_objcore);
		// XXX: doclose = ?
		VDI_Finish(bo->director_resp, bo->wrk, bo);
		return (F_STP_ERROR);
	}

	if (bo->vfc->failed) {
		VDI_Finish(bo->director_resp, bo->wrk, bo);
		return (F_STP_FAIL);
	}

	if (bo->do_stream)
		assert(bo->state == BOS_STREAM);
	else {
		assert(bo->state == BOS_REQ_DONE);
		HSH_Unbusy(wrk, bo->fetch_objcore);
	}

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	VDI_Finish(bo->director_resp, bo->wrk, bo);

	VBO_setstate(bo, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	if (bo->ims_oc != NULL)
		EXP_Rearm(bo->ims_oc, bo->ims_oc->exp.t_origin, 0, 0, 0);
	return (F_STP_DONE);
}
Example no. 11
0
static enum fetch_step
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	void *oi;
	void *sp;
	ssize_t sl, al, l;
	uint8_t *ptr;
	uint64_t ol;
	enum objiter_status ois;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	AZ(vbf_beresp2obj(bo));

	if (ObjGetattr(bo->wrk, bo->ims_oc, OA_ESIDATA, NULL) != NULL)
		AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->ims_oc,
		    OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->ims_oc, OA_FLAGS));
	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->ims_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		HSH_Unbusy(wrk, bo->fetch_objcore);
		VBO_setstate(bo, BOS_STREAM);
	}

	al = 0;
	ol = ObjGetLen(bo->wrk, bo->ims_oc);
	oi = ObjIterBegin(wrk, bo->ims_oc);
	do {
		ois = ObjIter(bo->ims_oc, oi, &sp, &sl);
		while (sl > 0) {
			l = ol - al;
			if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
				break;
			if (sl < l)
				l = sl;
			memcpy(ptr, sp, l);
			VBO_extend(bo, l);
			al += l;
			sp = (char *)sp + l;
			sl -= l;
		}
	} while (!bo->vfc->failed && (ois == OIS_DATA || ois == OIS_STREAM));
	ObjIterEnd(bo->ims_oc, &oi);
	if (bo->vfc->failed)
		return (F_STP_FAIL);

	if (!bo->do_stream)
		HSH_Unbusy(wrk, bo->fetch_objcore);

	assert(al == ol);
	assert(ObjGetLen(bo->wrk, bo->fetch_objcore) == al);
	EXP_Rearm(bo->ims_oc, bo->ims_oc->exp.t_origin, 0, 0, 0);

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	VDI_Finish(bo->director_resp, bo->wrk, bo);

	VBO_setstate(bo, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	return (F_STP_DONE);
}
Example no. 12
0
static void
vbf_fetch_body_helper(struct busyobj *bo)
{
	ssize_t l;
	uint8_t *ptr;
	enum vfp_status vfps = VFP_ERROR;
	ssize_t est;
	struct vfp_ctx *vfc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	vfc = bo->vfc;
	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);

	AN(vfc->vfp_nxt);

	est = bo->htc->content_length;
	if (est < 0)
		est = 0;

	do {
		if (bo->abandon) {
			/*
			 * A pass object and delivery was terminated.
			 * We don't fail the fetch, in order for hit-for-pass
			 * objects to be created.
			 */
			AN(vfc->oc->flags & OC_F_PASS);
			VSLb(vfc->wrk->vsl, SLT_FetchError,
			    "Pass delivery abandoned");
			vfps = VFP_END;
			bo->doclose = SC_RX_BODY;
			break;
		}
		AZ(vfc->failed);
		l = est;
		assert(l >= 0);
		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK) {
			bo->doclose = SC_RX_BODY;
			break;
		}

		AZ(vfc->failed);
		vfps = VFP_Suck(vfc, ptr, &l);
		if (l > 0 && vfps != VFP_ERROR) {
			bo->acct.beresp_bodybytes += l;
			VBO_extend(bo, l);
			if (est >= l)
				est -= l;
			else
				est = 0;
		}
	} while (vfps == VFP_OK);

	VFP_Close(vfc);

	if (vfps == VFP_ERROR) {
		AN(vfc->failed);
		(void)VFP_Error(vfc, "Fetch Pipeline failed to process");
		bo->doclose = SC_RX_BODY;
	}

	if (!bo->do_stream)
		ObjTrimStore(bo->wrk, vfc->oc);
}
Example no. 13
0
static enum fetch_step
vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
{
	int i, do_ims;
	double now;
	char time_str[VTIM_FORMAT_SIZE];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	assert(bo->doclose == SC_NULL);
	AZ(bo->storage_hint);

	if (bo->do_pass)
		AN(bo->req);
	else
		AZ(bo->req);

	http_PrintfHeader(bo->bereq, "X-Varnish: %u", VXID(bo->vsl->wid));

	VCL_backend_fetch_method(bo->vcl, wrk, NULL, bo, bo->bereq->ws);

	bo->uncacheable = bo->do_pass;
	if (wrk->handling == VCL_RET_ABANDON)
		return (F_STP_FAIL);

	assert (wrk->handling == VCL_RET_FETCH);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);

	assert(bo->state <= BOS_REQ_DONE);

	i = VDI_GetHdr(wrk, bo);

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Beresp", now);

	if (i) {
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	http_VSL_log(bo->beresp);

	if (!http_GetHdr(bo->beresp, H_Date, NULL)) {
		/*
		 * RFC 2616 14.18 Date: The Date general-header field
		 * represents the date and time at which the message was
		 * originated, having the same semantics as orig-date in
		 * RFC 822. ... A received message that does not have a
		 * Date header field MUST be assigned one by the recipient
		 * if the message will be cached by that recipient or
		 * gatewayed via a protocol which requires a Date.
		 *
		 * If we didn't get a Date header, we assign one here.
		 */
		VTIM_format(now, time_str);
		http_PrintfHeader(bo->beresp, "Date: %s", time_str);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	/*
	 * Figure out how the fetch is supposed to happen, before the
	 * headers are adulterated by VCL
	 */
	if (!strcasecmp(http_GetMethod(bo->bereq), "head")) {
		/*
		 * A HEAD request can never have a body in the reply,
		 * no matter what the headers might say.
		 * [RFC2616 4.3 p33]
		 */
		wrk->stats->fetch_head++;
		bo->htc->body_status = BS_NONE;
	} else if (http_GetStatus(bo->beresp) <= 199) {
		/*
		 * 1xx responses never have a body.
		 * [RFC2616 4.3 p33]
		 * ... but we should never see them.
		 */
		wrk->stats->fetch_1xx++;
		bo->htc->body_status = BS_ERROR;
	} else if (http_IsStatus(bo->beresp, 204)) {
		/*
		 * 204 is "No Content", obviously don't expect a body.
		 * [RFC2616 10.2.5 p60]
		 */
		wrk->stats->fetch_204++;
		bo->htc->body_status = BS_NONE;
	} else if (http_IsStatus(bo->beresp, 304)) {
		/*
		 * 304 is "Not Modified"; it has no body.
		 * [RFC2616 10.3.5 p63]
		 */
		wrk->stats->fetch_304++;
		bo->htc->body_status = BS_NONE;
	} else if (bo->htc->body_status == BS_CHUNKED) {
		wrk->stats->fetch_chunked++;
	} else if (bo->htc->body_status == BS_LENGTH) {
		assert(bo->htc->content_length > 0);
		wrk->stats->fetch_length++;
	} else if (bo->htc->body_status == BS_EOF) {
		wrk->stats->fetch_eof++;
	} else if (bo->htc->body_status == BS_ERROR) {
		wrk->stats->fetch_bad++;
	} else if (bo->htc->body_status == BS_NONE) {
		wrk->stats->fetch_none++;
	} else {
		WRONG("wrong bodystatus");
	}

	if (bo->htc->body_status == BS_ERROR) {
		bo->doclose = SC_RX_BODY;
		VDI_Finish(bo->director_resp, bo->wrk, bo);
		VSLb(bo->vsl, SLT_Error, "Body cannot be fetched");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	/*
	 * What does RFC2616 think about TTL ?
	 */
	EXP_Clr(&bo->fetch_objcore->exp);
	RFC2616_Ttl(bo, now);

	/* private objects have negative TTL */
	if (bo->fetch_objcore->flags & OC_F_PRIVATE)
		bo->fetch_objcore->exp.ttl = -1.;

	AZ(bo->do_esi);

	if (bo->ims_oc != NULL && http_IsStatus(bo->beresp, 304)) {
		if (ObjCheckFlag(bo->wrk, bo->ims_oc, OF_CHGGZIP)) {
			/*
			 * If we changed the gzip status of the object
			 * that the stored Content-Encoding reflects, we
			 * must weaken any new ETag we get.
			 */
			http_Unset(bo->beresp, H_Content_Encoding);
			RFC2616_Weaken_Etag(bo->beresp);
		}
		HTTP_Merge(bo->wrk, bo->ims_oc, bo->beresp);
		assert(http_IsStatus(bo->beresp, 200));
		do_ims = 1;
	} else
		do_ims = 0;

	VFP_Setup(bo->vfc);
	bo->vfc->bo = bo;
	bo->vfc->oc = bo->fetch_objcore;
	bo->vfc->wrk = bo->wrk;
	bo->vfc->http = bo->beresp;
	bo->vfc->esi_req = bo->bereq;

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, bo->beresp->ws);

	if (wrk->handling == VCL_RET_ABANDON) {
		bo->doclose = SC_RESP_CLOSE;
		VDI_Finish(bo->director_resp, bo->wrk, bo);
		return (F_STP_FAIL);
	}

	if (wrk->handling == VCL_RET_RETRY) {
		bo->doclose = SC_RESP_CLOSE;
		VDI_Finish(bo->director_resp, bo->wrk, bo);
		bo->doclose = SC_NULL;
		bo->retries++;
		if (bo->retries <= cache_param->max_retries)
			return (F_STP_RETRY);
		VSLb(bo->vsl, SLT_VCL_Error,
		    "Too many retries, delivering 503");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	assert(bo->state == BOS_REQ_DONE);

	if (bo->do_esi)
		bo->do_stream = 0;
	if (bo->do_pass || bo->uncacheable)
		bo->fetch_objcore->flags |= OC_F_PASS;

	assert(wrk->handling == VCL_RET_DELIVER);

	return (do_ims ? F_STP_CONDFETCH : F_STP_FETCH);
}
Example no. 14
0
const char *
vmod_filtersep(const struct vrt_ctx *ctx)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	return NULL;
}
Example no. 15
0
static enum fetch_step
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	unsigned l;
	uint16_t nhttp;
	struct object *obj;
	struct objiter *oi;
	void *sp;
	ssize_t sl, al, tl;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	l = 0;
	if (bo->ims_obj->vary != NULL)
		l += VRY_Len(bo->ims_obj->vary);
	l += http_EstimateWS(bo->ims_obj->http, 0, &nhttp);

	bo->stats = &wrk->stats;
	obj = STV_NewObject(bo, bo->storage_hint, l, nhttp);
	if (obj == NULL) {
		(void)VFP_Error(bo, "Could not get storage");
		VDI_CloseFd(&bo->vbc);
		return (F_STP_DONE);
	}
	bo->stats = NULL;

	AZ(bo->fetch_obj);
	bo->fetch_obj = obj;

	obj->gziped = bo->ims_obj->gziped;
	obj->gzip_start = bo->ims_obj->gzip_start;
	obj->gzip_last = bo->ims_obj->gzip_last;
	obj->gzip_stop = bo->ims_obj->gzip_stop;

	/* XXX: ESI */

	if (bo->ims_obj->vary != NULL)
		obj->vary = (void *)WS_Copy(obj->http->ws,
		    bo->ims_obj->vary, VRY_Len(bo->ims_obj->vary));

	obj->vxid = bo->vsl->wid;

	obj->http->logtag = HTTP_Obj;
	/* XXX: we should have our own HTTP_A_CONDFETCH */
	http_FilterResp(bo->ims_obj->http, obj->http, HTTPH_A_INS);
	http_CopyHome(obj->http);


	if (!(bo->fetch_obj->objcore->flags & OC_F_PRIVATE)) {
		EXP_Insert(obj);
		AN(obj->objcore->ban);
	}

	AZ(bo->ws_o->overflow);
	VBO_setstate(bo, BOS_FETCHING);
	HSH_Unbusy(&wrk->stats, obj->objcore);

	st = NULL;
	al = 0;

	oi = ObjIterBegin(wrk, bo->ims_obj);
	while (1 == ObjIter(oi, &sp, &sl)) {
		while (sl > 0) {
			if (st == NULL) {
				st = VFP_GetStorage(bo, bo->ims_obj->len - al);
				XXXAN(st);
			}
			tl = sl;
			if (tl > st->space - st->len)
				tl = st->space - st->len;
			memcpy(st->ptr + st->len, sp, tl);
			st->len += tl;
			al += tl;
			sp = (char *)sp + tl;
			sl -= tl;
			VBO_extend(bo, tl);
			if (st->len == st->space)
				st = NULL;
		}
	}
	assert(al == bo->ims_obj->len);
	assert(obj->len == al);
	if (bo->state != BOS_FAILED)
		VBO_setstate(bo, BOS_FINISHED);
	HSH_Complete(obj->objcore);
	return (F_STP_DONE);
}
Example no. 16
0
unsigned
WRW_Flush(const struct worker *wrk)
{
	ssize_t i;
	struct wrw *wrw;
	char cbuf[32];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	wrw = wrk->wrw;
	CHECK_OBJ_NOTNULL(wrw, WRW_MAGIC);
	AN(wrw->wfd);

	/* For chunked, there must be one slot reserved for the chunked tail */
	if (wrw->ciov < wrw->siov)
		assert(wrw->niov < wrw->siov);

	if (*wrw->wfd >= 0 && wrw->liov > 0 && wrw->werr == 0) {
		if (wrw->ciov < wrw->siov && wrw->cliov > 0) {
			/* Add chunk head & tail */
			bprintf(cbuf, "00%zx\r\n", wrw->cliov);
			i = strlen(cbuf);
			wrw->iov[wrw->ciov].iov_base = cbuf;
			wrw->iov[wrw->ciov].iov_len = i;
			wrw->liov += i;

			wrw->iov[wrw->niov].iov_base = cbuf + i - 2;
			wrw->iov[wrw->niov++].iov_len = 2;
			wrw->liov += 2;
		} else if (wrw->ciov < wrw->siov) {
			wrw->iov[wrw->ciov].iov_base = cbuf;
			wrw->iov[wrw->ciov].iov_len = 0;
		}

		i = writev(*wrw->wfd, wrw->iov, wrw->niov);
		while (i != wrw->liov && i > 0) {
			/* Remove sent data from start of I/O vector,
			 * then retry; we hit a timeout, but some data
			 * was sent.
			 *
			 * XXX: Add a "minimum sent data per timeout"
			 * counter to prevent slowloris attacks
			 */

			if (VTIM_real() - wrw->t0 > cache_param->send_timeout) {
				VSLb(wrw->vsl, SLT_Debug,
				    "Hit total send timeout, "
				    "wrote = %zd/%zd; not retrying",
				    i, wrw->liov);
				i = -1;
				break;
			}

			VSLb(wrw->vsl, SLT_Debug,
			    "Hit send timeout, wrote = %zd/%zd; retrying",
			    i, wrw->liov);

			wrw_prune(wrw, i);
			i = writev(*wrw->wfd, wrw->iov, wrw->niov);
		}
		if (i <= 0) {
			wrw->werr++;
			VSLb(wrw->vsl, SLT_Debug,
			    "Write error, retval = %zd, len = %zd, errno = %s",
			    i, wrw->liov, strerror(errno));
		}
	}
	wrw->liov = 0;
	wrw->cliov = 0;
	wrw->niov = 0;
	if (wrw->ciov < wrw->siov)
		wrw->ciov = wrw->niov++;
	return (wrw->werr);
}
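Editor's note: WRW_Flush() above relies on a small buffer trick for HTTP/1.1 chunked framing: one bprintf() writes "00<hex length>\r\n" into cbuf, the whole string is used as the chunk-header iovec, and the last two bytes of that same buffer are reused as the CRLF iovec that terminates the chunk body. A minimal stand-alone illustration of that reuse (editor's sketch, not Varnish code):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char cbuf[32];
	size_t body_len = 0x1a3;	/* hypothetical chunk payload size */
	size_t i;

	/* Chunk header: hex length (leading zeroes are legal) plus CRLF. */
	snprintf(cbuf, sizeof cbuf, "00%zx\r\n", body_len);
	i = strlen(cbuf);

	/* The head iovec covers the whole string ("001a3" + CRLF). */
	printf("chunk head: %zu bytes, hex length field \"%.*s\"\n",
	    i, (int)i - 2, cbuf);
	/* The tail iovec reuses the last two bytes of cbuf as the CRLF. */
	printf("chunk tail: 2 bytes at cbuf + %zu\n", i - 2);
	return (0);
}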
Example no. 17
0
static ssize_t
vrb_pull(struct req *req, ssize_t maxsize, objiterate_f *func, void *priv)
{
	ssize_t l, r = 0, yet;
	struct vfp_ctx *vfc;
	uint8_t *ptr;
	enum vfp_status vfps = VFP_ERROR;
	const struct stevedore *stv;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	CHECK_OBJ_NOTNULL(req->htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_NOTNULL(req->vfc, VFP_CTX_MAGIC);
	vfc = req->vfc;

	req->body_oc = HSH_Private(req->wrk);
	AN(req->body_oc);

	if (req->storage != NULL)
		stv = req->storage;
	else
		stv = stv_transient;

	req->storage = NULL;

	XXXAN(STV_NewObject(req->wrk, req->body_oc, stv, 8));

	vfc->oc = req->body_oc;

	if (VFP_Open(vfc) < 0) {
		req->req_body_status = REQ_BODY_FAIL;
		HSH_DerefBoc(req->wrk, req->body_oc);
		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
		return (-1);
	}

	AZ(req->req_bodybytes);
	AN(req->htc);
	yet = req->htc->content_length;
	if (yet != 0 && req->want100cont) {
		req->want100cont = 0;
		(void)req->transport->minimal_response(req, 100);
	}
	if (yet < 0)
		yet = 0;
	do {
		AZ(vfc->failed);
		if (maxsize >= 0 && req->req_bodybytes > maxsize) {
			(void)VFP_Error(vfc, "Request body too big to cache");
			break;
		}
		l = yet;
		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK)
			break;
		AZ(vfc->failed);
		AN(ptr);
		AN(l);
		vfps = VFP_Suck(vfc, ptr, &l);
		if (l > 0 && vfps != VFP_ERROR) {
			req->req_bodybytes += l;
			req->acct.req_bodybytes += l;
			if (yet >= l)
				yet -= l;
			if (func != NULL) {
				r = func(priv, 1, ptr, l);
				if (r)
					break;
			} else {
				ObjExtend(req->wrk, req->body_oc, l);
			}
		}

	} while (vfps == VFP_OK);
	VFP_Close(vfc);
	VSLb_ts_req(req, "ReqBody", VTIM_real());
	if (func != NULL) {
		HSH_DerefBoc(req->wrk, req->body_oc);
		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
		if (vfps != VFP_END) {
			req->req_body_status = REQ_BODY_FAIL;
			if (r == 0)
				r = -1;
		}
		return (r);
	}

	ObjTrimStore(req->wrk, req->body_oc);
	AZ(ObjSetU64(req->wrk, req->body_oc, OA_LEN, req->req_bodybytes));
	HSH_DerefBoc(req->wrk, req->body_oc);

	if (vfps != VFP_END) {
		req->req_body_status = REQ_BODY_FAIL;
		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
		return (-1);
	}

	assert(req->req_bodybytes >= 0);
	if (req->req_bodybytes != req->htc->content_length) {
		/* We must also update the "pristine" req.* copy */
		http_Unset(req->http0, H_Content_Length);
		http_Unset(req->http0, H_Transfer_Encoding);
		http_PrintfHeader(req->http0, "Content-Length: %ju",
		    (uintmax_t)req->req_bodybytes);

		http_Unset(req->http, H_Content_Length);
		http_Unset(req->http, H_Transfer_Encoding);
		http_PrintfHeader(req->http, "Content-Length: %ju",
		    (uintmax_t)req->req_bodybytes);
	}

	req->req_body_status = REQ_BODY_CACHED;
	return (req->req_bodybytes);
}
Example no. 18
0
static enum req_fsm_nxt
cnt_lookup(struct worker *wrk, struct req *req)
{
	struct objcore *oc, *boc;
	struct object *o;
	struct objhead *oh;
	enum lookup_e lr;
	int had_objhead = 0;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AZ(req->objcore);

	CHECK_OBJ_NOTNULL(req->vcl, VCL_CONF_MAGIC);

	VRY_Prep(req);

	AZ(req->objcore);
	if (req->hash_objhead)
		had_objhead = 1;
	lr = HSH_Lookup(req, &oc, &boc,
	    req->esi_level == 0 ? 1 : 0,
	    req->hash_always_miss ? 1 : 0
	);
	if (lr == HSH_BUSY) {
		/*
		 * We lost the session to a busy object, disembark the
		 * worker thread.   We return to STP_LOOKUP when the busy
		 * object has been unbusied, and still have the objhead
		 * around to restart the lookup with.
		 */
		return (REQ_FSM_DISEMBARK);
	}
	if (had_objhead)
		VSLb_ts_req(req, "Waitinglist", W_TIM_real(wrk));

	if (boc == NULL) {
		VRY_Finish(req, DISCARD);
	} else {
		AN(boc->flags & OC_F_BUSY);
		VRY_Finish(req, KEEP);
	}

	AZ(req->objcore);
	if (lr == HSH_MISS) {
		/* Found nothing */
		VSLb(req->vsl, SLT_Debug, "XXXX MISS");
		AZ(oc);
		AN(boc);
		AN(boc->flags & OC_F_BUSY);
		req->objcore = boc;
		req->req_step = R_STP_MISS;
		return (REQ_FSM_MORE);
	}

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AZ(oc->flags & OC_F_BUSY);
	AZ(req->objcore);

	o = ObjGetObj(oc, &wrk->stats);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	req->obj = o;

	if (oc->flags & OC_F_PASS) {
		/* Found a hit-for-pass */
		VSLb(req->vsl, SLT_Debug, "XXXX HIT-FOR-PASS");
		VSLb(req->vsl, SLT_HitPass, "%u", req->obj->vxid);
		AZ(boc);
		(void)HSH_DerefObj(&wrk->stats, &req->obj);
		req->objcore = NULL;
		wrk->stats.cache_hitpass++;
		req->req_step = R_STP_PASS;
		return (REQ_FSM_MORE);
	}

	oh = oc->objhead;
	CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);

	VSLb(req->vsl, SLT_Hit, "%u", req->obj->vxid);

	VCL_hit_method(req->vcl, wrk, req, NULL, req->http->ws);

	switch (wrk->handling) {
	case VCL_RET_DELIVER:
		if (boc != NULL) {
			AZ(oc->flags & (OC_F_FAILED|OC_F_PASS));
			AZ(oc->exp_flags & OC_EF_DYING);
			AZ(boc->busyobj);
			VBF_Fetch(wrk, req, boc, o, VBF_BACKGROUND);
		} else {
			(void)HTTP1_DiscardReqBody(req);// XXX: handle err
		}
		wrk->stats.cache_hit++;
		req->req_step = R_STP_DELIVER;
		return (REQ_FSM_MORE);
	case VCL_RET_FETCH:
		req->objcore = boc;
		if (req->objcore != NULL)
			req->req_step = R_STP_MISS;
		else {
			(void)HSH_DerefObj(&wrk->stats, &req->obj);
			/*
			 * We don't have a busy object, so treat this
			 * like a pass
			 */
			VSLb(req->vsl, SLT_VCL_Error,
			    "vcl_hit{} returns fetch without busy object."
			    "  Doing pass.");
			req->req_step = R_STP_PASS;
		}
		return (REQ_FSM_MORE);
	case VCL_RET_RESTART:
		req->req_step = R_STP_RESTART;
		break;
	case VCL_RET_SYNTH:
		req->req_step = R_STP_SYNTH;
		break;
	case VCL_RET_PASS:
		wrk->stats.cache_hit++;
		req->req_step = R_STP_PASS;
		break;
	default:
		INCOMPL();
	}

	/* Drop our object, we won't need it */
	(void)HSH_DerefObj(&wrk->stats, &req->obj);
	req->objcore = NULL;

	if (boc != NULL) {
		(void)HSH_DerefObjCore(&wrk->stats, &boc);
		free(req->vary_b);
		req->vary_b = NULL;
	}

	return (REQ_FSM_MORE);
}
int
V1F_FetchRespHdr(struct busyobj *bo)
{

	struct http *hp;
	enum htc_status_e hs;
	int first;
	struct http_conn *htc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_ORNULL(bo->req, REQ_MAGIC);

	htc = bo->htc;

	VSC_C_main->backend_req++;

	/* Receive response */

	SES_RxInit(htc, bo->ws, cache_param->http_resp_size,
	    cache_param->http_resp_hdr_len);
	CHECK_OBJ_NOTNULL(htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);

	VTCP_set_read_timeout(htc->fd, htc->first_byte_timeout);

	first = 1;
	do {
		hs = SES_Rx(htc, 0);
		if (hs == HTC_S_MORE)
			hs = HTTP1_Complete(htc);
		if (hs == HTC_S_OVERFLOW) {
			WS_ReleaseP(htc->ws, htc->rxbuf_b);
			bo->acct.beresp_hdrbytes +=
			    htc->rxbuf_e - htc->rxbuf_b;
			VSLb(bo->vsl, SLT_FetchError,
			    "http %sread error: overflow",
			    first ? "first " : "");
			htc->doclose = SC_RX_OVERFLOW;
			return (-1);
		}
		if (hs == HTC_S_EOF) {
			WS_ReleaseP(htc->ws, htc->rxbuf_b);
			bo->acct.beresp_hdrbytes +=
			    htc->rxbuf_e - htc->rxbuf_b;
			VSLb(bo->vsl, SLT_FetchError, "http %sread error: EOF",
			    first ? "first " : "");
			htc->doclose = SC_RX_TIMEOUT;
			return (first ? 1 : -1);
		}
		if (first) {
			first = 0;
			VTCP_set_read_timeout(htc->fd,
			    htc->between_bytes_timeout);
		}
	} while (hs != HTC_S_COMPLETE);
	bo->acct.beresp_hdrbytes += htc->rxbuf_e - htc->rxbuf_b;

	hp = bo->beresp;

	if (HTTP1_DissectResponse(hp, htc)) {
		VSLb(bo->vsl, SLT_FetchError, "http format error");
		htc->doclose = SC_RX_JUNK;
		return (-1);
	}

	htc->doclose = http_DoConnection(hp);

	return (0);
}
Example no. 20
0
static enum req_fsm_nxt
cnt_recv(struct worker *wrk, struct req *req)
{
	unsigned recv_handling;
	struct SHA256Context sha256ctx;
	char *xff;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->vcl, VCL_CONF_MAGIC);
	AZ(req->objcore);
	AZ(req->obj);
	AZ(req->objcore);

	AZ(isnan(req->t_first));
	AZ(isnan(req->t_prev));
	AZ(isnan(req->t_req));

	VSLb(req->vsl, SLT_ReqStart, "%s %s",
	    req->sp->client_addr_str, req->sp->client_port_str);

	http_VSL_log(req->http);

	if (req->err_code) {
		req->req_step = R_STP_SYNTH;
		return (REQ_FSM_MORE);
	}

	/* By default we use the first backend */
	AZ(req->director_hint);
	req->director_hint = req->vcl->director[0];
	AN(req->director_hint);

	req->d_ttl = -1;
	req->disable_esi = 0;
	req->hash_always_miss = 0;
	req->hash_ignore_busy = 0;
	req->client_identity = NULL;
	if (req->restarts == 0) {
		if (http_GetHdr(req->http, H_X_Forwarded_For, &xff)) {
			http_Unset(req->http, H_X_Forwarded_For);
			http_PrintfHeader(req->http, "X-Forwarded-For: %s, %s", xff,
					  req->sp->client_addr_str);
		} else {
			http_PrintfHeader(req->http, "X-Forwarded-For: %s",
					  req->sp->client_addr_str);
		}
	}

	http_CollectHdr(req->http, H_Cache_Control);

	VCL_recv_method(req->vcl, wrk, req, NULL, req->http->ws);

	/* Attempts to cache req.body may fail */
	if (req->req_body_status == REQ_BODY_FAIL) {
		return (REQ_FSM_DONE);
	}
	recv_handling = wrk->handling;

	/* We wash the A-E header here for the sake of VRY */
	if (cache_param->http_gzip_support &&
	     (recv_handling != VCL_RET_PIPE) &&
	     (recv_handling != VCL_RET_PASS)) {
		if (RFC2616_Req_Gzip(req->http)) {
			http_ForceHeader(req->http, H_Accept_Encoding, "gzip");
		} else {
			http_Unset(req->http, H_Accept_Encoding);
		}
	}

	req->sha256ctx = &sha256ctx;	/* so HSH_AddString() can find it */
	SHA256_Init(req->sha256ctx);
	VCL_hash_method(req->vcl, wrk, req, NULL, req->http->ws);
	assert(wrk->handling == VCL_RET_LOOKUP);
	SHA256_Final(req->digest, req->sha256ctx);
	req->sha256ctx = NULL;

	if (!strcmp(req->http->hd[HTTP_HDR_METHOD].b, "HEAD"))
		req->wantbody = 0;
	else
		req->wantbody = 1;

	switch(recv_handling) {
	case VCL_RET_PURGE:
		req->req_step = R_STP_PURGE;
		return (REQ_FSM_MORE);
	case VCL_RET_HASH:
		req->req_step = R_STP_LOOKUP;
		return (REQ_FSM_MORE);
	case VCL_RET_PIPE:
		if (req->esi_level == 0) {
			req->req_step = R_STP_PIPE;
			return (REQ_FSM_MORE);
		}
		VSLb(req->vsl, SLT_VCL_Error,
		    "vcl_recv{} returns pipe for ESI included object."
		    "  Doing pass.");
		req->req_step = R_STP_PASS;
		return (REQ_FSM_DONE);
	case VCL_RET_PASS:
		req->req_step = R_STP_PASS;
		return (REQ_FSM_MORE);
	case VCL_RET_SYNTH:
		req->req_step = R_STP_SYNTH;
		return (REQ_FSM_MORE);
	default:
		WRONG("Illegal return from vcl_recv{}");
	}
}
Example no. 21
0
void
VJ_subproc(enum jail_subproc_e jse)
{
	CHECK_OBJ_NOTNULL(vjt, JAIL_TECH_MAGIC);
	vjt->subproc(jse);
}
Example no. 22
0
static enum req_fsm_nxt
cnt_deliver(struct worker *wrk, struct req *req)
{
	struct busyobj *bo;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->obj, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(req->obj->objcore, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(req->obj->objcore->objhead, OBJHEAD_MAGIC);
	CHECK_OBJ_NOTNULL(req->vcl, VCL_CONF_MAGIC);
	assert(WRW_IsReleased(wrk));

	assert(req->obj->objcore->refcnt > 0);

	if (req->obj->objcore->exp_flags & OC_EF_EXP)
		EXP_Touch(req->obj->objcore, req->t_prev);

	HTTP_Setup(req->resp, req->ws, req->vsl, SLT_RespMethod);
	http_FilterResp(req->obj->http, req->resp, 0);
	http_ForceField(req->resp, HTTP_HDR_PROTO, "HTTP/1.1");

	if (req->wrk->stats.cache_hit)
		http_PrintfHeader(req->resp,
		    "X-Varnish: %u %u", VXID(req->vsl->wid),
		    VXID(req->obj->vxid));
	else
		http_PrintfHeader(req->resp,
		    "X-Varnish: %u", VXID(req->vsl->wid));

	/* We base Age calculation upon the last timestamp taken during
	   client request processing. This gives some inaccuracy, but
	   since Age is only full second resolution that shouldn't
	   matter. (Last request timestamp could be a Start timestamp
	   taken before the object entered into cache leading to negative
	   age. Truncate to zero in that case).
	*/
	http_PrintfHeader(req->resp, "Age: %.0f",
	    fmax(0., req->t_prev - req->obj->objcore->exp.t_origin));

	http_SetHeader(req->resp, "Via: 1.1 varnish-v4");

	if (cache_param->http_gzip_support && req->obj->gziped &&
	    !RFC2616_Req_Gzip(req->http))
		RFC2616_Weaken_Etag(req->resp);

	VCL_deliver_method(req->vcl, wrk, req, NULL, req->http->ws);
	VSLb_ts_req(req, "Process", W_TIM_real(wrk));

	/* Stop the insanity before it turns "Hotel California" on us */
	if (req->restarts >= cache_param->max_restarts)
		wrk->handling = VCL_RET_DELIVER;

	if (wrk->handling == VCL_RET_RESTART) {
		(void)HSH_DerefObj(&wrk->stats, &req->obj);
		AZ(req->obj);
		http_Teardown(req->resp);
		req->req_step = R_STP_RESTART;
		return (REQ_FSM_MORE);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	if (!(req->obj->objcore->flags & OC_F_PASS)
	    && req->esi_level == 0
	    && http_GetStatus(req->obj->http) == 200
	    && req->http->conds && RFC2616_Do_Cond(req)) {
		http_PutResponse(req->resp, "HTTP/1.1", 304, NULL);
		req->wantbody = 0;
	}

	/* Grab a ref to the bo if there is one, and hand it down */
	bo = HSH_RefBusy(req->obj->objcore);
	V1D_Deliver(req, bo);
	if (bo != NULL)
		VBO_DerefBusyObj(req->wrk, &bo);

	VSLb_ts_req(req, "Resp", W_TIM_real(wrk));

	if (http_HdrIs(req->resp, H_Connection, "close"))
		req->doclose = SC_RESP_CLOSE;

	if (req->obj->objcore->flags & OC_F_PASS) {
		/*
		 * No point in saving the body if it is hit-for-pass,
		 * but we can't yank it until the fetching thread has
		 * finished/abandoned also.
		 */
		while (req->obj->objcore->busyobj != NULL)
			(void)usleep(100000);
		STV_Freestore(req->obj);
	}

	assert(WRW_IsReleased(wrk));
VSLb(req->vsl, SLT_Debug, "XXX REF %d", req->obj->objcore->refcnt);
	(void)HSH_DerefObj(&wrk->stats, &req->obj);
	http_Teardown(req->resp);
	return (REQ_FSM_DONE);
}
Example no. 23
0
static struct vbc *
vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo)
{
	struct vbc *vc;
	double tmod;
	char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
	char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
	AN(bp->vsc);

	if (!VBE_Healthy(bp, NULL)) {
		// XXX: per backend stats ?
		VSC_C_main->backend_unhealthy++;
		return (NULL);
	}

	if (bp->max_connections > 0 && bp->n_conn >= bp->max_connections) {
		// XXX: per backend stats ?
		VSC_C_main->backend_busy++;
		return (NULL);
	}

	AZ(bo->htc);
	bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
	if (bo->htc == NULL)
		/* XXX: counter ? */
		return (NULL);
	bo->htc->doclose = SC_NULL;

	FIND_TMO(connect_timeout, tmod, bo, bp);
	vc = VBT_Get(bp->tcp_pool, tmod, bp, wrk);
	if (vc == NULL) {
		// XXX: Per backend stats ?
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		return (NULL);
	}

	assert(vc->fd >= 0);
	AN(vc->addr);

	Lck_Lock(&bp->mtx);
	bp->n_conn++;
	bp->vsc->conn++;
	bp->vsc->req++;
	Lck_Unlock(&bp->mtx);

	if (bp->proxy_header != 0)
		VPX_Send_Proxy(vc->fd, bp->proxy_header, bo->sp);

	VTCP_myname(vc->fd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
	VTCP_hisname(vc->fd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
	VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s",
	    vc->fd, bp->display_name, abuf2, pbuf2, abuf1, pbuf1);

	INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
	bo->htc->priv = vc;
	bo->htc->fd = vc->fd;
	FIND_TMO(first_byte_timeout,
	    bo->htc->first_byte_timeout, bo, bp);
	FIND_TMO(between_bytes_timeout,
	    bo->htc->between_bytes_timeout, bo, bp);
	return (vc);
}
Example no. 24
0
enum req_fsm_nxt
CNT_Request(struct worker *wrk, struct req *req)
{
	enum req_fsm_nxt nxt;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	/*
	 * Possible entrance states
	 */
	assert(
	    req->req_step == R_STP_LOOKUP ||
	    req->req_step == R_STP_RECV);

	AN(req->vsl->wid & VSL_CLIENTMARKER);

	req->wrk = wrk;

	for (nxt = REQ_FSM_MORE; nxt == REQ_FSM_MORE; ) {
		/*
		 * This is a good place to be paranoid about the various
		 * pointers still pointing to the things we expect.
		 */
		CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
		CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
		CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

		/*
		 * We don't want the thread workspace to be used for
		 * anything of long duration, so mandate that it be
		 * empty on state-transitions.
		 */
		WS_Assert(wrk->aws);
		assert(wrk->aws->s == wrk->aws->f);

		switch (req->req_step) {
#define REQ_STEP(l,u,arg) \
		    case R_STP_##u: \
			if (DO_DEBUG(DBG_REQ_STATE)) \
				cnt_diag(req, #u); \
			nxt = cnt_##l arg; \
			break;
#include "tbl/steps.h"
#undef REQ_STEP
		default:
			WRONG("State engine misfire");
		}
		WS_Assert(wrk->aws);
		CHECK_OBJ_ORNULL(wrk->nobjhead, OBJHEAD_MAGIC);
	}
	if (nxt == REQ_FSM_DONE) {
		AN(req->vsl->wid);
		if (req->res_mode & (RES_ESI|RES_ESI_CHILD))
			VSLb(req->vsl, SLT_ESI_BodyBytes, "%ju",
			    (uintmax_t)req->resp_bodybytes);

		while (!VTAILQ_EMPTY(&req->body->list)) {
			st = VTAILQ_FIRST(&req->body->list);
			VTAILQ_REMOVE(&req->body->list, st, list);
			STV_free(st);
		}
		req->wrk = NULL;
	}
	assert(WRW_IsReleased(wrk));
	return (nxt);
}
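Editor's note: the REQ_STEP block in CNT_Request() above is an X-macro: tbl/steps.h lists each request step exactly once, and re-including that list under different REQ_STEP definitions generates both the R_STP_* identifiers and the switch arms that dispatch to the matching cnt_* handler. The stand-alone sketch below shows the same pattern with made-up step names and handlers; it is not the real contents of tbl/steps.h.

#include <stdio.h>

/* Single source of truth for the steps (stand-in for tbl/steps.h). */
#define STEP_LIST \
	STEP(recv,    RECV)   \
	STEP(lookup,  LOOKUP) \
	STEP(deliver, DELIVER)

/* First expansion: the enum of step identifiers. */
enum step {
#define STEP(l, u) STP_##u,
	STEP_LIST
#undef STEP
};

/* Hypothetical per-step handlers. */
static void do_recv(void)    { printf("recv\n"); }
static void do_lookup(void)  { printf("lookup\n"); }
static void do_deliver(void) { printf("deliver\n"); }

/* Second expansion: the dispatch switch. */
static void
dispatch(enum step s)
{
	switch (s) {
#define STEP(l, u) case STP_##u: do_##l(); break;
	STEP_LIST
#undef STEP
	}
}

int
main(void)
{
	dispatch(STP_LOOKUP);	/* prints "lookup" */
	return (0);
}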
Example no. 25
0
void
HTTP1_Session(struct worker *wrk, struct req *req)
{
	enum htc_status_e hs;
	struct sess *sp;
	const char *st;
	int i;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	/*
	 * Whenever we come in from the acceptor or waiter, we need to set
	 * blocking mode.  It would be simpler to do this in the acceptor
	 * or waiter, but we'd rather do the syscall in the worker thread.
	 * On systems which return errors for ioctl, we close early
	 */
	if (http1_getstate(sp) == H1NEWREQ && VTCP_blocking(sp->fd)) {
		if (errno == ECONNRESET)
			SES_Close(sp, SC_REM_CLOSE);
		else
			SES_Close(sp, SC_TX_ERROR);
		AN(Req_Cleanup(sp, wrk, req));
		return;
	}

	while (1) {
		st = http1_getstate(sp);
		if (st == H1NEWREQ) {
			assert(isnan(req->t_prev));
			assert(isnan(req->t_req));
			AZ(req->vcl);
			AZ(req->esi_level);

			hs = HTC_RxStuff(req->htc, HTTP1_Complete,
			    &req->t_first, &req->t_req,
			    sp->t_idle + cache_param->timeout_linger,
			    sp->t_idle + cache_param->timeout_idle,
			    cache_param->http_req_size);
			XXXAZ(req->htc->ws->r);
			if (hs < HTC_S_EMPTY) {
				req->acct.req_hdrbytes +=
				    req->htc->rxbuf_e - req->htc->rxbuf_b;
				CNT_AcctLogCharge(wrk->stats, req);
				Req_Release(req);
				switch(hs) {
				case HTC_S_CLOSE:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				case HTC_S_TIMEOUT:
					SES_Delete(sp, SC_RX_TIMEOUT, NAN);
					return;
				case HTC_S_OVERFLOW:
					SES_Delete(sp, SC_RX_OVERFLOW, NAN);
					return;
				case HTC_S_EOF:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				default:
					WRONG("htc_status (bad)");
				}
			}
			if (hs == HTC_S_IDLE) {
				wrk->stats->sess_herd++;
				Req_Release(req);
				SES_Wait(sp, &HTTP1_transport);
				return;
			}
			if (hs != HTC_S_COMPLETE)
				WRONG("htc_status (nonbad)");

			i = http1_dissect(wrk, req);
			req->acct.req_hdrbytes +=
			    req->htc->rxbuf_e - req->htc->rxbuf_b;
			if (i) {
				SES_Close(req->sp, req->doclose);
				http1_setstate(sp, H1CLEANUP);
			} else {
				req->req_step = R_STP_RECV;
				http1_setstate(sp, H1PROC);
			}
		} else if (st == H1BUSY) {
			/*
			 * Return from waitinglist.
			 * Check to see if the remote has left.
			 */
			if (VTCP_check_hup(sp->fd)) {
				AN(req->hash_objhead);
				(void)HSH_DerefObjHead(wrk, &req->hash_objhead);
				AZ(req->hash_objhead);
				SES_Close(sp, SC_REM_CLOSE);
				AN(Req_Cleanup(sp, wrk, req));
				return;
			}
			http1_setstate(sp, H1PROC);
		} else if (st == H1PROC) {
			req->transport = &HTTP1_transport;
			if (CNT_Request(wrk, req) == REQ_FSM_DISEMBARK) {
				req->task.func = http1_req;
				req->task.priv = req;
				http1_setstate(sp, H1BUSY);
				return;
			}
			req->transport = NULL;
			http1_setstate(sp, H1CLEANUP);
		} else if (st == H1CLEANUP) {
			if (Req_Cleanup(sp, wrk, req))
				return;
			HTC_RxInit(req->htc, req->ws);
			if (req->htc->rxbuf_e != req->htc->rxbuf_b)
				wrk->stats->sess_readahead++;
			http1_setstate(sp, H1NEWREQ);
		} else {
			WRONG("Wrong H1 session state");
		}
	}
}
Example no. 26
0
static enum fetch_step
vbf_stp_fetchhdr(struct worker *wrk, struct busyobj *bo)
{
	int i, do_ims;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	xxxassert (wrk->handling == VCL_RET_FETCH);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, HTTP_Beresp);

	if (!bo->do_pass && bo->req != NULL)
		vbf_release_req(bo); /* XXX: retry ?? */

	assert(bo->state <= BOS_REQ_DONE);

	i = V1F_fetch_hdr(wrk, bo, bo->req);
	/*
	 * If we recycle a backend connection, there is a finite chance
	 * that the backend closed it before we get a request to it.
	 * Do a single retry in that case.
	 */
	if (i == 1) {
		VSC_C_main->backend_retry++;
		i = V1F_fetch_hdr(wrk, bo, bo->req);
	}

	if (bo->do_pass && bo->req != NULL)
		vbf_release_req(bo); /* XXX : retry ?? */

	AZ(bo->req);

	if (i) {
		AZ(bo->vbc);
		bo->err_code = 503;
		http_SetH(bo->beresp, HTTP_HDR_PROTO, "HTTP/1.1");
		http_SetResp(bo->beresp,
		    "HTTP/1.1", 503, "Backend fetch failed");
		http_SetHeader(bo->beresp, "Content-Length: 0");
		http_SetHeader(bo->beresp, "Connection: close");
	} else {
		AN(bo->vbc);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	/*
	 * Figure out how the fetch is supposed to happen, before the
	 * headers are adulterated by VCL
	 * NB: Also sets other wrk variables
	 */
	bo->htc.body_status = RFC2616_Body(bo, &wrk->stats);

	bo->err_code = http_GetStatus(bo->beresp);

	/*
	 * What does RFC2616 think about TTL ?
	 */
	EXP_Clr(&bo->exp);
	bo->exp.entered = W_TIM_real(wrk);
	RFC2616_Ttl(bo);

	/* private objects have negative TTL */
	if (bo->fetch_objcore->flags & OC_F_PRIVATE)
		bo->exp.ttl = -1.;

	AZ(bo->do_esi);

	if (bo->ims_obj != NULL && bo->beresp->status == 304) {
		bo->beresp->status = 200;
		http_PrintfHeader(bo->beresp, "Content-Length: %jd",
		    bo->ims_obj->len);
		do_ims = 1;
	} else
		do_ims = 0;

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, bo->beresp->ws);

	if (bo->do_esi)
		bo->do_stream = 0;
	if (bo->do_pass)
		bo->fetch_objcore->flags |= OC_F_PASS;

	if (wrk->handling == VCL_RET_DELIVER)
		return (do_ims ? F_STP_CONDFETCH : F_STP_FETCH);
	if (wrk->handling == VCL_RET_RETRY) {
		assert(bo->state == BOS_REQ_DONE);
		bo->retries++;
		if (bo->retries <= cache_param->max_retries) {
			VDI_CloseFd(&bo->vbc);
			return (F_STP_STARTFETCH);
		}
		// XXX: wrk->handling = VCL_RET_SYNTH;
	}

	INCOMPL();
}
Example no. 27
0
static void *
vws_thread(void *priv)
{
	struct waited *wp;
	struct waiter *w;
	struct vws *vws;
	double now, then;
	struct timespec ts;
	const double	max_t  = 100.0;
	port_event_t ev[MAX_EVENTS];
	u_int nevents;
	int ei, ret;

	CAST_OBJ_NOTNULL(vws, priv, VWS_MAGIC);
	w = vws->waiter;
	CHECK_OBJ_NOTNULL(w, WAITER_MAGIC);
	THR_SetName("cache-ports");

	now = VTIM_real();

	while (!vws->die) {
		while (1) {
			then = Wait_HeapDue(w, &wp);
			if (wp == NULL) {
				vws->next = now + max_t;
				break;
			} else if (then > now) {
				vws->next = then;
				break;
			}
			CHECK_OBJ_NOTNULL(wp, WAITED_MAGIC);
			vws_del(vws, wp->fd);
			Wait_Call(w, wp, WAITER_TIMEOUT, now);
		}
		then = vws->next - now;
		ts.tv_sec = (time_t)floor(then);
		ts.tv_nsec = (long)(1e9 * (then - ts.tv_sec));

		/*
		 * Minimum number of events we accept.  We could consider
		 * scaling this up for efficiency, but as we always get all
		 * waiting events up to the maximum, we would only optimize
		 * the idle case while sacrificing some latency.
		 */
		nevents = 1;

		/*
		 * see discussion in
		 * - https://issues.apache.org/bugzilla/show_bug.cgi?id=47645
		 * - http://mail.opensolaris.org/pipermail/\
		 *       networking-discuss/2009-August/011979.html
		 *
		 * comment from apr/poll/unix/port.c :
		 *
		 * This confusing API can return an event at the same time
		 * that it reports EINTR or ETIME.
		 *
		 */

		ret = port_getn(vws->dport, ev, MAX_EVENTS, &nevents, &ts);
		now = VTIM_real();

		if (ret < 0 && errno == EBADF) {
			/* close on dport is our stop signal */
			AN(vws->die);
			break;
		}

		if (ret < 0)
			assert((errno == EINTR) || (errno == ETIME));

		for (ei = 0; ei < nevents; ei++)
			vws_port_ev(vws, w, &ev[ei], now);
	}
	return NULL;
}
Example no. 28
0
static enum fetch_step
vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
{
	struct http *hp, *hp2;
	char *b;
	uint16_t nhttp;
	unsigned l;
	struct vsb *vary = NULL;
	int varyl = 0;
	struct object *obj;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	assert(wrk->handling == VCL_RET_DELIVER);

	/*
	 * The VCL variables beresp.do_g[un]zip tells us how we want the
	 * object processed before it is stored.
	 *
	 * The backend Content-Encoding header tells us what we are going
	 * to receive, which we classify in the following three classes:
	 *
	 *	"Content-Encoding: gzip"	--> object is gzip'ed.
	 *	no Content-Encoding		--> object is not gzip'ed.
	 *	anything else			--> do nothing wrt gzip
	 *
	 * XXX: BS_NONE/cl==0 should avoid gzip/gunzip
	 */

	/* We do nothing unless the param is set */
	if (!cache_param->http_gzip_support)
		bo->do_gzip = bo->do_gunzip = 0;

	bo->is_gzip = http_HdrIs(bo->beresp, H_Content_Encoding, "gzip");

	bo->is_gunzip = !http_GetHdr(bo->beresp, H_Content_Encoding, NULL);

	/* It can't be both */
	assert(bo->is_gzip == 0 || bo->is_gunzip == 0);

	/* We won't gunzip unless it is gzip'ed */
	if (bo->do_gunzip && !bo->is_gzip)
		bo->do_gunzip = 0;

	/* If we do gunzip, remove the C-E header */
	if (bo->do_gunzip)
		http_Unset(bo->beresp, H_Content_Encoding);

	/* We won't gzip unless it is not already gzip'ed */
	if (bo->do_gzip && !bo->is_gunzip)
		bo->do_gzip = 0;

	/* If we do gzip, add the C-E header */
	if (bo->do_gzip)
		http_SetHeader(bo->beresp, "Content-Encoding: gzip");

	/* But we can't do both at the same time */
	assert(bo->do_gzip == 0 || bo->do_gunzip == 0);

	/* ESI takes precedence and handles gzip/gunzip itself */
	if (bo->do_esi)
		bo->vfp = &vfp_esi;
	else if (bo->do_gunzip)
		bo->vfp = &vfp_gunzip;
	else if (bo->do_gzip)
		bo->vfp = &vfp_gzip;
	else if (bo->is_gzip)
		bo->vfp = &vfp_testgzip;

	if (bo->fetch_objcore->flags & OC_F_PRIVATE)
		AN(bo->uncacheable);

	/* No reason to try streaming a non-existing body */
	if (bo->htc.body_status == BS_NONE)
		bo->do_stream = 0;

	l = 0;

	/* Create Vary instructions */
	if (!(bo->fetch_objcore->flags & OC_F_PRIVATE)) {
		varyl = VRY_Create(bo, &vary);
		if (varyl > 0) {
			AN(vary);
			assert(varyl == VSB_len(vary));
			l += varyl;
		} else if (varyl < 0) {
			/*
			 * Vary parse error
			 * Complain about it, and make this a pass.
			 */
			VSLb(bo->vsl, SLT_Error,
			    "Illegal 'Vary' header from backend, "
			    "making this a pass.");
			bo->uncacheable = 1;
			AZ(vary);
		} else
			/* No vary */
			AZ(vary);
	}

	l += http_EstimateWS(bo->beresp,
	    bo->uncacheable ? HTTPH_R_PASS : HTTPH_A_INS, &nhttp);

	if (bo->uncacheable)
		bo->fetch_objcore->flags |= OC_F_PASS;

	if (bo->exp.ttl < cache_param->shortlived || bo->uncacheable == 1)
		bo->storage_hint = TRANSIENT_STORAGE;

	AZ(bo->stats);
	bo->stats = &wrk->stats;
	AN(bo->fetch_objcore);
	obj = STV_NewObject(bo, bo->storage_hint, l, nhttp);
	if (obj == NULL) {
		/*
		 * Try to salvage the transaction by allocating a
		 * shortlived object on Transient storage.
		 */
		if (bo->exp.ttl > cache_param->shortlived)
			bo->exp.ttl = cache_param->shortlived;
		bo->exp.grace = 0.0;
		bo->exp.keep = 0.0;
		obj = STV_NewObject(bo, TRANSIENT_STORAGE, l, nhttp);
	}
	bo->stats = NULL;
	if (obj == NULL) {
		(void)VFP_Error(bo, "Could not get storage");
		VDI_CloseFd(&bo->vbc);
		return (F_STP_DONE);
	}
	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);

	bo->storage_hint = NULL;

	AZ(bo->fetch_obj);
	bo->fetch_obj = obj;

	if (bo->do_gzip || (bo->is_gzip && !bo->do_gunzip))
		obj->gziped = 1;

	if (vary != NULL) {
		obj->vary = (void *)WS_Copy(obj->http->ws,
		    VSB_data(vary), varyl);
		AN(obj->vary);
		VRY_Validate(obj->vary);
		VSB_delete(vary);
	}

	obj->vxid = bo->vsl->wid;
	obj->response = bo->err_code;
	WS_Assert(bo->ws_o);

	/* Filter into object */
	hp = bo->beresp;
	hp2 = obj->http;

	hp2->logtag = HTTP_Obj;
	http_FilterResp(hp, hp2, bo->uncacheable ? HTTPH_R_PASS : HTTPH_A_INS);
	http_CopyHome(hp2);

	if (http_GetHdr(hp, H_Last_Modified, &b))
		obj->last_modified = VTIM_parse(b);
	else
		obj->last_modified = floor(bo->exp.entered);

	assert(WRW_IsReleased(wrk));

	/*
	 * Ready to fetch the body
	 */

	assert(bo->refcount >= 1);

	if (!(bo->fetch_obj->objcore->flags & OC_F_PRIVATE)) {
		EXP_Insert(obj);
		AN(obj->objcore->ban);
	}

	AZ(bo->ws_o->overflow);
	if (bo->do_stream)
		HSH_Unbusy(&wrk->stats, obj->objcore);

	if (bo->vfp == NULL)
		bo->vfp = &VFP_nop;

	assert(bo->state == BOS_REQ_DONE);
	VBO_setstate(bo, BOS_FETCHING);

	V1F_fetch_body(wrk, bo);
	if (!bo->do_stream)
		HSH_Unbusy(&wrk->stats, obj->objcore);
	HSH_Complete(obj->objcore);

	assert(bo->refcount >= 1);

	if (bo->state != BOS_FAILED)
		VBO_setstate(bo, BOS_FINISHED);

VSLb(bo->vsl, SLT_Debug, "YYY REF %d %d", bo->refcount, bo->fetch_obj->objcore->refcnt);
	return (F_STP_DONE);
}
Example no. 29
0
static int
cnt_miss(struct sess *sp)
{
    struct worker *wrk;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    wrk = sp->wrk;
    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_NOTNULL(sp->vcl, VCL_CONF_MAGIC);

    AZ(wrk->obj);
    AN(wrk->objcore);
    CHECK_OBJ_NOTNULL(wrk->busyobj, BUSYOBJ_MAGIC);
    WS_Reset(wrk->ws, NULL);
    wrk->busyobj = VBO_GetBusyObj(wrk);
    http_Setup(wrk->busyobj->bereq, wrk->ws);
    http_FilterHeader(sp, HTTPH_R_FETCH);
    http_ForceGet(wrk->busyobj->bereq);
    if (cache_param->http_gzip_support) {
        /*
         * We always ask the backend for gzip, even if the
         * client doesn't grok it.  We will uncompress for
         * the minority of clients which don't.
         */
        http_Unset(wrk->busyobj->bereq, H_Accept_Encoding);
        http_SetHeader(wrk, sp->vsl_id, wrk->busyobj->bereq,
                       "Accept-Encoding: gzip");
    }
    wrk->connect_timeout = 0;
    wrk->first_byte_timeout = 0;
    wrk->between_bytes_timeout = 0;

    VCL_miss_method(sp);

    switch(sp->handling) {
    case VCL_RET_ERROR:
        AZ(HSH_Deref(wrk, wrk->objcore, NULL));
        wrk->objcore = NULL;
        http_Setup(wrk->busyobj->bereq, NULL);
        VBO_DerefBusyObj(wrk, &wrk->busyobj);
        sp->step = STP_ERROR;
        return (0);
    case VCL_RET_PASS:
        AZ(HSH_Deref(wrk, wrk->objcore, NULL));
        wrk->objcore = NULL;
        VBO_DerefBusyObj(wrk, &wrk->busyobj);
        sp->step = STP_PASS;
        return (0);
    case VCL_RET_FETCH:
        CHECK_OBJ_NOTNULL(wrk->busyobj, BUSYOBJ_MAGIC);
        sp->step = STP_FETCH;
        return (0);
    case VCL_RET_RESTART:
        AZ(HSH_Deref(wrk, wrk->objcore, NULL));
        wrk->objcore = NULL;
        VBO_DerefBusyObj(wrk, &wrk->busyobj);
        INCOMPL();
    default:
        WRONG("Illegal action in vcl_miss{}");
    }
}
Example no. 30
0
void
PipeSession(struct sess *sp)
{
	struct vbc *vc;
	struct worker *w;
	struct pollfd fds[2];
	int i;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(sp->wrk, WORKER_MAGIC);
	w = sp->wrk;

	sp->vbc = VDI_GetFd(NULL, sp);
	if (sp->vbc == NULL)
		return;
	vc = sp->vbc;
	(void)VTCP_blocking(vc->fd);

	WRW_Reserve(w, &vc->fd);
	sp->wrk->acct_tmp.hdrbytes +=
	    http_Write(w, sp->vsl_id, sp->wrk->bereq, 0);

	if (sp->htc->pipeline.b != NULL)
		sp->wrk->acct_tmp.bodybytes +=
		    WRW_Write(w, sp->htc->pipeline.b, Tlen(sp->htc->pipeline));

	i = WRW_FlushRelease(w);

	if (i) {
		SES_Close(sp, "pipe");
		VDI_CloseFd(sp);
		return;
	}

	sp->t_resp = VTIM_real();

	memset(fds, 0, sizeof fds);

	// XXX: not yet (void)VTCP_linger(vc->fd, 0);
	fds[0].fd = vc->fd;
	fds[0].events = POLLIN | POLLERR;

	// XXX: not yet (void)VTCP_linger(sp->fd, 0);
	fds[1].fd = sp->fd;
	fds[1].events = POLLIN | POLLERR;

	while (fds[0].fd > -1 || fds[1].fd > -1) {
		fds[0].revents = 0;
		fds[1].revents = 0;
		i = poll(fds, 2, params->pipe_timeout * 1000);
		if (i < 1)
			break;
		if (fds[0].revents && rdf(vc->fd, sp->fd)) {
			if (fds[1].fd == -1)
				break;
			(void)shutdown(vc->fd, SHUT_RD);
			(void)shutdown(sp->fd, SHUT_WR);
			fds[0].events = 0;
			fds[0].fd = -1;
		}
		if (fds[1].revents && rdf(sp->fd, vc->fd)) {
			if (fds[0].fd == -1)
				break;
			(void)shutdown(sp->fd, SHUT_RD);
			(void)shutdown(vc->fd, SHUT_WR);
			fds[1].events = 0;
			fds[1].fd = -1;
		}
	}
	SES_Close(sp, "pipe");
	VDI_CloseFd(sp);
}