Example #1
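/*
 * Set up an ESI parser state on the response workspace.  A non-NULL
 * callback marks the output for gzip'ing and replaces the default
 * callback.
 */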
struct vep_state *
VEP_Init(struct vfp_ctx *vc, const struct http *req, vep_callback_t *cb,
    void *cb_priv)
{
	struct vep_state *vep;

	CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(req, HTTP_MAGIC);
	vep = WS_Alloc(vc->resp->ws, sizeof *vep);
	AN(vep);

	INIT_OBJ(vep, VEP_MAGIC);
	vep->url = req->hd[HTTP_HDR_URL].b;
	vep->vc = vc;
	vep->vsb = VSB_new_auto();
	AN(vep->vsb);

	if (cb != NULL) {
		vep->dogzip = 1;
		/* XXX */
		VSB_printf(vep->vsb, "%c", VEC_GZ);
		vep->cb = cb;
		vep->cb_priv = cb_priv;
	} else {
		vep->cb = vep_default_cb;
		vep->cb_priv = &vep->cb_x;
	}

	vep->state = VEP_START;
	vep->crc = crc32(0L, Z_NULL, 0);
	vep->crcp = crc32(0L, Z_NULL, 0);

	vep->startup = 1;
	return (vep);
}
Example #2
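/*
 * Resolve addr:port, wrap the single resulting address in a
 * vrt_backend and swap the new director in under the lock, deleting
 * any previous instance.
 */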
static void
dyn_dir_init(VRT_CTX, struct xyzzy_debug_dyn *dyn,
    VCL_STRING addr, VCL_STRING port, VCL_PROBE probe)
{
	struct addrinfo hints, *res = NULL;
	struct suckaddr *sa;
	VCL_BACKEND dir, dir2;
	struct vrt_backend vrt;

	CHECK_OBJ_NOTNULL(dyn, VMOD_DEBUG_DYN_MAGIC);
	XXXAN(addr);
	XXXAN(port);

	INIT_OBJ(&vrt, VRT_BACKEND_MAGIC);
	vrt.port = port;
	vrt.vcl_name = dyn->vcl_name;
	vrt.hosthdr = addr;
	vrt.probe = probe;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	AZ(getaddrinfo(addr, port, &hints, &res));
	XXXAZ(res->ai_next);

	sa = VSA_Malloc(res->ai_addr, res->ai_addrlen);
	AN(sa);
	if (VSA_Get_Proto(sa) == AF_INET) {
		vrt.ipv4_addr = addr;
		vrt.ipv4_suckaddr = sa;
	} else if (VSA_Get_Proto(sa) == AF_INET6) {
		vrt.ipv6_addr = addr;
		vrt.ipv6_suckaddr = sa;
	} else
		WRONG("Wrong proto family");

	freeaddrinfo(res);

	dir = VRT_new_backend(ctx, &vrt);
	AN(dir);

	/*
	 * NB: A real dynamic backend should not replace the previous
	 * instance if the new one is identical.  We do it here because
	 * the d* tests require a replacement.
	 */
	AZ(pthread_mutex_lock(&dyn->mtx));
	dir2 = dyn->dir;
	dyn->dir = dir;
	AZ(pthread_mutex_unlock(&dyn->mtx));

	if (dir2 != NULL)
		VRT_delete_backend(ctx, &dir2);

	free(sa);
}
Example #3
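/*
 * Compile VCL source into a shared object.  Returns the .so filename
 * on success, NULL on compilation failure or when only -C output was
 * wanted.
 */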
char *
mgt_VccCompile(struct cli *cli, const char *vclname, const char *vclsrc,
    int C_flag)
{
	struct vcc_priv vp;
	struct vsb *sb;
	unsigned status;

	AN(cli);

	sb = VSB_new_auto();
	XXXAN(sb);

	INIT_OBJ(&vp, VCC_PRIV_MAGIC);
	vp.src = vclsrc;

	VSB_printf(sb, "./vcl_%s.c", vclname);
	AZ(VSB_finish(sb));
	vp.srcfile = strdup(VSB_data(sb));
	AN(vp.srcfile);
	VSB_clear(sb);

	VSB_printf(sb, "./vcl_%s.so", vclname);
	AZ(VSB_finish(sb));
	vp.libfile = strdup(VSB_data(sb));
	AN(vp.libfile);
	VSB_clear(sb);

	status = mgt_vcc_compile(&vp, sb, C_flag);

	AZ(VSB_finish(sb));
	if (VSB_len(sb) > 0)
		VCLI_Out(cli, "%s", VSB_data(sb));
	VSB_delete(sb);

	(void)unlink(vp.srcfile);
	free(vp.srcfile);

	if (status || C_flag) {
		(void)unlink(vp.libfile);
		free(vp.libfile);
		if (!C_flag) {
			VCLI_Out(cli, "VCL compilation failed");
			VCLI_SetResult(cli, CLIS_PARAM);
		}
		return (NULL);
	}

	VCLI_Out(cli, "VCL compiled.\n");

	return (vp.libfile);
}
Example #4
static void
vws_init(struct waiter *w)
{
	struct vws *vws;

	CHECK_OBJ_NOTNULL(w, WAITER_MAGIC);
	vws = w->priv;
	INIT_OBJ(vws, VWS_MAGIC);
	vws->waiter = w;
	vws->dport = port_create();
	assert(vws->dport >= 0);

	AZ(pthread_create(&vws->thread, NULL, vws_thread, vws));
}
Example #5
static void*
cooldown_thread(void *priv)
{
	struct vrt_ctx ctx;

	AN(priv);
	INIT_OBJ(&ctx, VRT_CTX_MAGIC);
	ctx.vcl = (struct vcl*)priv;

	VTIM_sleep(vcl_release_delay);
	VRT_rel_vcl(&ctx);
	return (NULL);
}
Example #6
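/*
 * Stack frame for a background thread: a worker struct lives on the
 * thread's own stack and the thread function is never supposed to
 * return.
 */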
static void *
wrk_bgthread(void *arg)
{
	struct bgthread *bt;
	struct worker wrk;

	CAST_OBJ_NOTNULL(bt, arg, BGTHREAD_MAGIC);
	THR_SetName(bt->name);
	INIT_OBJ(&wrk, WORKER_MAGIC);

	(void)bt->func(&wrk, bt->priv);

	WRONG("BgThread terminated");

	NEEDLESS_RETURN(NULL);
}
Example #7
void
h2h_decode_init(const struct h2_sess *h2, struct h2h_decode *d)
{

	CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC);
	CHECK_OBJ_NOTNULL(h2->new_req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(h2->new_req->http, HTTP_MAGIC);
	AN(d);
	INIT_OBJ(d, H2H_DECODE_MAGIC);
	VHD_Init(d->vhd);
	d->out_l = WS_Reserve(h2->new_req->http->ws, 0);
	assert(d->out_l > 0);	/* Can't do any work without any buffer
				   space. Require non-zero size. */
	d->out = h2->new_req->http->ws->f;
	d->reset = d->out;
}
Example #8
static void
vwp_init(struct waiter *w)
{
	struct vwp *vwp;

	CHECK_OBJ_NOTNULL(w, WAITER_MAGIC);
	vwp = w->priv;
	INIT_OBJ(vwp, VWP_MAGIC);
	vwp->waiter = w;
	AZ(pipe(vwp->pipes));
	// XXX: set write pipe non-blocking

	vwp->hpoll = 1;
	vwp_extend_pollspace(vwp);
	vwp->pollfd[0].fd = vwp->pipes[0];
	vwp->pollfd[0].events = POLLIN;
	AZ(pthread_create(&vwp->thread, NULL, vwp_main, vwp));
}
Example #9
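/*
 * Build a VRT context from the req and/or busyobj, run the VCL method
 * function and log the call and its return action.  The aws snapshot
 * asserts that the method left no permanent allocations behind.
 */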
static void
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
    void *specific, unsigned method, vcl_func_f *func)
{
	uintptr_t aws;
	struct vrt_ctx ctx;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	INIT_OBJ(&ctx, VRT_CTX_MAGIC);
	if (req != NULL) {
		CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
		CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
		CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
		VCL_Req2Ctx(&ctx, req);
	}
	if (bo != NULL) {
		if (req)
			assert(method == VCL_MET_PIPE);
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
		VCL_Bo2Ctx(&ctx, bo);
	}
	assert(ctx.now != 0);
	ctx.syntax = ctx.vcl->conf->syntax;
	ctx.specific = specific;
	ctx.method = method;
	wrk->handling = 0;
	ctx.handling = &wrk->handling;
	aws = WS_Snapshot(wrk->aws);
	wrk->cur_method = method;
	wrk->seen_methods |= method;
	AN(ctx.vsl);
	VSLb(ctx.vsl, SLT_VCL_call, "%s", VCL_Method_Name(method));
	func(&ctx);
	VSLb(ctx.vsl, SLT_VCL_return, "%s", VCL_Return_Name(wrk->handling));
	wrk->cur_method |= 1;		// Magic marker
	if (wrk->handling == VCL_RET_FAIL)
		wrk->stats->vcl_fail++;

	/*
	 * VCL/Vmods are not allowed to make permanent allocations from
	 * wrk->aws, but they can reserve and return from it.
	 */
	assert(aws == WS_Snapshot(wrk->aws));
}
Example #10
static void*
cooldown_thread(void *priv)
{
	struct vrt_ctx ctx;
	struct priv_vcl *priv_vcl;

	CAST_OBJ_NOTNULL(priv_vcl, priv, PRIV_VCL_MAGIC);
	AN(priv_vcl->vcl);
	AN(priv_vcl->vclref);

	INIT_OBJ(&ctx, VRT_CTX_MAGIC);
	ctx.vcl = priv_vcl->vcl;

	VTIM_sleep(vcl_release_delay);
	VRT_rel_vcl(&ctx, &priv_vcl->vclref);
	priv_vcl->vcl = NULL;
	return (NULL);
}
Example #11
void
SES_NewPool(struct pool *pp, unsigned pool_no)
{
	char nb[8];

	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	bprintf(nb, "req%u", pool_no);
	pp->mpl_req = MPL_New(nb, &cache_param->req_pool,
	    &cache_param->workspace_client);
	bprintf(nb, "sess%u", pool_no);
	pp->mpl_sess = MPL_New(nb, &cache_param->sess_pool,
	    &cache_param->workspace_session);

	INIT_OBJ(&pp->wf, WAITFOR_MAGIC);
	pp->wf.func = ses_handle;
	pp->wf.tmo = &cache_param->timeout_idle;
	pp->waiter = Waiter_New();
}
Example #12
static int
dyn_uds_init(VRT_CTX, struct xyzzy_debug_dyn_uds *uds, VCL_STRING path)
{
	VCL_BACKEND dir, dir2;
	struct vrt_backend vrt;
	struct stat st;

	if (path == NULL) {
		VRT_fail(ctx, "path is NULL");
		return (-1);
	}
	if (*path != '/') {
		VRT_fail(ctx, "path must be an absolute path: %s", path);
		return (-1);
	}
	errno = 0;
	if (stat(path, &st) != 0) {
		VRT_fail(ctx, "Cannot stat path %s: %s", path, strerror(errno));
		return (-1);
	}
	if (!S_ISSOCK(st.st_mode)) {
		VRT_fail(ctx, "%s is not a socket", path);
		return (-1);
	}

	INIT_OBJ(&vrt, VRT_BACKEND_MAGIC);
	vrt.path = path;
	vrt.vcl_name = uds->vcl_name;
	vrt.hosthdr = "localhost";
	vrt.ipv4_suckaddr = NULL;
	vrt.ipv6_suckaddr = NULL;

	if ((dir = VRT_new_backend(ctx, &vrt)) == NULL)
		return (-1);

	AZ(pthread_mutex_lock(&uds->mtx));
	dir2 = uds->dir;
	uds->dir = dir;
	AZ(pthread_mutex_unlock(&uds->mtx));

	if (dir2 != NULL)
		VRT_delete_backend(ctx, &dir2);
	return (0);
}
Example #13
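/*
 * Wrap a caller-supplied buffer as a workspace; the last usable byte
 * is reserved for a canary (0x15).
 */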
void
WS_Init(struct ws *ws, const char *id, void *space, unsigned len)
{

	DSL(DBG_WORKSPACE, 0,
	    "WS_Init(%p, \"%s\", %p, %u)", ws, id, space, len);
	assert(space != NULL);
	INIT_OBJ(ws, WS_MAGIC);
	ws->s = space;
	assert(PAOK(space));
	len = PRNDDN(len - 1);
	ws->e = ws->s + len;
	*ws->e = 0x15;
	ws->f = ws->s;
	assert(id[0] & 0x40);
	assert(strlen(id) < sizeof ws->id);
	strcpy(ws->id, id);
	WS_Assert(ws);
}
Example #14
void
V1L_Reserve(struct worker *wrk, struct ws *ws, int *fd, struct vsl_log *vsl,
    double t0)
{
	struct v1l *v1l;
	unsigned u;
	void *res;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AZ(wrk->v1l);

	if (WS_Overflowed(ws))
		return;
	res = WS_Snapshot(ws);
	v1l = WS_Alloc(ws, sizeof *v1l);
	if (v1l == NULL)
		return;
	INIT_OBJ(v1l, V1L_MAGIC);

	v1l->ws = ws;
	v1l->res = res;

	u = WS_Reserve(ws, 0);
	u = PRNDDN(u);
	u /= sizeof(struct iovec);
	if (u == 0) {
		WS_Release(ws, 0);
		WS_MarkOverflow(ws);
		return;
	} else if (u > IOV_MAX)
		u = IOV_MAX;
	v1l->iov = (void*)PRNDUP(ws->f);
	v1l->siov = u;
	v1l->ciov = u;
	v1l->werr = 0;
	v1l->liov = 0;
	v1l->niov = 0;
	v1l->wfd = fd;
	v1l->t0 = t0;
	v1l->vsl = vsl;
	wrk->v1l = v1l;
}
Example #15
void
VBE_fill_director(struct backend *be)
{
	struct director *d;

	CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);

	INIT_OBJ(be->director, DIRECTOR_MAGIC);
	d = be->director;
	d->priv = be;
	d->name = "backend";
	d->vcl_name = be->vcl_name;
	d->http1pipe = vbe_dir_http1pipe;
	d->healthy = vbe_dir_healthy;
	d->gethdrs = vbe_dir_gethdrs;
	d->getbody = vbe_dir_getbody;
	d->getip = vbe_dir_getip;
	d->finish = vbe_dir_finish;
	d->panic = vbe_panic;
}
Example #16
static void
vwe_init(struct waiter *w)
{
	struct vwe *vwe;
	struct epoll_event ee;

	CHECK_OBJ_NOTNULL(w, WAITER_MAGIC);
	vwe = w->priv;
	INIT_OBJ(vwe, VWE_MAGIC);
	vwe->waiter = w;

	vwe->epfd = epoll_create(1);
	assert(vwe->epfd >= 0);
	Lck_New(&vwe->mtx, lck_waiter);
	AZ(pipe(vwe->pipe));
	ee.events = EPOLLIN | EPOLLRDHUP;
	ee.data.ptr = vwe;
	AZ(epoll_ctl(vwe->epfd, EPOLL_CTL_ADD, vwe->pipe[0], &ee));

	AZ(pthread_create(&vwe->thread, NULL, vwe_thread, vwe));
}
Example #17
void
WRK_Thread(struct pool *qp, size_t stacksize, unsigned thread_workspace)
{
	struct worker *w, ww;
	unsigned char ws[thread_workspace];
	uintptr_t u;

	AN(qp);
	AN(stacksize);
	AN(thread_workspace);

	THR_SetName("cache-worker");
	w = &ww;
	INIT_OBJ(w, WORKER_MAGIC);
	w->lastused = NAN;
	AZ(pthread_cond_init(&w->cond, NULL));

	WS_Init(w->aws, "wrk", ws, thread_workspace);

	u = getpagesize();
	AN(u);
	u -= 1U;
	w->stack_start = (((uintptr_t)&qp) + u) & ~u;

	/* XXX: assuming stack grows down. */
	w->stack_end = w->stack_start - stacksize;

	VSL(SLT_WorkThread, 0, "%p start", w);

	Pool_Work_Thread(qp, w);
	AZ(w->pool);

	VSL(SLT_WorkThread, 0, "%p end", w);
	if (w->vcl != NULL)
		VCL_Rel(&w->vcl);
	AZ(pthread_cond_destroy(&w->cond));
	if (w->nbo != NULL)
		VBO_Free(&w->nbo);
	HSH_Cleanup(w);
	Pool_Sumstat(w);
}
Example #18
void
VDP_push(struct req *req, vdp_bytes *func, void *priv, int bottom)
{
	struct vdp_entry *vdp;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(func);

	vdp = WS_Alloc(req->ws, sizeof *vdp);
	AN(vdp);
	INIT_OBJ(vdp, VDP_ENTRY_MAGIC);
	vdp->func = func;
	vdp->priv = priv;
	if (bottom)
		VTAILQ_INSERT_TAIL(&req->vdp, vdp, list);
	else
		VTAILQ_INSERT_HEAD(&req->vdp, vdp, list);
	req->vdp_nxt = VTAILQ_FIRST(&req->vdp);

	AZ(vdp->func(req, VDP_INIT, &vdp->priv, NULL, 0));
}
Example #19
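/*
 * Park an idle session in the waiter: a struct waited is reserved on
 * the session workspace and handed to the pool's waiter.
 */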
void
SES_Wait(struct sess *sp, const struct transport *xp)
{
	struct pool *pp;
	struct waited *wp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(xp, TRANSPORT_MAGIC);
	pp = sp->pool;
	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	assert(sp->fd > 0);
	/*
	 * XXX: waiter_epoll prevents us from zeroing the struct because
	 * XXX: it keeps state across calls.
	 */
	if (VTCP_nonblocking(sp->fd)) {
		SES_Delete(sp, SC_REM_CLOSE, NAN);
		return;
	}

	/*
	 * put struct waited on the workspace
	 */
	if (WS_Reserve(sp->ws, sizeof(struct waited))
	    < sizeof(struct waited)) {
		SES_Delete(sp, SC_OVERLOAD, NAN);
		return;
	}
	wp = (void*)sp->ws->f;
	INIT_OBJ(wp, WAITED_MAGIC);
	wp->fd = sp->fd;
	wp->priv1 = sp;
	wp->priv2 = (uintptr_t)xp;
	wp->idle = sp->t_idle;
	wp->func = ses_handle;
	wp->tmo = &cache_param->timeout_idle;
	if (Wait_Enter(pp->waiter, wp))
		SES_Delete(sp, SC_PIPE_OVERFLOW, NAN);
}
Example #20
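/*
 * Instantiate the compiled-in waiter implementation, co-allocating
 * its private state right after the struct waiter.
 */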
struct waiter *
Waiter_New(void)
{
	struct waiter *w;

	AN(waiter);
	AN(waiter->name);
	AN(waiter->init);
	AN(waiter->enter);
	AN(waiter->fini);

	w = calloc(1, sizeof (struct waiter) + waiter->size);
	AN(w);
	INIT_OBJ(w, WAITER_MAGIC);
	w->priv = (void*)(w + 1);
	w->impl = waiter;
	VTAILQ_INIT(&w->waithead);
	w->heap = binheap_new(w, waited_cmp, waited_update);

	waiter->init(w);

	return (w);
}
Example #21
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct req *req;
	enum req_fsm_nxt s;
	struct transport xp;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	if (preq->esi_level >= cache_param->max_esi_depth)
		return;

	req = Req_New(wrk, preq->sp);
	req->req_body_status = REQ_BODY_NONE;
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);
	VSLb(req->vsl, SLT_Begin, "req %u esi", VXID(preq->vsl->wid));
	VSLb(preq->vsl, SLT_Link, "req %u esi", VXID(req->vsl->wid));
	req->esi_level = preq->esi_level + 1;

	if (preq->esi_level == 0)
		assert(preq->top == preq);
	else
		CHECK_OBJ_NOTNULL(preq->top, REQ_MAGIC);

	req->top = preq->top;

	HTTP_Copy(req->http0, preq->http0);

	req->http0->ws = req->ws;
	req->http0->vsl = req->vsl;
	req->http0->logtag = SLT_ReqMethod;
	req->http0->conds = 0;

	http_SetH(req->http0, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0')  {
		http_Unset(req->http0, H_Host);
		http_SetHeader(req->http0, host);
	}

	http_ForceField(req->http0, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http0, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http0, H_If_Modified_Since);
	http_Unset(req->http0, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http0, H_Range);

	/* Set Accept-Encoding according to what we want */
	http_Unset(req->http0, H_Accept_Encoding);
	if (ecx->isgzip)
		http_ForceHeader(req->http0, H_Accept_Encoding, "gzip");

	/* Client content already taken care of */
	http_Unset(req->http0, H_Content_Length);

	/* Reset request to status before we started messing with it */
	HTTP_Copy(req->http, req->http0);

	req->vcl = preq->vcl;
	preq->vcl = NULL;
	req->wrk = preq->wrk;

	/*
	 * XXX: We should decide if we should cache the director
	 * XXX: or not (for session/backend coupling).  Until then
	 * XXX: make sure we don't trip up the check in vcl_recv.
	 */
	req->req_step = R_STP_RECV;
	req->t_req = preq->t_req;
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));

	INIT_OBJ(&xp, TRANSPORT_MAGIC);
	xp.deliver = VED_Deliver;
	req->transport = &xp;
	req->transport_priv = ecx;

	THR_SetRequest(req);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	req->ws_req = WS_Snapshot(req->ws);

	while (1) {
		req->wrk = wrk;
		s = CNT_Request(wrk, req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "loop waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		AZ(req->wrk);
		(void)usleep(10000);
	}

	VRTPRIV_dynamic_kill(req->sp->privs, (uintptr_t)req);
	CNT_AcctLogCharge(wrk->stats, req);
	VSL_End(req->vsl);

	preq->vcl = req->vcl;
	req->vcl = NULL;

	req->wrk = NULL;

	THR_SetRequest(preq);
	Req_Release(req);
}
Example #22
char *
mgt_VccCompile(struct cli *cli, struct vclprog *vcl, const char *vclname,
    const char *vclsrc, const char *vclsrcfile, int C_flag)
{
	struct vcc_priv vp;
	struct vsb *sb;
	unsigned status;
	char buf[1024];
	FILE *fcs;
	char **av;
	int ac;

	AN(cli);

	sb = VSB_new_auto();
	XXXAN(sb);

	INIT_OBJ(&vp, VCC_PRIV_MAGIC);
	vp.vclsrc = vclsrc;
	vp.vclsrcfile = vclsrcfile;

	/*
	 * The subdirectory must have a unique name to evade, with 100% certainty,
	 * the refcounting semantics of dlopen(3).
	 *
	 * Bad implementations of dlopen(3) think the shlib you are opening
	 * is the same, if the filename is the same as one already opened.
	 *
	 * Sensible implementations do a stat(2) and require st_ino and
	 * st_dev to also match.
	 *
	 * A correct implementation would run on filesystems which tickle
	 * st_gen, and also insist that it be identical, before declaring
	 * a match.
	 *
	 * Since no correct implementations are known to exist, we are subject
	 * to really interesting races if you do something like:
	 *
	 *	(running on 'boot' vcl)
	 *	vcl.load foo /foo.vcl
	 *	vcl.use foo
	 *	few/slow requests
	 *	vcl.use boot
	 *	vcl.discard foo
	 *	vcl.load foo /foo.vcl	// dlopen(3) says "same-same"
	 *	vcl.use foo
	 *
	 * The discard of the first 'foo' lingers on a non-zero reference
	 * count, and when it finally runs it trashes the second 'foo' because
	 * dlopen(3) decided they were really the same thing.
	 *
	 * The best way to reproduce this is to have regexps in the VCL.
	 */
	VSB_printf(sb, "vcl_%s.%.9f", vclname, VTIM_real());
	AZ(VSB_finish(sb));
	vp.dir = strdup(VSB_data(sb));
	AN(vp.dir);

	if (VJ_make_subdir(vp.dir, "VCL", cli->sb)) {
		free(vp.dir);
		VSB_destroy(&sb);
		VCLI_Out(cli, "VCL compilation failed");
		VCLI_SetResult(cli, CLIS_PARAM);
		return (NULL);
	}

	VSB_clear(sb);
	VSB_printf(sb, "%s/%s", vp.dir, VGC_SRC);
	AZ(VSB_finish(sb));
	vp.csrcfile = strdup(VSB_data(sb));
	AN(vp.csrcfile);
	VSB_clear(sb);

	VSB_printf(sb, "%s/%s", vp.dir, VGC_LIB);
	AZ(VSB_finish(sb));
	vp.libfile = strdup(VSB_data(sb));
	AN(vp.libfile);
	VSB_clear(sb);

	status = mgt_vcc_compile(&vp, sb, C_flag);

	AZ(VSB_finish(sb));
	if (VSB_len(sb) > 0)
		VCLI_Out(cli, "%s", VSB_data(sb));
	VSB_destroy(&sb);

	if (status || C_flag) {
		(void)unlink(vp.csrcfile);
		free(vp.csrcfile);
		(void)unlink(vp.libfile);
		free(vp.libfile);
		(void)rmdir(vp.dir);
		free(vp.dir);
		if (status) {
			VCLI_Out(cli, "VCL compilation failed");
			VCLI_SetResult(cli, CLIS_PARAM);
		}
		return (NULL);
	}

	fcs = fopen(vp.csrcfile, "r");
	AN(fcs);
	while (1) {
		AN(fgets(buf, sizeof buf, fcs));
		if (memcmp(buf, VCC_INFO_PREFIX, strlen(VCC_INFO_PREFIX)))
			break;
		av = VAV_Parse(buf, &ac, 0);
		AN(av);
		AZ(av[0]);
		AZ(strcmp(av[1], "/*"));
		AZ(strcmp(av[ac-1], "*/"));
		if (!strcmp(av[3], "VCL"))
			mgt_vcl_depends(vcl, av[4]);
		else if (!strcmp(av[3], "VMOD"))
			mgt_vcl_vmod(vcl, av[4], av[5]);
		else
			WRONG("Wrong VCCINFO");
		VAV_Free(av);
	}
	AZ(fclose(fcs));

	(void)unlink(vp.csrcfile);
	free(vp.csrcfile);

	free(vp.dir);

	VCLI_Out(cli, "VCL compiled.\n");

	return (vp.libfile);
}
Example #23
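/*
 * Get a connection to the backend: refuse if it is unhealthy or at
 * its connection limit, otherwise fetch a connection from the TCP
 * pool and set up the HTTP connection state on the busyobj.
 */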
static struct vbc *
vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo)
{
	struct vbc *vc;
	double tmod;
	char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
	char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
	AN(bp->vsc);

	if (!VBE_Healthy(bp, NULL)) {
		// XXX: per backend stats ?
		VSC_C_main->backend_unhealthy++;
		return (NULL);
	}

	if (bp->max_connections > 0 && bp->n_conn >= bp->max_connections) {
		// XXX: per backend stats ?
		VSC_C_main->backend_busy++;
		return (NULL);
	}

	AZ(bo->htc);
	bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
	if (bo->htc == NULL)
		/* XXX: counter ? */
		return (NULL);
	bo->htc->doclose = SC_NULL;

	FIND_TMO(connect_timeout, tmod, bo, bp);
	vc = VBT_Get(bp->tcp_pool, tmod, bp, wrk);
	if (vc == NULL) {
		// XXX: Per backend stats ?
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		return (NULL);
	}

	assert(vc->fd >= 0);
	AN(vc->addr);

	Lck_Lock(&bp->mtx);
	bp->n_conn++;
	bp->vsc->conn++;
	bp->vsc->req++;
	Lck_Unlock(&bp->mtx);

	if (bp->proxy_header != 0)
		VPX_Send_Proxy(vc->fd, bp->proxy_header, bo->sp);

	VTCP_myname(vc->fd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
	VTCP_hisname(vc->fd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
	VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s",
	    vc->fd, bp->display_name, abuf2, pbuf2, abuf1, pbuf1);

	INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
	bo->htc->priv = vc;
	bo->htc->fd = vc->fd;
	FIND_TMO(first_byte_timeout,
	    bo->htc->first_byte_timeout, bo, bp);
	FIND_TMO(between_bytes_timeout,
	    bo->htc->between_bytes_timeout, bo, bp);
	return (vc);
}
Example #24
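/*
 * Deliver a gzip'ed object as an ESI include: the gzip framing is
 * stripped, and the member's CRC and length are folded into the
 * parent response's running CRC.
 */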
static void
ved_stripgzip(struct req *req, const struct boc *boc)
{
	ssize_t l;
	char *p;
	uint32_t icrc;
	uint32_t ilen;
	uint8_t *dbits;
	struct ecx *ecx;
	struct ved_foo foo;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

	INIT_OBJ(&foo, VED_FOO_MAGIC);
	foo.req = req;
	foo.preq = ecx->preq;
	memset(foo.tailbuf, 0xdd, sizeof foo.tailbuf);

	/* OA_GZIPBITS is not valid until BOS_FINISHED */
	if (boc != NULL)
		ObjWaitState(req->objcore, BOS_FINISHED);

	AN(ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED));

	/*
	 * This is the interesting case: Deliver all the deflate
	 * blocks, stripping the "LAST" bit of the last one and
	 * padding it, as necessary, to a byte boundary.
	 */

	p = ObjGetAttr(req->wrk, req->objcore, OA_GZIPBITS, &l);
	AN(p);
	assert(l == 32);
	foo.start = vbe64dec(p);
	foo.last = vbe64dec(p + 8);
	foo.stop = vbe64dec(p + 16);
	foo.olen = ObjGetLen(req->wrk, req->objcore);
	assert(foo.start > 0 && foo.start < foo.olen * 8);
	assert(foo.last > 0 && foo.last < foo.olen * 8);
	assert(foo.stop > 0 && foo.stop < foo.olen * 8);
	assert(foo.last >= foo.start);
	assert(foo.last < foo.stop);

	/* The start bit must be byte aligned. */
	AZ(foo.start & 7);

	dbits = WS_Alloc(req->ws, 8);
	AN(dbits);
	foo.dbits = dbits;
	(void)ObjIterate(req->wrk, req->objcore, &foo, ved_objiterate);
	/* XXX: error check ?? */
	(void)ved_bytes(req, foo.preq, VDP_FLUSH, NULL, 0);

	icrc = vle32dec(foo.tailbuf);
	ilen = vle32dec(foo.tailbuf + 4);

	ecx->crc = crc32_combine(ecx->crc, icrc, ilen);
	ecx->l_crc += ilen;
}
Example #25
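/*
 * Carve a req and its fixed-size companions (three http structures,
 * VSL buffer, vfc, vdc, htc) out of one mempool allocation and give
 * the remainder to the req workspace.
 */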
struct req *
Req_New(const struct worker *wrk, struct sess *sp)
{
	struct pool *pp;
	struct req *req;
	uint16_t nhttp;
	unsigned sz, hl;
	char *p, *e;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->pool;
	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);

	req = MPL_Get(pp->mpl_req, &sz);
	AN(req);
	req->magic = REQ_MAGIC;
	req->sp = sp;

	e = (char*)req + sz;
	p = (char*)(req + 1);
	p = (void*)PRNDUP(p);
	assert(p < e);

	nhttp = (uint16_t)cache_param->http_max_hdr;
	hl = HTTP_estimate(nhttp);

	req->http = HTTP_create(p, nhttp, hl);
	p += hl;
	p = (void*)PRNDUP(p);
	assert(p < e);

	req->http0 = HTTP_create(p, nhttp, hl);
	p += hl;
	p = (void*)PRNDUP(p);
	assert(p < e);

	req->resp = HTTP_create(p, nhttp, hl);
	p += hl;
	p = (void*)PRNDUP(p);
	assert(p < e);

	sz = cache_param->vsl_buffer;
	VSL_Setup(req->vsl, p, sz);
	p += sz;
	p = (void*)PRNDUP(p);

	req->vfc = (void*)p;
	INIT_OBJ(req->vfc, VFP_CTX_MAGIC);
	p = (void*)PRNDUP(p + sizeof(*req->vfc));

	req->vdc = (void*)p;
	INIT_OBJ(req->vdc, VDP_CTX_MAGIC);
	VTAILQ_INIT(&req->vdc->vdp);
	p = (void*)PRNDUP(p + sizeof(*req->vdc));

	req->htc = (void*)p;
	INIT_OBJ(req->htc, HTTP_CONN_MAGIC);
	p = (void*)PRNDUP(p + sizeof(*req->htc));

	assert(p < e);

	WS_Init(req->ws, "req", p, e - p);

	req->t_first = NAN;
	req->t_prev = NAN;
	req->t_req = NAN;

	req->topreq = req;

	return (req);
}