Example #1
static void
wslr(struct vsl_log *vsl, enum VSL_tag_e tag, int id, txt t)
{
	unsigned l, mlen;

	Tcheck(t);
	if (id == -1)
		id = vsl->wid;
	mlen = cache_param->shm_reclen;

	/* Truncate */
	l = Tlen(t);
	if (l > mlen) {
		l = mlen;
		t.e = t.b + l;
	}

	assert(vsl->wlp < vsl->wle);

	/* Wrap if necessary */
	if (VSL_END(vsl->wlp, l) >= vsl->wle)
		VSL_Flush(vsl, 1);
	assert(VSL_END(vsl->wlp, l) < vsl->wle);
	memcpy(VSL_DATA(vsl->wlp), t.b, l);
	vsl_hdr(tag, vsl->wlp, l, id);
	vsl->wlp = VSL_END(vsl->wlp, l);
	assert(vsl->wlp < vsl->wle);
	vsl->wlr++;
	if (cache_param->diag_bitmap & 0x10000)
		VSL_Flush(vsl, 0);
}
Example #2
static void
wsl(struct vsl_log *vsl, enum VSL_tag_e tag, int id, const char *fmt,
    va_list ap)
{
	char *p;
	unsigned n, mlen;
	txt t;

	AN(fmt);
	mlen = cache_param->shm_reclen;

	if (strchr(fmt, '%') == NULL) {
		t.b = TRUST_ME(fmt);
		t.e = strchr(t.b, '\0');
		wslr(vsl, tag, id, t);
	} else {
		assert(vsl->wlp < vsl->wle);

		/* Wrap if we cannot fit a full size record */
		if (VSL_END(vsl->wlp, mlen) >= vsl->wle)
			VSL_Flush(vsl, 1);

		p = VSL_DATA(vsl->wlp);
		n = vsnprintf(p, mlen, fmt, ap);
		if (n > mlen)
			n = mlen;	/* we truncate long fields */
		vsl_hdr(tag, vsl->wlp, n, id);
		vsl->wlp = VSL_END(vsl->wlp, n);
		assert(vsl->wlp < vsl->wle);
		vsl->wlr++;
	}
	if (cache_param->diag_bitmap & 0x10000)
		VSL_Flush(vsl, 0);
}
Example #3
void
SES_ReleaseReq(struct req *req)
{
	struct sess *sp;
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	/* Make sure the request counters have all been zeroed */
#define ACCT(foo) \
	AZ(req->acct.foo);
#include "tbl/acct_fields_req.h"
#undef ACCT

	AZ(req->vcl);
	if (req->vsl->wid)
		VSL_End(req->vsl);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	MPL_AssertSane(req);
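	/* Flush any remaining log records before the req is returned to its pool */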
	VSL_Flush(req->vsl, 0);
	req->sp = NULL;
	MPL_Free(pp->mpl_req, req);
}
Example #4
void
VDI_CloseFd(struct vbc **vbp)
{
	struct backend *bp;
	struct vbc *vc;

	AN(vbp);
	vc = *vbp;
	*vbp = NULL;
	CHECK_OBJ_NOTNULL(vc, VBC_MAGIC);
	CHECK_OBJ_NOTNULL(vc->backend, BACKEND_MAGIC);
	assert(vc->fd >= 0);

	bp = vc->backend;

	VSLb(vc->vsl, SLT_BackendClose, "%s", bp->display_name);

	/*
	 * Checkpoint log to flush all info related to this connection
	 * before the OS reuses the FD
	 */
	VSL_Flush(vc->vsl, 0);
	vc->vsl = NULL;

	VTCP_close(&vc->fd);
	VBE_DropRefConn(bp);
	vc->backend = NULL;
	VBE_ReleaseConn(vc);
}
Example #5
void
VDI_RecycleFd(struct vbc **vbp)
{
	struct backend *bp;
	struct vbc *vc;

	AN(vbp);
	vc = *vbp;
	*vbp = NULL;
	CHECK_OBJ_NOTNULL(vc, VBC_MAGIC);
	CHECK_OBJ_NOTNULL(vc->backend, BACKEND_MAGIC);
	assert(vc->fd >= 0);

	bp = vc->backend;

	VSLb(vc->vsl, SLT_BackendReuse, "%s", bp->display_name);

	/* XXX: revisit this hack */
	VSL_Flush(vc->vsl, 0);
	vc->vsl = NULL;

	Lck_Lock(&bp->mtx);
	VSC_C_main->backend_recycle++;
	VTAILQ_INSERT_HEAD(&bp->connlist, vc, list);
	VBE_DropRefLocked(bp);
}
Example #6
static enum req_fsm_nxt
cnt_restart(struct worker *wrk, struct req *req)
{
	unsigned wid, owid;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	req->director_hint = NULL;
	if (++req->restarts >= cache_param->max_restarts) {
		VSLb(req->vsl, SLT_VCL_Error, "Too many restarts");
		req->err_code = 503;
		req->req_step = R_STP_SYNTH;
	} else {
		wid = VXID_Get(&wrk->vxid_pool);
		// XXX: ReqEnd + ReqAcct ?
		VSLb_ts_req(req, "Restart", W_TIM_real(wrk));
		VSLb(req->vsl, SLT_Link, "req %u restart", wid);
		VSLb(req->vsl, SLT_End, "%s", "");
		VSL_Flush(req->vsl, 0);
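		/* Rebadge the log with the new VXID, keeping the client marker */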
		owid = req->vsl->wid & VSL_IDENTMASK;
		req->vsl->wid = wid | VSL_CLIENTMARKER;
		VSLb(req->vsl, SLT_Begin, "req %u restart", owid);
		VSLb_ts_req(req, "Start", req->t_prev);
		req->err_code = 0;
		req->req_step = R_STP_RECV;
	}
	return (REQ_FSM_MORE);
}
Example #7
void
SES_ReleaseReq(struct req *req)
{
	struct sess *sp;
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AZ(req->vcl);
#define ACCT(foo)	AZ(req->acct_req.foo);
#include "tbl/acct_fields.h"
#undef ACCT
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	MPL_AssertSane(req);
	if (req->vsl->wid != 0)
		/* Non-released VXID - assume it was from a req */
		VSLb(req->vsl, SLT_End, "%s", "");
	VSL_Flush(req->vsl, 0);
	req->sp = NULL;
	MPL_Free(pp->mpl_req, req);
}
Example #8
static void
cnt_diag(struct req *req, const char *state)
{

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	/* Log the FSM state transition and flush so it is immediately visible */
	VSLb(req->vsl,  SLT_Debug, "vxid %u STP_%s sp %p vcl %p",
	    req->vsl->wid, state, req->sp, req->vcl);
	VSL_Flush(req->vsl, 0);
}
Example #9
void
VBO_DerefBusyObj(struct worker *wrk, struct busyobj **pbo)
{
	struct busyobj *bo;
	struct objcore *oc = NULL;
	unsigned r;

	CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);
	AN(pbo);
	bo = *pbo;
	*pbo = NULL;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_obj, OBJECT_MAGIC);
	if (bo->fetch_objcore != NULL) {
		oc = bo->fetch_objcore;
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
		CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
		Lck_Lock(&oc->objhead->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&oc->objhead->mtx);
	} else {
		Lck_Lock(&bo->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&bo->mtx);
	}

	if (r)
		return;

	/* Last reference gone: close out and flush the busyobj's log */
	VSLb(bo->vsl, SLT_End, "%s", "");
	VSL_Flush(bo->vsl, 0);

	if (bo->fetch_objcore != NULL) {
		AN(wrk);
		(void)HSH_DerefObjCore(&wrk->stats, &bo->fetch_objcore);
	}

	VCL_Rel(&bo->vcl);

	if (bo->vary != NULL)
		free(bo->vary);

	memset(&bo->refcount, 0,
	    sizeof *bo - offsetof(struct busyobj, refcount));

	if (cache_param->bo_cache && wrk != NULL && wrk->nbo == NULL)
		wrk->nbo = bo;
	else
		VBO_Free(&bo);
}
Example #10
void
SES_ReleaseReq(struct sess *sp)
{
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);
	CHECK_OBJ_NOTNULL(sp->req, REQ_MAGIC);
	MPL_AssertSane(sp->req);
	VSL_Flush(sp->req->vsl, 0);
	MPL_Free(pp->mpl_req, sp->req);
	sp->req = NULL;
}
Example #11
static struct objcore *
ban_lurker_getfirst(struct vsl_log *vsl, struct ban *bt)
{
	struct objhead *oh;
	struct objcore *oc;

	while (1) {
		Lck_Lock(&ban_mtx);
		oc = VTAILQ_FIRST(&bt->objcore);
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
		if (oc == &oc_marker) {
			VTAILQ_REMOVE(&bt->objcore, oc, ban_list);
			Lck_Unlock(&ban_mtx);
			return (NULL);
		}
		oh = oc->objhead;
		CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
		if (!Lck_Trylock(&oh->mtx)) {
			if (oc->refcnt == 0) {
				Lck_Unlock(&oh->mtx);
			} else {
				/*
				 * We got the lock, and the oc is not being
				 * dismantled under our feet.
				 * Take it off the ban and (optimistically)
				 * put it on the destination ban
				 */
				AZ(oc->flags & OC_F_BUSY);
				oc->refcnt += 1;
				VTAILQ_REMOVE(&bt->objcore, oc, ban_list);
				VTAILQ_INSERT_TAIL(&bt->objcore, oc, ban_list);
				Lck_Unlock(&oh->mtx);
				Lck_Unlock(&ban_mtx);
				break;
			}
		}

		/* Try again, later */
		Lck_Unlock(&ban_mtx);
		VSC_C_main->bans_lurker_contention++;
		VSL_Flush(vsl, 0);
		VTIM_sleep(cache_param->ban_lurker_sleep);
	}
	return (oc);
}
Example #12
static void *
exp_thread(struct worker *wrk, void *priv)
{
	struct objcore *oc;
	double t = 0, tnext = 0;
	struct exp_priv *ep;
	unsigned flags = 0;

	CAST_OBJ_NOTNULL(ep, priv, EXP_PRIV_MAGIC);
	ep->wrk = wrk;
	VSL_Setup(&ep->vsl, NULL, 0);
	ep->heap = binheap_new(NULL, object_cmp, object_update);
	AN(ep->heap);
	while (1) {

		Lck_Lock(&ep->mtx);
		oc = VSTAILQ_FIRST(&ep->inbox);
		CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
		if (oc != NULL) {
			assert(oc->refcnt >= 1);
			VSTAILQ_REMOVE(&ep->inbox, oc, objcore, exp_list);
			VSC_C_main->exp_received++;
			tnext = 0;
			flags = oc->exp_flags;
			if (flags & OC_EF_REMOVE)
				oc->exp_flags = 0;
			else
				oc->exp_flags &= OC_EF_REFD;
		} else if (tnext > t) {
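			/* Inbox empty: flush pending log records and stats,
			 * then wait for new work or the next expiry */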
			VSL_Flush(&ep->vsl, 0);
			Pool_Sumstat(wrk);
			(void)Lck_CondWait(&ep->condvar, &ep->mtx, tnext);
		}
		Lck_Unlock(&ep->mtx);

		t = VTIM_real();

		if (oc != NULL)
			exp_inbox(ep, oc, flags);
		else
			tnext = exp_expire(ep, t);
	}
	NEEDLESS(return NULL);
}
Example #13
static enum fetch_step
vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
{
	unsigned owid, wid;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));

	// XXX: BereqEnd + BereqAcct ?
	wid = VXID_Get(&wrk->vxid_pool);
	VSLb(bo->vsl, SLT_Link, "bereq %u retry", wid);
	VSLb(bo->vsl, SLT_End, "%s", "");
	VSL_Flush(bo->vsl, 0);
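	/* Rebadge the log with the new VXID, keeping the backend marker */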
	owid = bo->vsl->wid & VSL_IDENTMASK;
	bo->vsl->wid = wid | VSL_BACKENDMARKER;
	VSLb(bo->vsl, SLT_Begin, "bereq %u retry", owid);
	VSLb_ts_busyobj(bo, "Start", bo->t_prev);

	return (F_STP_STARTFETCH);
}
Example #14
static void
pan_ic(const char *func, const char *file, int line, const char *cond,
    int err, enum vas_e kind)
{
	const char *q;
	struct req *req;
	struct busyobj *bo;

	AZ(pthread_mutex_lock(&panicstr_mtx)); /* Won't be released,
						  we're going to die
						  anyway */
	switch(kind) {
	case VAS_WRONG:
		VSB_printf(pan_vsp,
		    "Wrong turn at %s:%d:\n%s\n", file, line, cond);
		break;
	case VAS_VCL:
		VSB_printf(pan_vsp,
		    "Panic from VCL:\n  %s\n", cond);
		break;
	case VAS_MISSING:
		VSB_printf(pan_vsp,
		    "Missing errorhandling code in %s(), %s line %d:\n"
		    "  Condition(%s) not true.",
		    func, file, line, cond);
		break;
	case VAS_INCOMPLETE:
		VSB_printf(pan_vsp,
		    "Incomplete code in %s(), %s line %d:\n",
		    func, file, line);
		break;
	default:
	case VAS_ASSERT:
		VSB_printf(pan_vsp,
		    "Assert error in %s(), %s line %d:\n"
		    "  Condition(%s) not true.\n",
		    func, file, line, cond);
		break;
	}
	if (err)
		VSB_printf(pan_vsp, "errno = %d (%s)\n", err, strerror(err));

	q = THR_GetName();
	if (q != NULL)
		VSB_printf(pan_vsp, "thread = (%s)\n", q);

	VSB_printf(pan_vsp, "ident = %s,%s\n",
	    VSB_data(vident) + 1, WAIT_GetName());

	pan_backtrace();

	/* Unless short panics are requested, dump the active req and
	 * busyobj and flush their logs into shared memory */
	if (!FEATURE(FEATURE_SHORT_PANIC)) {
		req = THR_GetRequest();
		if (req != NULL) {
			pan_req(req);
			VSL_Flush(req->vsl, 0);
		}
		bo = THR_GetBusyobj();
		if (bo != NULL) {
			pan_busyobj(bo);
			VSL_Flush(bo->vsl, 0);
		}
	}
	VSB_printf(pan_vsp, "\n");
	VSB_bcat(pan_vsp, "", 1);	/* NUL termination */

	if (FEATURE(FEATURE_NO_COREDUMP))
		exit(4);
	else
		abort();
}
Example #15
static struct vbc *
vbe_GetVbe(struct busyobj *bo, struct vdi_simple *vs)
{
	struct vbc *vc;
	struct backend *bp;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);
	bp = vs->backend;
	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);

	/* first look for vbc's we can recycle */
	while (1) {
		Lck_Lock(&bp->mtx);
		vc = VTAILQ_FIRST(&bp->connlist);
		if (vc != NULL) {
			bp->refcount++;
			assert(vc->backend == bp);
			assert(vc->fd >= 0);
			AN(vc->addr);
			VTAILQ_REMOVE(&bp->connlist, vc, list);
		}
		Lck_Unlock(&bp->mtx);
		if (vc == NULL)
			break;
		if (vbe_CheckFd(vc->fd)) {
			/* XXX locking of stats */
			VSC_C_main->backend_reuse += 1;
			VSLb(bo->vsl, SLT_Backend, "%d %s %s",
			    vc->fd, bo->director->vcl_name,
			    bp->display_name);
			vc->vdis = vs;
			vc->recycled = 1;
			return (vc);
		}
		VSC_C_main->backend_toolate++;
		VSLb(bo->vsl, SLT_BackendClose, "%d %s toolate",
		    vc->fd, bp->display_name);

		/* Checkpoint log to flush all info related to this connection
		   before the OS reuses the FD */
		VSL_Flush(bo->vsl, 0);

		VTCP_close(&vc->fd);
		VBE_DropRefConn(bp);
		vc->backend = NULL;
		VBE_ReleaseConn(vc);
	}

	if (!VBE_Healthy(bp, NULL)) {
		VSC_C_main->backend_unhealthy++;
		return (NULL);
	}

	if (vs->vrt->max_connections > 0 &&
	    bp->n_conn >= vs->vrt->max_connections) {
		VSC_C_main->backend_busy++;
		return (NULL);
	}

	vc = vbe_NewConn();
	assert(vc->fd == -1);
	AZ(vc->backend);
	bes_conn_try(bo, vc, vs);
	if (vc->fd < 0) {
		VBE_ReleaseConn(vc);
		VSC_C_main->backend_fail++;
		return (NULL);
	}
	vc->backend = bp;
	VSC_C_main->backend_conn++;
	VSLb(bo->vsl, SLT_Backend, "%d %s %s",
	    vc->fd, bo->director->vcl_name, bp->display_name);
	vc->vdis = vs;
	return (vc);
}
Example #16
static void
pan_ic(const char *func, const char *file, int line, const char *cond,
    enum vas_e kind)
{
	const char *q;
	struct req *req;
	struct busyobj *bo;
	struct sigaction sa;
	int err = errno;

	AZ(pthread_mutex_lock(&panicstr_mtx)); /* Won't be released,
						  we're going to die
						  anyway */

	/*
	 * should we trigger a SIGSEGV while handling a panic, our sigsegv
	 * handler would hide the panic, so we need to reset the handler to
	 * default
	 */
	memset(&sa, 0, sizeof sa);
	sa.sa_handler = SIG_DFL;
	(void)sigaction(SIGSEGV, &sa, NULL);
	/* Set SIGABRT back to default so the final abort() has the
	   desired effect */
	(void)sigaction(SIGABRT, &sa, NULL);

	switch(kind) {
	case VAS_WRONG:
		VSB_printf(pan_vsb,
		    "Wrong turn at %s:%d:\n%s\n", file, line, cond);
		break;
	case VAS_VCL:
		VSB_printf(pan_vsb,
		    "Panic from VCL:\n  %s\n", cond);
		break;
	case VAS_MISSING:
		VSB_printf(pan_vsb,
		    "Missing errorhandling code in %s(), %s line %d:\n"
		    "  Condition(%s) not true.",
		    func, file, line, cond);
		break;
	case VAS_INCOMPLETE:
		VSB_printf(pan_vsb,
		    "Incomplete code in %s(), %s line %d:\n",
		    func, file, line);
		break;
	default:
	case VAS_ASSERT:
		VSB_printf(pan_vsb,
		    "Assert error in %s(), %s line %d:\n"
		    "  Condition(%s) not true.\n",
		    func, file, line, cond);
		break;
	}
	if (err)
		VSB_printf(pan_vsb, "errno = %d (%s)\n", err, strerror(err));

	q = THR_GetName();
	if (q != NULL)
		VSB_printf(pan_vsb, "thread = (%s)\n", q);

	VSB_printf(pan_vsb, "version = %s\n", VCS_version);
	VSB_printf(pan_vsb, "ident = %s,%s\n",
	    VSB_data(vident) + 1, Waiter_GetName());

	pan_backtrace(pan_vsb);

	if (!FEATURE(FEATURE_SHORT_PANIC)) {
		req = THR_GetRequest();
		if (req != NULL) {
			pan_req(pan_vsb, req);
			VSL_Flush(req->vsl, 0);
		}
		bo = THR_GetBusyobj();
		if (bo != NULL) {
			pan_busyobj(pan_vsb, bo);
			VSL_Flush(bo->vsl, 0);
		}
	}
	VSB_printf(pan_vsb, "\n");
	VSB_bcat(pan_vsb, "", 1);	/* NUL termination */

	if (FEATURE(FEATURE_NO_COREDUMP))
		exit(4);
	else
		abort();
}
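Note: examples #6 and #13 share a recurring pattern: end the current record, flush it, then rebadge the log with a fresh VXID. The sketch below distills that pattern. It is not code from the Varnish tree; the helper name rotate_client_vxid is hypothetical, and it assumes a fully initialized struct vsl_log and a new VXID already obtained from VXID_Get().

static void
rotate_client_vxid(struct vsl_log *vsl, unsigned new_wid)
{
	unsigned owid;

	/* Link the old transaction to the new one and close its record */
	VSLb(vsl, SLT_Link, "req %u restart", new_wid);
	VSLb(vsl, SLT_End, "%s", "");
	/* Push the old records to the shared memory log before the id changes */
	VSL_Flush(vsl, 0);
	/* Rebadge the log: remember the old id, install the new one */
	owid = vsl->wid & VSL_IDENTMASK;
	vsl->wid = new_wid | VSL_CLIENTMARKER;
	VSLb(vsl, SLT_Begin, "req %u restart", owid);
}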