static void
vca_eev(const struct epoll_event *ep)
{
	struct sess *ss[NEEV], *sp;
	int i, j;

	AN(ep->data.ptr);
	if (ep->data.ptr == vca_pipes) {
		if (ep->events & EPOLLIN || ep->events & EPOLLPRI) {
			j = 0;
			i = read(vca_pipes[0], ss, sizeof ss);
			if (i == -1 && errno == EAGAIN)
				return;
			while (i >= sizeof ss[0]) {
				CHECK_OBJ_NOTNULL(ss[j], SESS_MAGIC);
				assert(ss[j]->fd >= 0);
				AZ(ss[j]->obj);
				VTAILQ_INSERT_TAIL(&sesshead, ss[j], list);
				vca_cond_modadd(ss[j]->fd, ss[j]);
				j++;
				i -= sizeof ss[0];
			}
			assert(i == 0);
		}
	} else {
		CAST_OBJ_NOTNULL(sp, ep->data.ptr, SESS_MAGIC);
		if (ep->events & EPOLLIN || ep->events & EPOLLPRI) {
			i = HTC_Rx(sp->htc);
			if (i == 0) {
				vca_modadd(sp->fd, sp, EPOLL_CTL_MOD);
				return;	/* more needed */
			}
			VTAILQ_REMOVE(&sesshead, sp, list);
			vca_handover(sp, i);
		} else if (ep->events & EPOLLERR) {
			VTAILQ_REMOVE(&sesshead, sp, list);
			vca_close_session(sp, "ERR");
			SES_Delete(sp);
		} else if (ep->events & EPOLLHUP) {
			VTAILQ_REMOVE(&sesshead, sp, list);
			vca_close_session(sp, "HUP");
			SES_Delete(sp);
		} else if (ep->events & EPOLLRDHUP) {
			VTAILQ_REMOVE(&sesshead, sp, list);
			vca_close_session(sp, "RHUP");
			SES_Delete(sp);
		}
	}
}
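The sessions read from vca_pipes[0] above are written there by the acceptor thread. A minimal sketch of that writer side, assuming the classic pipe handover; the helper name is hypothetical:

static void
vca_pipe_pass(struct sess *sp)	/* hypothetical name for the writer side */
{
	ssize_t i;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	/* whole pointers go down the pipe; writes below PIPE_BUF are atomic */
	i = write(vca_pipes[1], &sp, sizeof sp);
	assert(i == sizeof sp);
}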
int
SES_Schedule(struct sess *sp)
{
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	AZ(sp->wrk);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);

	AZ(sp->wrk);
	sp->task.func = ses_pool_task;
	sp->task.priv = sp;

	if (Pool_Task(pp->pool, &sp->task, POOL_QUEUE_FRONT)) {
		VSC_C_main->client_drop_late++;
		sp->t_idle = VTIM_real();
		if (sp->req != NULL && sp->req->vcl != NULL) {
			/*
			 * A session parked on a busy object can come here
			 * after it wakes up.  Lose the VCL reference.
			 */
			VCL_Rel(&sp->req->vcl);
		}
		SES_Delete(sp, "dropped", sp->t_idle);
		return (1);
	}
	return (0);
}
int
SES_Schedule(struct sess *sp)
{
	struct sesspool *pp;

	pp = ses_getpool(sp);
	AZ(sp->wrk);

	AN(pp->pool);

	if (Pool_Schedule(pp->pool, sp)) {
		VSC_C_main->client_drop_late++;
		sp->t_idle = VTIM_real();
		if (sp->req->vcl != NULL) {
			/*
			 * A session parked on a busy object can come here
			 * after it wakes up.  Lose the VCL reference.
			 */
			VCL_Rel(&sp->req->vcl);
		}
		SES_Delete(sp, "dropped", sp->t_idle);
		return (1);
	}
	return (0);
}
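The second variant above leans on a ses_getpool() helper; a sketch consistent with the checks spelled out inline in the first variant and with the sp->mem/sessmem layout used later in this listing (an assumption, not verified against the exact source):

static struct sesspool *
ses_getpool(const struct sess *sp)
{
	struct sessmem *sm;
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	sm = sp->mem;
	CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC);
	pp = sm->pool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	return (pp);
}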
static void
vwk_sess_ev(struct vwk *vwk, const struct kevent *kp, double now)
{
	struct sess *sp;

	AN(kp->udata);
	assert(kp->udata != vwk->pipes);
	CAST_OBJ_NOTNULL(sp, kp->udata, SESS_MAGIC);
	DSL(DBG_WAITER, sp->vxid, "KQ: sp %p kev data %lu flags 0x%x%s",
	    sp, (unsigned long)kp->data, kp->flags,
	    (kp->flags & EV_EOF) ? " EOF" : "");

	if (kp->data > 0) {
		VTAILQ_REMOVE(&vwk->sesshead, sp, list);
		SES_Handle(sp, now);
		return;
	} else if (kp->flags & EV_EOF) {
		VTAILQ_REMOVE(&vwk->sesshead, sp, list);
		SES_Delete(sp, SC_REM_CLOSE, now);
		return;
	} else {
		VSL(SLT_Debug, sp->vxid,
		    "KQ: sp %p kev data %lu flags 0x%x%s",
		    sp, (unsigned long)kp->data, kp->flags,
		    (kp->flags & EV_EOF) ? " EOF" : "");
	}
}
static void *
vca_kqueue_main(void *arg)
{
    struct kevent ke[NKEV], *kp;
    int j, n, dotimer;
    double deadline;
    struct sess *sp;

    THR_SetName("cache-kqueue");
    (void)arg;

    kq = kqueue();
    assert(kq >= 0);

    j = 0;
    EV_SET(&ke[j], 0, EVFILT_TIMER, EV_ADD, 0, 100, NULL);
    j++;
    EV_SET(&ke[j], vca_pipes[0], EVFILT_READ, EV_ADD, 0, 0, vca_pipes);
    j++;
    AZ(kevent(kq, ke, j, NULL, 0, NULL));

    nki = 0;
    while (1) {
        dotimer = 0;
        n = kevent(kq, ki, nki, ke, NKEV, NULL);
        assert(n >= 1 && n <= NKEV);
        nki = 0;
        for (kp = ke, j = 0; j < n; j++, kp++) {
            if (kp->filter == EVFILT_TIMER) {
                dotimer = 1;
                continue;
            }
            assert(kp->filter == EVFILT_READ);
            vca_kev(kp);
        }
        if (!dotimer)
            continue;
        /*
         * Make sure we have no pending changes for the fd's
         * we are about to close; if the accept(2) in the
         * other thread creates new fd's between our close and
         * the kevent(2) at the top of this loop, the kernel
         * would not know we meant "the old fd of this number".
         */
        vca_kq_flush();
        deadline = TIM_real() - params->sess_timeout;
        for (;;) {
            sp = VTAILQ_FIRST(&sesshead);
            if (sp == NULL)
                break;
            if (sp->t_open > deadline)
                break;
            VTAILQ_REMOVE(&sesshead, sp, list);
            // XXX: not yet (void)TCP_linger(sp->fd, 0);
            vca_close_session(sp, "timeout");
            SES_Delete(sp);
        }
    }
}
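vca_kqueue_main() accumulates kevent changes in the ki[]/nki arrays it passes to kevent(2), and vca_kev() further down re-arms sessions with vca_kq_sess(). A sketch of that batching pair, assuming the ki[], nki and kq globals already referenced above (hedged, not the verified source):

static void
vca_kq_flush(void)
{

    if (nki == 0)
        return;
    AZ(kevent(kq, ki, nki, NULL, 0, NULL));
    nki = 0;
}

static void
vca_kq_sess(struct sess *sp, short arm)
{

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    assert(sp->fd >= 0);
    EV_SET(&ki[nki], sp->fd, EVFILT_READ, arm, 0, 0, sp);
    if (++nki == NKEV)
        vca_kq_flush();
}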
void
SES_Handle(struct sess *sp, int status)
{

	switch (status) {
	case -2:
		SES_Delete(sp, "blast");
		break;
	case -1:
		SES_Delete(sp, "no request");
		break;
	case 1:
		sp->step = STP_START;
		(void)SES_Schedule(sp);
		break;
	default:
		WRONG("Unexpected return from HTC_Rx()");
	}
}
static void
ses_handle(struct waited *wp, enum wait_event ev, double now)
{
	struct sess *sp;
	struct pool *pp;
	struct pool_task *tp;
	const struct transport *xp;

	CHECK_OBJ_NOTNULL(wp, WAITED_MAGIC);
	CAST_OBJ_NOTNULL(sp, wp->priv1, SESS_MAGIC);
	CAST_OBJ_NOTNULL(xp, (const void*)wp->priv2, TRANSPORT_MAGIC);
	AN(wp->priv2);
	assert((void *)sp->ws->f == wp);
	wp->magic = 0;
	wp = NULL;

	WS_Release(sp->ws, 0);

	switch (ev) {
	case WAITER_TIMEOUT:
		SES_Delete(sp, SC_RX_TIMEOUT, now);
		break;
	case WAITER_REMCLOSE:
		SES_Delete(sp, SC_REM_CLOSE, now);
		break;
	case WAITER_ACTION:
		pp = sp->pool;
		CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
		assert(sizeof *tp <= WS_Reserve(sp->ws, sizeof *tp));
		tp = (void*)sp->ws->f;
		tp->func = xp->unwait;
		tp->priv = sp;
		if (Pool_Task(pp, tp, TASK_QUEUE_REQ))
			SES_Delete(sp, SC_OVERLOAD, now);
		break;
	case WAITER_CLOSE:
		WRONG("Should not see WAITER_CLOSE on client side");
		break;
	default:
		WRONG("Wrong event in ses_handle");
	}
}
static void
vwe_eev(struct vwe *vwe, const struct epoll_event *ep, double now)
{
	struct sess *ss[NEEV], *sp;
	int i, j;

	AN(ep->data.ptr);
	if (ep->data.ptr == vwe->pipes) {
		if (ep->events & EPOLLIN || ep->events & EPOLLPRI) {
			j = 0;
			i = read(vwe->pipes[0], ss, sizeof ss);
			if (i == -1 && errno == EAGAIN)
				return;
			while (i >= sizeof ss[0]) {
				CHECK_OBJ_NOTNULL(ss[j], SESS_MAGIC);
				assert(ss[j]->fd >= 0);
				VTAILQ_INSERT_TAIL(&vwe->sesshead, ss[j], list);
				vwe_cond_modadd(vwe, ss[j]->fd, ss[j]);
				j++;
				i -= sizeof ss[0];
			}
			assert(i == 0);
		}
	} else {
		CAST_OBJ_NOTNULL(sp, ep->data.ptr, SESS_MAGIC);
		if (ep->events & EPOLLIN || ep->events & EPOLLPRI) {
			VTAILQ_REMOVE(&vwe->sesshead, sp, list);
			SES_Handle(sp, now);
		} else if (ep->events & EPOLLERR) {
			VTAILQ_REMOVE(&vwe->sesshead, sp, list);
			SES_Delete(sp, SC_REM_CLOSE, now);
		} else if (ep->events & EPOLLHUP) {
			VTAILQ_REMOVE(&vwe->sesshead, sp, list);
			SES_Delete(sp, SC_REM_CLOSE, now);
		} else if (ep->events & EPOLLRDHUP) {
			VTAILQ_REMOVE(&vwe->sesshead, sp, list);
			SES_Delete(sp, SC_REM_CLOSE, now);
		}
	}
}
static void
vca_kev(const struct kevent *kp)
{
    int i, j;
    struct sess *sp;
    struct sess *ss[NKEV];

    AN(kp->udata);
    if (kp->udata == vca_pipes) {
        j = 0;
        i = read(vca_pipes[0], ss, sizeof ss);
        if (i == -1 && errno == EAGAIN)
            return;
        while (i >= sizeof ss[0]) {
            CHECK_OBJ_NOTNULL(ss[j], SESS_MAGIC);
            assert(ss[j]->sp_fd >= 0);
            AZ(ss[j]->obj);
            VTAILQ_INSERT_TAIL(&sesshead, ss[j], list);
            vca_kq_sess(ss[j], EV_ADD | EV_ONESHOT);
            j++;
            i -= sizeof ss[0];
        }
        assert(i == 0);
        return;
    }
    CAST_OBJ_NOTNULL(sp, kp->udata, SESS_MAGIC);
    DSL(0x04, SLT_Debug, sp->id, "KQ: sp %p kev data %lu flags 0x%x%s",
        sp, (unsigned long)kp->data, kp->flags,
        (kp->flags & EV_EOF) ? " EOF" : "");

    assert(sp->id == kp->ident);
    assert(sp->sp_fd == sp->id);
    if (kp->data > 0) {
        i = HTC_Rx(sp->htc);
        if (i == 0) {
            vca_kq_sess(sp, EV_ADD | EV_ONESHOT);
            return;	/* more needed */
        }
        VTAILQ_REMOVE(&sesshead, sp, list);
        vca_handover(sp, i);
        return;
    } else if (kp->flags & EV_EOF) {
        VTAILQ_REMOVE(&sesshead, sp, list);
        vca_close_session(sp, "EOF");
        SES_Delete(sp);
        return;
    } else {
        VSL(SLT_Debug, sp->id, "KQ: sp %p kev data %lu flags 0x%x%s",
            sp, (unsigned long)kp->data, kp->flags,
            (kp->flags & EV_EOF) ? " EOF" : "");
    }
}
static void
vca_ports_pass(struct sess *sp)
{
	int r;

	r = port_send(solaris_dport, 0, TRUST_ME(sp));
	if (r == -1 && errno == EAGAIN) {
		VSC_C_main->sess_pipe_overflow++;
		vca_close_session(sp, "session pipe overflow");
		SES_Delete(sp);
		return;
	}
	AZ(r);
}
void
SES_Wait(struct sess *sp, const struct transport *xp)
{
	struct pool *pp;
	struct waited *wp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(xp, TRANSPORT_MAGIC);
	pp = sp->pool;
	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	assert(sp->fd > 0);
	/*
	 * XXX: waiter_epoll prevents us from zeroing the struct because
	 * XXX: it keeps state across calls.
	 */
	if (VTCP_nonblocking(sp->fd)) {
		SES_Delete(sp, SC_REM_CLOSE, NAN);
		return;
	}

	/*
	 * put struct waited on the workspace
	 */
	if (WS_Reserve(sp->ws, sizeof(struct waited))
	    < sizeof(struct waited)) {
		SES_Delete(sp, SC_OVERLOAD, NAN);
		return;
	}
	wp = (void*)sp->ws->f;
	INIT_OBJ(wp, WAITED_MAGIC);
	wp->fd = sp->fd;
	wp->priv1 = sp;
	wp->priv2 = (uintptr_t)xp;
	wp->idle = sp->t_idle;
	wp->func = ses_handle;
	wp->tmo = &cache_param->timeout_idle;
	if (Wait_Enter(pp->waiter, wp))
		SES_Delete(sp, SC_PIPE_OVERFLOW, NAN);
}
static void *
vwe_thread(void *priv)
{
	struct epoll_event ev[NEEV], *ep;
	struct sess *sp;
	char junk;
	double now, deadline;
	int dotimer, i, n;
	struct vwe *vwe;

	CAST_OBJ_NOTNULL(vwe, priv, VWE_MAGIC);

	THR_SetName("cache-epoll");

	vwe->epfd = epoll_create(1);
	assert(vwe->epfd >= 0);

	vwe_modadd(vwe, vwe->pipes[0], vwe->pipes, EPOLL_CTL_ADD);
	vwe_modadd(vwe, vwe->timer_pipes[0], vwe->timer_pipes, EPOLL_CTL_ADD);

	while (1) {
		dotimer = 0;
		n = epoll_wait(vwe->epfd, ev, NEEV, -1);
		now = VTIM_real();
		for (ep = ev, i = 0; i < n; i++, ep++) {
			if (ep->data.ptr == vwe->timer_pipes &&
			    (ep->events == EPOLLIN || ep->events == EPOLLPRI))
			{
				assert(read(vwe->timer_pipes[0], &junk, 1));
				dotimer = 1;
			} else
				vwe_eev(vwe, ep, now);
		}
		if (!dotimer)
			continue;

		/* check for timeouts */
		deadline = now - cache_param->timeout_idle;
		for (;;) {
			sp = VTAILQ_FIRST(&vwe->sesshead);
			if (sp == NULL)
				break;
			if (sp->t_idle > deadline)
				break;
			VTAILQ_REMOVE(&vwe->sesshead, sp, list);
			// XXX: not yet VTCP_linger(sp->fd, 0);
			SES_Delete(sp, SC_RX_TIMEOUT, now);
		}
	}
	return (NULL);
}
void
SES_Handle(struct sess *sp, double now)
{
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);
	sp->task.func = ses_sess_pool_task;
	sp->task.priv = sp;
	if (Pool_Task(pp->pool, &sp->task, POOL_QUEUE_FRONT))
		SES_Delete(sp, SC_OVERLOAD, now);
}
void
SES_Wait(struct sess *sp)
{
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	/*
	 * XXX: waiter_epoll prevents us from zeroing the struct because
	 * XXX: it keeps state across calls.
	 */
	if (VTCP_nonblocking(sp->fd)) {
		SES_Delete(sp, SC_REM_CLOSE, NAN);
		return;
	}
	sp->waited.magic = WAITED_MAGIC;
	sp->waited.fd = sp->fd;
	sp->waited.ptr = sp;
	sp->waited.idle = sp->t_idle;
	if (Wait_Enter(pp->http1_waiter, &sp->waited))
		SES_Delete(sp, SC_PIPE_OVERFLOW, NAN);
}
static void *
vca_main(void *arg)
{
	struct epoll_event ev[NEEV], *ep;
	struct sess *sp;
	char junk;
	double deadline;
	int dotimer, i, n;

	THR_SetName("cache-epoll");
	(void)arg;

	epfd = epoll_create(1);
	assert(epfd >= 0);

	vca_modadd(vca_pipes[0], vca_pipes, EPOLL_CTL_ADD);
	vca_modadd(dotimer_pipe[0], dotimer_pipe, EPOLL_CTL_ADD);

	while (1) {
		dotimer = 0;
		n = epoll_wait(epfd, ev, NEEV, -1);
		for (ep = ev, i = 0; i < n; i++, ep++) {
			if (ep->data.ptr == dotimer_pipe &&
			    (ep->events == EPOLLIN || ep->events == EPOLLPRI))
			{
				assert(read(dotimer_pipe[0], &junk, 1));
				dotimer = 1;
			} else
				vca_eev(ep);
		}
		if (!dotimer)
			continue;

		/* check for timeouts */
		deadline = TIM_real() - params->sess_timeout;
		for (;;) {
			sp = VTAILQ_FIRST(&sesshead);
			if (sp == NULL)
				break;
			if (sp->t_open > deadline)
				break;
			VTAILQ_REMOVE(&sesshead, sp, list);
			// XXX: not yet VTCP_linger(sp->fd, 0);
			vca_close_session(sp, "timeout");
			SES_Delete(sp);
		}
	}
	return (NULL);
}
static inline void
vca_port_ev(port_event_t *ev)
{
	struct sess *sp;

	if (ev->portev_source == PORT_SOURCE_USER) {
		CAST_OBJ_NOTNULL(sp, ev->portev_user, SESS_MAGIC);
		assert(sp->fd >= 0);
		AZ(sp->obj);
		VTAILQ_INSERT_TAIL(&sesshead, sp, list);
		vca_add(sp->fd, sp);
	} else {
		int i;
		assert(ev->portev_source == PORT_SOURCE_FD);
		CAST_OBJ_NOTNULL(sp, ev->portev_user, SESS_MAGIC);
		assert(sp->fd >= 0);
		if (ev->portev_events & POLLERR) {
			vca_del(sp->fd);
			VTAILQ_REMOVE(&sesshead, sp, list);
			vca_close_session(sp, "EOF");
			SES_Delete(sp);
			return;
		}
		i = HTC_Rx(sp->htc);

		if (i == 0) {
			/* incomplete header, wait for more data */
			vca_add(sp->fd, sp);
			return;
		}

		/* 
		 * note: the original man page for port_associate(3C) states:
		 *
		 *    When an event for a PORT_SOURCE_FD object is retrieved,
		 *    the object no longer has an association with the port.
		 *
		 * This can be read as making the port_dissociate after
		 * port_getn() unnecessary, but in fact, port_dissociate
		 * should be used.
		 *
		 * Ref: http://opensolaris.org/jive/thread.jspa?threadID=129476&tstart=0
		 */
		vca_del(sp->fd);
		VTAILQ_REMOVE(&sesshead, sp, list);

		/* vca_handover will also handle errors */
		vca_handover(sp, i);
	}
	return;
}
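The vca_add()/vca_del() helpers used above wrap the event-port association; a sketch based on the standard Solaris event-port API (an assumption, not the verified source):

static void
vca_add(int fd, void *data)
{

	AZ(port_associate(solaris_dport, PORT_SOURCE_FD, (uintptr_t)fd,
	    POLLIN, data));
}

static void
vca_del(int fd)
{

	/* may fail if the event already fired and auto-dissociated the fd */
	(void)port_dissociate(solaris_dport, PORT_SOURCE_FD, (uintptr_t)fd);
}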
static void
ses_handle(struct waited *wp, enum wait_event ev, double now)
{
	struct sess *sp;
	struct sesspool *pp;
	struct pool_task *tp;

	CHECK_OBJ_NOTNULL(wp, WAITED_MAGIC);
	CAST_OBJ_NOTNULL(sp, wp->ptr, SESS_MAGIC);

	AZ(sp->ws->r);

	switch (ev) {
	case WAITER_TIMEOUT:
		SES_Delete(sp, SC_RX_TIMEOUT, now);
		break;
	case WAITER_REMCLOSE:
		SES_Delete(sp, SC_REM_CLOSE, now);
		break;
	case WAITER_ACTION:
		pp = sp->sesspool;
		CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
		AN(pp->pool);
		assert(sizeof *tp == WS_Reserve(sp->ws, sizeof *tp));
		tp = (void*)sp->ws->f;
		tp->func = SES_Proto_Sess;
		tp->priv = sp;
		if (Pool_Task(pp->pool, tp, POOL_QUEUE_FRONT))
			SES_Delete(sp, SC_OVERLOAD, now);
		break;
	case WAITER_CLOSE:
		WRONG("Should not see WAITER_CLOSE on client side");
		break;
	default:
		WRONG("Wrong event in ses_handle");
	}
}
static int
http1_req_cleanup(struct sess *sp, struct worker *wrk, struct req *req)
{
	AZ(wrk->aws->r);
	AZ(req->ws->r);
	Req_Cleanup(sp, wrk, req);

	if (sp->fd >= 0 && req->doclose != SC_NULL)
		SES_Close(sp, req->doclose);

	if (sp->fd < 0) {
		wrk->stats->sess_closed++;
		AZ(req->vcl);
		Req_Release(req);
		SES_Delete(sp, SC_NULL, NAN);
		return (1);
	}

	return (0);
}
static void
http1_reembark(struct worker *wrk, struct req *req)
{
	struct sess *sp;

	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	http1_setstate(sp, H1BUSY);

	if (!SES_Reschedule_Req(req))
		return;

	/* Couldn't schedule, ditch */
	wrk->stats->busy_wakeup--;
	wrk->stats->busy_killed++;
	AN(req->vcl);
	VCL_Rel(&req->vcl);
	CNT_AcctLogCharge(wrk->stats, req);
	Req_Release(req);
	SES_Delete(sp, SC_OVERLOAD, NAN);
	DSL(DBG_WAITINGLIST, req->vsl->wid, "kill from waiting list");
	usleep(10000);
}
int
SES_Schedule(struct sess *sp)
{
	struct sessmem *sm;
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	AZ(sp->wrk);

	sm = sp->mem;
	CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC);

	pp = sm->pool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);

	AN(pp->pool);

	if (Pool_Schedule(pp->pool, sp)) {
		SES_Delete(sp, "dropped");
		return (1);
	}
	return (0);
}
int
SES_ScheduleReq(struct req *req)
{
	struct sess *sp;
	struct sesspool *pp;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	pp = sp->sesspool;
	CHECK_OBJ_NOTNULL(pp, SESSPOOL_MAGIC);
	AN(pp->pool);

	sp->task.func = ses_req_pool_task;
	sp->task.priv = req;

	if (Pool_Task(pp->pool, &sp->task, POOL_QUEUE_FRONT)) {
		AN(req->vcl);
		VCL_Rel(&req->vcl);
		SES_Delete(sp, SC_OVERLOAD, NAN);
		return (1);
	}
	return (0);
}
static enum http1_cleanup_ret
http1_cleanup(struct sess *sp, struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(req->vcl, VCL_CONF_MAGIC);

	req->director_hint = NULL;
	req->restarts = 0;

	AZ(req->esi_level);

	if (req->vcl != NULL) {
		if (wrk->vcl != NULL)
			VCL_Rel(&wrk->vcl);
		wrk->vcl = req->vcl;
		req->vcl = NULL;
	}

	/* Charge and log byte counters */
	AN(req->vsl->wid);
	CNT_AcctLogCharge(wrk->stats, req);
	req->req_bodybytes = 0;
	req->resp_hdrbytes = 0;
	req->resp_bodybytes = 0;

	VSL_End(req->vsl);

	if (!isnan(req->t_prev) && req->t_prev > 0.)
		sp->t_idle = req->t_prev;
	else
		sp->t_idle = W_TIM_real(wrk);

	req->t_first = NAN;
	req->t_prev = NAN;
	req->t_req = NAN;
	req->req_body_status = REQ_BODY_INIT;

	req->hash_always_miss = 0;
	req->hash_ignore_busy = 0;

	if (sp->fd >= 0 && req->doclose != SC_NULL)
		SES_Close(sp, req->doclose);

	if (sp->fd < 0) {
		wrk->stats->sess_closed++;
		AZ(req->vcl);
		SES_ReleaseReq(req);
		SES_Delete(sp, SC_NULL, NAN);
		return (SESS_DONE_RET_GONE);
	}

	WS_Reset(req->ws, NULL);
	WS_Reset(wrk->aws, NULL);

	if (HTTP1_Reinit(req->htc) == HTTP1_COMPLETE) {
		AZ(req->vsl->wid);
		req->t_first = req->t_req = sp->t_idle;
		wrk->stats->sess_pipeline++;
		req->acct.req_hdrbytes += Tlen(req->htc->rxbuf);
		return (SESS_DONE_RET_START);
	} else {
		if (Tlen(req->htc->rxbuf))
			wrk->stats->sess_readahead++;
		return (SESS_DONE_RET_WAIT);
	}
}
static void *
vca_main(void *arg)
{
	struct sess *sp;

	/*
	 * timeouts:
	 *
	 * min_ts : Minimum timeout for port_getn
	 * min_t  : ^ equivalent in floating point representation
	 *
	 * max_ts : Maximum timeout for port_getn
	 * max_t  : ^ equivalent in floating point representation
	 *
	 * with (nevents == 1), we should always choose the correct port_getn
	 * timeout to check session timeouts, so max is just a safety measure
	 * (if this implementation is correct, it could be set to an "infinite"
	 *  value)
	 *
	 * with (nevents > 1), min and max define the acceptable range for
	 * - additional latency of keep-alive connections and
	 * - additional tolerance for handling session timeouts
	 *
	 */
	static struct timespec min_ts = {0L,    100L /*ms*/  * 1000L /*us*/  * 1000L /*ns*/};
	static double          min_t  = 0.1; /* 100    ms*/
	static struct timespec max_ts = {1L, 0L}; 		/* 1 second */
	static double	       max_t  = 1.0;			/* 1 second */

	struct timespec ts;
	struct timespec *timeout;

	(void)arg;

	solaris_dport = port_create();
	assert(solaris_dport >= 0);

	timeout = &max_ts;

	while (1) {
		port_event_t ev[MAX_EVENTS];
		int nevents, ei, ret;
		double now, deadline;

		/*
		 * XXX Do we want to scale this up dynamically to increase
		 *     efficiency in high throughput situations? - would need to
		 *     start with one to keep latency low at any rate
		 *
		 *     Note: when increasing nevents, we must lower min_ts
		 *	     and max_ts
		 */
		nevents = 1;

		/*
		 * see discussion in
		 * - https://issues.apache.org/bugzilla/show_bug.cgi?id=47645
		 * - http://mail.opensolaris.org/pipermail/networking-discuss/2009-August/011979.html
		 *
		 * comment from apr/poll/unix/port.c :
		 *
		 * This confusing API can return an event at the same time
		 * that it reports EINTR or ETIME.
		 *
		 */

		ret = port_getn(solaris_dport, ev, MAX_EVENTS, &nevents, timeout);

		if (ret < 0)
			assert((errno == EINTR) || (errno == ETIME));

		for (ei = 0; ei < nevents; ei++)
			vca_port_ev(ev + ei);

		/* check for timeouts */
		now = TIM_real();
		deadline = now - params->sess_timeout;

		/*
		 * This loop assumes that the oldest sessions are always at
		 * the beginning of the list (which is the case if we
		 * guarantee to enqueue at the tail only).
		 */

		for (;;) {
			sp = VTAILQ_FIRST(&sesshead);
			if (sp == NULL)
				break;
			if (sp->t_open > deadline)
				break;
			VTAILQ_REMOVE(&sesshead, sp, list);
			if (sp->fd != -1)
				vca_del(sp->fd);
			vca_close_session(sp, "timeout");
			SES_Delete(sp);
		}

		/*
		 * Calculate the timeout for the next port_getn
		 */

		if (sp) {
			double tmo = (sp->t_open + params->sess_timeout) - now;

			/* we should have removed all sps whose timeout has passed */
			assert(tmo > 0.0);

			if (tmo < min_t) {
				timeout = &min_ts;
			} else if (tmo > max_t) {
				timeout = &max_ts;
			} else {
				/* TIM_t2ts() ? see #630 */
				ts.tv_sec = (int)floor(tmo);
				ts.tv_nsec = 1e9 * (tmo - ts.tv_sec);
				timeout = &ts;
			}
		} else {
			timeout = &max_ts;
		}
	}
}
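The "TIM_t2ts() ? see #630" comment above points at factoring out the inline double-to-timespec conversion; a minimal sketch of such a helper, using only the floor() math already shown (assumes <math.h> and <time.h>):

/* Hypothetical helper; the loop above performs this conversion inline. */
static struct timespec
tim_t2ts(double t)
{
	struct timespec ts;

	ts.tv_sec = (time_t)floor(t);
	ts.tv_nsec = (long)(1e9 * (t - (double)ts.tv_sec));
	return (ts);
}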
static void *
vwk_thread(void *priv)
{
	struct vwk *vwk;
	struct kevent ke[NKEV], *kp;
	int j, n, dotimer;
	double now, deadline;
	struct sess *sp;

	CAST_OBJ_NOTNULL(vwk, priv, VWK_MAGIC);
	THR_SetName("cache-kqueue");

	vwk->kq = kqueue();
	assert(vwk->kq >= 0);

	j = 0;
	EV_SET(&ke[j], 0, EVFILT_TIMER, EV_ADD, 0, 100, NULL);
	j++;
	EV_SET(&ke[j], vwk->pipes[0], EVFILT_READ, EV_ADD, 0, 0, vwk->pipes);
	j++;
	AZ(kevent(vwk->kq, ke, j, NULL, 0, NULL));

	vwk->nki = 0;
	while (1) {
		dotimer = 0;
		n = kevent(vwk->kq, vwk->ki, vwk->nki, ke, NKEV, NULL);
		now = VTIM_real();
		assert(n <= NKEV);
		if (n == 0) {
			/* This happens on OSX in m00011.vtc */
			dotimer = 1;
			(void)usleep(10000);
		}
		vwk->nki = 0;
		for (kp = ke, j = 0; j < n; j++, kp++) {
			if (kp->filter == EVFILT_TIMER) {
				dotimer = 1;
			} else if (kp->filter == EVFILT_READ &&
			    kp->udata == vwk->pipes) {
				vwk_pipe_ev(vwk, kp);
			} else {
				assert(kp->filter == EVFILT_READ);
				vwk_sess_ev(vwk, kp, now);
			}
		}
		if (!dotimer)
			continue;
		/*
		 * Make sure we have no pending changes for the fd's
		 * we are about to close; if the accept(2) in the
		 * other thread creates new fd's between our close and
		 * the kevent(2) at the top of this loop, the kernel
		 * would not know we meant "the old fd of this number".
		 */
		vwk_kq_flush(vwk);
		deadline = now - cache_param->timeout_idle;
		for (;;) {
			sp = VTAILQ_FIRST(&vwk->sesshead);
			if (sp == NULL)
				break;
			if (sp->t_idle > deadline)
				break;
			VTAILQ_REMOVE(&vwk->sesshead, sp, list);
			// XXX: not yet (void)VTCP_linger(sp->fd, 0);
			SES_Delete(sp, SC_RX_TIMEOUT, now);
		}
	}
	NEEDLESS_RETURN(NULL);
}
void
HTTP1_Session(struct worker *wrk, struct req *req)
{
	enum htc_status_e hs;
	struct sess *sp;
	const char *st;
	int i;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	/*
	 * Whenever we come in from the acceptor or waiter, we need to set
	 * blocking mode.  It would be simpler to do this in the acceptor
	 * or waiter, but we'd rather do the syscall in the worker thread.
	 * On systems which return errors for ioctl, we close early
	 */
	if (http1_getstate(sp) == H1NEWREQ && VTCP_blocking(sp->fd)) {
		if (errno == ECONNRESET)
			SES_Close(sp, SC_REM_CLOSE);
		else
			SES_Close(sp, SC_TX_ERROR);
		AN(Req_Cleanup(sp, wrk, req));
		return;
	}

	while (1) {
		st = http1_getstate(sp);
		if (st == H1NEWREQ) {
			assert(isnan(req->t_prev));
			assert(isnan(req->t_req));
			AZ(req->vcl);
			AZ(req->esi_level);

			hs = HTC_RxStuff(req->htc, HTTP1_Complete,
			    &req->t_first, &req->t_req,
			    sp->t_idle + cache_param->timeout_linger,
			    sp->t_idle + cache_param->timeout_idle,
			    cache_param->http_req_size);
			XXXAZ(req->htc->ws->r);
			if (hs < HTC_S_EMPTY) {
				req->acct.req_hdrbytes +=
				    req->htc->rxbuf_e - req->htc->rxbuf_b;
				CNT_AcctLogCharge(wrk->stats, req);
				Req_Release(req);
				switch (hs) {
				case HTC_S_CLOSE:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				case HTC_S_TIMEOUT:
					SES_Delete(sp, SC_RX_TIMEOUT, NAN);
					return;
				case HTC_S_OVERFLOW:
					SES_Delete(sp, SC_RX_OVERFLOW, NAN);
					return;
				case HTC_S_EOF:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				default:
					WRONG("htc_status (bad)");
				}
			}
			if (hs == HTC_S_IDLE) {
				wrk->stats->sess_herd++;
				Req_Release(req);
				SES_Wait(sp, &HTTP1_transport);
				return;
			}
			if (hs != HTC_S_COMPLETE)
				WRONG("htc_status (nonbad)");

			i = http1_dissect(wrk, req);
			req->acct.req_hdrbytes +=
			    req->htc->rxbuf_e - req->htc->rxbuf_b;
			if (i) {
				SES_Close(req->sp, req->doclose);
				http1_setstate(sp, H1CLEANUP);
			} else {
				req->req_step = R_STP_RECV;
				http1_setstate(sp, H1PROC);
			}
		} else if (st == H1BUSY) {
			/*
			 * Return from waitinglist.
			 * Check to see if the remote has left.
			 */
			if (VTCP_check_hup(sp->fd)) {
				AN(req->hash_objhead);
				(void)HSH_DerefObjHead(wrk, &req->hash_objhead);
				AZ(req->hash_objhead);
				SES_Close(sp, SC_REM_CLOSE);
				AN(Req_Cleanup(sp, wrk, req));
				return;
			}
			http1_setstate(sp, H1PROC);
		} else if (st == H1PROC) {
			req->transport = &HTTP1_transport;
			if (CNT_Request(wrk, req) == REQ_FSM_DISEMBARK) {
				req->task.func = http1_req;
				req->task.priv = req;
				http1_setstate(sp, H1BUSY);
				return;
			}
			req->transport = NULL;
			http1_setstate(sp, H1CLEANUP);
		} else if (st == H1CLEANUP) {
			if (Req_Cleanup(sp, wrk, req))
				return;
			HTC_RxInit(req->htc, req->ws);
			if (req->htc->rxbuf_e != req->htc->rxbuf_b)
				wrk->stats->sess_readahead++;
			http1_setstate(sp, H1NEWREQ);
		} else {
			WRONG("Wrong H1 session state");
		}
	}
}
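HTTP1_Session() above closes the session early when the switch to blocking mode fails. A portable sketch of such a mode switch using fcntl(2); the name and mechanism are assumptions (the real VTCP_blocking() may well use ioctl(FIONBIO) instead, as the comment hints):

/* Sketch only (assumes <fcntl.h>): non-zero on failure, errno set. */
static int
vtcp_blocking_sketch(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0)
		return (-1);
	return (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK));
}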
static int
cnt_done(struct sess *sp)
{
    double dh, dp, da;
    int i;
    struct worker *wrk;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    wrk = sp->wrk;
    CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
    CHECK_OBJ_ORNULL(sp->vcl, VCL_CONF_MAGIC);

    AZ(wrk->obj);
    AZ(wrk->busyobj);
    sp->director = NULL;
    sp->restarts = 0;

    wrk->busyobj = NULL;

    SES_Charge(sp);

    /* If we did an ESI include, don't mess up our state */
    if (sp->esi_level > 0)
        return (1);

    if (sp->vcl != NULL) {
        if (wrk->vcl != NULL)
            VCL_Rel(&wrk->vcl);
        wrk->vcl = sp->vcl;
        sp->vcl = NULL;
    }


    sp->t_end = W_TIM_real(wrk);
    WSP(sp, SLT_Debug, "PHK req %.9f resp %.9f end %.9f open %.9f",
        sp->t_req, sp->t_resp, sp->t_end,  sp->t_open);
    if (sp->xid == 0) {
        // sp->t_req = sp->t_end;
        sp->t_resp = sp->t_end;
    } else {
        dp = sp->t_resp - sp->t_req;
        da = sp->t_end - sp->t_resp;
        dh = sp->t_req - sp->t_open;
        /* XXX: Add StatReq == StatSess */
        /* XXX: Workaround for pipe */
        if (sp->fd >= 0) {
            WSP(sp, SLT_Length, "%ju",
                (uintmax_t)sp->req_bodybytes);
        }
        WSP(sp, SLT_ReqEnd, "%u %.9f %.9f %.9f %.9f %.9f",
            sp->xid, sp->t_req, sp->t_end, dh, dp, da);
    }
    sp->xid = 0;
    WSL_Flush(wrk, 0);

    sp->t_open = sp->t_end;
    sp->t_resp = NAN;

    sp->req_bodybytes = 0;

    sp->t_req = NAN;
    sp->hash_always_miss = 0;
    sp->hash_ignore_busy = 0;

    if (sp->fd >= 0 && sp->doclose != NULL) {
        /*
         * This is an orderly close of the connection; ditch nolinger
         * before we close, to get queued data transmitted.
         */
        // XXX: not yet (void)VTCP_linger(sp->fd, 0);
        SES_Close(sp, sp->doclose);
    }

    if (sp->fd < 0) {
        wrk->stats.sess_closed++;
        SES_Delete(sp, NULL);
        return (1);
    }

    if (wrk->stats.client_req >= cache_param->wthread_stats_rate)
        WRK_SumStat(wrk);
    /* Reset the workspace to the session-watermark */
    WS_Reset(sp->ws, sp->ws_ses);
    WS_Reset(wrk->ws, NULL);

    i = HTC_Reinit(sp->htc);
    if (i == 1) {
        wrk->stats.sess_pipeline++;
        sp->step = STP_START;
        return (0);
    }
    if (Tlen(sp->htc->rxbuf)) {
        wrk->stats.sess_readahead++;
        sp->step = STP_WAIT;
        return (0);
    }
    if (cache_param->session_linger > 0) {
        wrk->stats.sess_linger++;
        sp->step = STP_WAIT;
        return (0);
    }
    wrk->stats.sess_herd++;
    Pool_Wait(sp);
    return (1);
}
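The "ditch nolinger" comment in cnt_done() refers to restoring normal close semantics so queued data drains before close(2); a sketch of what a TCP_linger()/VTCP_linger() helper typically looks like, based on the standard SO_LINGER socket option (an assumption, not the verified source):

/* Sketch (assumes <string.h>, <sys/socket.h>): l_onoff=1 with l_linger=0
 * makes close(2) discard queued data (RST); l_onoff=0 restores the
 * default orderly close. */
static int
vtcp_linger_sketch(int fd, int linger)
{
	struct linger lin;

	memset(&lin, 0, sizeof lin);
	lin.l_onoff = linger;
	lin.l_linger = 0;
	return (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof lin));
}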
static int
cnt_done(struct sess *sp)
{
    double dh, dp, da;
    int i;

    CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
    CHECK_OBJ_ORNULL(sp->vcl, VCL_CONF_MAGIC);

    AZ(sp->obj);
    AZ(sp->vbe);
    sp->director = NULL;
    sp->restarts = 0;

    if (sp->vcl != NULL && sp->esis == 0) {
        if (sp->wrk->vcl != NULL)
            VCL_Rel(&sp->wrk->vcl);
        sp->wrk->vcl = sp->vcl;
        sp->vcl = NULL;
    }

    sp->t_end = TIM_real();
    sp->wrk->lastused = sp->t_end;
    if (sp->xid == 0) {
        sp->t_req = sp->t_end;
        sp->t_resp = sp->t_end;
    }
    dp = sp->t_resp - sp->t_req;
    da = sp->t_end - sp->t_resp;
    dh = sp->t_req - sp->t_open;
    WSL(sp->wrk, SLT_ReqEnd, sp->id, "%u %.9f %.9f %.9f %.9f %.9f",
        sp->xid, sp->t_req, sp->t_end, dh, dp, da);

    sp->xid = 0;
    sp->t_open = sp->t_end;
    sp->t_resp = NAN;
    WSL_Flush(sp->wrk, 0);

    /* If we did an ESI include, don't mess up our state */
    if (sp->esis > 0)
        return (1);

    sp->t_req = NAN;

    if (sp->fd >= 0 && sp->doclose != NULL) {
        /*
         * This is an orderly close of the connection; ditch nolinger
         * before we close, to get queued data transmitted.
         */
        TCP_linger(sp->fd, 0);
        vca_close_session(sp, sp->doclose);
    }
    if (sp->fd < 0) {
        SES_Charge(sp);
        VSL_stats->sess_closed++;
        sp->wrk = NULL;
        SES_Delete(sp);
        return (1);
    }

    /* Reset the workspace to the session-watermark */
    WS_Reset(sp->ws, sp->ws_ses);

    i = HTC_Reinit(sp->htc);
    if (i == 1) {
        VSL_stats->sess_pipeline++;
        sp->step = STP_START;
        return (0);
    }
    if (Tlen(sp->htc->rxbuf)) {
        VSL_stats->sess_readahead++;
        sp->step = STP_WAIT;
        return (0);
    }
    if (params->session_linger > 0) {
        VSL_stats->sess_linger++;
        sp->step = STP_WAIT;
        return (0);
    }
    VSL_stats->sess_herd++;
    SES_Charge(sp);
    sp->wrk = NULL;
    vca_return_session(sp);
    return (1);
}
static void
HTTP1_Session(struct worker *wrk, struct req *req)
{
	enum htc_status_e hs;
	struct sess *sp;
	const char *st;
	int i;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	sp = req->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);

	/*
	 * Whenever we come in from the acceptor or waiter, we need to set
	 * blocking mode.  It would be simpler to do this in the acceptor
	 * or waiter, but we'd rather do the syscall in the worker thread.
	 */
	if (http1_getstate(sp) == H1NEWREQ)
		(void)VTCP_blocking(sp->fd);

	req->transport = &HTTP1_transport;

	while (1) {
		st = http1_getstate(sp);
		if (st == H1NEWREQ) {
			CHECK_OBJ_NOTNULL(req->transport, TRANSPORT_MAGIC);
			assert(isnan(req->t_prev));
			assert(isnan(req->t_req));
			AZ(req->vcl);
			AZ(req->esi_level);
			AN(req->htc->ws->r);

			hs = HTC_RxStuff(req->htc, HTTP1_Complete,
			    &req->t_first, &req->t_req,
			    sp->t_idle + cache_param->timeout_linger,
			    sp->t_idle + cache_param->timeout_idle,
			    NAN,
			    cache_param->http_req_size);
			AZ(req->htc->ws->r);
			if (hs < HTC_S_EMPTY) {
				req->acct.req_hdrbytes +=
				    req->htc->rxbuf_e - req->htc->rxbuf_b;
				Req_Cleanup(sp, wrk, req);
				Req_Release(req);
				switch (hs) {
				case HTC_S_CLOSE:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				case HTC_S_TIMEOUT:
					SES_Delete(sp, SC_RX_TIMEOUT, NAN);
					return;
				case HTC_S_OVERFLOW:
					SES_Delete(sp, SC_RX_OVERFLOW, NAN);
					return;
				case HTC_S_EOF:
					SES_Delete(sp, SC_REM_CLOSE, NAN);
					return;
				default:
					WRONG("htc_status (bad)");
				}
			}
			if (hs == HTC_S_IDLE) {
				wrk->stats->sess_herd++;
				Req_Cleanup(sp, wrk, req);
				Req_Release(req);
				SES_Wait(sp, &HTTP1_transport);
				return;
			}
			if (hs != HTC_S_COMPLETE)
				WRONG("htc_status (nonbad)");

			if (H2_prism_complete(req->htc) == HTC_S_COMPLETE) {
				if (!FEATURE(FEATURE_HTTP2)) {
					SES_Close(req->sp, SC_REQ_HTTP20);
					AZ(req->ws->r);
					AZ(wrk->aws->r);
					http1_setstate(sp, H1CLEANUP);
					continue;
				}
				http1_setstate(sp, NULL);
				H2_PU_Sess(wrk, sp, req);
				return;
			}

			i = http1_dissect(wrk, req);
			req->acct.req_hdrbytes +=
			    req->htc->rxbuf_e - req->htc->rxbuf_b;
			if (i) {
				assert(req->doclose > 0);
				SES_Close(req->sp, req->doclose);
				AZ(req->ws->r);
				AZ(wrk->aws->r);
				http1_setstate(sp, H1CLEANUP);
				continue;
			}
			if (http_HdrIs(req->http, H_Upgrade, "h2c")) {
				if (!FEATURE(FEATURE_HTTP2)) {
					VSLb(req->vsl, SLT_Debug,
					    "H2 upgrade attempt");
				} else if (req->htc->body_status != BS_NONE) {
					VSLb(req->vsl, SLT_Debug,
					    "H2 upgrade attempt has body");
				} else {
					http1_setstate(sp, NULL);
					req->err_code = 2;
					H2_OU_Sess(wrk, sp, req);
					return;
				}
			}
			req->req_step = R_STP_TRANSPORT;
			http1_setstate(sp, H1PROC);
		} else if (st == H1PROC) {
			req->task.func = http1_req;
			req->task.priv = req;
			wrk->stats->client_req++;
			CNT_Embark(wrk, req);
			if (req->req_step == R_STP_TRANSPORT)
				VCL_TaskEnter(req->vcl, req->privs);
			if (CNT_Request(req) == REQ_FSM_DISEMBARK)
				return;
			AZ(req->vcl0);
			req->task.func = NULL;
			req->task.priv = NULL;
			AZ(req->ws->r);
			AZ(wrk->aws->r);
			http1_setstate(sp, H1CLEANUP);
		} else if (st == H1CLEANUP) {

			AZ(wrk->aws->r);
			AZ(req->ws->r);

			if (sp->fd >= 0 && req->doclose != SC_NULL)
				SES_Close(sp, req->doclose);

			if (sp->fd < 0) {
				wrk->stats->sess_closed++;
				Req_Cleanup(sp, wrk, req);
				Req_Release(req);
				SES_Delete(sp, SC_NULL, NAN);
				return;
			}

			Req_Cleanup(sp, wrk, req);
			HTC_RxInit(req->htc, req->ws);
			if (req->htc->rxbuf_e != req->htc->rxbuf_b)
				wrk->stats->sess_readahead++;
			http1_setstate(sp, H1NEWREQ);
		} else {
			WRONG("Wrong H1 session state");
		}
	}
}
static enum req_fsm_nxt
http1_wait(struct sess *sp, struct worker *wrk, struct req *req)
{
	int j, tmo;
	struct pollfd pfd[1];
	double now, when;
	enum sess_close why = SC_NULL;
	enum http1_status_e hs;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	assert(req->sp == sp);

	AZ(req->vcl);
	AZ(req->esi_level);
	AZ(isnan(sp->t_idle));
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));
	assert(isnan(req->t_req));

	tmo = (int)(1e3 * cache_param->timeout_linger);
	while (1) {
		pfd[0].fd = sp->fd;
		pfd[0].events = POLLIN;
		pfd[0].revents = 0;
		j = poll(pfd, 1, tmo);
		assert(j >= 0);
		now = VTIM_real();
		if (j != 0)
			hs = HTTP1_Rx(req->htc);
		else
			hs = HTTP1_Complete(req->htc);
		if (hs == HTTP1_COMPLETE) {
			/* Got it, run with it */
			if (isnan(req->t_first))
				req->t_first = now;
			if (isnan(req->t_req))
				req->t_req = now;
			req->acct.req_hdrbytes += Tlen(req->htc->rxbuf);
			return (REQ_FSM_MORE);
		} else if (hs == HTTP1_ERROR_EOF) {
			why = SC_REM_CLOSE;
			break;
		} else if (hs == HTTP1_OVERFLOW) {
			why = SC_RX_OVERFLOW;
			break;
		} else if (hs == HTTP1_ALL_WHITESPACE) {
			/* Nothing but whitespace */
			when = sp->t_idle + cache_param->timeout_idle;
			if (when < now) {
				why = SC_RX_TIMEOUT;
				break;
			}
			when = sp->t_idle + cache_param->timeout_linger;
			tmo = (int)(1e3 * (when - now));
			if (when < now || tmo == 0) {
				wrk->stats->sess_herd++;
				SES_ReleaseReq(req);
				WAIT_Enter(sp);
				return (REQ_FSM_DONE);
			}
		} else {
			/* Working on it */
			if (isnan(req->t_first))
				/* Record first byte received time stamp */
				req->t_first = now;
			when = sp->t_idle + cache_param->timeout_req;
			tmo = (int)(1e3 * (when - now));
			if (when < now || tmo == 0) {
				why = SC_RX_TIMEOUT;
				break;
			}
		}
	}
	req->acct.req_hdrbytes += Tlen(req->htc->rxbuf);
	CNT_AcctLogCharge(wrk->stats, req);
	SES_ReleaseReq(req);
	assert(why != SC_NULL);
	SES_Delete(sp, why, now);
	return (REQ_FSM_DONE);
}