vfp_nop_bytes(struct worker *wrk, struct http_conn *htc, ssize_t bytes)
{
	struct storage *st;
	ssize_t space, rlen;

	/*
	 * Identity fetch processor: pull up to 'bytes' bytes from the
	 * backend connection straight into object storage, unmodified.
	 *
	 * Returns 1 on success, -1 on storage allocation failure, and
	 * the HTC_Read() result (<= 0) on read error/EOF.
	 */
	AZ(wrk->busyobj->fetch_failed);
	for (; bytes > 0; bytes -= rlen) {
		st = FetchStorage(wrk, 0);
		if (st == NULL)
			return (-1);
		/* Fill only the unused tail of the current segment */
		space = st->space - st->len;
		if (space > bytes)
			space = bytes;
		rlen = HTC_Read(wrk, htc, st->ptr + st->len, space);
		if (rlen <= 0)
			return (rlen);
		st->len += rlen;
		wrk->busyobj->fetch_obj->len += rlen;
		/* Let any streaming deliverers see the new data */
		if (wrk->busyobj->do_stream)
			RES_StreamPoll(wrk);
	}
	return (1);
}
/*
 * cnt_miss: run the vcl_miss{} method and act on its return value.
 *
 * On "fetch" a backend fetch is kicked off against req->objcore (with
 * req->stale_oc as conditional-fetch candidate) and the FSM proceeds to
 * the FETCH step.  All other returns drop the busy objcore (and any
 * stale objcore) before redirecting the FSM.
 */
static enum req_fsm_nxt
cnt_miss(struct worker *wrk, struct req *req)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(req->vcl);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	VCL_miss_method(req->vcl, wrk, req, NULL, NULL);
	switch (wrk->handling) {
	case VCL_RET_FETCH:
		wrk->stats->cache_miss++;
		/* VBF_Fetch consumes our objcore reference */
		VBF_Fetch(wrk, req, req->objcore, req->stale_oc, VBF_NORMAL);
		if (req->stale_oc != NULL)
			(void)HSH_DerefObjCore(wrk, &req->stale_oc);
		req->req_step = R_STP_FETCH;
		return (REQ_FSM_MORE);
	case VCL_RET_SYNTH:
		req->req_step = R_STP_SYNTH;
		break;
	case VCL_RET_RESTART:
		req->req_step = R_STP_RESTART;
		break;
	case VCL_RET_PASS:
		req->req_step = R_STP_PASS;
		break;
	default:
		WRONG("Illegal return from vcl_miss{}");
	}
	/* Not fetching: discard vary state and our object references */
	VRY_Clear(req);
	if (req->stale_oc != NULL)
		(void)HSH_DerefObjCore(wrk, &req->stale_oc);
	AZ(HSH_DerefObjCore(wrk, &req->objcore));
	return (REQ_FSM_MORE);
}
/*
 * cmd_http_sendhex: VTC command "sendhex" — decode a whitespace-
 * separated hex string (av[1]) into raw bytes and write them to the
 * peer socket.  A non-hex pair aborts the test via vtc_log level 0.
 */
static void
cmd_http_sendhex(CMD_ARGS)
{
	struct http *hp;
	char buf[3], *q;
	uint8_t *p;
	int i, j, l;

	(void)cmd;
	(void)vl;
	CAST_OBJ_NOTNULL(hp, priv, HTTP_MAGIC);
	AN(av[1]);
	AZ(av[2]);
	/* Upper bound on decoded length; whitespace may make it fewer */
	l = strlen(av[1]) / 2;
	p = malloc(l);
	AN(p);
	q = av[1];
	for (i = 0; i < l; i++) {
		while (vct_issp(*q))
			q++;
		if (*q == '\0')
			break;
		memcpy(buf, q, 2);
		q += 2;
		buf[2] = '\0';
		if (!vct_ishex(buf[0]) || !vct_ishex(buf[1]))
			vtc_log(hp->vl, 0, "Illegal Hex char \"%c%c\"",
			    buf[0], buf[1]);
		p[i] = (uint8_t)strtoul(buf, NULL, 16);
	}
	/* 'i' is the number of bytes actually decoded */
	vtc_hexdump(hp->vl, 4, "sendhex", (void*)p, i);
	j = write(hp->fd, p, i);
	assert(j == i);
	free(p);
}
/*
 * ban_error: record a ban-building error message on the proto object.
 *
 * Only the first error sticks; later calls return the original error.
 * Passing the sentinel string ban_build_err_no_mem (compared by
 * pointer identity) bypasses the vsb so no allocation is attempted on
 * the out-of-memory path.  Returns the sticky error string.
 */
static const char *
ban_error(struct ban_proto *bp, const char *fmt, ...)
{
	va_list ap;

	CHECK_OBJ_NOTNULL(bp, BAN_PROTO_MAGIC);
	AN(bp->vsb);

	/* First error is sticky */
	if (bp->err == NULL) {
		if (fmt == ban_build_err_no_mem) {
			/* OOM: don't touch the vsb, use static message */
			bp->err = ban_build_err_no_mem;
		} else {
			/* Record the error message in the vsb */
			VSB_clear(bp->vsb);
			va_start(ap, fmt);
			(void)VSB_vprintf(bp->vsb, fmt, ap);
			va_end(ap);
			AZ(VSB_finish(bp->vsb));
			bp->err = VSB_data(bp->vsb);
		}
	}
	return (bp->err);
}
static void vxp_expr_num(struct vxp *vxp, struct vex_rhs **prhs) { char *endptr; AN(prhs); AZ(*prhs); if (vxp->t->tok != VAL) { VSB_printf(vxp->sb, "Expected number got '%.*s' ", PF(vxp->t)); vxp_ErrWhere(vxp, vxp->t, -1); return; } AN(vxp->t->dec); ALLOC_OBJ(*prhs, VEX_RHS_MAGIC); AN(*prhs); if (strchr(vxp->t->dec, '.')) { (*prhs)->type = VEX_FLOAT; (*prhs)->val_float = VNUM(vxp->t->dec); if (isnan((*prhs)->val_float)) { VSB_printf(vxp->sb, "Floating point parse error "); vxp_ErrWhere(vxp, vxp->t, -1); return; } } else { (*prhs)->type = VEX_INT; (*prhs)->val_int = strtoll(vxp->t->dec, &endptr, 0); while (isspace(*endptr)) endptr++; if (*endptr != '\0') { VSB_printf(vxp->sb, "Integer parse error "); vxp_ErrWhere(vxp, vxp->t, -1); return; } } vxp_NextToken(vxp); }
/*
 * SES_Schedule: hand a session to its pool's worker scheduler.
 *
 * Returns 0 when the session was queued; on scheduler failure the
 * session is deleted with reason "dropped" and 1 is returned.
 */
int
SES_Schedule(struct sess *sp)
{
	struct sessmem *mem;
	struct sesspool *pool;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	AZ(sp->wrk);
	mem = sp->mem;
	CHECK_OBJ_NOTNULL(mem, SESSMEM_MAGIC);
	pool = mem->pool;
	CHECK_OBJ_NOTNULL(pool, SESSPOOL_MAGIC);
	AN(pool->pool);

	if (!Pool_Schedule(pool->pool, sp))
		return (0);

	/* No worker available: drop the session */
	SES_Delete(sp, "dropped");
	return (1);
}
/*
 * ses_vsl_socket: resolve the local endpoint of an accepted session
 * and emit the Begin/SessOpen log records for it.
 *
 * 'lsockname' is the configured name of the listen socket the session
 * arrived on; the numeric local address/port are obtained via
 * getsockname() on the session fd.
 */
static void
ses_vsl_socket(struct sess *sp, const char *lsockname)
{
	struct sockaddr_storage ss;
	socklen_t sl;
	char laddr[ADDR_BUFSIZE];
	char lport[PORT_BUFSIZE];

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	AN(lsockname);
	AN(sp->local_addr);

	/* Fill in sp->local_addr from the kernel's view of the socket */
	sl = sizeof ss;
	AZ(getsockname(sp->fd, (void*)&ss, &sl));
	AN(VSA_Build(sp->local_addr, &ss, sl));
	assert(VSA_Sane(sp->local_addr));

	VTCP_name(sp->remote_addr, sp->addr, sizeof sp->addr,
	    sp->port, sizeof sp->port);
	VTCP_name(sp->local_addr, laddr, sizeof laddr,
	    lport, sizeof lport);
	VSL(SLT_Begin, sp->vxid, "sess");
	VSL(SLT_SessOpen, sp->vxid, "%s %s %s %s %s %.6f %d",
	    sp->addr, sp->port, lsockname, laddr, lport,
	    sp->t_open, sp->fd);
}
/*
 * cmd_http_expect_close: VTC command "expect_close" — wait for the
 * peer to close the connection.
 *
 * Polls the fd for up to 1s per iteration; trailing linear whitespace
 * is tolerated and skipped, any other byte (or a poll timeout/error)
 * fails the test via vtc_log level 0.
 */
static void
cmd_http_expect_close(CMD_ARGS)
{
	struct http *hp;
	struct pollfd fds[1];
	char c;
	int i;

	(void)cmd;
	(void)vl;
	CAST_OBJ_NOTNULL(hp, priv, HTTP_MAGIC);
	AZ(av[1]);
	assert(hp->sfd >= 0);

	vtc_log(vl, 4, "Expecting close (fd = %d)", hp->fd);
	while (1) {
		fds[0].fd = hp->fd;
		fds[0].events = POLLIN | POLLHUP | POLLERR;
		fds[0].revents = 0;
		i = poll(fds, 1, 1000);
		if (i == 0)
			vtc_log(vl, 0, "Expected close: timeout");
		if (i != 1 || !(fds[0].revents & POLLIN))
			vtc_log(vl, 0,
			    "Expected close: poll = %d, revents = 0x%x",
			    i, fds[0].revents);
		i = read(hp->fd, &c, 1);
		if (i == 0)
			break;		/* EOF: the close we expected */
		if (i == 1 && vct_islws(c))
			continue;	/* ignore stray whitespace */
		vtc_log(vl, 0, "Expecting close: read = %d, c = 0x%02x",
		    i, c);
	}
	vtc_log(vl, 4, "fd=%d EOF, as expected", hp->fd);
}
/*
 * VBP_Control: enable or disable health probing for a backend.
 *
 * Enabling resets the probe state, schedules it on the probe binheap
 * with an immediate due time and wakes the prober thread; disabling
 * removes it from the heap.  The heap index asserts guard against
 * double enable/disable.
 */
void
VBP_Control(const struct backend *be, int enable)
{
	struct vbp_target *vt;

	CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);
	vt = be->probe;
	CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC);

	/* Reset state before (re)scheduling; done outside vbp_mtx */
	vbp_reset(vt);
	vbp_update_backend(vt);

	Lck_Lock(&vbp_mtx);
	if (enable) {
		/* Must not already be scheduled */
		assert(vt->heap_idx == BINHEAP_NOIDX);
		vt->due = VTIM_real();
		binheap_insert(vbp_heap, vt);
		AZ(pthread_cond_signal(&vbp_cond));
	} else {
		/* Must currently be scheduled */
		assert(vt->heap_idx != BINHEAP_NOIDX);
		binheap_delete(vbp_heap, vt->heap_idx);
	}
	Lck_Unlock(&vbp_mtx);
}
struct storage * FetchStorage(const struct sess *sp, ssize_t sz) { ssize_t l; struct storage *st; st = VTAILQ_LAST(&sp->obj->store, storagehead); if (st != NULL && st->len < st->space) return (st); l = fetchfrag; if (l == 0) l = sz; if (l == 0) l = params->fetch_chunksize * 1024LL; st = STV_alloc(sp, l); if (st == NULL) { errno = ENOMEM; return (NULL); } AZ(st->len); VTAILQ_INSERT_TAIL(&sp->obj->store, st, list); return (st); }
/*
 * VEP_Finish: finalize ESI parsing of an object.
 *
 * Flushes any pending/waiting output through the callback, closes the
 * gzip stream (VGZ_ALIGN + VGZ_FINISH), and returns the vsb holding
 * the ESI instruction script if any ESI was found — ownership passes
 * to the caller.  Returns NULL (and destroys the vsb) otherwise.
 * Ending mid-tag is reported via vep_error().
 */
struct vsb *
VEP_Finish(struct vep_state *vep)
{
	ssize_t l, lcb;

	CHECK_OBJ_NOTNULL(vep, VEP_MAGIC);

	if (vep->include_src)
		VSB_destroy(&vep->include_src);
	if (vep->attr_vsb)
		VSB_destroy(&vep->attr_vsb);

	/* Any state other than these means we stopped inside a tag */
	if (vep->state != VEP_START &&
	    vep->state != VEP_BOM &&
	    vep->state != VEP_TESTXML &&
	    vep->state != VEP_NOTXML &&
	    vep->state != VEP_NEXTTAG) {
		vep_error(vep, "VEP ended inside a tag");
	}

	if (vep->o_pending)
		vep_mark_common(vep, vep->ver_p, vep->last_mark);
	if (vep->o_wait > 0) {
		lcb = vep->cb(vep->vc, vep->cb_priv, 0, VGZ_ALIGN);
		vep_emit_common(vep, lcb - vep->o_last, vep->last_mark);
	}
	// NB: We don't account for PAD+SUM+LEN in gzip'ed objects
	(void)vep->cb(vep->vc, vep->cb_priv, 0, VGZ_FINISH);

	AZ(VSB_finish(vep->vsb));
	l = VSB_len(vep->vsb);
	if (vep->esi_found && l > 0)
		return (vep->vsb);
	VSB_destroy(&vep->vsb);
	return (NULL);
}
/*
 * vcc_expr4: parse a primary VCL expression of the requested type.
 *
 *	Expr4:
 *		'(' Expr0 ')'
 *		symbol (variable, function or backend)
 *		CSTR
 *		CNUM (interpreted per 'fmt': DURATION, BYTES, REAL or INT)
 *
 * On success *e holds the resulting expression; on error *e stays NULL
 * and a diagnostic is emitted on tl->sb.
 */
static void
vcc_expr4(struct vcc *tl, struct expr **e, enum var_type fmt)
{
	struct expr *e1, *e2;
	const struct symbol *sym;
	double d;

	*e = NULL;
	if (tl->t->tok == '(') {
		/* Parenthesized subexpression */
		SkipToken(tl, '(');
		vcc_expr0(tl, &e2, fmt);
		ERRCHK(tl);
		SkipToken(tl, ')');
		*e = vcc_expr_edit(e2->fmt, "(\v1)", e2, NULL);
		return;
	}
	switch(tl->t->tok) {
	case ID:
		/*
		 * XXX: what if var and func/proc had same name ?
		 * XXX: look for SYM_VAR first for consistency ?
		 */
		sym = VCC_FindSymbol(tl, tl->t, SYM_NONE);
		if (sym == NULL || sym->eval == NULL) {
			VSB_printf(tl->sb, "Symbol not found: ");
			vcc_ErrToken(tl, tl->t);
			VSB_printf(tl->sb, " (expected type %s):\n",
			    vcc_Type(fmt));
			vcc_ErrWhere(tl, tl->t);
			return;
		}
		AN(sym);
		switch(sym->kind) {
		case SYM_VAR:
		case SYM_FUNC:
		case SYM_BACKEND:
			/* Delegate to the symbol's own evaluator */
			AN(sym->eval);
			AZ(*e);
			sym->eval(tl, e, sym);
			return;
		default:
			break;
		}
		VSB_printf(tl->sb,
		    "Symbol type (%s) can not be used in expression.\n",
		    VCC_SymKind(tl, sym));
		vcc_ErrWhere(tl, tl->t);
		return;
	case CSTR:
		/* String constant */
		assert(fmt != VOID);
		e1 = vcc_new_expr();
		EncToken(e1->vsb, tl->t);
		e1->fmt = STRING;
		e1->t1 = tl->t;
		e1->constant = 1;
		vcc_NextToken(tl);
		AZ(VSB_finish(e1->vsb));
		*e = e1;
		break;
	case CNUM:
		/*
		 * XXX: %g may not have enough decimals by default
		 * XXX: but %a is ugly, isn't it ?
		 */
		assert(fmt != VOID);
		if (fmt == DURATION) {
			vcc_RTimeVal(tl, &d);
			ERRCHK(tl);
			e1 = vcc_mk_expr(DURATION, "%g", d);
		} else if (fmt == BYTES) {
			vcc_ByteVal(tl, &d);
			ERRCHK(tl);
			e1 = vcc_mk_expr(BYTES, "%.1f", d);
			ERRCHK(tl);
		} else if (fmt == REAL) {
			e1 = vcc_mk_expr(REAL, "%g", vcc_DoubleVal(tl));
			ERRCHK(tl);
		} else {
			/* Default: emit the number verbatim as INT */
			e1 = vcc_mk_expr(INT, "%.*s", PF(tl->t));
			vcc_NextToken(tl);
		}
		e1->constant = 1;
		*e = e1;
		break;
	default:
		VSB_printf(tl->sb, "Unknown token ");
		vcc_ErrToken(tl, tl->t);
		VSB_printf(tl->sb, " when looking for %s\n\n", vcc_Type(fmt));
		vcc_ErrWhere(tl, tl->t);
		break;
	}
}
static void init_macro(void) { AZ(pthread_mutex_init(¯o_mtx, NULL)); }
/*
 * pan_ic: the panic sink — format a panic report into pan_vsp and die.
 *
 * Called from the assert machinery with the failing function/file/line,
 * the condition text, a saved errno (0 if none) and the assertion kind.
 * Grabs panicstr_mtx to serialize against concurrent panics (the lock
 * is deliberately never released), dumps thread/request/busyobj state
 * unless FEATURE_SHORT_PANIC is set, then exit(4)s or abort()s
 * depending on FEATURE_NO_COREDUMP.  Never returns.
 */
pan_ic(const char *func, const char *file, int line, const char *cond,
    int err, enum vas_e kind)
{
	const char *q;
	struct req *req;
	struct busyobj *bo;

	/* Won't be released, we're going to die anyway */
	AZ(pthread_mutex_lock(&panicstr_mtx));

	switch(kind) {
	case VAS_WRONG:
		VSB_printf(pan_vsp,
		    "Wrong turn at %s:%d:\n%s\n", file, line, cond);
		break;
	case VAS_VCL:
		VSB_printf(pan_vsp,
		    "Panic from VCL:\n  %s\n", cond);
		break;
	case VAS_MISSING:
		VSB_printf(pan_vsp,
		    "Missing errorhandling code in %s(), %s line %d:\n"
		    "  Condition(%s) not true.",
		    func, file, line, cond);
		break;
	case VAS_INCOMPLETE:
		VSB_printf(pan_vsp,
		    "Incomplete code in %s(), %s line %d:\n",
		    func, file, line);
		break;
	default:
	case VAS_ASSERT:
		VSB_printf(pan_vsp,
		    "Assert error in %s(), %s line %d:\n"
		    "  Condition(%s) not true.\n",
		    func, file, line, cond);
		break;
	}
	if (err)
		VSB_printf(pan_vsp, "errno = %d (%s)\n", err, strerror(err));

	q = THR_GetName();
	if (q != NULL)
		VSB_printf(pan_vsp, "thread = (%s)\n", q);

	VSB_printf(pan_vsp, "ident = %s,%s\n",
	    VSB_data(vident) + 1, WAIT_GetName());

	pan_backtrace();

	if (!FEATURE(FEATURE_SHORT_PANIC)) {
		/* Dump the state of the request/busyobj this thread owns */
		req = THR_GetRequest();
		if (req != NULL) {
			pan_req(req);
			VSL_Flush(req->vsl, 0);
		}
		bo = THR_GetBusyobj();
		if (bo != NULL) {
			pan_busyobj(bo);
			VSL_Flush(bo->vsl, 0);
		}
	}
	VSB_printf(pan_vsp, "\n");
	VSB_bcat(pan_vsp, "", 1);	/* NUL termination */

	if (FEATURE(FEATURE_NO_COREDUMP))
		exit(4);
	else
		abort();
}
/*
 * mpl_guard: per-mempool housekeeping thread.
 *
 * Loops forever, sleeping between rounds, and under mpl->mtx:
 *  - tops the free list up to param->min_pool with freshly allocated
 *    items (allocation happens outside the lock, insertion inside),
 *  - trims the free list down to param->max_pool,
 *  - frees items returned to the surplus list,
 *  - times out items untouched for param->max_age (one per round),
 *  - on self_destruct, drains both lists, frees the pool's VSM
 *    stats segment and the pool itself, then exits.
 * 'mi' carries at most one item across these phases; if it is still
 * held after unlocking it is freed outside the lock.  The "// random"
 * sleep values stagger guard threads relative to each other.
 */
static void *
mpl_guard(void *priv)
{
	struct mempool *mpl;
	struct memitem *mi = NULL;
	double mpl_slp __state_variable__(mpl_slp);
	double last = 0;

	CAST_OBJ_NOTNULL(mpl, priv, MEMPOOL_MAGIC);
	mpl_slp = 0.15;		// random
	while (1) {
		VTIM_sleep(mpl_slp);
		mpl_slp = 0.814;	// random
		mpl->t_now = VTIM_real();

		/* Drop a prepared item that is no longer needed/usable */
		if (mi != NULL && (mpl->n_pool > mpl->param->max_pool ||
		    mi->size < *mpl->cur_size)) {
			FREE_OBJ(mi);
			mi = NULL;
		}

		/* Pre-allocate outside the lock if the pool runs low */
		if (mi == NULL && mpl->n_pool < mpl->param->min_pool)
			mi = mpl_alloc(mpl);

		/* Decide whether this round has any work at all */
		if (mpl->n_pool < mpl->param->min_pool && mi != NULL) {
			/* can do */
		} else if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			/* can do */
		} else if (!VTAILQ_EMPTY(&mpl->surplus)) {
			/* can do */
		} else if (last + .1 * mpl->param->max_age < mpl->t_now) {
			/* should do */
		} else if (mpl->self_destruct) {
			/* can do */
		} else {
			continue;	/* nothing to do */
		}

		mpl_slp = 0.314;	// random
		if (Lck_Trylock(&mpl->mtx))
			continue;

		if (mpl->self_destruct) {
			/* Pool is being torn down: drain everything */
			AZ(mpl->live);
			while (1) {
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->list);
					if (mi != NULL) {
						mpl->vsc->pool = --mpl->n_pool;
						VTAILQ_REMOVE(&mpl->list,
						    mi, list);
					}
				}
				if (mi == NULL) {
					mi = VTAILQ_FIRST(&mpl->surplus);
					if (mi != NULL)
						VTAILQ_REMOVE(&mpl->surplus,
						    mi, list);
				}
				if (mi == NULL)
					break;
				FREE_OBJ(mi);
				mi = NULL;
			}
			VSM_Free(mpl->vsc);
			Lck_Unlock(&mpl->mtx);
			Lck_Delete(&mpl->mtx);
			FREE_OBJ(mpl);
			break;
		}

		/* Insert the pre-allocated item if still wanted */
		if (mpl->n_pool < mpl->param->min_pool &&
		    mi != NULL && mi->size >= *mpl->cur_size) {
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = ++mpl->n_pool;
			mi->touched = mpl->t_now;
			VTAILQ_INSERT_HEAD(&mpl->list, mi, list);
			mi = NULL;
			mpl_slp = .01;	// random
		}
		/* Trim the pool when it exceeds max_pool */
		if (mpl->n_pool > mpl->param->max_pool && mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->list);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			mpl->vsc->pool = --mpl->n_pool;
			mpl->vsc->surplus++;
			VTAILQ_REMOVE(&mpl->list, mi, list);
			mpl_slp = .01;	// random
		}
		/* Pick up an item from the surplus list for freeing */
		if (mi == NULL) {
			mi = VTAILQ_FIRST(&mpl->surplus);
			if (mi != NULL) {
				CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
				VTAILQ_REMOVE(&mpl->surplus, mi, list);
				mpl_slp = .01;	// random
			}
		}
		/* Age out the least recently touched item */
		if (mi == NULL && mpl->n_pool > mpl->param->min_pool) {
			mi = VTAILQ_LAST(&mpl->list, memhead_s);
			CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
			if (mi->touched + mpl->param->max_age < mpl->t_now) {
				mpl->vsc->pool = --mpl->n_pool;
				mpl->vsc->timeout++;
				VTAILQ_REMOVE(&mpl->list, mi, list);
				mpl_slp = .01;	// random
			} else {
				mi = NULL;
				last = mpl->t_now;
			}
		} else if (mpl->n_pool <= mpl->param->min_pool) {
			last = mpl->t_now;
		}
		Lck_Unlock(&mpl->mtx);

		/* Free outside the lock */
		if (mi != NULL) {
			FREE_OBJ(mi);
			mi = NULL;
		}
	}
	return (NULL);
}
/*
 * ved_stripgzip: include a gzip'ed object inside a gzip'ed ESI
 * response without recompressing it.
 *
 * Uses the stored OA_GZIPBITS attribute (bit offsets of the first
 * deflate block 'start', the final block header 'last' and the end of
 * the deflate data 'stop') to deliver the raw deflate blocks with the
 * gzip header skipped, the "LAST" bit of the final block cleared, and
 * the stream padded to a byte boundary with an empty stored block.
 * The object's gzip trailer (CRC32 + ILEN) is captured into tailbuf
 * and folded into the parent's running CRC.
 */
static void
ved_stripgzip(struct req *req, struct busyobj *bo)
{
	ssize_t start, last, stop, lpad;
	ssize_t l;
	char *p;
	uint32_t icrc;
	uint32_t ilen;
	uint64_t olen;
	uint8_t *dbits;
	uint8_t *pp;
	uint8_t tailbuf[8];
	enum objiter_status ois;
	void *oi;
	void *sp;
	ssize_t sl, ll, dl;
	struct ecx *ecx;
	struct req *preq;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);
	preq = ecx->preq;

	if (bo != NULL)
		VBO_waitstate(bo, BOS_FINISHED);

	AN(ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED));

	/*
	 * This is the interesting case: Deliver all the deflate
	 * blocks, stripping the "LAST" bit of the last one and
	 * padding it, as necessary, to a byte boundary.
	 */

	p = ObjGetattr(req->wrk, req->objcore, OA_GZIPBITS, &l);
	AN(p);
	assert(l == 32);
	start = vbe64dec(p);
	last = vbe64dec(p + 8);
	stop = vbe64dec(p + 16);
	olen = ObjGetLen(req->wrk, req->objcore);
	assert(start > 0 && start < olen * 8);
	assert(last > 0 && last < olen * 8);
	assert(stop > 0 && stop < olen * 8);
	assert(last >= start);
	assert(last < stop);

	/* The start bit must be byte aligned. */
	AZ(start & 7);

	/*
	 * XXX: optimize for the case where the 'last'
	 * XXX: bit is in a empty copy block
	 */
	memset(tailbuf, 0xdd, sizeof tailbuf);
	dbits = WS_Alloc(req->ws, 8);
	AN(dbits);
	ll = 0;
	oi = ObjIterBegin(req->wrk, req->objcore);
	do {
		ois = ObjIter(req->objcore, oi, &sp, &sl);
		pp = sp;
		if (sl > 0) {
			/* Skip over the GZIP header */
			dl = start / 8 - ll;
			if (dl > 0) {
				/* Before start, skip */
				if (dl > sl)
					dl = sl;
				ll += dl;
				sl -= dl;
				pp += dl;
			}
		}
		if (sl > 0) {
			/* The main body of the object */
			dl = last / 8 - ll;
			if (dl > 0) {
				if (dl > sl)
					dl = sl;
				if (ved_bytes(req, preq, VDP_NULL, pp, dl))
					break;
				ll += dl;
				sl -= dl;
				pp += dl;
			}
		}
		if (sl > 0 && ll == last / 8) {
			/* Remove the "LAST" bit */
			dbits[0] = *pp;
			dbits[0] &= ~(1U << (last & 7));
			if (ved_bytes(req, preq, VDP_NULL, dbits, 1))
				break;
			ll++;
			sl--;
			pp++;
		}
		if (sl > 0) {
			/* Last block */
			dl = stop / 8 - ll;
			if (dl > 0) {
				if (dl > sl)
					dl = sl;
				if (ved_bytes(req, preq, VDP_NULL, pp, dl))
					break;
				ll += dl;
				sl -= dl;
				pp += dl;
			}
		}
		if (sl > 0 && (stop & 7) && ll == stop / 8) {
			/* Add alignment to byte boundary */
			dbits[1] = *pp;
			ll++;
			sl--;
			pp++;
			switch((int)(stop & 7)) {
			case 1:
				/*
				 * x000....
				 * 00000000 00000000 11111111 11111111
				 */
			case 3:
				/*
				 * xxx000..
				 * 00000000 00000000 11111111 11111111
				 */
			case 5:
				/*
				 * xxxxx000
				 * 00000000 00000000 11111111 11111111
				 */
				dbits[2] = 0x00;
				dbits[3] = 0x00;
				dbits[4] = 0xff;
				dbits[5] = 0xff;
				lpad = 5;
				break;
			case 2: /* xx010000 00000100 00000001 00000000 */
				dbits[1] |= 0x08;
				dbits[2] = 0x20;
				dbits[3] = 0x80;
				dbits[4] = 0x00;
				lpad = 4;
				break;
			case 4: /* xxxx0100 00000001 00000000 */
				dbits[1] |= 0x20;
				dbits[2] = 0x80;
				dbits[3] = 0x00;
				lpad = 3;
				break;
			case 6: /* xxxxxx01 00000000 */
				dbits[1] |= 0x80;
				dbits[2] = 0x00;
				lpad = 2;
				break;
			case 7:
				/*
				 * xxxxxxx0
				 * 00......
				 * 00000000 00000000 11111111 11111111
				 */
				dbits[2] = 0x00;
				dbits[3] = 0x00;
				dbits[4] = 0x00;
				dbits[5] = 0xff;
				dbits[6] = 0xff;
				lpad = 6;
				break;
			case 0: /* xxxxxxxx */
			default:
				WRONG("compiler must be broken");
			}
			if (ved_bytes(req, preq, VDP_NULL, dbits + 1, lpad))
				break;
		}
		if (sl > 0) {
			/* Recover GZIP tail */
			dl = olen - ll;
			assert(dl >= 0);
			if (dl > sl)
				dl = sl;
			if (dl > 0) {
				assert(dl <= 8);
				l = ll - (olen - 8);
				assert(l >= 0);
				assert(l <= 8);
				assert(l + dl <= 8);
				memcpy(tailbuf + l, pp, dl);
				ll += dl;
				sl -= dl;
				pp += dl;
			}
		}
	} while (ois == OIS_DATA || ois == OIS_STREAM);
	ObjIterEnd(req->objcore, &oi);
	(void)ved_bytes(req, preq, VDP_FLUSH, NULL, 0);

	/* Fold this object's CRC32/length into the parent's trailer */
	icrc = vle32dec(tailbuf);
	ilen = vle32dec(tailbuf + 4);
	ecx->crc = crc32_combine(ecx->crc, icrc, ilen);
	ecx->l_crc += ilen;
}
/*
 * VBF_Fetch: launch a backend fetch for objcore 'oc'.
 *
 * Sets up a busyobj, transfers references (oc, optional stale oldoc,
 * session) to it, and queues vbf_fetch_thread on a worker pool.  If no
 * thread is available the fetch fails synchronously and all references
 * are released here.  For VBF_BACKGROUND the caller's oc reference is
 * dropped once the fetch reaches BOS_REQ_DONE; otherwise the caller
 * blocks until the object is streamable (or failed).
 */
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
	struct boc *boc;
	struct busyobj *bo;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	boc = HSH_RefBoc(oc);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	switch (mode) {
	case VBF_PASS:
		how = "pass";
		bo->do_pass = 1;
		break;
	case VBF_NORMAL:
		how = "fetch";
		break;
	case VBF_BACKGROUND:
		how = "bgfetch";
		bo->is_bgfetch = 1;
		break;
	default:
		WRONG("Wrong fetch mode");
	}

	/* Cross-link the request and bereq transactions in the log */
	VSLb(bo->vsl, SLT_Begin, "bereq %u %s", VXID(req->vsl->wid), how);
	VSLb(req->vsl, SLT_Link, "bereq %u %s", VXID(bo->vsl->wid), how);
	THR_SetBusyobj(bo);

	bo->sp = req->sp;
	SES_Ref(bo->sp);

	AN(bo->vcl);

	/* The busyobj takes over the request's vary matching state */
	oc->boc->vary = req->vary_b;
	req->vary_b = NULL;

	HSH_Ref(oc);
	AZ(bo->fetch_objcore);
	bo->fetch_objcore = oc;

	AZ(bo->stale_oc);
	if (oldoc != NULL) {
		assert(oldoc->refcnt > 0);
		HSH_Ref(oldoc);
		bo->stale_oc = oldoc;
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task.priv = bo;
	bo->fetch_task.func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, &bo->fetch_task, TASK_QUEUE_BO)) {
		/* No fetch thread: fail inline and unwind our refs */
		wrk->stats->fetch_no_thread++;
		(void)vbf_stp_fail(req->wrk, bo);
		if (bo->stale_oc != NULL)
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
		HSH_DerefBoc(wrk, oc);
		SES_Rel(bo->sp);
		VBO_ReleaseBusyObj(wrk, &bo);
	} else {
		bo = NULL; /* ref transferred to fetch thread */
		if (mode == VBF_BACKGROUND) {
			ObjWaitState(oc, BOS_REQ_DONE);
			(void)VRB_Ignore(req);
		} else {
			ObjWaitState(oc, BOS_STREAM);
			if (oc->boc->state == BOS_FAILED) {
				AN((oc->flags & OC_F_FAILED));
			} else {
				AZ(oc->flags & OC_F_BUSY);
			}
		}
	}
	AZ(bo);
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	assert(oc->boc == boc);
	HSH_DerefBoc(wrk, oc);
	if (mode == VBF_BACKGROUND)
		(void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
	THR_SetBusyobj(NULL);
}
/*
 * vbf_fetch_thread: worker-pool entry point that drives the fetch
 * state machine for one busyobj.
 *
 * Starts at F_STP_MKBEREQ and dispatches each state through the
 * FETCH_STEP table (tbl/steps.h) until F_STP_DONE, then tears down the
 * bereq/beresp workspaces and releases the busyobj's references
 * (stale_oc, boc, session, busyobj itself).
 */
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct busyobj *bo;
	enum fetch_step stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;
	assert(isnan(bo->t_first));
	assert(isnan(bo->t_prev));
	VSLb_ts_busyobj(bo, "Start", W_TIM_real(wrk));

	bo->wrk = wrk;
	wrk->vsl = bo->vsl;

#if 0
	if (bo->stale_oc != NULL) {
		CHECK_OBJ_NOTNULL(bo->stale_oc, OBJCORE_MAGIC);
		/* We don't want the oc/stevedore ops in fetching thread */
		if (!ObjCheckFlag(wrk, bo->stale_oc, OF_IMSCAND))
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
	}
#endif

	/* Run the fetch state machine to completion */
	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		assert(bo->fetch_objcore->boc->refcount >= 1);
		switch (stp) {
#define FETCH_STEP(l, U, arg)						\
		case F_STP_##U:						\
			stp = vbf_stp_##l arg;				\
			break;
#include "tbl/steps.h"
		default:
			WRONG("Illegal fetch_step");
		}
	}

	assert(bo->director_state == DIR_S_NULL);

	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);

	if (bo->fetch_objcore->boc->state == BOS_FINISHED) {
		AZ(bo->fetch_objcore->flags & OC_F_FAILED);
		VSLb(bo->vsl, SLT_Length, "%ju",
		    (uintmax_t)ObjGetLen(bo->wrk, bo->fetch_objcore));
	}
	// AZ(bo->fetch_objcore->boc);	// XXX

	if (bo->stale_oc != NULL)
		(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);

	wrk->vsl = NULL;
	HSH_DerefBoc(wrk, bo->fetch_objcore);
	SES_Rel(bo->sp);
	VBO_ReleaseBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}
/*
 * vbf_stp_fetch: prepare to receive the backend response body.
 *
 * Runs after vcl_backend_response{} returned deliver: figures out the
 * VFP filter chain, opens it, converts beresp into an object, sets the
 * object flags (ESI, gzip, IMS candidacy) and — when streaming — marks
 * the object streamable before the body arrives.  Transitions to
 * FETCHBODY, FETCHEND or ERROR.
 */
static enum fetch_step
vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
{
	const char *p;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);

	assert(wrk->handling == VCL_RET_DELIVER);

	if (vbf_figure_out_vfp(bo)) {
		(bo)->htc->doclose = SC_OVERLOAD;
		VDI_Finish((bo)->wrk, bo);
		return (F_STP_ERROR);
	}

	if (bo->fetch_objcore->flags & OC_F_PRIVATE)
		AN(bo->uncacheable);

	bo->fetch_objcore->boc->len_so_far = 0;

	if (VFP_Open(bo->vfc)) {
		(void)VFP_Error(bo->vfc, "Fetch pipeline failed to open");
		bo->htc->doclose = SC_RX_BODY;
		VDI_Finish(bo->wrk, bo);
		return (F_STP_ERROR);
	}

	if (vbf_beresp2obj(bo)) {
		(void)VFP_Error(bo->vfc, "Could not get storage");
		bo->htc->doclose = SC_RX_BODY;
		VFP_Close(bo->vfc);
		VDI_Finish(bo->wrk, bo);
		return (F_STP_ERROR);
	}

	/* Record the object's processing/encoding properties */
	if (bo->do_esi)
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_ESIPROC, 1);
	if (bo->do_gzip || (bo->is_gzip && !bo->do_gunzip))
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_GZIPED, 1);
	if (bo->do_gzip || bo->do_gunzip)
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_CHGGZIP, 1);

	/* A validator makes the object a conditional-fetch candidate */
	if (!(bo->fetch_objcore->flags & OC_F_PASS) &&
	    http_IsStatus(bo->beresp, 200) && (
	    http_GetHdr(bo->beresp, H_Last_Modified, &p) ||
	    http_GetHdr(bo->beresp, H_ETag, &p)))
		ObjSetFlag(bo->wrk, bo->fetch_objcore, OF_IMSCAND, 1);

	if (bo->htc->body_status != BS_NONE &&
	    VDI_GetBody(bo->wrk, bo) != 0) {
		(void)VFP_Error(bo->vfc,
		    "GetBody failed - workspace_backend overflow?");
		VFP_Close(bo->vfc);
		bo->htc->doclose = SC_OVERLOAD;
		VDI_Finish(bo->wrk, bo);
		return (F_STP_ERROR);
	}

	assert(bo->fetch_objcore->boc->refcount >= 1);

	assert(bo->fetch_objcore->boc->state == BOS_REQ_DONE);

	if (bo->do_stream) {
		/* Make the object visible to waiters before the body */
		ObjSetState(wrk, bo->fetch_objcore, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, bo->fetch_objcore);
		ObjSetState(wrk, bo->fetch_objcore, BOS_STREAM);
	}

	VSLb(bo->vsl, SLT_Fetch_Body, "%u %s %s",
	    bo->htc->body_status, body_status_2str(bo->htc->body_status),
	    bo->do_stream ? "stream" : "-");

	if (bo->htc->body_status != BS_NONE) {
		assert(bo->htc->body_status != BS_ERROR);
		return (F_STP_FETCHBODY);
	}
	AZ(bo->vfc->failed);
	return (F_STP_FETCHEND);
}
/*
 * vbf_stp_startfetch: run vcl_backend_fetch{}, send the backend
 * request, and process the response headers through
 * vcl_backend_response{}.
 *
 * Handles header fixup (X-Varnish, missing Date, Cache-Control/Vary
 * collection), TTL assignment, 304 revalidation merging against a
 * stale IMS-candidate object, and the abandon/fail/retry VCL returns.
 * On deliver, transitions to CONDFETCH (after a merged 304) or FETCH.
 */
static enum fetch_step
vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
{
	int i;
	double now;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	AZ(bo->storage);
	bo->storage = bo->do_pass ? stv_transient : STV_next();

	/* On retry, drop the X-Varnish header from the previous attempt */
	if (bo->retries > 0)
		http_Unset(bo->bereq, "\012X-Varnish:");

	http_PrintfHeader(bo->bereq, "X-Varnish: %u", VXID(bo->vsl->wid));

	VCL_backend_fetch_method(bo->vcl, wrk, NULL, bo, NULL);
	bo->uncacheable = bo->do_pass;
	if (wrk->handling == VCL_RET_ABANDON || wrk->handling == VCL_RET_FAIL)
		return (F_STP_FAIL);

	assert (wrk->handling == VCL_RET_FETCH);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);

	AZ(bo->htc);

	VFP_Setup(bo->vfc, wrk);
	bo->vfc->oc = bo->fetch_objcore;
	bo->vfc->resp = bo->beresp;
	bo->vfc->req = bo->bereq;

	/* Send the request and receive the response headers */
	i = VDI_GetHdr(wrk, bo);

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Beresp", now);

	if (i) {
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	http_VSL_log(bo->beresp);

	if (bo->htc->body_status == BS_ERROR) {
		bo->htc->doclose = SC_RX_BODY;
		VDI_Finish(bo->wrk, bo);
		VSLb(bo->vsl, SLT_Error, "Body cannot be fetched");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (!http_GetHdr(bo->beresp, H_Date, NULL)) {
		/*
		 * RFC 2616 14.18 Date: The Date general-header field
		 * represents the date and time at which the message was
		 * originated, having the same semantics as orig-date in
		 * RFC 822. ... A received message that does not have a
		 * Date header field MUST be assigned one by the recipient
		 * if the message will be cached by that recipient or
		 * gatewayed via a protocol which requires a Date.
		 *
		 * If we didn't get a Date header, we assign one here.
		 */
		http_TimeHeader(bo->beresp, "Date: ", now);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	if (bo->fetch_objcore->flags & OC_F_PRIVATE) {
		/* private objects have negative TTL */
		bo->fetch_objcore->t_origin = now;
		bo->fetch_objcore->ttl = -1.;
		bo->fetch_objcore->grace = 0;
		bo->fetch_objcore->keep = 0;
	} else {
		/* What does RFC2616 think about TTL ? */
		RFC2616_Ttl(bo, now,
		    &bo->fetch_objcore->t_origin,
		    &bo->fetch_objcore->ttl,
		    &bo->fetch_objcore->grace,
		    &bo->fetch_objcore->keep
		    );
	}

	AZ(bo->do_esi);
	AZ(bo->was_304);

	if (http_IsStatus(bo->beresp, 304)) {
		if (bo->stale_oc != NULL &&
		    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND)) {
			if (ObjCheckFlag(bo->wrk, bo->stale_oc, OF_CHGGZIP)) {
				/*
				 * If we changed the gzip status of the object
				 * the stored Content_Encoding controls we
				 * must weaken any new ETag we get.
				 */
				http_Unset(bo->beresp, H_Content_Encoding);
				RFC2616_Weaken_Etag(bo->beresp);
			}
			http_Unset(bo->beresp, H_Content_Length);
			HTTP_Merge(bo->wrk, bo->stale_oc, bo->beresp);
			assert(http_IsStatus(bo->beresp, 200));
			bo->was_304 = 1;
		} else if (!bo->do_pass) {
			/*
			 * Backend sent unallowed 304
			 */
			VSLb(bo->vsl, SLT_Error,
			    "304 response but not conditional fetch");
			bo->htc->doclose = SC_RX_BAD;
			VDI_Finish(bo->wrk, bo);
			return (F_STP_ERROR);
		}
	}

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, NULL);

	if (wrk->handling == VCL_RET_ABANDON || wrk->handling == VCL_RET_FAIL) {
		bo->htc->doclose = SC_RESP_CLOSE;
		VDI_Finish(bo->wrk, bo);
		return (F_STP_FAIL);
	}

	if (wrk->handling == VCL_RET_RETRY) {
		if (bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
		if (bo->director_state != DIR_S_NULL)
			VDI_Finish(bo->wrk, bo);

		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);

		VSLb(bo->vsl, SLT_VCL_Error,
		    "Too many retries, delivering 503");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);
	if (bo->fetch_objcore->boc->state != BOS_REQ_DONE) {
		/* The client request is no longer needed by the fetch */
		bo->req = NULL;
		ObjSetState(wrk, bo->fetch_objcore, BOS_REQ_DONE);
	}

	if (bo->do_esi)
		bo->do_stream = 0;
	if (wrk->handling == VCL_RET_PASS) {
		bo->fetch_objcore->flags |= OC_F_HFP;
		bo->uncacheable = 1;
		wrk->handling = VCL_RET_DELIVER;
	}
	if (bo->do_pass || bo->uncacheable)
		bo->fetch_objcore->flags |= OC_F_PASS;

	assert(wrk->handling == VCL_RET_DELIVER);

	return (bo->was_304 ? F_STP_CONDFETCH : F_STP_FETCH);
}
/*
 * vbf_stp_fetchbody: pull the response body through the VFP chain
 * into object storage.
 *
 * Loops: allocate storage (sized by the remaining content-length
 * estimate), VFP_Suck() into it, extend the object, until the chain
 * reports end or error.  An abandoned pass object stops the loop
 * without failing the fetch so a hit-for-pass object can still be
 * created.  Transitions to FETCHEND, ERROR or FAIL.
 */
static enum fetch_step
vbf_stp_fetchbody(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l;
	uint8_t *ptr;
	enum vfp_status vfps = VFP_ERROR;
	ssize_t est;
	struct vfp_ctx *vfc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	vfc = bo->vfc;
	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);

	AN(vfc->vfp_nxt);

	est = bo->htc->content_length;
	if (est < 0)
		est = 0;

	do {
		if (vfc->oc->flags & OC_F_ABANDON) {
			/*
			 * A pass object and delivery was terminated
			 * We don't fail the fetch, in order for
			 * hit-for-pass objects to be created.
			 */
			AN(vfc->oc->flags & OC_F_PASS);
			VSLb(wrk->vsl, SLT_Debug,
			    "Fetch: Pass delivery abandoned");
			bo->htc->doclose = SC_RX_BODY;
			break;
		}
		AZ(vfc->failed);
		l = est;
		assert(l >= 0);
		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK) {
			bo->htc->doclose = SC_RX_BODY;
			break;
		}
		AZ(vfc->failed);
		vfps = VFP_Suck(vfc, ptr, &l);
		if (l > 0 && vfps != VFP_ERROR) {
			bo->acct.beresp_bodybytes += l;
			VFP_Extend(vfc, l);
			if (est >= l)
				est -= l;
			else
				est = 0;
		}
	} while (vfps == VFP_OK);

	if (vfc->failed) {
		(void)VFP_Error(vfc, "Fetch pipeline failed to process");
		bo->htc->doclose = SC_RX_BODY;
		VFP_Close(vfc);
		VDI_Finish(wrk, bo);
		if (!bo->do_stream) {
			assert(bo->fetch_objcore->boc->state < BOS_STREAM);
			// XXX: doclose = ?
			return (F_STP_ERROR);
		} else {
			/* Streaming: too late for a synthetic error */
			wrk->stats->fetch_failed++;
			return (F_STP_FAIL);
		}
	}

	/* Give back any over-allocated storage */
	ObjTrimStore(wrk, vfc->oc);
	return (F_STP_FETCHEND);
}
/*
 * vxp_expr_lhs: parse the left-hand side of a VSL query expression.
 *
 *	lhs:	[ '{' level [ '+' | '-' ] '}' ]
 *		tagname { ',' tagname }
 *		[ ':' record-prefix ]
 *		[ '[' field-number ']' ]
 *
 * Fills in *plhs (allocated here): the tag bitmap, optional
 * transaction-level limit (with +/- modifier), optional record prefix
 * and optional 1-based field index.  Errors are reported on vxp->sb.
 */
static void
vxp_expr_lhs(struct vxp *vxp, struct vex_lhs **plhs)
{
	char *p;
	int i;

	AN(plhs);
	AZ(*plhs);
	ALLOC_OBJ(*plhs, VEX_LHS_MAGIC);
	AN(*plhs);
	(*plhs)->tags = vbit_init(SLT__MAX);
	(*plhs)->level = -1;

	if (vxp->t->tok == '{') {
		/* Transaction level limits */
		vxp_NextToken(vxp);
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb, "Expected integer got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		(*plhs)->level = (int)strtol(vxp->t->dec, &p, 0);
		if ((*plhs)->level < 0) {
			VSB_printf(vxp->sb, "Expected positive integer ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		/* Optional '-'/'+' suffix: match at-or-below/at-or-above */
		if (*p == '-') {
			(*plhs)->level_pm = -1;
			p++;
		} else if (*p == '+') {
			(*plhs)->level_pm = 1;
			p++;
		}
		if (*p) {
			VSB_printf(vxp->sb, "Syntax error in level limit ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		vxp_NextToken(vxp);
		ExpectErr(vxp, '}');
		vxp_NextToken(vxp);
	}

	while (1) {
		/* The tags this expression applies to */
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb, "Expected VSL tag name got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		i = VSL_Glob2Tags(vxp->t->dec, -1, vsl_vbm_bitset,
		    (*plhs)->tags);
		if (i == -1) {
			VSB_printf(vxp->sb, "Tag name matches zero tags ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		if (i == -2) {
			VSB_printf(vxp->sb, "Tag name is ambiguous ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		if (i == -3) {
			VSB_printf(vxp->sb, "Syntax error in tag name ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		assert(i > 0);
		vxp_NextToken(vxp);
		if (vxp->t->tok != ',')
			break;
		vxp_NextToken(vxp);
	}

	if (vxp->t->tok == ':') {
		/* Record prefix */
		vxp_NextToken(vxp);
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb, "Expected string got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		AN(vxp->t->dec);
		(*plhs)->prefix = strdup(vxp->t->dec);
		AN((*plhs)->prefix);
		(*plhs)->prefixlen = strlen((*plhs)->prefix);
		vxp_NextToken(vxp);
	}

	if (vxp->t->tok == '[') {
		/* LHS field [] */
		vxp_NextToken(vxp);
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb, "Expected integer got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		(*plhs)->field = (int)strtol(vxp->t->dec, &p, 0);
		if (*p || (*plhs)->field <= 0) {
			VSB_printf(vxp->sb, "Expected positive integer ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		vxp_NextToken(vxp);
		ExpectErr(vxp, ']');
		vxp_NextToken(vxp);
	}
}
/*
 * vbf_stp_error: synthesize a 503 "Backend fetch failed" object.
 *
 * Frees any partially stored object, builds a fresh beresp, runs
 * vcl_backend_error{} to produce the synthetic body, honours its
 * abandon/fail/retry returns, then copies the body into object
 * storage and finishes the object.  When a waiting list exists the
 * error gets a short positive TTL so the list can drain instead of
 * serializing fetch attempts.
 */
static enum fetch_step
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	double now;
	uint8_t *ptr;
	struct vsb *synth_body;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	AN(bo->fetch_objcore->flags & OC_F_BUSY);
	assert(bo->director_state == DIR_S_NULL);

	wrk->stats->fetch_failed++;

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	/* Discard anything stored by the failed fetch */
	if (bo->fetch_objcore->stobj->stevedore != NULL)
		ObjFreeObj(bo->wrk, bo->fetch_objcore);

	if (bo->storage == NULL)
		bo->storage = STV_next();

	// XXX: reset all beresp flags ?

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	http_PutResponse(bo->beresp, "HTTP/1.1", 503, "Backend fetch failed");
	http_TimeHeader(bo->beresp, "Date: ", now);
	http_SetHeader(bo->beresp, "Server: Varnish");

	bo->fetch_objcore->t_origin = now;
	if (!VTAILQ_EMPTY(&bo->fetch_objcore->objhead->waitinglist)) {
		/*
		 * If there is a waitinglist, it means that there is no
		 * grace-able object, so cache the error return for a
		 * short time, so the waiting list can drain, rather than
		 * each objcore on the waiting list sequentially attempt
		 * to fetch from the backend.
		 */
		bo->fetch_objcore->ttl = 1;
		bo->fetch_objcore->grace = 5;
		bo->fetch_objcore->keep = 5;
	} else {
		bo->fetch_objcore->ttl = 0;
		bo->fetch_objcore->grace = 0;
		bo->fetch_objcore->keep = 0;
	}

	synth_body = VSB_new_auto();
	AN(synth_body);

	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, synth_body);

	AZ(VSB_finish(synth_body));

	if (wrk->handling == VCL_RET_ABANDON || wrk->handling == VCL_RET_FAIL) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	if (wrk->handling == VCL_RET_RETRY) {
		VSB_destroy(&synth_body);
		if (bo->retries++ < cache_param->max_retries)
			return (F_STP_RETRY);
		VSLb(bo->vsl, SLT_VCL_Error, "Too many retries, failing");
		return (F_STP_FAIL);
	}

	assert(wrk->handling == VCL_RET_DELIVER);

	assert(bo->vfc->wrk == bo->wrk);
	assert(bo->vfc->oc == bo->fetch_objcore);
	assert(bo->vfc->resp == bo->beresp);
	assert(bo->vfc->req == bo->bereq);

	if (vbf_beresp2obj(bo)) {
		(void)VFP_Error(bo->vfc, "Could not get storage");
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	/* Copy the synthetic body into object storage */
	ll = VSB_len(synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
			break;
		if (l > ll)
			l = ll;
		memcpy(ptr, VSB_data(synth_body) + o, l);
		VFP_Extend(bo->vfc, l);
		ll -= l;
		o += l;
	}
	AZ(ObjSetU64(wrk, bo->fetch_objcore, OA_LEN, o));
	VSB_destroy(&synth_body);
	HSH_Unbusy(wrk, bo->fetch_objcore);
	ObjSetState(wrk, bo->fetch_objcore, BOS_FINISHED);
	return (F_STP_DONE);
}
static void vca_ports_init(void) { AZ(pthread_create(&vca_ports_thread, NULL, vca_main, NULL)); }
static int vbf_beresp2obj(struct busyobj *bo) { unsigned l, l2; const char *b; uint8_t *bp; struct vsb *vary = NULL; int varyl = 0; l = 0; /* Create Vary instructions */ if (!(bo->fetch_objcore->flags & OC_F_PRIVATE)) { varyl = VRY_Create(bo, &vary); if (varyl > 0) { AN(vary); assert(varyl == VSB_len(vary)); l += PRNDUP((intptr_t)varyl); } else if (varyl < 0) { /* * Vary parse error * Complain about it, and make this a pass. */ VSLb(bo->vsl, SLT_Error, "Illegal 'Vary' header from backend, " "making this a pass."); bo->uncacheable = 1; AZ(vary); } else /* No vary */ AZ(vary); } l2 = http_EstimateWS(bo->beresp, bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS); l += l2; if (bo->uncacheable) bo->fetch_objcore->flags |= OC_F_PASS; if (!vbf_allocobj(bo, l)) { if (vary != NULL) VSB_destroy(&vary); AZ(vary); return (-1); } if (vary != NULL) { AN(ObjSetAttr(bo->wrk, bo->fetch_objcore, OA_VARY, varyl, VSB_data(vary))); VSB_destroy(&vary); } AZ(ObjSetU32(bo->wrk, bo->fetch_objcore, OA_VXID, VXID(bo->vsl->wid))); /* for HTTP_Encode() VSLH call */ bo->beresp->logtag = SLT_ObjMethod; /* Filter into object */ bp = ObjSetAttr(bo->wrk, bo->fetch_objcore, OA_HEADERS, l2, NULL); AN(bp); HTTP_Encode(bo->beresp, bp, l2, bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS); if (http_GetHdr(bo->beresp, H_Last_Modified, &b)) AZ(ObjSetDouble(bo->wrk, bo->fetch_objcore, OA_LASTMODIFIED, VTIM_parse(b))); else AZ(ObjSetDouble(bo->wrk, bo->fetch_objcore, OA_LASTMODIFIED, floor(bo->fetch_objcore->t_origin))); return (0); }
void child_main(void) { setbuf(stdout, NULL); setbuf(stderr, NULL); printf("Child starts\n"); cache_param = heritage.param; AZ(pthread_key_create(&req_key, NULL)); AZ(pthread_key_create(&bo_key, NULL)); AZ(pthread_key_create(&name_key, NULL)); THR_SetName("cache-main"); VSM_Init(); /* First, LCK needs it. */ LCK_Init(); /* Second, locking */ Lck_New(&vxid_lock, lck_vxid); CLI_Init(); PAN_Init(); VFP_Init(); VCL_Init(); HTTP_Init(); VBO_Init(); VBP_Init(); VBE_InitCfg(); Pool_Init(); V1P_Init(); EXP_Init(); HSH_Init(heritage.hash); BAN_Init(); VCA_Init(); SMP_Init(); STV_open(); VMOD_Init(); BAN_Compile(); VRND_Seed(); srand48(random()); CLI_AddFuncs(debug_cmds); /* Wait for persistent storage to load if asked to */ if (FEATURE(FEATURE_WAIT_SILO)) SMP_Ready(); CLI_Run(); BAN_Shutdown(); STV_close(); printf("Child dies\n"); }
/*
 * Deliver-processor for ESI objects: interprets the VEC byte-code
 * stored in the OA_ESIDATA attribute, emitting verbatim spans, skipping
 * removed spans, and recursing into <esi:include> sub-requests.  For
 * gzip'ed objects it also stitches together a valid gzip stream
 * (header, per-chunk CRC32 combination, literal-block trailer).
 *
 * State machine (ecx->state):
 *   0  - load VEC data, detect gzip, emit gzip header at top level
 *   1  - decode next VEC instruction
 *   2  - end of VEC: emit gzip trailer / propagate CRC to parent, flush
 *   3  - copy ecx->l verbatim bytes from the incoming segment(s)
 *   4  - skip ecx->l bytes
 *   99 - drained; swallow any trailing PAD/CRC/LEN bytes
 */
VDP_ESI(struct req *req, enum vdp_action act, void **priv, const void *ptr,
    ssize_t len)
{
	uint8_t *q, *r;
	ssize_t l = 0;
	uint32_t icrc = 0;
	uint8_t tailbuf[8 + 5];
	const uint8_t *pp;
	struct ecx *ecx, *pecx;
	int retval = 0;

	if (act == VDP_INIT) {
		AZ(*priv);
		ALLOC_OBJ(ecx, ECX_MAGIC);
		AN(ecx);
		ecx->preq = req;
		*priv = ecx;
		return (0);
	}
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	if (act == VDP_FINI) {
		FREE_OBJ(ecx);
		*priv = NULL;
		return (0);
	}
	pp = ptr;

	while (1) {
		switch (ecx->state) {
		case 0:
			/* Load the VEC instructions from the object */
			ecx->p = ObjGetattr(req->wrk, req->objcore,
			    OA_ESIDATA, &l);
			AN(ecx->p);
			assert(l > 0);
			ecx->e = ecx->p + l;

			/* A leading VEC_GZ marker means gzip'ed content */
			if (*ecx->p == VEC_GZ) {
				ecx->isgzip = 1;
				ecx->p++;
			}

			if (req->esi_level == 0) {
				/*
				 * Only the top level document gets to
				 * decide this.
				 */
				if (ecx->isgzip) {
					assert(sizeof gzip_hdr == 10);
					/* Send out the gzip header */
					retval = VDP_bytes(req, VDP_NULL,
					    gzip_hdr, 10);
					ecx->l_crc = 0;
					ecx->crc = crc32(0L, Z_NULL, 0);
				}
			}
			ecx->state = 1;
			break;
		case 1:
			if (ecx->p >= ecx->e) {
				ecx->state = 2;
				break;
			}
			switch (*ecx->p) {
			case VEC_V1:
			case VEC_V2:
			case VEC_V8:
				/* Verbatim span: length, then (if gzip)
				 * compressed-length + CRC32 of the span */
				ecx->l = ved_decode_len(req, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				if (ecx->isgzip) {
					assert(*ecx->p == VEC_C1 ||
					    *ecx->p == VEC_C2 ||
					    *ecx->p == VEC_C8);
					l = ved_decode_len(req, &ecx->p);
					if (l < 0)
						return (-1);
					icrc = vbe32dec(ecx->p);
					ecx->p += 4;
					if (ecx->isgzip) {
						ecx->crc = crc32_combine(
						    ecx->crc, icrc, l);
						ecx->l_crc += l;
					}
				}
				ecx->state = 3;
				break;
			case VEC_S1:
			case VEC_S2:
			case VEC_S8:
				/* Skip span */
				ecx->l = ved_decode_len(req, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				Debug("SKIP1(%d)\n", (int)ecx->l);
				ecx->state = 4;
				break;
			case VEC_INCL:
				/* Include: two NUL-terminated strings,
				 * src then host, follow the opcode */
				ecx->p++;
				q = (void*)strchr((const char*)ecx->p, '\0');
				AN(q);
				q++;
				r = (void*)strchr((const char*)q, '\0');
				AN(r);
				if (VDP_bytes(req, VDP_FLUSH, NULL, 0)) {
					/* Delivery failed: stop parsing */
					ecx->p = ecx->e;
					break;
				}
				Debug("INCL [%s][%s] BEGIN\n", q, ecx->p);
				ved_include(req, (const char*)q,
				    (const char*)ecx->p, ecx);
				Debug("INCL [%s][%s] END\n", q, ecx->p);
				ecx->p = r + 1;
				break;
			default:
				VSLb(req->vsl, SLT_Error,
				    "ESI corruption line %d 0x%02x [%s]\n",
				    __LINE__, *ecx->p, ecx->p);
				WRONG("ESI-codes: Illegal code");
			}
			break;
		case 2:
			if (ecx->isgzip && req->esi_level == 0) {
				/*
				 * We are bytealigned here, so simply emit
				 * a gzip literal block with finish bit set.
				 */
				tailbuf[0] = 0x01;
				tailbuf[1] = 0x00;
				tailbuf[2] = 0x00;
				tailbuf[3] = 0xff;
				tailbuf[4] = 0xff;

				/* Emit CRC32 */
				vle32enc(tailbuf + 5, ecx->crc);

				/* MOD(2^32) length */
				vle32enc(tailbuf + 9, ecx->l_crc);

				(void)VDP_bytes(req, VDP_NULL, tailbuf, 13);
			}
			if (req->transport->deliver == VED_Deliver) {
				/* Nested ESI: fold our CRC into parent */
				CAST_OBJ_NOTNULL(pecx, req->transport_priv,
				    ECX_MAGIC);
				pecx->crc = crc32_combine(pecx->crc,
				    ecx->crc, ecx->l_crc);
				pecx->l_crc += ecx->l_crc;
			}
			retval = VDP_bytes(req, VDP_FLUSH, NULL, 0);
			ecx->state = 99;
			return (retval);
		case 3:
		case 4:
			/*
			 * There is no guarantee that the 'l' bytes are all
			 * in the same storage segment, so loop over storage
			 * until we have processed them all.
			 */
			if (ecx->l <= len) {
				if (ecx->state == 3)
					retval = VDP_bytes(req, act,
					    pp, ecx->l);
				len -= ecx->l;
				pp += ecx->l;
				ecx->state = 1;
				break;
			}
			if (ecx->state == 3 && len > 0)
				retval = VDP_bytes(req, act, pp, len);
			ecx->l -= len;
			return (retval);
		case 99:
			/*
			 * VEP does not account for the PAD+CRC+LEN
			 * so we can see up to approx 15 bytes here.
			 */
			return (retval);
		default:
			WRONG("FOO");
			break;
		}
		if (retval)
			return (retval);
	}
}
void THR_SetBusyobj(const struct busyobj *bo) { AZ(pthread_setspecific(bo_key, bo)); }
/*
 * Process one <esi:include>: build a synthetic sub-request (GET src)
 * cloned from the parent request preq, run it to completion through the
 * request state machine, and deliver its body through the VED transport
 * into the parent's ESI context.
 *
 * Returns silently (include is dropped) when max_esi_depth is exceeded.
 */
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct req *req;
	enum req_fsm_nxt s;
	struct transport xp;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	/* Bound recursion depth */
	if (preq->esi_level >= cache_param->max_esi_depth)
		return;

	req = Req_New(wrk, preq->sp);
	req->req_body_status = REQ_BODY_NONE;
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	/* Cross-link parent and child in the shared log */
	VSLb(req->vsl, SLT_Begin, "req %u esi", VXID(preq->vsl->wid));
	VSLb(preq->vsl, SLT_Link, "req %u esi", VXID(req->vsl->wid));

	req->esi_level = preq->esi_level + 1;

	if (preq->esi_level == 0)
		assert(preq->top == preq);
	else
		CHECK_OBJ_NOTNULL(preq->top, REQ_MAGIC);

	req->top = preq->top;

	HTTP_Copy(req->http0, preq->http0);

	req->http0->ws = req->ws;
	req->http0->vsl = req->vsl;
	req->http0->logtag = SLT_ReqMethod;
	req->http0->conds = 0;

	/* Rewrite URL/Host to point at the included resource */
	http_SetH(req->http0, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0')  {
		http_Unset(req->http0, H_Host);
		http_SetHeader(req->http0, host);
	}

	http_ForceField(req->http0, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http0, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http0, H_If_Modified_Since);
	http_Unset(req->http0, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http0, H_Range);

	/* Set Accept-Encoding according to what we want */
	http_Unset(req->http0, H_Accept_Encoding);
	if (ecx->isgzip)
		http_ForceHeader(req->http0, H_Accept_Encoding, "gzip");

	/* Client content already taken care of */
	http_Unset(req->http0, H_Content_Length);

	/* Reset request to status before we started messing with it */
	HTTP_Copy(req->http, req->http0);

	/* Lend the parent's VCL reference to the child for the duration */
	req->vcl = preq->vcl;
	preq->vcl = NULL;
	req->wrk = preq->wrk;

	/*
	 * XXX: We should decide if we should cache the director
	 * XXX: or not (for session/backend coupling). Until then
	 * XXX: make sure we don't trip up the check in vcl_recv.
	 */
	req->req_step = R_STP_RECV;
	req->t_req = preq->t_req;
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));

	/* Deliver via the VED transport so output feeds the parent ecx */
	INIT_OBJ(&xp, TRANSPORT_MAGIC);
	xp.deliver = VED_Deliver;
	req->transport = &xp;
	req->transport_priv = ecx;

	THR_SetRequest(req);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	req->ws_req = WS_Snapshot(req->ws);

	/*
	 * Drive the sub-request to completion; on DISEMBARK (waiting
	 * list) we busy-wait with a short sleep since we cannot leave
	 * this stack frame.
	 */
	while (1) {
		req->wrk = wrk;
		s = CNT_Request(wrk, req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "loop waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		AZ(req->wrk);
		(void)usleep(10000);
	}

	/* Tear down the sub-request and return state to the parent */
	VRTPRIV_dynamic_kill(req->sp->privs, (uintptr_t)req);
	CNT_AcctLogCharge(wrk->stats, req);
	VSL_End(req->vsl);

	preq->vcl = req->vcl;
	req->vcl = NULL;

	req->wrk = NULL;

	THR_SetRequest(preq);
	Req_Release(req);
}
void THR_SetRequest(const struct req *req) { AZ(pthread_setspecific(req_key, req)); }