/*
 * Conditional fetch step: the backend validated our stale object (304),
 * so the new object is built from the stale "template" object instead of
 * a response body -- ESI data, flags and gzip bit-offsets are copied over
 * and the body is iterated across from the stale objcore.
 *
 * Returns F_STP_FETCHEND on success, F_STP_FAIL if the template object
 * (or the fetch context) failed underneath us.
 *
 * NOTE(review): a second, longer definition of this same function appears
 * later in this file (it finishes the object itself and returns
 * F_STP_DONE) -- presumably these are two revisions of the same function;
 * confirm which one belongs in the build.
 */
static enum fetch_step
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	/* Turn beresp into the new object; must not fail here. */
	AZ(vbf_beresp2obj(bo));

	/* ESI data is optional on the template; copy it only if present. */
	if (ObjHasAttr(bo->wrk, bo->stale_oc, OA_ESIDATA))
		AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc,
		    OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_FLAGS));
	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		/*
		 * Streaming delivery: unbusy between PREP_STREAM and
		 * STREAM so waiting clients can pick the object up.
		 * The order of these three calls matters.
		 */
		ObjSetState(wrk, bo->fetch_objcore, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, bo->fetch_objcore);
		ObjSetState(wrk, bo->fetch_objcore, BOS_STREAM);
	}

	/* Copy the body from the stale object into the new one. */
	if (ObjIterate(wrk, bo->stale_oc, bo, vbf_objiterator, 0))
		(void)VFP_Error(bo->vfc, "Template object failed");

	/* The template may have failed while we were iterating it. */
	if (bo->stale_oc->flags & OC_F_FAILED)
		(void)VFP_Error(bo->vfc, "Template object failed");

	if (bo->vfc->failed) {
		/* Release the backend connection before bailing out. */
		VDI_Finish(bo->wrk, bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}

	return (F_STP_FETCHEND);
}
/*
 * Conditional fetch step (self-finishing variant): the backend validated
 * our stale object (304), so the new object is built from the stale
 * "template" object -- ESI data, flags and gzip bit-offsets are copied
 * over, the body is iterated across, and the object is finished here
 * (OA_LEN set, stale object killed, BOS_FINISHED signalled).
 *
 * Returns F_STP_DONE on success, F_STP_FAIL if the template object (or
 * the fetch context) failed underneath us.
 *
 * NOTE(review): this duplicates the name of an earlier, shorter
 * definition in this file which defers to F_STP_FETCHEND -- presumably
 * two revisions of the same function; confirm which one belongs here.
 */
static enum fetch_step
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	/* Turn beresp into the new object; must not fail here. */
	AZ(vbf_beresp2obj(bo));

	/* ESI data is optional on the template; copy it only if present. */
	if (ObjHasAttr(bo->wrk, bo->stale_oc, OA_ESIDATA))
		AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc,
		    OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_FLAGS));
	AZ(ObjCopyAttr(bo->wrk, bo->fetch_objcore, bo->stale_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		/*
		 * Streaming delivery: unbusy between PREP_STREAM and
		 * STREAM so waiting clients can pick the object up.
		 * The order of these three calls matters.
		 */
		ObjSetState(wrk, bo->fetch_objcore, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, bo->fetch_objcore);
		ObjSetState(wrk, bo->fetch_objcore, BOS_STREAM);
	}

	/* Copy the body from the stale object into the new one. */
	if (ObjIterate(wrk, bo->stale_oc, bo, vbf_objiterator, 0))
		(void)VFP_Error(bo->vfc, "Template object failed");

	/* The template may have failed while we were iterating it. */
	if (bo->stale_oc->flags & OC_F_FAILED)
		(void)VFP_Error(bo->vfc, "Template object failed");

	if (bo->vfc->failed) {
		/* Release the backend connection before bailing out. */
		VDI_Finish(bo->wrk, bo);
		return (F_STP_FAIL);
	}

	/* Record the final body length accumulated during iteration. */
	AZ(ObjSetU64(wrk, bo->fetch_objcore, OA_LEN,
	    bo->fetch_objcore->boc->len_so_far));

	/* Non-streaming objects were not unbusied above; do it now. */
	if (!bo->do_stream)
		HSH_Unbusy(wrk, bo->fetch_objcore);

	/* The stale object has been superseded; retire it. */
	HSH_Kill(bo->stale_oc);

	/* Recycle the backend connection before setting BOS_FINISHED
	   to give predictable backend reuse behavior for varnishtest */
	VDI_Finish(bo->wrk, bo);

	ObjSetState(wrk, bo->fetch_objcore, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	return (F_STP_DONE);
}
/*
 * Iterate over the request body, calling func() on each chunk.
 *
 * A cached body (REQ_BODY_CACHED) can be iterated any number of times
 * via its objcore.  An uncached body (REQ_BODY_WITH_LEN /
 * REQ_BODY_WITHOUT_LEN) can be consumed exactly once: the status is
 * flipped to REQ_BODY_TAKEN under the session lock so concurrent
 * callers lose the race, and the body is then pulled straight off the
 * connection via vrb_pull().
 *
 * Returns 0 on success, -1 on error (already-taken body, earlier read
 * failure, or iteration failure).
 *
 * NOTE(review): return type is ssize_t but all visible paths return
 * 0/-1 or vrb_pull()'s result -- presumably vrb_pull() returns a byte
 * count; confirm against its definition.
 */
ssize_t
VRB_Iterate(struct req *req, objiterate_f *func, void *priv)
{
	int i;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(func);

	switch(req->req_body_status) {
	case REQ_BODY_CACHED:
		/* Cached body: iterate the stored objcore (re-entrant). */
		if (req->req_bodybytes > 0 &&
		    ObjIterate(req->wrk, req->body_oc, priv, func, 0))
			return (-1);
		return (0);
	case REQ_BODY_NONE:
		return (0);
	case REQ_BODY_WITH_LEN:
	case REQ_BODY_WITHOUT_LEN:
		/* Uncached body still on the wire: fall through to take it. */
		break;
	case REQ_BODY_TAKEN:
		VSLb(req->vsl, SLT_VCL_Error,
		    "Uncached req.body can only be consumed once.");
		return (-1);
	case REQ_BODY_FAIL:
		VSLb(req->vsl, SLT_FetchError,
		    "Had failed reading req.body before.");
		return (-1);
	default:
		WRONG("Wrong req_body_status in VRB_Iterate()");
	}

	/*
	 * Claim the body under the session lock: re-check the status so
	 * only one caller transitions it to REQ_BODY_TAKEN.
	 */
	Lck_Lock(&req->sp->mtx);
	if (req->req_body_status == REQ_BODY_WITH_LEN ||
	    req->req_body_status == REQ_BODY_WITHOUT_LEN) {
		req->req_body_status = REQ_BODY_TAKEN;
		i = 0;
	} else
		i = -1;
	Lck_Unlock(&req->sp->mtx);

	if (i) {
		/* Lost the race: somebody else already took the body. */
		VSLb(req->vsl, SLT_VCL_Error,
		    "Multiple attempts to access non-cached req.body");
		return (i);
	}
	return (vrb_pull(req, -1, func, priv));
}
/*
 * Deliver a gzip'ed object as a bare deflate segment inside a gzip'ed
 * ESI response: emit all the deflate blocks of the included object,
 * stripping the "LAST" bit of the final block and padding it to a byte
 * boundary, then fold the object's CRC32/length (from the gzip trailer
 * captured in foo.tailbuf) into the parent response's running CRC.
 *
 * The start/last/stop bit offsets come from the OA_GZIPBITS attribute,
 * which is only valid once the object reaches BOS_FINISHED.
 */
static void
ved_stripgzip(struct req *req, const struct boc *boc)
{
	ssize_t l;
	char *p;
	uint32_t icrc;
	uint32_t ilen;
	uint8_t *dbits;
	struct ecx *ecx;
	struct ved_foo foo;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);
	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

	INIT_OBJ(&foo, VED_FOO_MAGIC);
	foo.req = req;
	foo.preq = ecx->preq;
	/* Poison the trailer buffer so a short copy is detectable. */
	memset(foo.tailbuf, 0xdd, sizeof foo.tailbuf);

	/* OA_GZIPBITS is not valid until BOS_FINISHED */
	if (boc != NULL)
		ObjWaitState(req->objcore, BOS_FINISHED);

	AN(ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED));

	/*
	 * This is the interesting case: Deliver all the deflate
	 * blocks, stripping the "LAST" bit of the last one and
	 * padding it, as necessary, to a byte boundary.
	 */

	p = ObjGetAttr(req->wrk, req->objcore, OA_GZIPBITS, &l);
	AN(p);
	assert(l == 32);
	/* Bit offsets of first block, last block and stream stop. */
	foo.start = vbe64dec(p);
	foo.last = vbe64dec(p + 8);
	foo.stop = vbe64dec(p + 16);
	foo.olen = ObjGetLen(req->wrk, req->objcore);
	/* Sanity: all offsets fall inside the object's bit range ... */
	assert(foo.start > 0 && foo.start < foo.olen * 8);
	assert(foo.last > 0 && foo.last < foo.olen * 8);
	assert(foo.stop > 0 && foo.stop < foo.olen * 8);
	/* ... and are correctly ordered. */
	assert(foo.last >= foo.start);
	assert(foo.last < foo.stop);

	/* The start bit must be byte aligned. */
	AZ(foo.start & 7);

	dbits = WS_Alloc(req->ws, 8);
	AN(dbits);
	foo.dbits = dbits;

	(void)ObjIterate(req->wrk, req->objcore, &foo, ved_objiterate);
	/* XXX: error check ?? */
	(void)ved_bytes(req, foo.preq, VDP_FLUSH, NULL, 0);

	/* Gzip trailer: little-endian CRC32 then input length (ISIZE). */
	icrc = vle32dec(foo.tailbuf);
	ilen = vle32dec(foo.tailbuf + 4);

	/* Fold the included object's CRC into the parent's. */
	ecx->crc = crc32_combine(ecx->crc, icrc, ilen);
	ecx->l_crc += ilen;
}
/*
 * Iterate over the request body, calling func() on each chunk.
 *
 * A cached body (REQ_BODY_CACHED) is iterated via its objcore and may
 * be consumed any number of times.  An uncached body (REQ_BODY_WITH_LEN
 * / REQ_BODY_WITHOUT_LEN) can be consumed exactly once: the status is
 * flipped to REQ_BODY_TAKEN under the session lock so concurrent
 * callers lose the race, and the body is then pumped straight off the
 * connection through a fetch pipeline (VFP) in 8 KiB chunks.
 *
 * Returns 0 on success, -1 on error (already-taken body, earlier read
 * failure, pipeline failure, or func() returning non-zero).
 */
int
VRB_Iterate(struct req *req, objiterate_f *func, void *priv)
{
	char buf[8192];
	ssize_t l;
	int i;
	struct vfp_ctx *vfc;
	enum vfp_status vfps = VFP_ERROR;
	int ret = 0;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(func);

	switch (req->req_body_status) {
	case REQ_BODY_CACHED:
		/* Cached body: iterate the stored objcore (re-entrant). */
		if (ObjIterate(req->wrk, req->body_oc, priv, func))
			return (-1);
		return (0);
	case REQ_BODY_NONE:
		return (0);
	case REQ_BODY_WITH_LEN:
	case REQ_BODY_WITHOUT_LEN:
		/* Uncached body still on the wire: fall through to take it. */
		break;
	case REQ_BODY_TAKEN:
		VSLb(req->vsl, SLT_VCL_Error,
		    "Uncached req.body can only be consumed once.");
		return (-1);
	case REQ_BODY_FAIL:
		VSLb(req->vsl, SLT_FetchError,
		    "Had failed reading req.body before.");
		return (-1);
	default:
		/* Fixed: message referenced the pre-rename name
		 * "VRB_IterateReqBody()". */
		WRONG("Wrong req_body_status in VRB_Iterate()");
	}

	/*
	 * Claim the body under the session lock: re-check the status so
	 * only one caller transitions it to REQ_BODY_TAKEN.
	 */
	Lck_Lock(&req->sp->mtx);
	if (req->req_body_status == REQ_BODY_WITH_LEN ||
	    req->req_body_status == REQ_BODY_WITHOUT_LEN) {
		req->req_body_status = REQ_BODY_TAKEN;
		i = 0;
	} else
		i = -1;
	Lck_Unlock(&req->sp->mtx);

	if (i) {
		/* Lost the race: somebody else already took the body. */
		VSLb(req->vsl, SLT_VCL_Error,
		    "Multiple attempts to access non-cached req.body");
		return (i);
	}

	/* Set up a fetch pipeline reading from the client connection. */
	CHECK_OBJ_NOTNULL(req->htc, HTTP_CONN_MAGIC);
	vfc = req->htc->vfc;
	VFP_Setup(vfc);
	vfc->http = req->http;
	vfc->wrk = req->wrk;
	V1F_Setup_Fetch(vfc, req->htc);

	if (VFP_Open(vfc) < 0) {
		/* NOTE(review): req_body_status stays REQ_BODY_TAKEN
		 * here, so a retry reports "consumed once" rather than
		 * a read failure -- confirm whether REQ_BODY_FAIL is
		 * intended on this path. */
		VSLb(req->vsl, SLT_FetchError,
		    "Could not open Fetch Pipeline");
		return (-1);
	}

	/* Pump the body in buffer-sized chunks until EOF or error. */
	do {
		l = sizeof buf;
		vfps = VFP_Suck(vfc, buf, &l);
		if (vfps == VFP_ERROR) {
			req->req_body_status = REQ_BODY_FAIL;
			ret = -1;
			break;
		} else if (l > 0) {
			/* Account the bytes, then hand them to func(). */
			req->req_bodybytes += l;
			req->acct.req_bodybytes += l;
			l = func(priv, 1, buf, l);
			if (l) {
				/* Callback rejected the chunk; abort. */
				req->req_body_status = REQ_BODY_FAIL;
				ret = -1;
				break;
			}
		}
	} while (vfps == VFP_OK);
	VFP_Close(vfc);
	VSLb_ts_req(req, "ReqBody", VTIM_real());
	return (ret);
}