/*
 * Queue a buffer on the worker's pending writev() vector.
 *
 * A len of -1 means ptr is a NUL-terminated string and its strlen() is
 * used.  Returns the number of bytes queued; zero if len was zero or
 * the output file descriptor has already failed.
 */
size_t
V1L_Write(const struct worker *wrk, const void *ptr, ssize_t len)
{
	struct v1l *vl;
	unsigned spare = 0;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	vl = wrk->v1l;
	CHECK_OBJ_NOTNULL(vl, V1L_MAGIC);
	AN(vl->wfd);

	/* Nothing to queue on empty writes or a dead descriptor */
	if (len == 0 || *vl->wfd < 0)
		return (0);
	if (len == -1)
		len = strlen(ptr);

	/*
	 * While a chunk-header slot is reserved (ciov < siov), keep one
	 * iovec spare for the chunk trailer; flush when the vector fills.
	 */
	if (vl->ciov < vl->siov)
		spare = 1;
	if (vl->niov + spare >= vl->siov)
		(void)V1L_Flush(wrk);

	vl->iov[vl->niov].iov_base = TRUST_ME(ptr);
	vl->iov[vl->niov].iov_len = len;
	vl->liov += len;
	vl->niov++;
	if (vl->ciov < vl->siov) {
		assert(vl->niov < vl->siov);
		/* account bytes belonging to the current chunk */
		vl->cliov += len;
	}
	return (len);
}
/*
 * Deliver the response headers (and optionally the body) of a request
 * over HTTP/1.  Chooses the framing mode (Content-Length, chunked, or
 * EOF-delimited), fixes up the Connection header, writes the headers,
 * runs the delivery-processor chain for the body, and closes the
 * session on transmit failure.
 *
 * NOTE(review): boc may be NULL (streaming vs. completed object is
 * presumably decided by the caller) — it is only sanity-checked here.
 */
V1D_Deliver(struct req *req, struct boc *boc, int sendbody)
{
	int err = 0;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	/* Pick body framing: C-L if present, else chunked on 1.1,
	 * else EOF framing which forces a connection close. */
	if (sendbody) {
		if (http_GetHdr(req->resp, H_Content_Length, NULL))
			req->res_mode |= RES_LEN;
		else if (req->http->protover == 11) {
			req->res_mode |= RES_CHUNKED;
			http_SetHeader(req->resp, "Transfer-Encoding: chunked");
		} else {
			req->res_mode |= RES_EOF;
			req->doclose = SC_TX_EOF;
		}
	}

	VSLb(req->vsl, SLT_Debug, "RES_MODE %x", req->res_mode);

	/* Make the Connection header reflect the close decision */
	if (req->doclose) {
		if (!http_HdrIs(req->resp, H_Connection, "close")) {
			http_Unset(req->resp, H_Connection);
			http_SetHeader(req->resp, "Connection: close");
		}
	} else if (!http_GetHdr(req->resp, H_Connection, NULL))
		http_SetHeader(req->resp, "Connection: keep-alive");

	/* Only install the byte-writer VDP if a body will be sent */
	if (sendbody && req->resp_len != 0)
		VDP_push(req, v1d_bytes, NULL, 1);

	AZ(req->wrk->v1l);
	V1L_Reserve(req->wrk, req->ws, &req->sp->fd, req->vsl, req->t_prev);

	/* V1L_Reserve takes its iovec array from the client workspace;
	 * overflow means we cannot write anything at all. */
	if (WS_Overflowed(req->ws)) {
		v1d_error(req, "workspace_client overflow");
		AZ(req->wrk->v1l);
		return;
	}

	req->acct.resp_hdrbytes += HTTP1_Write(req->wrk, req->resp, HTTP1_Resp);

	/* Debug aid: force the headers out in their own write */
	if (DO_DEBUG(DBG_FLUSH_HEAD))
		(void)V1L_Flush(req->wrk);

	if (sendbody && req->resp_len != 0) {
		if (req->res_mode & RES_CHUNKED)
			V1L_Chunked(req->wrk);
		err = VDP_DeliverObj(req);
		/* Only emit the terminating chunk on clean delivery */
		if (!err && (req->res_mode & RES_CHUNKED))
			V1L_EndChunk(req->wrk);
	}

	/* A failed flush or delivery error leaves the stream in an
	 * undefined state; close the session if still open. */
	if ((V1L_FlushRelease(req->wrk) || err) && req->sp->fd >= 0)
		SES_Close(req->sp, SC_REM_CLOSE);
	AZ(req->wrk->v1l);
	VDP_close(req);
}
/*
 * Request-body iterator callback: forward one segment of the client
 * request body to the backend write vector, flushing when asked.
 * Returns -1 on flush failure to abort the iteration, 0 otherwise.
 */
vbf_iter_req_body(void *priv, unsigned flush, const void *ptr, ssize_t l)
{
	struct busyobj *bo;

	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);

	/* Nothing queued for empty segments */
	if (l <= 0)
		return (0);

	(void)V1L_Write(bo->wrk, ptr, l);
	if (flush && V1L_Flush(bo->wrk))
		return (-1);
	return (0);
}
/*
 * Request-body iterator callback: forward one segment of the client
 * request body to the backend write vector, accounting the queued
 * bytes, and flush when requested.  Returns -1 on flush failure to
 * abort the iteration, 0 otherwise.
 */
vbf_iter_req_body(void *priv, int flush, const void *ptr, ssize_t l)
{
	struct busyobj *bo;

	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);

	/* Empty segments queue nothing and never flush */
	if (l <= 0)
		return (0);

	bo->acct.bereq_bodybytes += V1L_Write(bo->wrk, ptr, l);
	if (flush && V1L_Flush(bo->wrk))
		return (-1);
	return (0);
}
/*
 * Request-body iterator callback: forward one segment of the client
 * request body to the backend write vector, accounting the queued
 * bytes.  Flushes after every non-empty segment; returns -1 on flush
 * failure to abort the iteration, 0 otherwise.
 */
vbf_iter_req_body(struct req *req, void *priv, void *ptr, size_t l)
{
	struct busyobj *bo;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);

	/* Empty segments queue nothing and never flush */
	if (l == 0)
		return (0);

	bo->acct.bereq_bodybytes += V1L_Write(bo->wrk, ptr, l);
	if (V1L_Flush(bo->wrk))
		return (-1);
	return (0);
}
/*
 * Final flush of the write vector, then detach it from the worker and
 * give its workspace back.  Returns the (non-zero) error indication
 * from the flush, zero on success.
 */
unsigned
V1L_FlushRelease(struct worker *wrk)
{
	struct v1l *vl;
	unsigned failed;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	/* Flush while the v1l is still attached to the worker */
	failed = V1L_Flush(wrk);

	vl = wrk->v1l;
	wrk->v1l = NULL;
	CHECK_OBJ_NOTNULL(vl, V1L_MAGIC);

	/* Return the workspace the v1l lived in to its former state */
	WS_Release(vl->ws, 0);
	WS_Reset(vl->ws, vl->res);
	return (failed);
}
/*
 * Terminate chunked transfer-encoding: flush any pending chunk, drop
 * the chunk-header reservation, and queue the final zero-length chunk
 * ("0\r\n\r\n").  The trailer itself is written by the next flush.
 */
void
V1L_EndChunk(const struct worker *wrk)
{
	struct v1l *vl;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	vl = wrk->v1l;
	CHECK_OBJ_NOTNULL(vl, V1L_MAGIC);

	/* Must currently be in chunked mode */
	assert(vl->ciov < vl->siov);

	(void)V1L_Flush(wrk);

	/* Leave chunked mode and start with an empty vector */
	vl->ciov = vl->siov;
	vl->niov = 0;
	vl->cliov = 0;
	(void)V1L_Write(wrk, "0\r\n\r\n", -1);
}
/*
 * Bottom-of-the-pile delivery processor: hand the bytes to the V1L
 * write vector and account them.  Returns -1 when either the flush or
 * the write came up short, 0 on success.
 */
v1d_bytes(struct req *req, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	ssize_t written = 0;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	(void)priv;

	/* No byte shuffling on lifecycle events */
	if (act == VDP_INIT || act == VDP_FINI)
		return (0);
	AZ(req->vdp_nxt);		/* always at the bottom of the pile */

	if (len > 0)
		written = V1L_Write(req->wrk, ptr, len);

	/* Account attempted bytes, independent of write success */
	req->acct.resp_bodybytes += len;

	if (act > VDP_NULL && V1L_Flush(req->wrk))
		return (-1);
	if (written != len)
		return (-1);
	return (0);
}
/*
 * Switch the write vector into chunked transfer-encoding mode by
 * reserving the next iovec slot (ciov) for the chunk-size header,
 * which a later flush fills in.
 */
void
V1L_Chunked(const struct worker *wrk)
{
	struct v1l *v1l;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	v1l = wrk->v1l;
	CHECK_OBJ_NOTNULL(v1l, V1L_MAGIC);

	/* Must not already be in chunked mode */
	assert(v1l->ciov == v1l->siov);

	/*
	 * If there is not enough space for a chunked header, a chunk of
	 * data and a chunk tail, we might as well flush right away.
	 */
	if (v1l->niov + 3 >= v1l->siov)
		(void)V1L_Flush(wrk);

	/* Claim the next slot for the chunk header; reset chunk length */
	v1l->ciov = v1l->niov++;
	v1l->cliov = 0;
	assert(v1l->ciov < v1l->siov);
	assert(v1l->niov < v1l->siov);
}
/*
 * Deliver a response over HTTP/1: decide ESI/gunzip/range processing,
 * fix up framing (Content-Length, chunked, or EOF) and the Connection
 * header, write the headers, run the delivery-processor chain for the
 * body, and close the session if transmission failed.
 *
 * NOTE(review): bo may be NULL for a completed (non-streaming) object
 * — it is only passed through to ESI/range helpers here; confirm with
 * callers.
 */
void
V1D_Deliver(struct req *req, struct busyobj *bo)
{
	const char *r;
	enum objiter_status ois;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	req->res_mode = 0;

	/*
	 * Determine ESI status first.  Not dependent on wantbody, because
	 * we want ESI to suppress C-L in HEAD too.
	 */
	if (!req->disable_esi && ObjGetattr(req->wrk, req->objcore,
	    OA_ESIDATA, NULL) != NULL)
		req->res_mode |= RES_ESI;

	/*
	 * ESI-children don't care about headers -> early escape
	 */
	if (req->esi_level > 0) {
		ESI_DeliverChild(req, bo);
		return;
	}

	if (req->res_mode & RES_ESI) {
		/* ESI output length is unknown; weaken validators */
		RFC2616_Weaken_Etag(req->resp);
	} else if (http_IsStatus(req->resp, 304)) {
		/* 304 carries no body, hence no C-L */
		http_Unset(req->resp, H_Content_Length);
		req->wantbody = 0;
	} else if (bo == NULL &&
	    !http_GetHdr(req->resp, H_Content_Length, NULL)) {
		/* Completed object: we know the exact length */
		http_PrintfHeader(req->resp, "Content-Length: %ju",
		    (uintmax_t)ObjGetLen(req->wrk, req->objcore));
	}

	if (cache_param->http_gzip_support &&
	    ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED) &&
	    !RFC2616_Req_Gzip(req->http)) {
		/*
		 * We don't know what it uncompresses to
		 * XXX: we could cache that, but would still deliver
		 * XXX: with multiple writes because of the gunzip buffer
		 */
		req->res_mode |= RES_GUNZIP;
		VDP_push(req, VDP_gunzip, NULL, 0);
	}

	if (req->res_mode & RES_ESI) {
		/* Gunzip could have added back a C-L */
		http_Unset(req->resp, H_Content_Length);
	}

	/*
	 * Range comes after the others and pushes on bottom because it
	 * can generate a correct C-L header.
	 */
	if (cache_param->http_range_support && http_IsStatus(req->resp, 200)) {
		http_SetHeader(req->resp, "Accept-Ranges: bytes");
		if (req->wantbody && http_GetHdr(req->http, H_Range, &r))
			VRG_dorange(req, bo, r);
	}

	if (http_GetHdr(req->resp, H_Content_Length, NULL))
		req->res_mode |= RES_LEN;

	/* No C-L: chunked on HTTP/1.1+, EOF framing (forcing a close)
	 * on older protocols. */
	if (req->wantbody && !(req->res_mode & RES_LEN)) {
		if (req->http->protover >= 11) {
			req->res_mode |= RES_CHUNKED;
			http_SetHeader(req->resp, "Transfer-Encoding: chunked");
		} else {
			req->res_mode |= RES_EOF;
			req->doclose = SC_TX_EOF;
		}
	}

	VSLb(req->vsl, SLT_Debug, "RES_MODE %x", req->res_mode);

	/* Make the Connection header reflect the close decision */
	if (req->doclose) {
		if (!http_HdrIs(req->resp, H_Connection, "close")) {
			http_Unset(req->resp, H_Connection);
			http_SetHeader(req->resp, "Connection: close");
		}
	} else if (!http_GetHdr(req->resp, H_Connection, NULL))
		http_SetHeader(req->resp, "Connection: keep-alive");

	/* v1d_bytes sits at the bottom of the VDP pile */
	VDP_push(req, v1d_bytes, NULL, 1);

	V1L_Reserve(req->wrk, req->ws, &req->sp->fd, req->vsl, req->t_prev);

	req->acct.resp_hdrbytes += HTTP1_Write(req->wrk, req->resp, HTTP1_Resp);

	/* Debug aid: force the headers out in their own write */
	if (DO_DEBUG(DBG_FLUSH_HEAD))
		(void)V1L_Flush(req->wrk);

	ois = OIS_DONE;
	if (req->wantbody) {
		if (req->res_mode & RES_CHUNKED)
			V1L_Chunked(req->wrk);
		ois = VDP_DeliverObj(req);
		(void)VDP_bytes(req, VDP_FLUSH, NULL, 0);
		/* Only emit the terminating chunk on clean delivery */
		if (ois == OIS_DONE && (req->res_mode & RES_CHUNKED))
			V1L_EndChunk(req->wrk);
	}

	/* A failed flush or incomplete delivery leaves the stream in an
	 * undefined state; close the session if still open. */
	if ((V1L_FlushRelease(req->wrk) || ois != OIS_DONE) &&
	    req->sp->fd >= 0)
		SES_Close(req->sp, SC_REM_CLOSE);
	VDP_close(req);
}