/*
 * Queue a buffer on the pending writevec.  A len of -1 means the
 * buffer is a NUL-terminated string.  Returns the number of bytes
 * queued, or 0 when the underlying fd is already dead.
 */
unsigned
WRW_Write(const struct worker *wrk, const void *ptr, int len)
{
	struct wrw *wrw;
	int reserved;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	wrw = wrk->wrw;
	CHECK_OBJ_NOTNULL(wrw, WRW_MAGIC);
	AN(wrw->wfd);

	if (len == 0 || *wrw->wfd < 0)
		return (0);
	if (len == -1)
		len = strlen(ptr);

	/* In chunked mode one iovec slot is reserved for the chunk tail. */
	reserved = (wrw->ciov < wrw->siov) ? 1 : 0;
	if (wrw->niov >= wrw->siov - reserved)
		(void)WRW_Flush(wrk);

	wrw->iov[wrw->niov].iov_base = TRUST_ME(ptr);
	wrw->iov[wrw->niov].iov_len = len;
	wrw->liov += len;
	wrw->niov++;
	if (wrw->ciov < wrw->siov) {
		assert(wrw->niov < wrw->siov);
		wrw->cliov += len;
	}
	return (len);
}
int FetchReqBody(const struct sess *sp) { unsigned long content_length; char buf[8192]; char *ptr, *endp; int rdcnt; if (http_GetHdr(sp->req->http, H_Content_Length, &ptr)) { content_length = strtoul(ptr, &endp, 10); /* XXX should check result of conversion */ while (content_length) { if (content_length > sizeof buf) rdcnt = sizeof buf; else rdcnt = content_length; rdcnt = HTC_Read(sp->wrk, sp->req->htc, buf, rdcnt); if (rdcnt <= 0) return (1); content_length -= rdcnt; if (!sp->req->sendbody) continue; (void)WRW_Write(sp->wrk, buf, rdcnt); /* XXX: stats ? */ if (WRW_Flush(sp->wrk)) return (2); } } if (http_GetHdr(sp->req->http, H_Transfer_Encoding, NULL)) { /* XXX: Handle chunked encoding. */ WSP(sp, SLT_Debug, "Transfer-Encoding in request"); return (1); } return (0); }
/*
 * Flush whatever is pending and give up the write context.
 * Returns the flush error count.
 */
unsigned
WRW_FlushRelease(struct worker *w)
{
	unsigned err;

	CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
	AN(w->wrw.wfd);

	err = WRW_Flush(w);
	WRW_Release(w);
	return (err);
}
/*
 * Flush pending output, then release the write context, handing the
 * byte accounting back through *pacc.  Returns the flush error count.
 */
unsigned
WRW_FlushRelease(struct worker *wrk, uint64_t *pacc)
{
	unsigned err;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AN(wrk->wrw->wfd);

	err = WRW_Flush(wrk);
	wrw_release(wrk, pacc);
	return (err);
}
/*
 * Push any buffered (de)compressed bytes onto the wire and reset
 * the membuffer.  A no-op when the buffer is empty.
 */
void
VGZ_WrwFlush(const struct worker *wrk, struct vgz *vg)
{
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(vg, VGZ_MAGIC);

	if (vg->m_len > 0) {
		(void)WRW_Write(wrk, vg->m_buf, vg->m_len);
		(void)WRW_Flush(wrk);
		vg->m_len = 0;
	}
}
/*
 * Request-body iterator callback: ship each chunk of the client
 * request body to the backend and flush it immediately.
 *
 * Returns 0 to continue iteration, -1 on a write/flush error.
 *
 * Fix: the definition had no return type (implicit int is invalid
 * since C99); restored as "static int" since this is a file-local
 * iterator callback.
 */
static int
vbf_iter_req_body(struct req *req, void *priv, void *ptr, size_t l)
{
	struct worker *wrk;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(wrk, priv, WORKER_MAGIC);

	if (l > 0) {
		(void)WRW_Write(wrk, ptr, l);
		if (WRW_Flush(wrk))
			return (-1);
	}
	return (0);
}
/*
 * Terminate chunked transfer encoding: flush what is pending, leave
 * chunked mode and emit the zero-length terminating chunk.
 */
void
WRW_EndChunk(struct worker *w)
{
	struct wrw *wrw;

	CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
	wrw = &w->wrw;

	/* We must currently be in chunked mode. */
	assert(wrw->ciov < wrw->siov);

	(void)WRW_Flush(w);

	/* Back to plain mode with an empty writevec. */
	wrw->ciov = wrw->siov;
	wrw->niov = 0;
	wrw->cliov = 0;

	(void)WRW_Write(w, "0\r\n\r\n", -1);
}
/*
 * Bottom-of-the-pile delivery processor: hand bytes to the write
 * layer and account them against the request.
 *
 * Returns 0 on success, -1 on a short write or a flush error.
 *
 * Fix: the definition had no return type (implicit int is invalid
 * since C99); restored as "static int" since this is a file-local
 * VDP function.
 */
static int
v1d_bytes(struct req *req, enum vdp_action act, const void *ptr, ssize_t len)
{
	ssize_t wl = 0;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	/* This VDP is always at the bottom of the pile. */
	assert(req->vdp_nxt == -1);

	if (len > 0)
		wl = WRW_Write(req->wrk, ptr, len);
	if (wl > 0)
		req->acct_req.bodybytes += wl;
	if (act > VDP_NULL && WRW_Flush(req->wrk))
		return (-1);
	/* A short write means the writevec layer dropped bytes. */
	if (len != wl)
		return (-1);
	return (0);
}
/*
 * Enter chunked transfer-encoding mode by reserving an iovec slot
 * for the (later filled-in) chunk header.
 */
void
WRW_Chunked(struct worker *w)
{
	struct wrw *wrw;

	CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
	wrw = &w->wrw;

	/* We must not already be in chunked mode. */
	assert(wrw->ciov == wrw->siov);

	/*
	 * If there is no room for a chunked header, a chunk of data and
	 * a chunk tail, we might as well flush right away.
	 */
	if (wrw->niov + 3 >= wrw->siov)
		(void)WRW_Flush(w);

	/* Claim the next slot for the chunk header and reset the count. */
	wrw->ciov = wrw->niov++;
	wrw->cliov = 0;
	assert(wrw->ciov < wrw->siov);
}
/*
 * Queue a buffer on the pending writevec.  len == -1 means
 * strlen(ptr).  Returns the number of bytes queued, or 0 when the
 * underlying fd is already dead.
 *
 * Fix: the flush condition ADDED the chunked-mode reservation to
 * siov ("siov + 1") instead of subtracting it, so in chunked mode
 * niov could never reach the threshold and iov[siov] was written
 * out of bounds before any flush fired.  The reservation must be
 * subtracted: one slot is kept free for the chunk tail.
 */
unsigned
WRW_Write(struct worker *w, const void *ptr, int len)
{
	struct wrw *wrw;

	CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
	wrw = &w->wrw;
	AN(wrw->wfd);
	if (len == 0 || *wrw->wfd < 0)
		return (0);
	if (len == -1)
		len = strlen(ptr);
	/* In chunked mode, reserve one iovec slot for the chunk tail. */
	if (wrw->niov >= wrw->siov - (wrw->ciov < wrw->siov ? 1 : 0))
		(void)WRW_Flush(w);
	wrw->iov[wrw->niov].iov_base = TRUST_ME(ptr);
	wrw->iov[wrw->niov].iov_len = len;
	wrw->liov += len;
	if (wrw->ciov < wrw->siov)
		wrw->cliov += len;
	wrw->niov++;
	return (len);
}
/*
 * Gunzip the input buffer into the vgz membuffer, writing the
 * membuffer to the client whenever it fills up or the expander
 * reports VGZ_STUCK (needs more output space).
 *
 * Returns VGZ_OK on success (a trailing VGZ_STUCK is mapped to
 * VGZ_OK once the input is consumed), or -1 on a gunzip error.
 */
int
VGZ_WrwGunzip(struct worker *wrk, struct vgz *vg, const void *ibuf,
    ssize_t ibufl)
{
	int i;
	size_t dl;
	const void *dp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(vg, VGZ_MAGIC);
	AN(vg->m_buf);
	VGZ_Ibuf(vg, ibuf, ibufl);
	/* Nothing to expand. */
	if (ibufl == 0)
		return (VGZ_OK);
	do {
		/* Output buffer full: force a flush below. */
		if (vg->m_len == vg->m_sz)
			i = VGZ_STUCK;
		else {
			i = VGZ_Gunzip(vg, &dp, &dl);
			vg->m_len += dl;
		}
		if (i < VGZ_OK) {
			/* XXX: VSL ? */
			return (-1);
		}
		/* Ship the membuffer when full or stuck, then reuse it. */
		if (vg->m_len == vg->m_sz || i == VGZ_STUCK) {
			wrk->acct_tmp.bodybytes += vg->m_len;
			(void)WRW_Write(wrk, vg->m_buf, vg->m_len);
			(void)WRW_Flush(wrk);
			vg->m_len = 0;
			VGZ_Obuf(vg, vg->m_buf, vg->m_sz);
		}
	} while (!VGZ_IbufEmpty(vg));
	/* All input consumed; a final STUCK just means "buffer was full". */
	if (i == VGZ_STUCK)
		i = VGZ_OK;
	return (i);
}
void WRW_Sendfile(struct worker *wrk, int fd, off_t off, unsigned len) { struct wrw *wrw; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); wrw = &wrk->wrw; AN(wrw->wfd); assert(fd >= 0); assert(len > 0); #if defined(__FreeBSD__) || defined(__DragonFly__) do { struct sf_hdtr sfh; memset(&sfh, 0, sizeof sfh); if (wrw->niov > 0) { sfh.headers = wrw->iov; sfh.hdr_cnt = wrw->niov; } if (sendfile(fd, *wrw->wfd, off, len, &sfh, NULL, 0) != 0) wrw->werr++; wrw->liov = 0; wrw->niov = 0; } while (0); #elif defined(__linux__) do { if (WRW_Flush(wrk) == 0 && sendfile(*wrw->wfd, fd, &off, len) != len) wrw->werr++; } while (0); #elif defined(__sun) && defined(HAVE_SENDFILEV) do { sendfilevec_t svvec[cache_param->http_headers * 2 + 1]; size_t xferred = 0, expected = 0; int i; for (i = 0; i < wrw->niov; i++) { svvec[i].sfv_fd = SFV_FD_SELF; svvec[i].sfv_flag = 0; svvec[i].sfv_off = (off_t) wrw->iov[i].iov_base; svvec[i].sfv_len = wrw->iov[i].iov_len; expected += svvec[i].sfv_len; } svvec[i].sfv_fd = fd; svvec[i].sfv_flag = 0; svvec[i].sfv_off = off; svvec[i].sfv_len = len; expected += svvec[i].sfv_len; if (sendfilev(*wrw->wfd, svvec, i, &xferred) == -1 || xferred != expected) wrw->werr++; wrw->liov = 0; wrw->niov = 0; } while (0); #elif defined(__sun) && defined(HAVE_SENDFILE) do { if (WRW_Flush(wrk) == 0 && sendfile(*wrw->wfd, fd, &off, len) != len) wrw->werr++; } while (0); #else #error Unknown sendfile() implementation #endif }