/* Return the size of consumed data or -1 */ int deflate_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out) { int ret; z_stream *strm = &comp_ctx->strm; char *out_data = bi_end(out); int out_len = out->size - buffer_len(out); if (in_len <= 0) return 0; if (out_len <= 0) return -1; strm->next_in = (unsigned char *)in_data; strm->avail_in = in_len; strm->next_out = (unsigned char *)out_data; strm->avail_out = out_len; ret = deflate(strm, Z_NO_FLUSH); if (ret != Z_OK) return -1; /* deflate update the available data out */ out->i += out_len - strm->avail_out; return in_len - strm->avail_in; }
int deflate_flush(struct comp_ctx *comp_ctx, struct buffer *out, int flag) { int ret; int out_len = 0; z_stream *strm = &comp_ctx->strm; strm->next_out = (unsigned char *)bi_end(out); strm->avail_out = out->size - buffer_len(out); ret = deflate(strm, flag); if (ret != Z_OK && ret != Z_STREAM_END) return -1; out_len = (out->size - buffer_len(out)) - strm->avail_out; out->i += out_len; /* compression limit */ if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */ (idle_pct < compress_min_idle)) { /* idle */ /* decrease level */ if (comp_ctx->cur_lvl > 0) { comp_ctx->cur_lvl--; deflateParams(&comp_ctx->strm, comp_ctx->cur_lvl, Z_DEFAULT_STRATEGY); } } else if (comp_ctx->cur_lvl < global.tune.comp_maxlevel) { /* increase level */ comp_ctx->cur_lvl++ ; deflateParams(&comp_ctx->strm, comp_ctx->cur_lvl, Z_DEFAULT_STRATEGY); } return out_len; }
/* Compresses the data accumulated using add_data(), and optionally sends the * format-specific trailer if <finish> is non-null. <out> is expected to have a * large enough free non-wrapping space as verified by http_comp_buffer_init(). * The number of bytes emitted is reported. */ static int rfc195x_flush_or_finish(struct comp_ctx *comp_ctx, struct buffer *out, int finish) { struct slz_stream *strm = &comp_ctx->strm; const char *in_ptr; int in_len; int out_len; in_ptr = comp_ctx->direct_ptr; in_len = comp_ctx->direct_len; if (comp_ctx->queued) { in_ptr = comp_ctx->queued->p; in_len = comp_ctx->queued->i; } out_len = out->i; if (in_ptr) out->i += slz_encode(strm, bi_end(out), in_ptr, in_len, !finish); if (finish) out->i += slz_finish(strm, bi_end(out)); out_len = out->i - out_len; /* very important, we must wipe the data we've just flushed */ comp_ctx->direct_len = 0; comp_ctx->direct_ptr = NULL; comp_ctx->queued = NULL; /* Verify compression rate limiting and CPU usage */ if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */ (idle_pct < compress_min_idle)) { /* idle */ if (comp_ctx->cur_lvl > 0) strm->level = --comp_ctx->cur_lvl; } else if (comp_ctx->cur_lvl < global.tune.comp_maxlevel && comp_ctx->cur_lvl < 1) { strm->level = ++comp_ctx->cur_lvl; } /* and that's all */ return out_len; }
/* "identity" pseudo-compression: copies <in_len> bytes from <in_data>
 * verbatim into the free space of <out>. Returns the number of bytes
 * consumed, or -1 when the output buffer cannot hold them all.
 */
int identity_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out)
{
	if (out->size - buffer_len(out) < in_len)
		return -1;

	memcpy(bi_end(out), in_data, in_len);
	out->i += in_len;
	return in_len;
}
/* Return the size of consumed data or -1. The output buffer is unused at this * point, we only keep a reference to the input data or a copy of them if the * reference is already used. */ static int rfc195x_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out) { static THREAD_LOCAL struct buffer *tmpbuf = &buf_empty; if (in_len <= 0) return 0; if (comp_ctx->direct_ptr && !comp_ctx->queued) { /* data already being pointed to, we're in front of fragmented * data and need a buffer now. We reuse the same buffer, as it's * not used out of the scope of a series of add_data()*, end(). */ if (unlikely(!tmpbuf->size)) { /* this is the first time we need the compression buffer */ if (b_alloc(&tmpbuf) == NULL) return -1; /* no memory */ } b_reset(tmpbuf); memcpy(bi_end(tmpbuf), comp_ctx->direct_ptr, comp_ctx->direct_len); tmpbuf->i += comp_ctx->direct_len; comp_ctx->direct_ptr = NULL; comp_ctx->direct_len = 0; comp_ctx->queued = tmpbuf; /* fall through buffer copy */ } if (comp_ctx->queued) { /* data already pending */ memcpy(bi_end(comp_ctx->queued), in_data, in_len); comp_ctx->queued->i += in_len; return in_len; } comp_ctx->direct_ptr = in_data; comp_ctx->direct_len = in_len; return in_len; }
/* Tries to copy block <blk> at once into the channel's buffer after length * controls. The chn->o and to_forward pointers are updated. If the channel * input is closed, -2 is returned. If the block is too large for this buffer, * -3 is returned. If there is not enough room left in the buffer, -1 is * returned. Otherwise the number of bytes copied is returned (0 being a valid * number). Channel flag READ_PARTIAL is updated if some data can be * transferred. Channel flag CF_WAKE_WRITE is set if the write fails because * the buffer is full. */ int bi_putblk(struct channel *chn, const char *blk, int len) { int max; if (unlikely(channel_input_closed(chn))) return -2; max = buffer_max_len(chn); if (unlikely(len > max - buffer_len(chn->buf))) { /* we can't write this chunk right now because the buffer is * almost full or because the block is too large. Return the * available space or -2 if impossible. */ if (len > max) return -3; chn->flags |= CF_WAKE_WRITE; return -1; } if (unlikely(len == 0)) return 0; /* OK so the data fits in the buffer in one or two blocks */ max = buffer_contig_space(chn->buf); memcpy(bi_end(chn->buf), blk, MIN(len, max)); if (len > max) memcpy(chn->buf->data, blk + max, len - max); chn->buf->i += len; chn->total += len; if (chn->to_forward) { unsigned long fwd = len; if (chn->to_forward != CHN_INFINITE_FORWARD) { if (fwd > chn->to_forward) fwd = chn->to_forward; chn->to_forward -= fwd; } b_adv(chn->buf, fwd); } /* notify that some data was read from the SI into the buffer */ chn->flags |= CF_READ_PARTIAL; return len; }
/* Tries to copy character <c> into the channel's buffer after some length * controls. The chn->o and to_forward pointers are updated. If the channel * input is closed, -2 is returned. If there is not enough room left in the * buffer, -1 is returned. Otherwise the number of bytes copied is returned * (1). Channel flag READ_PARTIAL is updated if some data can be transferred. * Channel flag CF_WAKE_WRITE is set if the write fails because the buffer is * full. */ int bi_putchr(struct channel *chn, char c) { if (unlikely(channel_input_closed(chn))) return -2; if (channel_full(chn)) { chn->flags |= CF_WAKE_WRITE; return -1; } *bi_end(chn->buf) = c; chn->buf->i++; chn->flags |= CF_READ_PARTIAL; if (chn->to_forward >= 1) { if (chn->to_forward != CHN_INFINITE_FORWARD) chn->to_forward--; b_adv(chn->buf, 1); } chn->total++; return 1; }
/* Receive up to <count> bytes from connection <conn>'s socket and store them
 * into buffer <buf>. Only one call to recv() is performed, unless the
 * buffer wraps, in which case a second call may be performed. The connection's
 * flags are updated with whatever special event is detected (error, read0,
 * empty). The caller is responsible for taking care of those events and
 * avoiding the call if inappropriate. The function does not call the
 * connection's polling update function, so the caller is responsible for this.
 * errno is cleared before starting so that the caller knows that if it spots an
 * error without errno, it's pending and can be retrieved via getsockopt(SO_ERROR).
 *
 * NOTE(review): this chunk ends right after the read loop; the read0 label
 * and the function's final return are outside the visible span.
 */
static int raw_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
{
	int ret, done = 0;
	int try;

	/* the control layer and the fd must both be usable before reading */
	if (!conn_ctrl_ready(conn))
		return 0;

	if (!fd_recv_ready(conn->t.sock.fd))
		return 0;

	/* cleared so the caller can distinguish a pending error (see above) */
	errno = 0;

	if (unlikely(!(fdtab[conn->t.sock.fd].ev & FD_POLL_IN))) {
		/* stop here if we reached the end of data */
		if ((fdtab[conn->t.sock.fd].ev & (FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_HUP)
			goto read0;

		/* report error on POLL_ERR before connection establishment */
		if ((fdtab[conn->t.sock.fd].ev & FD_POLL_ERR) && (conn->flags & CO_FL_WAIT_L4_CONN)) {
			conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
			return done;
		}
	}

	/* let's realign the buffer to optimize I/O */
	if (buffer_empty(buf))
		buf->p = buf->data;

	/* read the largest possible block. For this, we perform only one call
	 * to recv() unless the buffer wraps and we exactly fill the first hunk,
	 * in which case we accept to do it once again. A new attempt is made on
	 * EINTR too.
	 */
	while (count > 0) {
		/* first check if we have some room after p+i */
		try = buf->data + buf->size - (buf->p + buf->i);
		/* otherwise continue between data and p-o */
		if (try <= 0) {
			try = buf->p - (buf->data + buf->o);
			if (try <= 0)
				break;
		}
		if (try > count)
			try = count;

		ret = recv(conn->t.sock.fd, bi_end(buf), try, 0);

		if (ret > 0) {
			buf->i += ret;
			done += ret;
			if (ret < try) {
				/* short read: the kernel had no more data.
				 * unfortunately, on level-triggered events, POLL_HUP
				 * is generally delivered AFTER the system buffer is
				 * empty, so this one might never match.
				 */
				if (fdtab[conn->t.sock.fd].ev & FD_POLL_HUP)
					goto read0;

				fd_done_recv(conn->t.sock.fd);
				break;
			}
			count -= ret;
		}
		else if (ret == 0) {
			/* peer closed the connection */
			goto read0;
		}
		else if (errno == EAGAIN) {
			/* nothing available yet; wait for the next poll event */
			fd_cant_recv(conn->t.sock.fd);
			break;
		}
		else if (errno != EINTR) {
			/* hard error; EINTR simply retries the loop */
			conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
			break;
		}
	}
/*
 * Flush data in process, and write the header and footer of the chunk. Upon
 * success, in and out buffers are swapped to avoid a copy.
 *
 * Returns the number of bytes to forward (the finished chunk), or -1 if the
 * compression flush failed. <end> selects a final flush (Z_FINISH) instead
 * of a buffer-boundary flush (Z_SYNC_FLUSH).
 */
int http_compression_buffer_end(struct session *s, struct buffer **in, struct buffer **out, int end)
{
	int to_forward, forwarded;
	int left;
	struct http_msg *msg = &s->txn.rsp;
	struct buffer *ib = *in, *ob = *out;

#ifdef USE_ZLIB
	int ret;

	/* flush data here */
	if (end)
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_FINISH); /* end of data */
	else
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_SYNC_FLUSH); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */

	if (ob->i > 8) {
		/* more than a chunk size => some data were emitted beyond the
		 * 8 bytes reserved at the front for the chunk-size line */
		char *tail = ob->p + ob->i;

		/* write real size at the beginning of the chunk, no need of wrapping */
		http_emit_chunk_size(ob->p, ob->i - 8, 0);

		/* chunked encoding requires CRLF after data */
		*tail++ = '\r';
		*tail++ = '\n';

		if (!(msg->flags & HTTP_MSGF_TE_CHNK) && msg->chunk_len == 0) {
			/* End of data, 0<CRLF><CRLF> is needed but we're not
			 * in chunked mode on input so we must add it ourselves.
			 */
			memcpy(tail, "0\r\n\r\n", 5);
			tail += 5;
		}
		ob->i = tail - ob->p;
	} else {
		/* no data were sent, cancel the chunk size */
		ob->i = 0;
	}

	to_forward = ob->i;

	/* update input rate.
	 * NOTE(review): <forwarded> is derived from the difference of the two
	 * buffers' output counts; presumably this approximates the amount of
	 * input consumed — verify against the callers.
	 */
	forwarded = ib->o - ob->o;
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, forwarded);
		s->fe->fe_counters.comp_in += forwarded;
		s->be->be_counters.comp_in += forwarded;
	} else {
		s->fe->fe_counters.comp_byp += forwarded;
		s->be->be_counters.comp_byp += forwarded;
	}

	/* copy the remaining data in the tmp buffer. The input may wrap, so a
	 * second copy from the start of its storage area may be needed.
	 */
	if (ib->i > 0) {
		left = ib->i - bi_contig_data(ib);
		memcpy(bi_end(ob), bi_ptr(ib), bi_contig_data(ib));
		ob->i += bi_contig_data(ib);
		if (left > 0) {
			memcpy(bi_end(ob), ib->data, left);
			ob->i += left;
		}
	}

	/* swap the buffers */
	*in = ob;
	*out = ib;

	/* update output rate counters only when compression is active */
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		s->fe->fe_counters.comp_out += to_forward;
		s->be->be_counters.comp_out += to_forward;
	}

	/* forward the new chunk without remaining data */
	b_adv(ob, to_forward);

	return to_forward;
}
/*
 * Flush data in process, and write the header and footer of the chunk. Upon
 * success, in and out buffers are swapped to avoid a copy.
 *
 * Returns the number of bytes to forward (the finished chunk), or -1 if the
 * compression flush failed. <end> selects a final flush (Z_FINISH) instead
 * of a buffer-boundary flush (Z_SYNC_FLUSH). Input accounting is based on
 * msg->next (bytes parsed but not yet forwarded).
 */
int http_compression_buffer_end(struct session *s, struct buffer **in, struct buffer **out, int end)
{
	int to_forward;
	int left;
	struct http_msg *msg = &s->txn.rsp;
	struct buffer *ib = *in, *ob = *out;

#ifdef USE_ZLIB
	int ret;

	/* flush data here */
	if (end)
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_FINISH); /* end of data */
	else
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_SYNC_FLUSH); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */

	if (ob->i > 8) {
		/* more than a chunk size => some data were emitted beyond the
		 * 8 bytes reserved at the front for the chunk-size line */
		char *tail = ob->p + ob->i;

		/* write real size at the beginning of the chunk, no need of wrapping */
		http_emit_chunk_size(ob->p, ob->i - 8, 0);

		/* chunked encoding requires CRLF after data */
		*tail++ = '\r';
		*tail++ = '\n';

		/* At the end of data, we must write the empty chunk 0<CRLF>,
		 * and terminate the trailers section with a last <CRLF>. If
		 * we're forwarding a chunked-encoded response, we'll have a
		 * trailers section after the empty chunk which needs to be
		 * forwarded and which will provide the last CRLF. Otherwise
		 * we write it ourselves.
		 */
		if (msg->msg_state >= HTTP_MSG_TRAILERS) {
			memcpy(tail, "0\r\n", 3);
			tail += 3;
			if (msg->msg_state >= HTTP_MSG_DONE) {
				memcpy(tail, "\r\n", 2);
				tail += 2;
			}
		}
		ob->i = tail - ob->p;
	} else {
		/* no data were sent, cancel the chunk size */
		ob->i = 0;
	}

	to_forward = ob->i;

	/* update input rate from the bytes consumed from the input message */
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, msg->next);
		s->fe->fe_counters.comp_in += msg->next;
		s->be->be_counters.comp_in += msg->next;
	} else {
		s->fe->fe_counters.comp_byp += msg->next;
		s->be->be_counters.comp_byp += msg->next;
	}

	/* copy the remaining data in the tmp buffer. First skip the part of
	 * the input that was just accounted for; the input may wrap, so a
	 * second copy from the start of its storage area may be needed.
	 */
	b_adv(ib, msg->next);
	msg->next = 0;

	if (ib->i > 0) {
		left = ib->i - bi_contig_data(ib);
		memcpy(bi_end(ob), bi_ptr(ib), bi_contig_data(ib));
		ob->i += bi_contig_data(ib);
		if (left > 0) {
			memcpy(bi_end(ob), ib->data, left);
			ob->i += left;
		}
	}

	/* swap the buffers */
	*in = ob;
	*out = ib;

	/* update output rate counters only when compression is active */
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		s->fe->fe_counters.comp_out += to_forward;
		s->be->be_counters.comp_out += to_forward;
	}

	/* forward the new chunk without remaining data */
	b_adv(ob, to_forward);

	return to_forward;
}