Example #1
/* Schedule up to <bytes> more bytes to be forwarded via the channel without
 * notifying the owner task. Any data pending in the buffer are scheduled to
 * be sent as well, within the limit of the number of bytes to forward. This
 * must be the only function used to schedule bytes to be forwarded: if the
 * requested number is too large, it is automatically adjusted, and the number
 * of bytes actually taken into account is returned. Directly touching
 * ->to_forward instead would cause lockups when buf->o goes down to zero if
 * nobody is ready to push the remaining data.
 */
unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes)
{
	unsigned int new_forward;
	unsigned int forwarded;

	forwarded = chn->buf->i;
	b_adv(chn->buf, chn->buf->i);

	/* Note: the case below is the only case where we may return
	 * a byte count that does not fit into a 32-bit number.
	 */
	if (likely(chn->to_forward == CHN_INFINITE_FORWARD))
		return bytes;

	if (likely(bytes == CHN_INFINITE_FORWARD)) {
		chn->to_forward = bytes;
		return bytes;
	}

	new_forward = chn->to_forward + bytes - forwarded;
	bytes = forwarded; /* at least those bytes were scheduled */

	if (new_forward <= chn->to_forward) {
		/* integer overflow detected, let's assume no more than 2G at once */
		new_forward = MID_RANGE(new_forward);
	}

	if (new_forward > chn->to_forward) {
		bytes += new_forward - chn->to_forward;
		chn->to_forward = new_forward;
	}
	return bytes;
}
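
The overflow guard above relies on unsigned wrap-around: if the 32-bit sum wraps, it comes out smaller than the previous to_forward, and MID_RANGE() clamps it. A self-contained sketch of the same guard with an assumed 2G clamp (MID_RANGE()'s exact definition is not shown in this excerpt):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: add <b> to a 32-bit forward counter <a>, falling
 * back to a 2G clamp on wrap-around, in the spirit of the MID_RANGE()
 * fallback used by __channel_forward(). */
static uint32_t add_forward_clamped(uint32_t a, uint32_t b)
{
	uint32_t sum = a + b;

	if (sum < a)               /* unsigned wrap => overflow occurred */
		sum = 0x80000000u; /* assume no more than 2G at once */
	return sum;
}

int main(void)
{
	printf("%" PRIu32 "\n", add_forward_clamped(1000u, 24u));         /* 1024 */
	printf("%" PRIu32 "\n", add_forward_clamped(0xFFFFFFF0u, 0x20u)); /* clamped */
	return 0;
}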
Example #2
/*
 * Add data to compress
 */
int http_compression_buffer_add_data(struct session *s, struct buffer *in, struct buffer *out)
{
	struct http_msg *msg = &s->txn.rsp;
	int consumed_data = 0;
	int data_process_len;
	int left;
	int ret;

	/*
	 * Skip data we don't need in the new buffer: they are the leftovers
	 * of CHUNK_CRLF and CHUNK_SIZE parsing.
	 */
	b_adv(in, msg->next);
	msg->next = 0;
	msg->sov = 0;
	msg->sol = 0;

	/*
	 * select the smallest size among the announced chunk size, the input
	 * data, and the available output buffer size
	 */
	data_process_len = MIN(in->i, msg->chunk_len);
	data_process_len = MIN(out->size - buffer_len(out), data_process_len);

	left = data_process_len - bi_contig_data(in);
	if (left <= 0) {
		consumed_data += ret = s->comp_algo->add_data(s->comp_ctx, bi_ptr(in), data_process_len, out);
		if (ret < 0)
			return -1;

	} else {
		consumed_data += ret = s->comp_algo->add_data(s->comp_ctx, bi_ptr(in), bi_contig_data(in), out);
		if (ret < 0)
			return -1;
		consumed_data += ret = s->comp_algo->add_data(s->comp_ctx, in->data, left, out);
		if (ret < 0)
			return -1;
	}

	b_adv(in, data_process_len);
	msg->chunk_len -= data_process_len;

	return consumed_data;
}
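
The left/bi_contig_data() split above is the general recipe for feeding a wrapping buffer to any byte-oriented consumer: a ring exposes at most two contiguous regions, hence at most two calls. A self-contained sketch with a generic ring and a hypothetical consume() callback:

#include <stddef.h>

/* Minimal ring buffer: <size> bytes at <data>, <head> is the read index,
 * <len> the number of readable bytes. */
struct ring {
	char  *data;
	size_t size;
	size_t head;
	size_t len;
};

/* Feed up to <n> readable bytes to <consume> in at most two contiguous
 * blocks, mirroring the contiguous/wrapped split used above. */
static int ring_feed(struct ring *r, size_t n,
		     int (*consume)(const char *buf, size_t len, void *ctx),
		     void *ctx)
{
	size_t block1, block2;

	if (n > r->len)
		n = r->len;

	block1 = r->size - r->head;     /* contiguous bytes until the wrap */
	if (block1 > n)
		block1 = n;
	block2 = n - block1;            /* bytes after the wrap, if any */

	if (consume(r->data + r->head, block1, ctx) < 0)
		return -1;
	if (block2 && consume(r->data, block2, ctx) < 0)
		return -1;

	r->head = (r->head + n) % r->size;
	r->len -= n;
	return (int)n;
}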
Example #3
/* If an explicit source binding is specified on the server and/or backend, and
 * this source makes use of the transparent proxy, then it is extracted now and
 * assigned to the session's pending connection. This function assumes that an
 * outgoing connection has already been assigned to s->si[1].end.
 */
static void assign_tproxy_address(struct session *s)
{
#if defined(CONFIG_HAP_CTTPROXY) || defined(CONFIG_HAP_TRANSPARENT)
	struct server *srv = objt_server(s->target);
	struct conn_src *src;
	struct connection *cli_conn;
	struct connection *srv_conn = objt_conn(s->si[1].end);

	if (srv && srv->conn_src.opts & CO_SRC_BIND)
		src = &srv->conn_src;
	else if (s->be->conn_src.opts & CO_SRC_BIND)
		src = &s->be->conn_src;
	else
		return;

	switch (src->opts & CO_SRC_TPROXY_MASK) {
	case CO_SRC_TPROXY_ADDR:
		srv_conn->addr.from = src->tproxy_addr;
		break;
	case CO_SRC_TPROXY_CLI:
	case CO_SRC_TPROXY_CIP:
		/* FIXME: what can we do if the client connects over IPv6 or a UNIX socket? */
		cli_conn = objt_conn(s->si[0].end);
		if (cli_conn)
			srv_conn->addr.from = cli_conn->addr.from;
		else
			memset(&srv_conn->addr.from, 0, sizeof(srv_conn->addr.from));
		break;
	case CO_SRC_TPROXY_DYN:
		if (src->bind_hdr_occ) {
			char *vptr;
			int vlen;
			int rewind;

			/* bind to the IP in a header */
			((struct sockaddr_in *)&srv_conn->addr.from)->sin_family = AF_INET;
			((struct sockaddr_in *)&srv_conn->addr.from)->sin_port = 0;
			((struct sockaddr_in *)&srv_conn->addr.from)->sin_addr.s_addr = 0;

			b_rew(s->req.buf, rewind = http_hdr_rewind(&s->txn.req));
			if (http_get_hdr(&s->txn.req, src->bind_hdr_name, src->bind_hdr_len,
					 &s->txn.hdr_idx, src->bind_hdr_occ, NULL, &vptr, &vlen)) {
				((struct sockaddr_in *)&srv_conn->addr.from)->sin_addr.s_addr =
					htonl(inetaddr_host_lim(vptr, vptr + vlen));
			}
			b_adv(s->req.buf, rewind);
		}
		break;
	default:
		memset(&srv_conn->addr.from, 0, sizeof(srv_conn->addr.from));
	}
#endif
}
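
In the CO_SRC_TPROXY_DYN branch, the source address is parsed straight out of a header value, which is not NUL-terminated (hence the vptr/vlen pair passed to inetaddr_host_lim()). A self-contained sketch of the same sockaddr_in setup, using the standard inet_pton() on a NUL-terminated string instead:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>

/* Fill <sa> with the IPv4 address held in the NUL-terminated string
 * <addr>, port 0, the way assign_tproxy_address() does for a header
 * value. Returns 0 on success, -1 if <addr> is not a valid address. */
static int set_source_from_text(struct sockaddr_in *sa, const char *addr)
{
	memset(sa, 0, sizeof(*sa));
	sa->sin_family = AF_INET;
	sa->sin_port = 0;                   /* let the kernel pick a port */
	if (inet_pton(AF_INET, addr, &sa->sin_addr) != 1)
		return -1;
	return 0;
}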
Example #4
/* Tries to copy block <blk> at once into the channel's buffer after length
 * controls. The chn->o and to_forward counters are updated. If the channel
 * input is closed, -2 is returned. If the block is too large for this buffer,
 * -3 is returned. If there is not enough room left in the buffer, -1 is
 * returned. Otherwise the number of bytes copied is returned (0 being a valid
 * number). Channel flag CF_READ_PARTIAL is set if some data can be
 * transferred. Channel flag CF_WAKE_WRITE is set if the write fails because
 * the buffer is full.
 */
int bi_putblk(struct channel *chn, const char *blk, int len)
{
	int max;

	if (unlikely(channel_input_closed(chn)))
		return -2;

	max = buffer_max_len(chn);
	if (unlikely(len > max - buffer_len(chn->buf))) {
		/* we can't write this chunk right now because the buffer is
		 * almost full or because the block is too large. Return -3
		 * if the block can never fit in this buffer, or -1 if it
		 * just doesn't fit right now.
		 */
		if (len > max)
			return -3;

		chn->flags |= CF_WAKE_WRITE;
		return -1;
	}

	if (unlikely(len == 0))
		return 0;

	/* OK so the data fits in the buffer in one or two blocks */
	max = buffer_contig_space(chn->buf);
	memcpy(bi_end(chn->buf), blk, MIN(len, max));
	if (len > max)
		memcpy(chn->buf->data, blk + max, len - max);

	chn->buf->i += len;
	chn->total += len;
	if (chn->to_forward) {
		unsigned long fwd = len;
		if (chn->to_forward != CHN_INFINITE_FORWARD) {
			if (fwd > chn->to_forward)
				fwd = chn->to_forward;
			chn->to_forward -= fwd;
		}
		b_adv(chn->buf, fwd);
	}

	/* notify that some data was read from the SI into the buffer */
	chn->flags |= CF_READ_PARTIAL;
	return len;
}
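
The one-or-two memcpy() copy above is the classic circular-buffer write. A self-contained sketch of just that step, with a generic ring whose room has already been checked, as the length controls above guarantee:

#include <stddef.h>
#include <string.h>

/* Copy <len> bytes from <blk> into ring <data>[<size>] starting at index
 * <tail>, wrapping at most once, the way bi_putblk() performs its one or
 * two memcpy() calls. The caller must have verified that <len> bytes fit.
 * Returns the new tail index. */
static size_t ring_write(char *data, size_t size, size_t tail,
			 const char *blk, size_t len)
{
	size_t contig = size - tail;    /* room before the wrap point */

	if (contig > len)
		contig = len;
	memcpy(data + tail, blk, contig);
	if (len > contig)               /* wrapped: finish at the start */
		memcpy(data, blk + contig, len - contig);
	return (tail + len) % size;
}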
Example #5
/*
 * Add data to compress
 */
int http_compression_buffer_add_data(struct session *s, struct buffer *in, struct buffer *out)
{
	struct http_msg *msg = &s->txn.rsp;
	int consumed_data = 0;
	int data_process_len;
	int block1, block2;

	/*
	 * Temporarily skip already parsed data and chunks to jump to the
	 * actual data block. The buffer pointer is restored before leaving.
	 */
	b_adv(in, msg->next);

	/*
	 * select the smallest size among the announced chunk size, the input
	 * data, and the available output buffer size. The compressors are
	 * assumed to be able to process all the bytes we pass to them at
	 * once.
	 */
	data_process_len = MIN(in->i, msg->chunk_len);
	data_process_len = MIN(out->size - buffer_len(out), data_process_len);

	block1 = data_process_len;
	if (block1 > bi_contig_data(in))
		block1 = bi_contig_data(in);
	block2 = data_process_len - block1;

	/* compressors return < 0 upon error or the amount of bytes read */
	consumed_data = s->comp_algo->add_data(s->comp_ctx, bi_ptr(in), block1, out);
	if (consumed_data >= 0 && block2 > 0) {
		consumed_data = s->comp_algo->add_data(s->comp_ctx, in->data, block2, out);
		if (consumed_data >= 0)
			consumed_data += block1;
	}

	/* restore original buffer pointer */
	b_rew(in, msg->next);

	if (consumed_data > 0) {
		msg->next += consumed_data;
		msg->chunk_len -= consumed_data;
	}
	return consumed_data;
}
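
Unlike Example #2, this version never destroys msg->next: it advances past the parsed bytes, compresses, rewinds, and commits only what the compressor actually consumed. A generic sketch of that commit-on-success pattern, with a hypothetical process() callback:

#include <stddef.h>

/* Generic cursor: <pos> is the committed read position within <base>. */
struct cursor {
	const char *base;
	size_t      pos;
};

/* Work at a temporary offset <skip> past the committed position, and
 * only move that position by what process() reports as consumed. */
static int consume_after_skip(struct cursor *c, size_t skip, size_t n,
			      int (*process)(const char *p, size_t n))
{
	int ret = process(c->base + c->pos + skip, n);

	if (ret > 0)
		c->pos += ret;  /* commit only what really got consumed */
	return ret;
}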
Example #6
/* RDP Cookie HASH.  */
struct server *get_server_rch(struct session *s)
{
	unsigned int hash = 0;
	struct proxy    *px   = s->be;
	unsigned long    len;
	int              ret;
	struct sample    smp;
	int rewind;

	/* tot_weight appears to mean srv_count */
	if (px->lbprm.tot_weight == 0)
		return NULL;

	memset(&smp, 0, sizeof(smp));

	b_rew(s->req.buf, rewind = s->req.buf->o);

	ret = fetch_rdp_cookie_name(s, &smp, px->hh_name, px->hh_len);
	len = smp.data.str.len;

	b_adv(s->req.buf, rewind);

	if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || len == 0)
		return NULL;

	/* note: we won't hash if there's only one server left */
	if (px->lbprm.tot_used == 1)
		goto hash_done;

	/* We found the hh_name in the headers: compute the hash
	 * based on its value.
	 */
	hash = gen_hash(px, smp.data.str.str, len);

	if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
		hash = full_hash(hash);
 hash_done:
	if (px->lbprm.algo & BE_LB_LKUP_CHTREE)
		return chash_get_server_hash(px, hash);
	else
		return map_get_server_hash(px, hash);
}
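
Once the hash is known, it is mapped onto a server either through a consistent-hash tree or through a modulo map. A self-contained sketch of the modulo variant, with hypothetical types (the real map_get_server_hash() works on HAProxy's weight-expanded map):

#include <stddef.h>

struct srv {
	const char *name;
};

/* Pick a server from a weight-expanded map of <total> entries the way a
 * modulo-based lookup would: index = hash % total. */
static struct srv *pick_by_hash(struct srv **map, unsigned int total,
				unsigned int hash)
{
	if (!total)
		return NULL;    /* no eligible server */
	return map[hash % total];
}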
Example #7
/*
 * Init HTTP compression
 */
int http_compression_buffer_init(struct session *s, struct buffer *in, struct buffer *out)
{
	struct http_msg *msg = &s->txn.rsp;
	int left;

	/* not enough space */
	if (in->size - buffer_len(in) < 40)
		return -1;

	/*
	 * Skip data we don't need in the new buffer: they are the leftovers
	 * of CHUNK_CRLF and CHUNK_SIZE parsing.
	 */
	b_adv(in, msg->next);
	msg->next = 0;
	msg->sov = 0;
	msg->sol = 0;

	out->size = global.tune.bufsize;
	out->i = 0;
	out->o = 0;
	out->p = out->data;
	/* copy output data */
	if (in->o > 0) {
		left = in->o - bo_contig_data(in);
		memcpy(out->data, bo_ptr(in), bo_contig_data(in));
		out->p += bo_contig_data(in);
		if (left > 0) { /* second part of the buffer */
			memcpy(out->p, in->data, left);
			out->p += left;
		}
		out->o = in->o;
	}
	out->i += http_emit_chunk_size(out->p, 0, 0);

	return 0;
}
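
The final http_emit_chunk_size(out->p, 0, 0) writes a placeholder chunk header that http_compression_buffer_end() later patches with the real size; the `ob->i - 8` in Examples #9 and #10 suggests an 8-byte header. A self-contained sketch of that reserve-then-patch idea, assuming 6 hex digits plus CRLF (the real encoding may differ):

#include <stdio.h>
#include <string.h>

#define CHUNK_HDR_LEN 8 /* assumed: 6 hex digits + CR + LF */

/* Write a fixed-width chunk-size header for <len> at <p>. Writing zero
 * first reserves the space; calling it again later patches in the real
 * size without moving any data. */
static void emit_chunk_hdr(char *p, unsigned int len)
{
	char tmp[CHUNK_HDR_LEN + 1];

	snprintf(tmp, sizeof(tmp), "%06X\r\n", len & 0xFFFFFF);
	memcpy(p, tmp, CHUNK_HDR_LEN);  /* no trailing NUL in the buffer */
}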
Example #8
/* Tries to copy character <c> into the channel's buffer after some length
 * controls. The chn->o and to_forward counters are updated. If the channel
 * input is closed, -2 is returned. If there is not enough room left in the
 * buffer, -1 is returned. Otherwise the number of bytes copied is returned
 * (1). Channel flag CF_READ_PARTIAL is set if some data can be transferred.
 * Channel flag CF_WAKE_WRITE is set if the write fails because the buffer is
 * full.
 */
int bi_putchr(struct channel *chn, char c)
{
	if (unlikely(channel_input_closed(chn)))
		return -2;

	if (channel_full(chn)) {
		chn->flags |= CF_WAKE_WRITE;
		return -1;
	}

	*bi_end(chn->buf) = c;

	chn->buf->i++;
	chn->flags |= CF_READ_PARTIAL;

	if (chn->to_forward >= 1) {
		if (chn->to_forward != CHN_INFINITE_FORWARD)
			chn->to_forward--;
		b_adv(chn->buf, 1);
	}

	chn->total++;
	return 1;
}
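
The to_forward block is the same auto-forwarding step as in bi_putblk(): freshly received bytes are immediately rescheduled as output while a budget remains. A self-contained model, where UINT_MAX stands in for CHN_INFINITE_FORWARD (an assumption, not HAProxy's actual definition):

#include <limits.h>
#include <stddef.h>

/* Minimal channel model: <in> bytes waiting, <out> bytes scheduled for
 * sending, <to_forward> the remaining auto-forward budget. */
struct chan {
	size_t in, out;
	unsigned int to_forward;
};

/* Move up to the budget's worth of the <fresh> input bytes straight to
 * the output side, as bi_putchr()/bi_putblk() do after a copy. */
static void auto_forward(struct chan *c, size_t fresh)
{
	size_t fwd = fresh;

	if (!c->to_forward)
		return;
	if (c->to_forward != UINT_MAX) {    /* finite budget: clamp it */
		if (fwd > c->to_forward)
			fwd = c->to_forward;
		c->to_forward -= fwd;
	}
	c->in  -= fwd;  /* bytes leave the input half of the buffer... */
	c->out += fwd;  /* ...and become output scheduled for sending */
}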
Example #9
/*
 * Flush the data being processed, and write the header and footer of the
 * chunk. Upon success, in and out buffers are swapped to avoid a copy.
 */
int http_compression_buffer_end(struct session *s, struct buffer **in, struct buffer **out, int end)
{
	int to_forward, forwarded;
	int left;
	struct http_msg *msg = &s->txn.rsp;
	struct buffer *ib = *in, *ob = *out;

#ifdef USE_ZLIB
	int ret;

	/* flush data here */

	if (end)
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_FINISH); /* end of data */
	else
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_SYNC_FLUSH); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */

	if (ob->i > 8) {
		/* more than a chunk size => some data were emitted */
		char *tail = ob->p + ob->i;

		/* write the real size at the beginning of the chunk, no need for wrapping */
		http_emit_chunk_size(ob->p, ob->i - 8, 0);

		/* chunked encoding requires CRLF after data */
		*tail++ = '\r';
		*tail++ = '\n';

		if (!(msg->flags & HTTP_MSGF_TE_CHNK) && msg->chunk_len == 0) {
			/* End of data, 0<CRLF><CRLF> is needed but we're not
			 * in chunked mode on input so we must add it ourselves.
			 */
			memcpy(tail, "0\r\n\r\n", 5);
			tail += 5;
		}
		ob->i = tail - ob->p;
	} else {
		/* no data were sent, cancel the chunk size */
		ob->i = 0;
	}

	to_forward = ob->i;

	/* update input rate */
	forwarded = ib->o - ob->o;
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, forwarded);
		s->fe->fe_counters.comp_in += forwarded;
		s->be->be_counters.comp_in += forwarded;
	} else {
		s->fe->fe_counters.comp_byp += forwarded;
		s->be->be_counters.comp_byp += forwarded;
	}

	/* copy the remaining data into the tmp buffer. */
	if (ib->i > 0) {
		left = ib->i - bi_contig_data(ib);
		memcpy(bi_end(ob), bi_ptr(ib), bi_contig_data(ib));
		ob->i += bi_contig_data(ib);
		if (left > 0) {
			memcpy(bi_end(ob), ib->data, left);
			ob->i += left;
		}
	}

	/* swap the buffers */
	*in = ob;
	*out = ib;

	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		s->fe->fe_counters.comp_out += to_forward;
		s->be->be_counters.comp_out += to_forward;
	}

	/* forward the new chunk without remaining data */
	b_adv(ob, to_forward);

	return to_forward;
}
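
The swap at the end is what makes the whole pass copy-free: the compressed bytes stay where they were written, and only the callers' pointers move. The idiom in isolation (struct buffer kept opaque):

struct buffer;  /* opaque here; the real definition lives in HAProxy */

/* Exchange the caller's <in> and <out> buffer pointers instead of
 * copying the compressed bytes back, as done above. */
static void swap_buffers(struct buffer **in, struct buffer **out)
{
	struct buffer *tmp = *in;

	*in = *out;
	*out = tmp;
}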
Example #10
/*
 * Flush the data being processed, and write the header and footer of the
 * chunk. Upon success, in and out buffers are swapped to avoid a copy.
 */
int http_compression_buffer_end(struct session *s, struct buffer **in, struct buffer **out, int end)
{
	int to_forward;
	int left;
	struct http_msg *msg = &s->txn.rsp;
	struct buffer *ib = *in, *ob = *out;

#ifdef USE_ZLIB
	int ret;

	/* flush data here */

	if (end)
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_FINISH); /* end of data */
	else
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_SYNC_FLUSH); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */

	if (ob->i > 8) {
		/* more than a chunk size => some data were emitted */
		char *tail = ob->p + ob->i;

		/* write the real size at the beginning of the chunk, no need for wrapping */
		http_emit_chunk_size(ob->p, ob->i - 8, 0);

		/* chunked encoding requires CRLF after data */
		*tail++ = '\r';
		*tail++ = '\n';

		/* At the end of data, we must write the empty chunk 0<CRLF>,
		 * and terminate the trailers section with a last <CRLF>. If
		 * we're forwarding a chunked-encoded response, we'll have a
		 * trailers section after the empty chunk which needs to be
		 * forwarded and which will provide the last CRLF. Otherwise
		 * we write it ourselves.
		 */
		if (msg->msg_state >= HTTP_MSG_TRAILERS) {
			memcpy(tail, "0\r\n", 3);
			tail += 3;
			if (msg->msg_state >= HTTP_MSG_DONE) {
				memcpy(tail, "\r\n", 2);
				tail += 2;
			}
		}
		ob->i = tail - ob->p;
	} else {
		/* no data were sent, cancel the chunk size */
		ob->i = 0;
	}

	to_forward = ob->i;

	/* update input rate */
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_in, msg->next);
		s->fe->fe_counters.comp_in += msg->next;
		s->be->be_counters.comp_in += msg->next;
	} else {
		s->fe->fe_counters.comp_byp += msg->next;
		s->be->be_counters.comp_byp += msg->next;
	}

	/* copy the remaining data into the tmp buffer. */
	b_adv(ib, msg->next);
	msg->next = 0;

	if (ib->i > 0) {
		left = ib->i - bi_contig_data(ib);
		memcpy(bi_end(ob), bi_ptr(ib), bi_contig_data(ib));
		ob->i += bi_contig_data(ib);
		if (left > 0) {
			memcpy(bi_end(ob), ib->data, left);
			ob->i += left;
		}
	}

	/* swap the buffers */
	*in = ob;
	*out = ib;

	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		update_freq_ctr(&global.comp_bps_out, to_forward);
		s->fe->fe_counters.comp_out += to_forward;
		s->be->be_counters.comp_out += to_forward;
	}

	/* forward the new chunk without remaining data */
	b_adv(ob, to_forward);

	return to_forward;
}
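
The trailers logic above decides how much of the terminating sequence to emit locally: the empty chunk "0<CRLF>" always, the final CRLF only when no forwarded trailers will provide it. A self-contained sketch of that decision for the two simple cases (the real code must also track intermediate message states):

#include <string.h>

/* Append the chunked-encoding terminator at <p>: the empty chunk
 * "0" + CRLF and, when no trailers section follows, the final CRLF.
 * Returns the number of bytes written (3 or 5). */
static int emit_last_chunk(char *p, int have_trailers)
{
	memcpy(p, "0\r\n", 3);
	if (have_trailers)
		return 3;       /* the forwarded trailers end with CRLF */
	memcpy(p + 3, "\r\n", 2);
	return 5;
}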
Example #11
/*
 * This is the callback which is called by the connection layer to receive
 * data into the buffer from the connection. It repeatedly calls the
 * transport layer's rcv_buf function.
 */
static void si_conn_recv_cb(struct connection *conn)
{
	struct stream_interface *si = conn->owner;
	struct channel *chn = si->ib;
	int ret, max, cur_read;
	int read_poll = MAX_READ_POLL_LOOPS;

	/* stop immediately on errors. Note that we DON'T want to stop on
	 * POLL_ERR, as the poller might report a write error while there
	 * are still data available in the recv buffer. This typically
	 * happens when we send too large a request to a backend server
	 * which rejects it before reading it all.
	 */
	if (conn->flags & CO_FL_ERROR)
		return;

	/* stop here if we reached the end of data */
	if (conn_data_read0_pending(conn))
		goto out_shutdown_r;

	/* maybe we were called immediately after an asynchronous shutr */
	if (chn->flags & CF_SHUTR)
		return;

	cur_read = 0;

	if ((chn->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !chn->buf->o &&
	    global.tune.idle_timer &&
	    (unsigned short)(now_ms - chn->last_read) >= global.tune.idle_timer) {
		/* The buffer was empty and nothing was transferred for more
		 * than one second. This was caused by a pause and not by
		 * congestion. Reset any streaming mode to reduce latency.
		 */
		chn->xfer_small = 0;
		chn->xfer_large = 0;
		chn->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
	}

	/* First, let's see if we may splice data across the channel without
	 * using a buffer.
	 */
	if (conn->xprt->rcv_pipe &&
	    (chn->pipe || chn->to_forward >= MIN_SPLICE_FORWARD) &&
	    chn->flags & CF_KERN_SPLICING) {
		if (buffer_not_empty(chn->buf)) {
			/* We're embarrassed, there are already data pending in
			 * the buffer and we don't want to have them at two
			 * locations at a time. Let's indicate we need some
			 * place and ask the consumer to hurry.
			 */
			goto abort_splice;
		}

		if (unlikely(chn->pipe == NULL)) {
			if (pipes_used >= global.maxpipes || !(chn->pipe = get_pipe())) {
				chn->flags &= ~CF_KERN_SPLICING;
				goto abort_splice;
			}
		}

		ret = conn->xprt->rcv_pipe(conn, chn->pipe, chn->to_forward);
		if (ret < 0) {
			/* splice not supported on this end, let's disable it */
			chn->flags &= ~CF_KERN_SPLICING;
			goto abort_splice;
		}

		if (ret > 0) {
			if (chn->to_forward != CHN_INFINITE_FORWARD)
				chn->to_forward -= ret;
			chn->total += ret;
			cur_read += ret;
			chn->flags |= CF_READ_PARTIAL;
		}

		if (conn_data_read0_pending(conn))
			goto out_shutdown_r;

		if (conn->flags & CO_FL_ERROR)
			return;

		if (conn->flags & CO_FL_WAIT_ROOM) {
			/* the pipe is full or we have read enough data that it
			 * could soon be full. Let's stop before needing to poll.
			 */
			si->flags |= SI_FL_WAIT_ROOM;
			__conn_data_stop_recv(conn);
		}

		/* splice not possible (anymore), let's fall back to the standard copy */
	}

 abort_splice:
	if (chn->pipe && unlikely(!chn->pipe->data)) {
		put_pipe(chn->pipe);
		chn->pipe = NULL;
	}

	/* Important note: if we're called with POLL_IN|POLL_HUP, it means the read polling
	 * was enabled, which implies that the recv buffer was not full. So we have a guarantee
	 * that if such an event is not handled above in splice, it will be handled here by
	 * recv().
	 */
	while (!(conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_DATA_RD_SH | CO_FL_WAIT_ROOM | CO_FL_HANDSHAKE))) {
		max = bi_avail(chn);

		if (!max) {
			si->flags |= SI_FL_WAIT_ROOM;
			break;
		}

		ret = conn->xprt->rcv_buf(conn, chn->buf, max);
		if (ret <= 0)
			break;

		cur_read += ret;

		/* if we're allowed to directly forward data, we must update ->o */
		if (chn->to_forward && !(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
			unsigned long fwd = ret;
			if (chn->to_forward != CHN_INFINITE_FORWARD) {
				if (fwd > chn->to_forward)
					fwd = chn->to_forward;
				chn->to_forward -= fwd;
			}
			b_adv(chn->buf, fwd);
		}

		chn->flags |= CF_READ_PARTIAL;
		chn->total += ret;

		if (channel_full(chn)) {
			si->flags |= SI_FL_WAIT_ROOM;
			break;
		}

		if ((chn->flags & CF_READ_DONTWAIT) || --read_poll <= 0) {
			si->flags |= SI_FL_WAIT_ROOM;
			__conn_data_stop_recv(conn);
			break;
		}

		/* if too many bytes were missing from last read, it means that
		 * it's pointless trying to read again because the system does
		 * not have them in buffers.
		 */
		if (ret < max) {
			/* if a streamer has read few data, it may be because we
			 * have exhausted system buffers. It's not worth trying
			 * again.
			 */
			if (chn->flags & CF_STREAMER)
				break;

			/* if we read a large block smaller than what we requested,
			 * it's almost certain we'll never get anything more.
			 */
			if (ret >= global.tune.recv_enough)
				break;
		}
	} /* while !flags */

	if (conn->flags & CO_FL_ERROR)
		return;

	if (cur_read) {
		if ((chn->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
		    (cur_read <= chn->buf->size / 2)) {
			chn->xfer_large = 0;
			chn->xfer_small++;
			if (chn->xfer_small >= 3) {
				/* we have read less than half of the buffer in
				 * one pass, and this happened at least 3 times.
				 * This is definitely not a streamer.
				 */
				chn->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
			}
			else if (chn->xfer_small >= 2) {
				/* if the buffer has been at least half full twice,
				 * we receive faster than we send, so at least it
				 * is not a "fast streamer".
				 */
				chn->flags &= ~CF_STREAMER_FAST;
			}
		}
		else if (!(chn->flags & CF_STREAMER_FAST) &&
			 (cur_read >= chn->buf->size - global.tune.maxrewrite)) {
			/* we read a full buffer at once */
			chn->xfer_small = 0;
			chn->xfer_large++;
			if (chn->xfer_large >= 3) {
				/* we call this buffer a fast streamer if it manages
				 * to be filled in one call 3 consecutive times.
				 */
				chn->flags |= (CF_STREAMER | CF_STREAMER_FAST);
			}
		}
		else {
			chn->xfer_small = 0;
			chn->xfer_large = 0;
		}
		chn->last_read = now_ms;
	}

	if (conn_data_read0_pending(conn))
		/* connection closed */
		goto out_shutdown_r;

	return;

 out_shutdown_r:
	/* we received a shutdown */
	chn->flags |= CF_READ_NULL;
	if (chn->flags & CF_AUTO_CLOSE)
		channel_shutw_now(chn);
	stream_sock_read0(si);
	conn_data_read0(conn);
	return;
}
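
The receive loop bounds its iterations with read_poll and gives up early when a short read suggests the kernel buffers are drained. A self-contained sketch of that loop shape over a plain non-blocking socket (MAX_READ_LOOPS is an assumed budget standing in for MAX_READ_POLL_LOOPS):

#include <errno.h>
#include <sys/socket.h>

#define MAX_READ_LOOPS 4

/* Drain non-blocking socket <fd> into <buf>[<size>] with a bounded
 * number of recv() calls, stopping early on a short read. Returns the
 * number of bytes read, or -1 on a real error. */
static int bounded_read(int fd, char *buf, int size)
{
	int total = 0, loops = MAX_READ_LOOPS;

	while (total < size && loops-- > 0) {
		int want = size - total;
		int ret = recv(fd, buf + total, want, 0);

		if (ret < 0)
			return (errno == EAGAIN || errno == EWOULDBLOCK) ? total : -1;
		if (ret == 0)
			break;          /* peer closed the connection */
		total += ret;
		if (ret < want)
			break;          /* short read: kernel buffers drained */
	}
	return total;
}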