/*
 * Flush data in process, and write the header and footer of the chunk. Upon
 * success, in and out buffers are swapped to avoid a copy.
 *
 * <ib> (*in) holds the not-yet-compressed input side, <ob> (*out) holds the
 * compressed output with the first 8 bytes reserved for the hex chunk-size
 * header written by http_emit_chunk_size() below. <end> non-zero means this
 * is the final flush (Z_FINISH instead of Z_SYNC_FLUSH).
 *
 * Returns the number of bytes scheduled for forwarding, or -1 if the
 * compressor's flush failed. On success *in/*out point at swapped buffers.
 */
int http_compression_buffer_end(struct session *s, struct buffer **in, struct buffer **out, int end)
{
	int to_forward, forwarded;
	int left;
	struct http_msg *msg = &s->txn.rsp;
	struct buffer *ib = *in, *ob = *out;

#ifdef USE_ZLIB
	int ret;

	/* flush data here */

	if (end)
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_FINISH); /* end of data */
	else
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_SYNC_FLUSH); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */

	if (ob->i > 8) {
		/* more than a chunk size => some data were emitted */
		char *tail = ob->p + ob->i;

		/* write real size at the beginning of the chunk, no need of wrapping.
		 * The 8 reserved bytes at ob->p receive the hex size of the payload
		 * that follows them (ob->i - 8 bytes).
		 */
		http_emit_chunk_size(ob->p, ob->i - 8, 0);

		/* chunked encoding requires CRLF after data */
		*tail++ = '\r';
		*tail++ = '\n';

		if (!(msg->flags & HTTP_MSGF_TE_CHNK) && msg->chunk_len == 0) {
			/* End of data, 0<CRLF><CRLF> is needed but we're not
			 * in chunked mode on input so we must add it ourselves.
			 */
			memcpy(tail, "0\r\n\r\n", 5);
			tail += 5;
		}
		ob->i = tail - ob->p;
	} else {
		/* no data were sent, cancel the chunk size */
		ob->i = 0;
	}

	to_forward = ob->i;

	/* update input rate.
	 * NOTE(review): the consumed-input estimate <ib->o - ob->o> differs from
	 * the other variant of this function which accounts <msg->next> — confirm
	 * which accounting is correct for the byte counters below.
	 */
	forwarded = ib->o - ob->o;
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		/* compression was active: count compressed-in bytes and feed the
		 * global input-rate counter.
		 */
		update_freq_ctr(&global.comp_bps_in, forwarded);
		s->fe->fe_counters.comp_in += forwarded;
		s->be->be_counters.comp_in += forwarded;
	} else {
		/* compression bypassed for this data */
		s->fe->fe_counters.comp_byp += forwarded;
		s->be->be_counters.comp_byp += forwarded;
	}

	/* copy the remaining data in the tmp buffer; the input may wrap, so
	 * copy the contiguous tail first, then the wrapped head from ib->data.
	 */
	if (ib->i > 0) {
		left = ib->i - bi_contig_data(ib);
		memcpy(bi_end(ob), bi_ptr(ib), bi_contig_data(ib));
		ob->i += bi_contig_data(ib);
		if (left > 0) {
			memcpy(bi_end(ob), ib->data, left);
			ob->i += left;
		}
	}

	/* swap the buffers */
	*in = ob;
	*out = ib;

	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		/* account output rate only when compression was really applied */
		update_freq_ctr(&global.comp_bps_out, to_forward);
		s->fe->fe_counters.comp_out += to_forward;
		s->be->be_counters.comp_out += to_forward;
	}

	/* forward the new chunk without remaining data */
	b_adv(ob, to_forward);

	return to_forward;
}
/* This function is called on a read event from a listening socket, corresponding * to an accept. It tries to accept as many connections as possible, and for each * calls the listener's accept handler (generally the frontend's accept handler). */ int stream_sock_accept(int fd) { struct listener *l = fdtab[fd].owner; struct proxy *p = l->frontend; int max_accept = global.tune.maxaccept; int cfd; int ret; if (unlikely(l->nbconn >= l->maxconn)) { listener_full(l); return 0; } if (global.cps_lim && !(l->options & LI_O_UNLIMITED)) { int max = freq_ctr_remain(&global.conn_per_sec, global.cps_lim, 0); if (unlikely(!max)) { /* frontend accept rate limit was reached */ limit_listener(l, &global_listener_queue); task_schedule(global_listener_queue_task, tick_add(now_ms, next_event_delay(&global.conn_per_sec, global.cps_lim, 0))); return 0; } if (max_accept > max) max_accept = max; } if (p && p->fe_sps_lim) { int max = freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0); if (unlikely(!max)) { /* frontend accept rate limit was reached */ limit_listener(l, &p->listener_queue); task_schedule(p->task, tick_add(now_ms, next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0))); return 0; } if (max_accept > max) max_accept = max; } /* Note: if we fail to allocate a connection because of configured * limits, we'll schedule a new attempt worst 1 second later in the * worst case. If we fail due to system limits or temporary resource * shortage, we try again 100ms later in the worst case. 
*/ while (max_accept--) { struct sockaddr_storage addr; socklen_t laddr = sizeof(addr); if (unlikely(actconn >= global.maxconn) && !(l->options & LI_O_UNLIMITED)) { limit_listener(l, &global_listener_queue); task_schedule(global_listener_queue_task, tick_add(now_ms, 1000)); /* try again in 1 second */ return 0; } if (unlikely(p && p->feconn >= p->maxconn)) { limit_listener(l, &p->listener_queue); return 0; } cfd = accept(fd, (struct sockaddr *)&addr, &laddr); if (unlikely(cfd == -1)) { switch (errno) { case EAGAIN: case EINTR: case ECONNABORTED: return 0; /* nothing more to accept */ case ENFILE: if (p) send_log(p, LOG_EMERG, "Proxy %s reached system FD limit at %d. Please check system tunables.\n", p->id, maxfd); limit_listener(l, &global_listener_queue); task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */ return 0; case EMFILE: if (p) send_log(p, LOG_EMERG, "Proxy %s reached process FD limit at %d. Please check 'ulimit-n' and restart.\n", p->id, maxfd); limit_listener(l, &global_listener_queue); task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */ return 0; case ENOBUFS: case ENOMEM: if (p) send_log(p, LOG_EMERG, "Proxy %s reached system memory limit at %d sockets. Please check system tunables.\n", p->id, maxfd); limit_listener(l, &global_listener_queue); task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */ return 0; default: return 0; } } if (unlikely(cfd >= global.maxsock)) { send_log(p, LOG_EMERG, "Proxy %s reached the configured maximum connection limit. 
Please check the global 'maxconn' value.\n", p->id); close(cfd); limit_listener(l, &global_listener_queue); task_schedule(global_listener_queue_task, tick_add(now_ms, 1000)); /* try again in 1 second */ return 0; } /* increase the per-process number of cumulated connections */ if (!(l->options & LI_O_UNLIMITED)) { update_freq_ctr(&global.conn_per_sec, 1); if (global.conn_per_sec.curr_ctr > global.cps_max) global.cps_max = global.conn_per_sec.curr_ctr; actconn++; } jobs++; totalconn++; l->nbconn++; if (l->counters) { if (l->nbconn > l->counters->conn_max) l->counters->conn_max = l->nbconn; } ret = l->accept(l, cfd, &addr); if (unlikely(ret <= 0)) { /* The connection was closed by session_accept(). Either * we just have to ignore it (ret == 0) or it's a critical * error due to a resource shortage, and we must stop the * listener (ret < 0). */ if (!(l->options & LI_O_UNLIMITED)) actconn--; jobs--; l->nbconn--; if (ret == 0) /* successful termination */ continue; limit_listener(l, &global_listener_queue); task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */ return 0; } if (l->nbconn >= l->maxconn) { listener_full(l); return 0; } } /* end of while (p->feconn < p->maxconn) */ return 0; }
/*
 * Flush data in process, and write the header and footer of the chunk. Upon
 * success, in and out buffers are swapped to avoid a copy.
 *
 * <ib> (*in) holds the not-yet-compressed input side, <ob> (*out) holds the
 * compressed output with the first 8 bytes reserved for the hex chunk-size
 * header written by http_emit_chunk_size() below. <end> non-zero means this
 * is the final flush (Z_FINISH instead of Z_SYNC_FLUSH). <msg->next> is the
 * number of input bytes consumed by the compressor for this round.
 *
 * Returns the number of bytes scheduled for forwarding, or -1 if the
 * compressor's flush failed. On success *in/*out point at swapped buffers.
 */
int http_compression_buffer_end(struct session *s, struct buffer **in, struct buffer **out, int end)
{
	int to_forward;
	int left;
	struct http_msg *msg = &s->txn.rsp;
	struct buffer *ib = *in, *ob = *out;

#ifdef USE_ZLIB
	int ret;

	/* flush data here */
	if (end)
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_FINISH); /* end of data */
	else
		ret = s->comp_algo->flush(s->comp_ctx, ob, Z_SYNC_FLUSH); /* end of buffer */

	if (ret < 0)
		return -1; /* flush failed */

#endif /* USE_ZLIB */

	if (ob->i > 8) {
		/* more than a chunk size => some data were emitted */
		char *tail = ob->p + ob->i;

		/* write real size at the beginning of the chunk, no need of wrapping.
		 * The 8 reserved bytes at ob->p receive the hex size of the payload
		 * that follows them (ob->i - 8 bytes).
		 */
		http_emit_chunk_size(ob->p, ob->i - 8, 0);

		/* chunked encoding requires CRLF after data */
		*tail++ = '\r';
		*tail++ = '\n';

		/* At the end of data, we must write the empty chunk 0<CRLF>,
		 * and terminate the trailers section with a last <CRLF>. If
		 * we're forwarding a chunked-encoded response, we'll have a
		 * trailers section after the empty chunk which needs to be
		 * forwarded and which will provide the last CRLF. Otherwise
		 * we write it ourselves.
		 */
		if (msg->msg_state >= HTTP_MSG_TRAILERS) {
			memcpy(tail, "0\r\n", 3);
			tail += 3;
			if (msg->msg_state >= HTTP_MSG_DONE) {
				memcpy(tail, "\r\n", 2);
				tail += 2;
			}
		}
		ob->i = tail - ob->p;
	} else {
		/* no data were sent, cancel the chunk size */
		ob->i = 0;
	}

	to_forward = ob->i;

	/* update input rate: <msg->next> bytes of input were consumed */
	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		/* compression was active: count compressed-in bytes and feed the
		 * global input-rate counter.
		 */
		update_freq_ctr(&global.comp_bps_in, msg->next);
		s->fe->fe_counters.comp_in += msg->next;
		s->be->be_counters.comp_in += msg->next;
	} else {
		/* compression bypassed for this data */
		s->fe->fe_counters.comp_byp += msg->next;
		s->be->be_counters.comp_byp += msg->next;
	}

	/* copy the remaining data in the tmp buffer: first skip the input bytes
	 * already consumed above, then copy what's left. The input may wrap, so
	 * copy the contiguous tail first, then the wrapped head from ib->data.
	 */
	b_adv(ib, msg->next);
	msg->next = 0;

	if (ib->i > 0) {
		left = ib->i - bi_contig_data(ib);
		memcpy(bi_end(ob), bi_ptr(ib), bi_contig_data(ib));
		ob->i += bi_contig_data(ib);
		if (left > 0) {
			memcpy(bi_end(ob), ib->data, left);
			ob->i += left;
		}
	}

	/* swap the buffers */
	*in = ob;
	*out = ib;

	if (s->comp_ctx && s->comp_ctx->cur_lvl > 0) {
		/* account output rate only when compression was really applied */
		update_freq_ctr(&global.comp_bps_out, to_forward);
		s->fe->fe_counters.comp_out += to_forward;
		s->be->be_counters.comp_out += to_forward;
	}

	/* forward the new chunk without remaining data */
	b_adv(ob, to_forward);

	return to_forward;
}