Example #1
/* This function is called on a read event from a listening socket, corresponding
 * to an accept. It tries to accept as many connections as possible, and for each
 * calls the listener's accept handler (generally the frontend's accept handler).
 */
int stream_sock_accept(int fd)
{
	struct listener *l = fdtab[fd].owner;
	struct proxy *p = l->frontend;
	int max_accept = global.tune.maxaccept;
	int cfd;
	int ret;

	if (unlikely(l->nbconn >= l->maxconn)) {
		listener_full(l);
		return 0;
	}

	if (global.cps_lim && !(l->options & LI_O_UNLIMITED)) {
		int max = freq_ctr_remain(&global.conn_per_sec, global.cps_lim, 0);

		if (unlikely(!max)) {
			/* frontend accept rate limit was reached */
			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, next_event_delay(&global.conn_per_sec, global.cps_lim, 0)));
			return 0;
		}

		if (max_accept > max)
			max_accept = max;
	}

	if (p && p->fe_sps_lim) {
		int max = freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0);

		if (unlikely(!max)) {
			/* frontend accept rate limit was reached */
			limit_listener(l, &p->listener_queue);
			task_schedule(p->task, tick_add(now_ms, next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0)));
			return 0;
		}

		if (max_accept > max)
			max_accept = max;
	}

	/* Note: if we fail to allocate a connection because of configured
	 * limits, we'll schedule a new attempt at worst 1 second later. If
	 * we fail due to system limits or a temporary resource shortage, we
	 * try again 100ms later in the worst case.
	 */
	while (max_accept--) {
		struct sockaddr_storage addr;
		socklen_t laddr = sizeof(addr);

		if (unlikely(actconn >= global.maxconn) && !(l->options & LI_O_UNLIMITED)) {
			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, 1000)); /* try again in 1 second */
			return 0;
		}

		if (unlikely(p && p->feconn >= p->maxconn)) {
			limit_listener(l, &p->listener_queue);
			return 0;
		}

		cfd = accept(fd, (struct sockaddr *)&addr, &laddr);
		if (unlikely(cfd == -1)) {
			switch (errno) {
			case EAGAIN:
			case EINTR:
			case ECONNABORTED:
				return 0;	    /* nothing more to accept */
			case ENFILE:
				if (p)
					send_log(p, LOG_EMERG,
						 "Proxy %s reached system FD limit at %d. Please check system tunables.\n",
						 p->id, maxfd);
				limit_listener(l, &global_listener_queue);
				task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
				return 0;
			case EMFILE:
				if (p)
					send_log(p, LOG_EMERG,
						 "Proxy %s reached process FD limit at %d. Please check 'ulimit-n' and restart.\n",
						 p->id, maxfd);
				limit_listener(l, &global_listener_queue);
				task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
				return 0;
			case ENOBUFS:
			case ENOMEM:
				if (p)
					send_log(p, LOG_EMERG,
						 "Proxy %s reached system memory limit at %d sockets. Please check system tunables.\n",
						 p->id, maxfd);
				limit_listener(l, &global_listener_queue);
				task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
				return 0;
			default:
				return 0;
			}
		}

		if (unlikely(cfd >= global.maxsock)) {
			if (p)
				send_log(p, LOG_EMERG,
					 "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
					 p->id);
			close(cfd);
			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, 1000)); /* try again in 1 second */
			return 0;
		}

		/* increase the per-process number of cumulative connections */
		if (!(l->options & LI_O_UNLIMITED)) {
			update_freq_ctr(&global.conn_per_sec, 1);
			if (global.conn_per_sec.curr_ctr > global.cps_max)
				global.cps_max = global.conn_per_sec.curr_ctr;
			actconn++;
		}

		jobs++;
		totalconn++;
		l->nbconn++;

		if (l->counters) {
			if (l->nbconn > l->counters->conn_max)
				l->counters->conn_max = l->nbconn;
		}

		ret = l->accept(l, cfd, &addr);
		if (unlikely(ret <= 0)) {
			/* The connection was closed by session_accept(). Either
			 * we just have to ignore it (ret == 0) or it's a critical
			 * error due to a resource shortage, and we must stop the
			 * listener (ret < 0).
			 */
			if (!(l->options & LI_O_UNLIMITED))
				actconn--;
			jobs--;
			l->nbconn--;
			if (ret == 0) /* successful termination */
				continue;

			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
			return 0;
		}

		if (l->nbconn >= l->maxconn) {
			listener_full(l);
			return 0;
		}

	} /* end of while (max_accept--) */

	return 0;
}
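
The bounded-batch pattern above (accept at most max_accept connections per readiness event, and back off on resource errors) generalizes beyond HAProxy. Below is a minimal, self-contained sketch of it in plain POSIX C; the demo port, the MAX_ACCEPT value and the handle_client() stub are illustrative assumptions, not HAProxy code.

#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define MAX_ACCEPT 64 /* cap the work done per readiness event */

static void handle_client(int cfd)
{
	/* stub: a real server would register cfd with its event loop */
	close(cfd);
}

static void accept_batch(int lfd)
{
	int max_accept = MAX_ACCEPT;

	while (max_accept--) {
		struct sockaddr_storage addr;
		socklen_t alen = sizeof(addr);
		int cfd = accept(lfd, (struct sockaddr *)&addr, &alen);

		if (cfd == -1) {
			switch (errno) {
			case EAGAIN:        /* nothing left to accept */
			case EINTR:
			case ECONNABORTED:
				return;
			case ENFILE:        /* FD or memory limits: a real  */
			case EMFILE:        /* server would pause the       */
			case ENOBUFS:       /* listener and retry later, as */
			case ENOMEM:        /* stream_sock_accept() does    */
			default:
				return;
			}
		}
		handle_client(cfd);
	}
	/* batch exhausted: return so other events get a turn; the socket
	 * stays readable and the event loop will call us again */
}

int main(void)
{
	struct sockaddr_in sin;
	int one = 1;
	int lfd = socket(AF_INET, SOCK_STREAM, 0);

	if (lfd < 0) {
		perror("socket");
		return 1;
	}
	fcntl(lfd, F_SETFL, fcntl(lfd, F_GETFL) | O_NONBLOCK);
	setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_port = htons(8080); /* arbitrary demo port */

	if (bind(lfd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(lfd, 128) < 0) {
		perror("bind/listen");
		close(lfd);
		return 1;
	}
	accept_batch(lfd); /* in a real server, run on every read event */
	close(lfd);
	return 0;
}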
Example #2
/* This function kills an existing embryonic session. It stops the connection's
 * transport layer, releases assigned resources, resumes the listener if it was
 * disabled and finally kills the file descriptor. This function requires that
 * sess->origin points to the incoming connection.
 */
static void session_kill_embryonic(struct session *sess)
{
	int level = LOG_INFO;
	struct connection *conn = __objt_conn(sess->origin);
	struct task *task = conn->owner;
	unsigned int log = sess->fe->to_log;
	const char *err_msg;

	if (sess->fe->options2 & PR_O2_LOGERRORS)
		level = LOG_ERR;

	if (log && (sess->fe->options & PR_O_NULLNOLOG)) {
		/* with "option dontlognull", we don't log connections with no transfer */
		if (!conn->err_code ||
		    conn->err_code == CO_ER_PRX_EMPTY || conn->err_code == CO_ER_PRX_ABORT ||
		    conn->err_code == CO_ER_SSL_EMPTY || conn->err_code == CO_ER_SSL_ABORT)
			log = 0;
	}

	if (log) {
		if (!conn->err_code && (task->state & TASK_WOKEN_TIMER)) {
			if (conn->flags & CO_FL_ACCEPT_PROXY)
				conn->err_code = CO_ER_PRX_TIMEOUT;
			else if (conn->flags & CO_FL_SSL_WAIT_HS)
				conn->err_code = CO_ER_SSL_TIMEOUT;
		}

		session_prepare_log_prefix(sess);
		err_msg = conn_err_code_str(conn);
		if (err_msg)
			send_log(sess->fe, level, "%s: %s\n", trash.str, err_msg);
		else
			send_log(sess->fe, level, "%s: unknown connection error (code=%d flags=%08x)\n",
				 trash.str, conn->err_code, conn->flags);
	}

	/* kill the connection now */
	conn_force_close(conn);
	conn_free(conn);

	sess->fe->feconn--;

	if (!(sess->listener->options & LI_O_UNLIMITED))
		actconn--;
	jobs--;
	sess->listener->nbconn--;
	if (sess->listener->state == LI_FULL)
		resume_listener(sess->listener);

	/* Dequeues all of the listeners waiting for a resource */
	if (!LIST_ISEMPTY(&global_listener_queue))
		dequeue_all_listeners(&global_listener_queue);

	if (!LIST_ISEMPTY(&sess->fe->listener_queue) &&
	    (!sess->fe->fe_sps_lim || freq_ctr_remain(&sess->fe->fe_sess_per_sec, sess->fe->fe_sps_lim, 0) > 0))
		dequeue_all_listeners(&sess->fe->listener_queue);

	task_delete(task);
	task_free(task);
	session_free(sess);
}
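
The dequeue at the end is gated on freq_ctr_remain(), the same per-second session-rate counter consulted by stream_sock_accept() above. The sketch below illustrates the idea with a deliberately simplified fixed one-second window (HAProxy's real freq_ctr weights two adjacent windows for a smoother estimate); the simple_freq_ctr type and the sfc_*() helpers are invented for this illustration.

#include <stdio.h>
#include <time.h>

struct simple_freq_ctr {
	time_t window;        /* start of the current one-second window */
	unsigned int curr;    /* events counted within that window */
};

/* count one event in the current window */
static void sfc_update(struct simple_freq_ctr *c)
{
	time_t now = time(NULL);

	if (now != c->window) {
		c->window = now;  /* new second: reset the counter */
		c->curr = 0;
	}
	c->curr++;
}

/* how many more events fit under <limit> this second (0 = limit reached) */
static unsigned int sfc_remain(const struct simple_freq_ctr *c, unsigned int limit)
{
	if (time(NULL) != c->window)
		return limit;     /* stale window: full budget again */
	return c->curr >= limit ? 0 : limit - c->curr;
}

int main(void)
{
	struct simple_freq_ctr sess_per_sec = { 0, 0 };
	unsigned int limit = 3, i;

	for (i = 0; i < 5; i++) {
		if (sfc_remain(&sess_per_sec, limit) == 0) {
			printf("event %u: rate limit reached, would queue the listener\n", i);
			continue;
		}
		sfc_update(&sess_per_sec);
		printf("event %u: accepted (%u/%u used this second)\n",
		       i, sess_per_sec.curr, limit);
	}
	return 0;
}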
Example #3
/* This function is called on a read event from a listening socket, corresponding
 * to an accept. It tries to accept as many connections as possible, and for each
 * calls the listener's accept handler (generally the frontend's accept handler).
 */
int stream_sock_accept(int fd)
{
	struct listener *l = fdtab[fd].owner;
	struct proxy *p = l->frontend;
	int max_accept = global.tune.maxaccept;
	int cfd;
	int ret;

	if (unlikely(l->nbconn >= l->maxconn)) {
		EV_FD_CLR(l->fd, DIR_RD);
		l->state = LI_FULL;
		return 0;
	}

	if (p && p->fe_sps_lim) {
		int max = freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0);
		if (max_accept > max)
			max_accept = max;
	}

	while ((!p || p->feconn < p->maxconn) && actconn < global.maxconn && max_accept--) {
		struct sockaddr_storage addr;
		socklen_t laddr = sizeof(addr);

		cfd = accept(fd, (struct sockaddr *)&addr, &laddr);
		if (unlikely(cfd == -1)) {
			switch (errno) {
			case EAGAIN:
			case EINTR:
			case ECONNABORTED:
				return 0;	    /* nothing more to accept */
			case ENFILE:
				if (p)
					send_log(p, LOG_EMERG,
						 "Proxy %s reached system FD limit at %d. Please check system tunables.\n",
						 p->id, maxfd);
				return 0;
			case EMFILE:
				if (p)
					send_log(p, LOG_EMERG,
						 "Proxy %s reached process FD limit at %d. Please check 'ulimit-n' and restart.\n",
						 p->id, maxfd);
				return 0;
			case ENOBUFS:
			case ENOMEM:
				if (p)
					send_log(p, LOG_EMERG,
						 "Proxy %s reached system memory limit at %d sockets. Please check system tunables.\n",
						 p->id, maxfd);
				return 0;
			default:
				return 0;
			}
		}

		if (unlikely(cfd >= global.maxsock)) {
			if (p)
				send_log(p, LOG_EMERG,
					 "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
					 p->id);
			close(cfd);
			return 0;
		}

		jobs++;
		actconn++;
		totalconn++;
		l->nbconn++;

		if (l->counters) {
			if (l->nbconn > l->counters->conn_max)
				l->counters->conn_max = l->nbconn;
		}

		ret = l->accept(l, cfd, &addr);
		if (unlikely(ret <= 0)) {
			/* The connection was closed by session_accept(). Either
			 * we just have to ignore it (ret == 0) or it's a critical
			 * error due to a resource shortage, and we must stop the
			 * listener (ret < 0).
			 */
			jobs--;
			actconn--;
			l->nbconn--;
			if (ret == 0) /* successful termination */
				continue;

			if (p) {
				disable_listener(l);
				p->state = PR_STIDLE;
			}
			return 0;
		}

		if (l->nbconn >= l->maxconn) {
			EV_FD_CLR(l->fd, DIR_RD);
			l->state = LI_FULL;
		}
	} /* end of while (p->feconn < p->maxconn) */
	return 0;
}
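
Neither version of stream_sock_accept() is called directly: the event loop looks up fdtab[fd].owner and invokes the read callback registered for the listening socket once it becomes readable. The sketch below shows that dispatch pattern with poll(); the fd_entry layout and the demo_accept() callback are simplified stand-ins for HAProxy's actual fdtab, used here only to demonstrate the wiring.

#include <poll.h>
#include <stdio.h>

#define MAXFD 16

struct fd_entry {
	void *owner;            /* e.g. the struct listener bound to this fd */
	int (*read_cb)(int fd); /* invoked when the fd reports POLLIN */
};

static struct fd_entry fdtab[MAXFD];

/* stand-in for stream_sock_accept(): just reports the event */
static int demo_accept(int fd)
{
	printf("fd %d readable: would accept a batch of connections\n", fd);
	return 0;
}

int main(void)
{
	struct pollfd pfd;

	/* register fd 0 (stdin, for demonstration) with its read callback */
	fdtab[0].owner = NULL;
	fdtab[0].read_cb = demo_accept;

	pfd.fd = 0;
	pfd.events = POLLIN;

	/* one event-loop iteration: wait up to 5s, then dispatch by fd */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
		fdtab[pfd.fd].read_cb(pfd.fd);
	return 0;
}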