/*
 * This is the proxy management task. It enables proxies when there are enough
 * free sessions, or stops them when the table is full. It is designed to be
 * called as a task which is woken up upon stopping or when rate limiting must
 * be enforced.
 */
struct task *manage_proxy(struct task *t)
{
	struct proxy *p = t->context;
	int next = TICK_ETERNITY;
	unsigned int wait;

	/* We should periodically try to enable listeners waiting for a
	 * global resource here.
	 */

	/* first, let's check if we need to stop the proxy */
	if (unlikely(stopping && p->state != PR_STSTOPPED)) {
		int remain;

		remain = tick_remain(now_ms, p->stop_time);
		if (remain == 0) {
			Warning("Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
				p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
			send_log(p, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
				 p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
			stop_proxy(p);
			/* try to free more memory */
			pool_gc2();
		}
		else {
			next = tick_first(next, p->stop_time);
		}
	}

	/* the rest below is just for frontends */
	if (!(p->cap & PR_CAP_FE))
		goto out;

	/* check the various reasons we may find to block the frontend */
	if (unlikely(p->feconn >= p->maxconn)) {
		if (p->state == PR_STREADY)
			p->state = PR_STFULL;
		goto out;
	}

	/* OK we have no reason to block, so let's unblock if we were blocking */
	if (p->state == PR_STFULL)
		p->state = PR_STREADY;

	if (p->fe_sps_lim &&
	    (wait = next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0))) {
		/* we're blocking because a limit was reached on the number of
		 * requests/s on the frontend. We want to re-check ASAP, which
		 * means in 1 ms before estimated expiration date, because the
		 * timer will have settled down.
		 */
		next = tick_first(next, tick_add(now_ms, wait));
		goto out;
	}

	/* The proxy is not limited so we can re-enable any waiting listener */
	if (!LIST_ISEMPTY(&p->listener_queue))
		dequeue_all_listeners(&p->listener_queue);

 out:
	t->expire = next;
	task_queue(t);
	return t;
}
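/*
 * Illustrative sketch only: a minimal single-second frequency counter in
 * plain C, approximating the role next_event_delay() plays above when
 * fe_sps_lim caps the frontend session rate. All sketch_* / sk_* names
 * below are hypothetical and not part of HAProxy; the real freq_ctr code
 * smooths the rate over a sliding two-second window rather than resetting
 * at each second boundary as this toy version does.
 */
#include <stdint.h>

struct sketch_freq_ctr {
	uint32_t curr_sec;   /* start of the current one-second period */
	uint32_t curr_ctr;   /* events counted in the current period */
};

/* reset the counter if we have moved into a new one-second period */
static void sk_roll(struct sketch_freq_ctr *ctr, uint32_t now_sec)
{
	if (ctr->curr_sec != now_sec) {
		ctr->curr_sec = now_sec;
		ctr->curr_ctr = 0;
	}
}

/* account for one event occurring at date <now_ms> (in milliseconds) */
static void sketch_count_event(struct sketch_freq_ctr *ctr, uint32_t now_ms)
{
	sk_roll(ctr, now_ms / 1000);
	ctr->curr_ctr++;
}

/* Return the delay in ms to wait before another event may pass without
 * exceeding <limit> events per second, or 0 if one may pass right now.
 * A non-zero return is what makes the caller above requeue itself instead
 * of accepting more sessions.
 */
static unsigned int sketch_next_event_delay(struct sketch_freq_ctr *ctr,
                                            unsigned int limit, uint32_t now_ms)
{
	uint32_t now_sec = now_ms / 1000;

	sk_roll(ctr, now_sec);
	if (ctr->curr_ctr < limit)
		return 0;                           /* under the limit */
	/* limit reached: wait until the next period begins */
	return (now_sec + 1) * 1000 - now_ms;
}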
/*
 * This function enables proxies when there are enough free sessions,
 * or stops them when the table is full. It is designed to be called from the
 * select_loop(). It adjusts the date of the next expiration event during stop
 * time if appropriate.
 */
void maintain_proxies(int *next)
{
	struct proxy *p;
	struct listener *l;
	unsigned int wait;

	p = proxy;

	/* if there are enough free sessions, we'll activate proxies */
	if (actconn < global.maxconn) {
		for (; p; p = p->next) {
			/* check the various reasons we may find to block the frontend */
			if (p->feconn >= p->maxconn)
				goto do_block;

			if (p->fe_sps_lim &&
			    (wait = next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 1))) {
				/* we're blocking because a limit was reached on the number of
				 * requests/s on the frontend. We want to re-check ASAP, which
				 * means in 1 ms before estimated expiration date, because the
				 * timer will have settled down. Note that we may already be in
				 * IDLE state here.
				 */
				*next = tick_first(*next, tick_add(now_ms, wait));
				goto do_block;
			}

			/* OK we have no reason to block, so let's unblock if we were blocking */
			if (p->state == PR_STIDLE) {
				for (l = p->listen; l != NULL; l = l->next)
					enable_listener(l);
				p->state = PR_STRUN;
			}
			continue;

		do_block:
			if (p->state == PR_STRUN) {
				for (l = p->listen; l != NULL; l = l->next)
					disable_listener(l);
				p->state = PR_STIDLE;
			}
		}
	}
	else {  /* block all proxies */
		while (p) {
			if (p->state == PR_STRUN) {
				for (l = p->listen; l != NULL; l = l->next)
					disable_listener(l);
				p->state = PR_STIDLE;
			}
			p = p->next;
		}
	}

	if (stopping) {
		p = proxy;
		while (p) {
			if (p->state != PR_STSTOPPED) {
				int t;

				t = tick_remain(now_ms, p->stop_time);
				if (t == 0) {
					Warning("Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
						p->id, p->cum_feconn, p->cum_beconn);
					send_log(p, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
						 p->id, p->cum_feconn, p->cum_beconn);
					stop_proxy(p);
					/* try to free more memory */
					pool_gc2();
				}
				else {
					*next = tick_first(*next, p->stop_time);
				}
			}
			p = p->next;
		}
	}
	return;
}
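/*
 * Illustrative sketch only: both functions above lean on HAProxy's tick
 * helpers, where a millisecond date of 0 is reserved to mean "eternity"
 * (no expiration). The toy sk_* versions below are hypothetical
 * re-implementations written for readability, not HAProxy's actual
 * tick_first()/tick_remain() code; they show why the stop_time handling
 * can mix "no date" and real dates safely.
 */
#define SK_ETERNITY 0

/* return the earliest of two tick dates, treating SK_ETERNITY as "never" */
static int sk_tick_first(int t1, int t2)
{
	if (t1 == SK_ETERNITY)
		return t2;
	if (t2 == SK_ETERNITY)
		return t1;
	return ((t2 - t1) > 0) ? t1 : t2;   /* signed diff survives wrapping */
}

/* return the number of ms left from <now> to <exp>, or 0 if already expired */
static int sk_tick_remain(int now, int exp)
{
	if (exp == SK_ETERNITY || (exp - now) <= 0)
		return 0;
	return exp - now;
}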
/*
 * Linux epoll() poller
 */
REGPRM2 static void _do_poll(struct poller *p, int exp)
{
	int status;
	int fd;
	int count;
	int updt_idx;
	int wait_time;
	int old_fd;

	/* first, scan the update list to find polling changes */
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];

		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop++;
			continue;
		}
		_update_fd(fd);
	}
	fd_nbupdt = 0;

	/* Scan the global update list */
	for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd - 4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit)
			done_update_polling(fd);
		else
			continue;
		if (!fdtab[fd].owner)
			continue;
		_update_fd(fd);
	}

	thread_harmless_now();

	/* compute the epoll_wait() timeout */
	if (!exp)
		wait_time = MAX_DELAY_MS;
	else if (tick_is_expired(exp, now_ms)) {
		activity[tid].poll_exp++;
		wait_time = 0;
	}
	else {
		wait_time = TICKS_TO_MS(tick_remain(now_ms, exp)) + 1;
		if (wait_time > MAX_DELAY_MS)
			wait_time = MAX_DELAY_MS;
	}

	/* now let's wait for polled events */
	gettimeofday(&before_poll, NULL);
	status = epoll_wait(epoll_fd[tid], epoll_events,
			    global.tune.maxpollevents, wait_time);
	tv_update_date(wait_time, status);
	measure_idle();

	thread_harmless_end();

	/* process polled events */
	for (count = 0; count < status; count++) {
		unsigned int n;
		unsigned int e = epoll_events[count].events;
		fd = epoll_events[count].data.fd;

		if (!fdtab[fd].owner) {
			activity[tid].poll_dead++;
			continue;
		}

		if (!(fdtab[fd].thread_mask & tid_bit)) {
			/* FD has been migrated */
			activity[tid].poll_skip++;
			epoll_ctl(epoll_fd[tid], EPOLL_CTL_DEL, fd, &ev);
			HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
			continue;
		}

		/* it looks complicated but gcc can optimize it away when constants
		 * have same values... In fact it depends on gcc :-(
		 */
		if (EPOLLIN == FD_POLL_IN && EPOLLOUT == FD_POLL_OUT &&
		    EPOLLPRI == FD_POLL_PRI && EPOLLERR == FD_POLL_ERR &&
		    EPOLLHUP == FD_POLL_HUP) {
			n = e & (EPOLLIN|EPOLLOUT|EPOLLPRI|EPOLLERR|EPOLLHUP);
		}
		else {
			n =	((e & EPOLLIN ) ? FD_POLL_IN  : 0) |
				((e & EPOLLPRI) ? FD_POLL_PRI : 0) |
				((e & EPOLLOUT) ? FD_POLL_OUT : 0) |
				((e & EPOLLERR) ? FD_POLL_ERR : 0) |
				((e & EPOLLHUP) ? FD_POLL_HUP : 0);
		}

		/* always remap RDHUP to HUP as they're used similarly */
		if (e & EPOLLRDHUP) {
			HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
			n |= FD_POLL_HUP;
		}
		fd_update_events(fd, n);
	}
	/* the caller will take care of cached events */
}
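/*
 * Illustrative sketch only: the bare epoll pattern the poller above is
 * built around, stripped of HAProxy's fdtab, update lists, thread masks
 * and activity accounting. The sk_* names and SK_MAX_EVENTS are
 * hypothetical; only the epoll_*() calls are the real Linux API. The
 * register/wait/dispatch split mirrors the structure above: updates are
 * applied with epoll_ctl() before blocking, then one epoll_wait() collects
 * a batch of events to dispatch.
 */
#include <sys/epoll.h>
#include <unistd.h>

#define SK_MAX_EVENTS 64

/* create an epoll instance watching <watched_fd> for readability;
 * returns the epoll fd, or -1 on error.
 */
static int sk_setup(int watched_fd)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = watched_fd };
	int ep_fd = epoll_create1(0);

	if (ep_fd >= 0 && epoll_ctl(ep_fd, EPOLL_CTL_ADD, watched_fd, &ev) < 0) {
		close(ep_fd);
		return -1;
	}
	return ep_fd;
}

/* wait up to <timeout_ms> ms for events on <ep_fd> and dispatch them;
 * returns the number of ready events, or -1 on error. Bounding the
 * timeout, as MAX_DELAY_MS does above, keeps the loop responsive to
 * work that arrives outside the poller.
 */
static int sk_poll_once(int ep_fd, int timeout_ms)
{
	struct epoll_event events[SK_MAX_EVENTS];
	int status, i;

	status = epoll_wait(ep_fd, events, SK_MAX_EVENTS, timeout_ms);
	for (i = 0; i < status; i++) {
		int fd = events[i].data.fd;
		unsigned int e = events[i].events;

		/* dispatch on (fd, e) here, as fd_update_events() does above */
		(void)fd; (void)e;
	}
	return status;
}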