void zergpool_loop(int id) {

	int i;
	int zergpool_queue = event_queue_init();
	void *events = event_queue_alloc(ZERGPOOL_EVENTS);

	struct zergpool_socket *zps = zergpool_sockets;
	while (zps) {
		event_queue_add_fd_read(zergpool_queue, zps->fd);
		zps = zps->next;
	}

	for (;;) {
		int nevents = event_queue_wait_multi(zergpool_queue, -1, events, ZERGPOOL_EVENTS);
		for (i = 0; i < nevents; i++) {
			int interesting_fd = event_queue_interesting_fd(events, i);
			zps = zergpool_sockets;
			while (zps) {
				if (zps->fd == interesting_fd) {
					uwsgi_manage_zerg(zps->fd, zps->num_sockets, zps->sockets);
				}
				zps = zps->next;
			}
		}
	}
}
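/*
 * All of the loops in this file follow the same event_queue skeleton:
 * allocate an event slab, register the file descriptors of interest,
 * block in event_queue_wait_multi() and dispatch on whichever fd became
 * ready. A minimal sketch of that skeleton follows (compiled out); only
 * the event_queue_* calls are the real uWSGI API used above, while
 * MY_EVENTS and the handle_ready_fd() callback are hypothetical names
 * introduced purely for illustration.
 */
#if 0
#define MY_EVENTS 64

static void example_event_loop(int listen_fd, void (*handle_ready_fd)(int)) {
	int queue = event_queue_init();
	void *events = event_queue_alloc(MY_EVENTS);

	// register the fds we care about for read events
	event_queue_add_fd_read(queue, listen_fd);

	for (;;) {
		// -1 means "block forever"; the loops in this file usually pass
		// a timeout derived from their red-black timer tree instead
		int nevents = event_queue_wait_multi(queue, -1, events, MY_EVENTS);
		int i;
		for (i = 0; i < nevents; i++) {
			int interesting_fd = event_queue_interesting_fd(events, i);
			handle_ready_fd(interesting_fd);
		}
	}
}
#endif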
void uwsgi_stats_pusher_loop(struct uwsgi_thread *ut) {

	void *events = event_queue_alloc(1);
	for (;;) {
		int nevents = event_queue_wait_multi(ut->queue, 1, events, 1);
		if (nevents > 0) {
			int interesting_fd = event_queue_interesting_fd(events, 0);
			char buf[4096];
			ssize_t len = read(interesting_fd, buf, 4096);
			if (len <= 0) {
				uwsgi_log("[uwsgi-stats-pusher] goodbye...\n");
				return;
			}
			uwsgi_log("[uwsgi-stats-pusher] message received from master: %.*s\n", (int) len, buf);
			continue;
		}

		time_t now = uwsgi_now();
		struct uwsgi_stats_pusher_instance *uspi = uwsgi.stats_pusher_instances;
		struct uwsgi_stats *us = NULL;
		while (uspi) {
			int delta = uspi->freq ? uspi->freq : uwsgi.stats_pusher_default_freq;
			if ((uspi->last_run + delta) <= now) {
				if (!us) {
					us = uwsgi_master_generate_stats();
					if (!us)
						goto next;
				}
				uspi->pusher->func(uspi, us->base, us->pos);
				uspi->last_run = now;
			}
next:
			uspi = uspi->next;
		}

		if (us) {
			free(us->base);
			free(us);
		}
	}
}
static void uwsgi_offload_loop(struct uwsgi_thread *ut) {

	int i;
	void *events = event_queue_alloc(uwsgi.offload_threads_events);

	for (;;) {
		// TODO make timeout tunable
		int nevents = event_queue_wait_multi(ut->queue, -1, events, uwsgi.offload_threads_events);
		for (i = 0; i < nevents; i++) {
			int interesting_fd = event_queue_interesting_fd(events, i);
			if (interesting_fd == ut->pipe[1]) {
				struct uwsgi_offload_request *uor = uwsgi_malloc(sizeof(struct uwsgi_offload_request));
				ssize_t len = read(ut->pipe[1], uor, sizeof(struct uwsgi_offload_request));
				if (len != sizeof(struct uwsgi_offload_request)) {
					uwsgi_error("read()");
					free(uor);
					continue;
				}
				// call the event function for the first time
				if (uor->engine->event_func(ut, uor, -1)) {
					uwsgi_offload_close(ut, uor);
					continue;
				}
				uwsgi_offload_append(ut, uor);
				continue;
			}

			// get the task from the interesting fd
			struct uwsgi_offload_request *uor = uwsgi_offload_get_by_fd(ut, interesting_fd);
			if (!uor) continue;
			// run the hook
			if (uor->engine->event_func(ut, uor, interesting_fd)) {
				uwsgi_offload_close(ut, uor);
			}
		}
	}
}
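/*
 * emperor_loop(): the Emperor governs a set of vassal instances. Its
 * event queue watches the optional stats server socket, the configured
 * scanners (config monitors) and one pipe per vassal; vassals report
 * back with single-byte commands on that pipe. Dead vassals are reaped
 * with waitpid() and, depending on their status and exit code, are
 * either respawned or removed.
 */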
void emperor_loop() { // monitor a directory struct uwsgi_instance ui_base; struct uwsgi_instance *ui_current; pid_t diedpid; int waitpid_status; int has_children = 0; int i_am_alone = 0; int i; void *events; int nevents; int interesting_fd; char notification_message[64]; struct rlimit rl; uwsgi.emperor_stats_fd = -1; if (uwsgi.emperor_pidfile) { uwsgi_write_pidfile(uwsgi.emperor_pidfile); } signal(SIGPIPE, SIG_IGN); uwsgi_unix_signal(SIGINT, royal_death); uwsgi_unix_signal(SIGTERM, royal_death); uwsgi_unix_signal(SIGQUIT, royal_death); uwsgi_unix_signal(SIGUSR1, emperor_stats); uwsgi_unix_signal(SIGHUP, emperor_massive_reload); memset(&ui_base, 0, sizeof(struct uwsgi_instance)); if (getrlimit(RLIMIT_NOFILE, &rl)) { uwsgi_error("getrlimit()"); exit(1); } uwsgi.max_fd = rl.rlim_cur; emperor_throttle_level = uwsgi.emperor_throttle; // the queue must be initialized before adding scanners uwsgi.emperor_queue = event_queue_init(); emperor_build_scanners(); events = event_queue_alloc(64); if (uwsgi.has_emperor) { uwsgi_log("*** starting uWSGI sub-Emperor ***\n"); } else { uwsgi_log("*** starting uWSGI Emperor ***\n"); } if (uwsgi.emperor_stats) { char *tcp_port = strchr(uwsgi.emperor_stats, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; uwsgi.emperor_stats_fd = bind_to_tcp(uwsgi.emperor_stats, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { uwsgi.emperor_stats_fd = bind_to_unix(uwsgi.emperor_stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(uwsgi.emperor_queue, uwsgi.emperor_stats_fd); uwsgi_log("*** Emperor stats server enabled on %s fd: %d ***\n", uwsgi.emperor_stats, uwsgi.emperor_stats_fd); } ui = &ui_base; int freq = 0; for (;;) { if (!i_am_alone) { diedpid = waitpid(uwsgi.emperor_pid, &waitpid_status, WNOHANG); if (diedpid < 0 || diedpid > 0) { i_am_alone = 1; } } nevents = event_queue_wait_multi(uwsgi.emperor_queue, freq, events, 64); freq = uwsgi.emperor_freq; for (i = 0; i < nevents; i++) { interesting_fd = event_queue_interesting_fd(events, i); if (uwsgi.emperor_stats && uwsgi.emperor_stats_fd > -1 && interesting_fd == uwsgi.emperor_stats_fd) { emperor_send_stats(uwsgi.emperor_stats_fd); continue; } // check if a monitor is mapped to that file descriptor if (uwsgi_emperor_scanner_event(interesting_fd)) continue; ui_current = emperor_get_by_fd(interesting_fd); if (ui_current) { char byte; ssize_t rlen = read(interesting_fd, &byte, 1); if (rlen <= 0) { // SAFE if (!ui_current->config_len) { emperor_del(ui_current); } } else { if (byte == 17) { ui_current->loyal = 1; ui_current->last_loyal = uwsgi_now(); uwsgi_log("[emperor] vassal %s is now loyal\n", ui_current->name); // remove it from the blacklist uwsgi_emperor_blacklist_remove(ui_current->name); // TODO post-start hook } // heartbeat can be used for spotting blocked instances else if (byte == 26) { ui_current->last_heartbeat = uwsgi_now(); } else if (byte == 22) { emperor_stop(ui_current); } else if (byte == 30 && uwsgi.emperor_broodlord > 0 && uwsgi.emperor_broodlord_count < uwsgi.emperor_broodlord) { uwsgi_log("[emperor] going in broodlord mode: launching zergs for %s\n", ui_current->name); char *zerg_name = uwsgi_concat3(ui_current->name, ":", "zerg"); emperor_add(NULL, zerg_name, uwsgi_now(), NULL, 0, ui_current->uid, ui_current->gid); free(zerg_name); } } } else { uwsgi_log("[emperor] unrecognized vassal event on fd %d\n", interesting_fd); 
				close(interesting_fd);
			}
		}

		uwsgi_emperor_run_scanners();

		// check for heartbeat (if required)
		ui_current = ui->ui_next;
		while (ui_current) {
			if (ui_current->last_heartbeat > 0) {
				if ((ui_current->last_heartbeat + uwsgi.emperor_heartbeat) < uwsgi_now()) {
					uwsgi_log("[emperor] vassal %s sent no heartbeat in last %d seconds, respawning it...\n", ui_current->name, uwsgi.emperor_heartbeat);
					// set last_heartbeat to 0 to avoid races
					ui_current->last_heartbeat = 0;
					emperor_respawn(ui_current, uwsgi_now());
				}
			}
			ui_current = ui_current->ui_next;
		}

		// check for removed instances
		ui_current = ui;
		has_children = 0;
		while (ui_current->ui_next) {
			ui_current = ui_current->ui_next;
			has_children++;
		}

		if (uwsgi.notify) {
			if (snprintf(notification_message, 64, "The Emperor is governing %d vassals", has_children) >= 34) {
				uwsgi_notify(notification_message);
			}
		}

		if (has_children) {
			diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
		}
		else {
			// vacuum
			waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
			diedpid = 0;
		}

		if (diedpid < 0) {
			// this seems to happen when the OOM killer is triggered by a Linux cgroup, but it could be a uWSGI bug :P
			// either way, fall back to a clean situation...
			if (errno == ECHILD) {
				uwsgi_log("--- MUTINY DETECTED !!! IMPALING VASSALS... ---\n");
				ui_current = ui->ui_next;
				while (ui_current) {
					struct uwsgi_instance *rebel_vassal = ui_current;
					ui_current = ui_current->ui_next;
					emperor_del(rebel_vassal);
				}
			}
			else {
				uwsgi_error("waitpid()");
			}
		}

		ui_current = ui;
		while (ui_current->ui_next) {
			ui_current = ui_current->ui_next;
			if (ui_current->status == 1) {
				if (ui_current->config)
					free(ui_current->config);
				// SAFE
				emperor_del(ui_current);
				break;
			}
			else if (ui_current->pid == diedpid) {
				if (ui_current->status == 0) {
					// respawn an accidentally dead instance if its exit code is not UWSGI_EXILE_CODE
					if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXILE_CODE) {
						// SAFE
						emperor_del(ui_current);
					}
					else {
						// UNSAFE
						emperor_add(ui_current->scanner, ui_current->name, ui_current->last_mod, ui_current->config, ui_current->config_len, ui_current->uid, ui_current->gid);
						emperor_del(ui_current);
					}
					break;
				}
				else if (ui_current->status == 1) {
					// remove 'marked for dead' instance
					if (ui_current->config)
						free(ui_current->config);
					// SAFE
					emperor_del(ui_current);
					break;
				}
			}
		}
	}
}
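/*
 * Vassals talk back to the Emperor by writing single command bytes on
 * their pipe (see the one-byte read() in the loop above). The values
 * handled there are listed below as an illustrative reference; the
 * macro names are hypothetical, only the numeric values appear in the
 * code.
 */
#if 0
#define EMPEROR_MSG_LOYAL      17	/* vassal finished spawning and declares loyalty */
#define EMPEROR_MSG_STOP       22	/* vassal asks to be stopped */
#define EMPEROR_MSG_HEARTBEAT  26	/* periodic heartbeat, used to spot blocked instances */
#define EMPEROR_MSG_BROODLORD  30	/* overloaded vassal asks for zerg reinforcements */
#endif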
void async_loop() { if (uwsgi.async < 2) { uwsgi_log("the async loop engine requires async mode (--async <n>)\n"); exit(1); } int interesting_fd, i; struct uwsgi_rb_timer *min_timeout; int timeout; int is_a_new_connection; int proto_parser_status; uint64_t now; struct uwsgi_async_request *current_request = NULL; void *events = event_queue_alloc(64); struct uwsgi_socket *uwsgi_sock; uwsgi.async_runqueue = NULL; uwsgi.wait_write_hook = async_wait_fd_write; uwsgi.wait_read_hook = async_wait_fd_read; if (uwsgi.signal_socket > -1) { event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket); event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket); } // set a default request manager if (!uwsgi.schedule_to_req) uwsgi.schedule_to_req = async_schedule_to_req; if (!uwsgi.schedule_to_main) { uwsgi_log("*** DANGER *** async mode without coroutine/greenthread engine loaded !!!\n"); } while (uwsgi.workers[uwsgi.mywid].manage_next_request) { now = (uint64_t) uwsgi_now(); if (uwsgi.async_runqueue) { timeout = 0; } else { min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL); if (min_timeout) { timeout = min_timeout->value - now; if (timeout <= 0) { async_expire_timeouts(now); timeout = 0; } } else { timeout = -1; } } uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64); now = (uint64_t) uwsgi_now(); // timeout ??? if (uwsgi.async_nevents == 0) { async_expire_timeouts(now); } for (i = 0; i < uwsgi.async_nevents; i++) { // manage events interesting_fd = event_queue_interesting_fd(events, i); // signals are executed in the main stack... in the future we could have dedicated stacks for them if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) { uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid); continue; } is_a_new_connection = 0; // new request coming in ? 
			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {
				if (interesting_fd == uwsgi_sock->fd) {
					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t) now);
						break;
					}

					// on error, re-insert the request into the unused queue
					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					// by default the core is in UWSGI_AGAIN mode
					uwsgi.wsgi_req->async_status = UWSGI_AGAIN;

					// some protocols (like zeromq) do not need additional parsing, just push the request into the runqueue
					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}
				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					async_reset_request(uwsgi.wsgi_req);
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app-registered event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and the timeout
				async_reset_request(uwsgi.wsgi_req);
				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
			}
		}

		// event queue managed, give cpu to the runqueue
		current_request = uwsgi.async_runqueue;
		while (current_request) {
			// current_request could be nulled on error/end of request
			struct uwsgi_async_request *next_request = current_request->next;
			uwsgi.wsgi_req = current_request->wsgi_req;
			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;
			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK || uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove from the runqueue
				runqueue_remove(current_request);
			}
			current_request = next_request;
		}
	}
}
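/*
 * Lifecycle of a request in the async loop above: a readable server
 * socket yields a new wsgi_request (accept + non-blocking recv); further
 * read events drive the protocol parser until the uwsgi packet is
 * complete; the request is then pushed into the runqueue and scheduled
 * via uwsgi.schedule_to_req(). A core left in UWSGI_AGAIN with waiting
 * fds or a pending timeout is suspended and re-queued by the matching
 * event; a status <= UWSGI_OK means the request is over and it is
 * removed from the runqueue.
 */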
void fastrouter_loop() { int nevents; int interesting_fd; int new_connection; ssize_t len; int i; time_t delta; char bbuf[UMAX16]; char *tcp_port; char *tmp_socket_name; int tmp_socket_name_len; struct uwsgi_subscribe_req usr; char *magic_table[0xff]; struct uwsgi_rb_timer *min_timeout; void *events; struct msghdr msg; union { struct cmsghdr cmsg; char control [CMSG_SPACE (sizeof (int))]; } msg_control; struct cmsghdr *cmsg; struct sockaddr_un fr_addr; socklen_t fr_addr_len = sizeof(struct sockaddr_un); struct fastrouter_session *fr_session; struct fastrouter_session *fr_table[2048]; struct iovec iov[2]; int soopt; socklen_t solen = sizeof(int); int ufr_subserver = -1; for(i=0;i<2048;i++) { fr_table[i] = NULL; } ufr.queue = event_queue_init(); struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets; while(ufr_sock) { if (ufr_sock->name[0] == '=') { int shared_socket = atoi(ufr_sock->name+1); if (shared_socket >= 0) { ufr_sock->fd = uwsgi_get_shared_socket_fd_by_num(shared_socket); if (ufr_sock->fd == -1) { uwsgi_log("unable to use shared socket %d\n", shared_socket); } } } else { tcp_port = strchr(ufr_sock->name, ':'); if (tcp_port) { ufr_sock->fd = bind_to_tcp(ufr_sock->name, uwsgi.listen_queue, tcp_port); } else { ufr_sock->fd = bind_to_unix(ufr_sock->name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } } uwsgi_log("uwsgi fastrouter/proxy bound on %s\n", ufr_sock->name); if (!ufr.cheap) { event_queue_add_fd_read(ufr.queue, ufr_sock->fd); } else { uwsgi_log("[uwsgi-fastrouter] cheap mode requested. Waiting for subscriptions...\n"); ufr.i_am_cheap = 1; } ufr_sock = ufr_sock->next; } events = event_queue_alloc(ufr.nevents); ufr.timeouts = uwsgi_init_rb_timer(); if (!ufr.socket_timeout) ufr.socket_timeout = 30; if (ufr.subscription_server) { ufr_subserver = bind_to_udp(ufr.subscription_server, 0, 0); event_queue_add_fd_read(ufr.queue, ufr_subserver); if (!ufr.subscription_slot) ufr.subscription_slot = 30; // check for node status every 10 seconds //ufr.subscriptions_check = add_check_timeout(10); } if (ufr.pattern) { init_magic_table(magic_table); } for (;;) { min_timeout = uwsgi_min_rb_timer(ufr.timeouts); if (min_timeout == NULL ) { delta = -1; } else { delta = min_timeout->key - time(NULL); if (delta <= 0) { expire_timeouts(fr_table); delta = 0; } } nevents = event_queue_wait_multi(ufr.queue, delta, events, ufr.nevents); if (nevents == 0) { expire_timeouts(fr_table); } for (i=0;i<nevents;i++) { tmp_socket_name = NULL; interesting_fd = event_queue_interesting_fd(events, i); int taken = 0; struct uwsgi_fastrouter_socket *uwsgi_sock = ufr.sockets; while(uwsgi_sock) { if (interesting_fd == uwsgi_sock->fd) { new_connection = accept(interesting_fd, (struct sockaddr *) &fr_addr, &fr_addr_len); if (new_connection < 0) { continue; } fr_table[new_connection] = alloc_fr_session(); fr_table[new_connection]->fd = new_connection; fr_table[new_connection]->instance_fd = -1; fr_table[new_connection]->status = FASTROUTER_STATUS_RECV_HDR; fr_table[new_connection]->h_pos = 0; fr_table[new_connection]->pos = 0; fr_table[new_connection]->un = NULL; fr_table[new_connection]->instance_failed = 0; fr_table[new_connection]->instance_address_len = 0; fr_table[new_connection]->hostname_len = 0; fr_table[new_connection]->hostname = NULL; fr_table[new_connection]->timeout = add_timeout(fr_table[new_connection]); event_queue_add_fd_read(ufr.queue, new_connection); taken = 1; break; } uwsgi_sock = uwsgi_sock->next; } if (taken) { continue; } if (interesting_fd == ufr_subserver) { len = 
recv(ufr_subserver, bbuf, 4096, 0); #ifdef UWSGI_EVENT_USE_PORT event_queue_add_fd_read(ufr.queue, ufr_subserver); #endif if (len > 0) { memset(&usr, 0, sizeof(struct uwsgi_subscribe_req)); uwsgi_hooked_parse(bbuf+4, len-4, fastrouter_manage_subscription, &usr); if (uwsgi_add_subscribe_node(&ufr.subscriptions, &usr, ufr.subscription_regexp) && ufr.i_am_cheap) { struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets; while(ufr_sock) { event_queue_add_fd_read(ufr.queue, ufr_sock->fd); ufr_sock = ufr_sock->next; } ufr.i_am_cheap = 0; uwsgi_log("[uwsgi-fastrouter] leaving cheap mode...\n"); } } } else { fr_session = fr_table[interesting_fd]; // something is going wrong... if (fr_session == NULL) continue; if (event_queue_interesting_fd_has_error(events, i)) { close_session(fr_table, fr_session); continue; } fr_session->timeout = reset_timeout(fr_session); switch(fr_session->status) { case FASTROUTER_STATUS_RECV_HDR: len = recv(fr_session->fd, (char *)(&fr_session->uh) + fr_session->h_pos, 4-fr_session->h_pos, 0); if (len <= 0) { uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } fr_session->h_pos += len; if (fr_session->h_pos == 4) { #ifdef UWSGI_DEBUG uwsgi_log("modifier1: %d pktsize: %d modifier2: %d\n", fr_session->uh.modifier1, fr_session->uh.pktsize, fr_session->uh.modifier2); #endif fr_session->status = FASTROUTER_STATUS_RECV_VARS; } break; case FASTROUTER_STATUS_RECV_VARS: len = recv(fr_session->fd, fr_session->buffer + fr_session->pos, fr_session->uh.pktsize - fr_session->pos, 0); if (len <= 0) { uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } fr_session->pos += len; if (fr_session->pos == fr_session->uh.pktsize) { if (uwsgi_hooked_parse(fr_session->buffer, fr_session->uh.pktsize, fr_get_hostname, (void *) fr_session)) { close_session(fr_table, fr_session); break; } if (fr_session->hostname_len == 0) { close_session(fr_table, fr_session); break; } #ifdef UWSGI_DEBUG //uwsgi_log("requested domain %.*s\n", fr_session->hostname_len, fr_session->hostname); #endif if (ufr.use_cache) { fr_session->instance_address = uwsgi_cache_get(fr_session->hostname, fr_session->hostname_len, &fr_session->instance_address_len); char *cs_mod = uwsgi_str_contains(fr_session->instance_address, fr_session->instance_address_len, ','); if (cs_mod) { fr_session->modifier1 = uwsgi_str_num(cs_mod+1, (fr_session->instance_address_len - (cs_mod - fr_session->instance_address))-1); fr_session->instance_address_len = (cs_mod - fr_session->instance_address); } } else if (ufr.pattern) { magic_table['s'] = uwsgi_concat2n(fr_session->hostname, fr_session->hostname_len, "", 0); tmp_socket_name = magic_sub(ufr.pattern, ufr.pattern_len, &tmp_socket_name_len, magic_table); free(magic_table['s']); fr_session->instance_address_len = tmp_socket_name_len; fr_session->instance_address = tmp_socket_name; } else if (ufr.subscription_server) { fr_session->un = uwsgi_get_subscribe_node(&ufr.subscriptions, fr_session->hostname, fr_session->hostname_len, ufr.subscription_regexp); if (fr_session->un && fr_session->un->len) { fr_session->instance_address = fr_session->un->name; fr_session->instance_address_len = fr_session->un->len; fr_session->modifier1 = fr_session->un->modifier1; } } else if (ufr.base) { tmp_socket_name = uwsgi_concat2nn(ufr.base, ufr.base_len, fr_session->hostname, fr_session->hostname_len, &tmp_socket_name_len); fr_session->instance_address_len = tmp_socket_name_len; fr_session->instance_address = tmp_socket_name; } else if (ufr.code_string_code && ufr.code_string_function) { if 
(uwsgi.p[ufr.code_string_modifier1]->code_string) { fr_session->instance_address = uwsgi.p[ufr.code_string_modifier1]->code_string("uwsgi_fastrouter", ufr.code_string_code, ufr.code_string_function, fr_session->hostname, fr_session->hostname_len); if (fr_session->instance_address) { fr_session->instance_address_len = strlen(fr_session->instance_address); char *cs_mod = uwsgi_str_contains(fr_session->instance_address, fr_session->instance_address_len, ','); if (cs_mod) { fr_session->modifier1 = uwsgi_str_num(cs_mod+1, (fr_session->instance_address_len - (cs_mod - fr_session->instance_address))-1); fr_session->instance_address_len = (cs_mod - fr_session->instance_address); } } } } // no address found if (!fr_session->instance_address_len) { close_session(fr_table, fr_session); break; } fr_session->pass_fd = is_unix(fr_session->instance_address, fr_session->instance_address_len); fr_session->instance_fd = uwsgi_connectn(fr_session->instance_address, fr_session->instance_address_len, 0, 1); if (tmp_socket_name) free(tmp_socket_name); if (fr_session->instance_fd < 0) { /* if (ufr.subscription_server) { if (fr_session->un && fr_session->un->len > 0) { uwsgi_log("[uwsgi-fastrouter] %.*s => marking %.*s as failed\n", (int) fr_session->hostname_len, fr_session->hostname, (int) fr_session->instance_address_len,fr_session->instance_address); uwsgi_remove_subscribe_node(&ufr.subscriptions, fr_session->un); if (ufr.subscriptions == NULL && ufr.cheap && !ufr.i_am_cheap) { uwsgi_log("[uwsgi-fastrouter] no more nodes available. Going cheap...\n"); struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets; while(ufr_sock) { event_queue_del_fd(ufr.queue, ufr_sock->fd, event_queue_read()); ufr_sock = ufr_sock->next; } ufr.i_am_cheap = 1; } } } */ fr_session->instance_failed = 1; close_session(fr_table, fr_session); break; } fr_session->status = FASTROUTER_STATUS_CONNECTING; fr_table[fr_session->instance_fd] = fr_session; event_queue_add_fd_write(ufr.queue, fr_session->instance_fd); } break; case FASTROUTER_STATUS_CONNECTING: if (interesting_fd == fr_session->instance_fd) { if (getsockopt(fr_session->instance_fd, SOL_SOCKET, SO_ERROR, (void *) (&soopt), &solen) < 0) { uwsgi_error("getsockopt()"); fr_session->instance_failed = 1; close_session(fr_table, fr_session); break; } if (soopt) { uwsgi_log("unable to connect() to uwsgi instance: %s\n", strerror(soopt)); fr_session->instance_failed = 1; close_session(fr_table, fr_session); break; } fr_session->uh.modifier1 = fr_session->modifier1; iov[0].iov_base = &fr_session->uh; iov[0].iov_len = 4; iov[1].iov_base = fr_session->buffer; iov[1].iov_len = fr_session->uh.pktsize; // increment node requests counter if (fr_session->un) fr_session->un->requests++; // fd passing: PERFORMANCE EXTREME BOOST !!! 
if (fr_session->pass_fd && !uwsgi.no_fd_passing) { msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; msg.msg_iovlen = 2; msg.msg_flags = 0; msg.msg_control = &msg_control; msg.msg_controllen = sizeof (msg_control); cmsg = CMSG_FIRSTHDR (&msg); cmsg->cmsg_len = CMSG_LEN (sizeof (int)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; memcpy(CMSG_DATA(cmsg), &fr_session->fd, sizeof(int)); if (sendmsg(fr_session->instance_fd, &msg, 0) < 0) { uwsgi_error("sendmsg()"); } close_session(fr_table, fr_session); break; } if (writev(fr_session->instance_fd, iov, 2) < 0) { uwsgi_error("writev()"); close_session(fr_table, fr_session); break; } event_queue_fd_write_to_read(ufr.queue, fr_session->instance_fd); fr_session->status = FASTROUTER_STATUS_RESPONSE; } break; case FASTROUTER_STATUS_RESPONSE: // data from instance if (interesting_fd == fr_session->instance_fd) { len = recv(fr_session->instance_fd, fr_session->buffer, 0xffff, 0); if (len <= 0) { if (len < 0) uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } len = send(fr_session->fd, fr_session->buffer, len, 0); if (len <= 0) { if (len < 0) uwsgi_error("send()"); close_session(fr_table, fr_session); break; } // update transfer statistics if (fr_session->un) fr_session->un->transferred += len; } // body from client else if (interesting_fd == fr_session->fd) { //uwsgi_log("receiving body...\n"); len = recv(fr_session->fd, fr_session->buffer, 0xffff, 0); if (len <= 0) { if (len < 0) uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } len = send(fr_session->instance_fd, fr_session->buffer, len, 0); if (len <= 0) { if (len < 0) uwsgi_error("send()"); close_session(fr_table, fr_session); break; } } break; // fallback to destroy !!! default: uwsgi_log("unknown event: closing session\n"); close_session(fr_table, fr_session); break; } } } } }
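/*
 * fastrouter_loop() above drives each client connection through a small
 * state machine keyed by fr_session->status:
 *   FASTROUTER_STATUS_RECV_HDR   -> read the 4-byte uwsgi header
 *   FASTROUTER_STATUS_RECV_VARS  -> read pktsize bytes of request vars
 *                                   and map the hostname to an instance
 *   FASTROUTER_STATUS_CONNECTING -> wait for the non-blocking connect()
 *                                   to the instance, then forward header
 *                                   and vars (or, for unix sockets, pass
 *                                   the client fd itself via SCM_RIGHTS
 *                                   and close the session)
 *   FASTROUTER_STATUS_RESPONSE   -> shuttle bytes between instance and
 *                                   client until one side closes
 */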
void uwsgi_corerouter_loop(int id, void *data) { int i; struct uwsgi_corerouter *ucr = (struct uwsgi_corerouter *) data; ucr->cr_stats_server = -1; ucr->cr_table = uwsgi_malloc(sizeof(struct corerouter_session *) * uwsgi.max_fd); for (i = 0; i < (int) uwsgi.max_fd; i++) { ucr->cr_table[i] = NULL; } ucr->i_am_cheap = ucr->cheap; void *events = uwsgi_corerouter_setup_event_queue(ucr, id); if (ucr->has_subscription_sockets) event_queue_add_fd_read(ucr->queue, ushared->gateways[id].internal_subscription_pipe[1]); if (!ucr->socket_timeout) ucr->socket_timeout = 60; if (!ucr->static_node_gracetime) ucr->static_node_gracetime = 30; int i_am_the_first = 1; for(i=0;i<id;i++) { if (!strcmp(ushared->gateways[i].name, ucr->name)) { i_am_the_first = 0; break; } } if (ucr->stats_server && i_am_the_first) { char *tcp_port = strchr(ucr->stats_server, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; ucr->cr_stats_server = bind_to_tcp(ucr->stats_server, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { ucr->cr_stats_server = bind_to_unix(ucr->stats_server, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(ucr->queue, ucr->cr_stats_server); uwsgi_log("*** %s stats server enabled on %s fd: %d ***\n", ucr->short_name, ucr->stats_server, ucr->cr_stats_server); } if (ucr->use_socket) { ucr->to_socket = uwsgi_get_socket_by_num(ucr->socket_num); if (ucr->to_socket) { // fix socket name_len if (ucr->to_socket->name_len == 0 && ucr->to_socket->name) { ucr->to_socket->name_len = strlen(ucr->to_socket->name); } } } if (!ucr->pb_base_dir) { ucr->pb_base_dir = getenv("TMPDIR"); if (!ucr->pb_base_dir) ucr->pb_base_dir = "/tmp"; } int nevents; time_t delta; struct uwsgi_rb_timer *min_timeout; int new_connection; if (ucr->pattern) { init_magic_table(ucr->magic_table); } union uwsgi_sockaddr cr_addr; socklen_t cr_addr_len = sizeof(struct sockaddr_un); ucr->mapper = uwsgi_cr_map_use_void; if (ucr->use_cache) { ucr->cache = uwsgi_cache_by_name(ucr->use_cache); if (!ucr->cache) { uwsgi_log("!!! 
unable to find cache \"%s\" !!!\n", ucr->use_cache); exit(1); } ucr->mapper = uwsgi_cr_map_use_cache; } else if (ucr->pattern) { ucr->mapper = uwsgi_cr_map_use_pattern; } else if (ucr->has_subscription_sockets) { ucr->mapper = uwsgi_cr_map_use_subscription; if (uwsgi.subscription_dotsplit) { ucr->mapper = uwsgi_cr_map_use_subscription_dotsplit; } } else if (ucr->base) { ucr->mapper = uwsgi_cr_map_use_base; } else if (ucr->code_string_code && ucr->code_string_function) { ucr->mapper = uwsgi_cr_map_use_cs; } else if (ucr->to_socket) { ucr->mapper = uwsgi_cr_map_use_to; } else if (ucr->static_nodes) { ucr->mapper = uwsgi_cr_map_use_static_nodes; } ucr->timeouts = uwsgi_init_rb_timer(); for (;;) { time_t now = uwsgi_now(); // set timeouts and harakiri min_timeout = uwsgi_min_rb_timer(ucr->timeouts, NULL); if (min_timeout == NULL) { delta = -1; } else { delta = min_timeout->value - now; if (delta <= 0) { corerouter_expire_timeouts(ucr, now); delta = 0; } } if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = 0; } // wait for events nevents = event_queue_wait_multi(ucr->queue, delta, events, ucr->nevents); now = uwsgi_now(); if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = now + ucr->harakiri; } if (nevents == 0) { corerouter_expire_timeouts(ucr, now); } for (i = 0; i < nevents; i++) { // get the interesting fd ucr->interesting_fd = event_queue_interesting_fd(events, i); // something bad happened if (ucr->interesting_fd < 0) continue; // check if the ucr->interesting_fd matches a gateway socket struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets; int taken = 0; while (ugs) { if (ugs->gateway == &ushared->gateways[id] && ucr->interesting_fd == ugs->fd) { if (!ugs->subscription) { #if defined(__linux__) && defined(SOCK_NONBLOCK) && !defined(OBSOLETE_LINUX_KERNEL) new_connection = accept4(ucr->interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len, SOCK_NONBLOCK); if (new_connection < 0) { taken = 1; break; } #else new_connection = accept(ucr->interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len); if (new_connection < 0) { taken = 1; break; } // set socket in non-blocking mode, on non-linux platforms, clients get the server mode #ifdef __linux__ uwsgi_socket_nb(new_connection); #endif #endif struct corerouter_session *cr = corerouter_alloc_session(ucr, ugs, new_connection, (struct sockaddr *) &cr_addr, cr_addr_len); //something wrong in the allocation if (!cr) break; } else if (ugs->subscription) { uwsgi_corerouter_manage_subscription(ucr, id, ugs); } taken = 1; break; } ugs = ugs->next; } if (taken) { continue; } // manage internal subscription if (ucr->interesting_fd == ushared->gateways[id].internal_subscription_pipe[1]) { uwsgi_corerouter_manage_internal_subscription(ucr, ucr->interesting_fd); } // manage a stats request else if (ucr->interesting_fd == ucr->cr_stats_server) { corerouter_send_stats(ucr); } else { struct corerouter_peer *peer = ucr->cr_table[ucr->interesting_fd]; // something is going wrong... 
if (peer == NULL) continue; // on error, destroy the session if (event_queue_interesting_fd_has_error(events, i)) { peer->failed = 1; corerouter_close_peer(ucr, peer); continue; } // set timeout (in main_peer too) peer->timeout = corerouter_reset_timeout_fast(ucr, peer, now); peer->session->main_peer->timeout = corerouter_reset_timeout_fast(ucr, peer->session->main_peer, now); ssize_t (*hook)(struct corerouter_peer *) = NULL; // call event hook if (event_queue_interesting_fd_is_read(events, i)) { hook = peer->hook_read; } else if (event_queue_interesting_fd_is_write(events, i)) { hook = peer->hook_write; } if (!hook) continue; // reset errno (as we use it for internal signalling) errno = 0; ssize_t ret = hook(peer); // connection closed if (ret == 0) { corerouter_close_peer(ucr, peer); continue; } else if (ret < 0) { if (errno == EINPROGRESS) continue; // remove keepalive on error peer->session->can_keepalive = 0; corerouter_close_peer(ucr, peer); continue; } } } } }
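/*
 * The corerouter loop above (like the async loops in this file) relies
 * on the same red-black timer idiom: before blocking it looks up the
 * nearest timer with uwsgi_min_rb_timer(), passes the remaining delta
 * as the event_queue_wait_multi() timeout, and calls the
 * *_expire_timeouts() helper whenever the wait returns with zero events
 * or the delta has already elapsed.
 */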
void *async_loop(void *arg1) { struct uwsgi_async_fd *tmp_uaf; int interesting_fd, i; struct uwsgi_rb_timer *min_timeout; int timeout; int is_a_new_connection; int proto_parser_status; time_t now, last_now = 0; static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL; void *events = event_queue_alloc(64); struct uwsgi_socket *uwsgi_sock; uwsgi.async_runqueue = NULL; uwsgi.async_runqueue_cnt = 0; if (uwsgi.signal_socket > -1) { event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket); event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket); } // set a default request manager if (!uwsgi.schedule_to_req) uwsgi.schedule_to_req = async_schedule_to_req; while (uwsgi.workers[uwsgi.mywid].manage_next_request) { if (uwsgi.async_runqueue_cnt) { timeout = 0; } else { min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts); if (uwsgi.async_runqueue_cnt) { timeout = 0; } if (min_timeout) { timeout = min_timeout->key - time(NULL); if (timeout <= 0) { async_expire_timeouts(); timeout = 0; } } else { timeout = -1; } } uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64); // timeout ??? if (uwsgi.async_nevents == 0) { async_expire_timeouts(); } for(i=0;i<uwsgi.async_nevents;i++) { // manage events interesting_fd = event_queue_interesting_fd(events, i); if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) { uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid); continue; } is_a_new_connection = 0; // new request coming in ? uwsgi_sock = uwsgi.sockets; while(uwsgi_sock) { if (interesting_fd == uwsgi_sock->fd) { is_a_new_connection = 1; uwsgi.wsgi_req = find_first_available_wsgi_req(); if (uwsgi.wsgi_req == NULL) { now = time(NULL); if (now > last_now) { uwsgi_log("async queue is full !!!\n"); last_now = now; } break; } wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock ); if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) { #ifdef UWSGI_EVENT_USE_PORT event_queue_add_fd_read(uwsgi.async_queue, interesting_fd); #endif uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } #ifdef UWSGI_EVENT_USE_PORT event_queue_add_fd_read(uwsgi.async_queue, interesting_fd); #endif // on linux we do not need to reset the socket to blocking state #ifndef __linux__ /* re-set blocking socket */ int arg = uwsgi_sock->arg; arg &= (~O_NONBLOCK); if (fcntl(uwsgi.wsgi_req->poll.fd, F_SETFL, arg) < 0) { uwsgi_error("fcntl()"); uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } #endif if (wsgi_req_async_recv(uwsgi.wsgi_req)) { uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } if (uwsgi.wsgi_req->do_not_add_to_async_queue) { runqueue_push(uwsgi.wsgi_req); } break; } uwsgi_sock = uwsgi_sock->next; } if (!is_a_new_connection) { // proto event uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd); if (uwsgi.wsgi_req) { proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req); // reset timeout rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; // parsing complete if (!proto_parser_status) { // remove fd from event poll and fd proto table #ifndef UWSGI_EVENT_USE_PORT event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read()); #endif 
uwsgi.async_proto_fd_table[interesting_fd] = NULL; // put request in the runqueue runqueue_push(uwsgi.wsgi_req); continue; } else if (proto_parser_status < 0) { if (proto_parser_status == -1) uwsgi_log("error parsing request\n"); uwsgi.async_proto_fd_table[interesting_fd] = NULL; close(interesting_fd); continue; } // re-add timer async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]); continue; } // app event uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd); // unknown fd, remove it (for safety) if (uwsgi.wsgi_req == NULL) { close(interesting_fd); continue; } // remove all the fd monitors and timeout while(uwsgi.wsgi_req->waiting_fds) { #ifndef UWSGI_EVENT_USE_PORT event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event); #endif tmp_uaf = uwsgi.wsgi_req->waiting_fds; uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL; uwsgi.wsgi_req->waiting_fds = tmp_uaf->next; free(tmp_uaf); } uwsgi.wsgi_req->waiting_fds = NULL; if (uwsgi.wsgi_req->async_timeout) { rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; } uwsgi.wsgi_req->async_ready_fd = 1; uwsgi.wsgi_req->async_last_ready_fd = interesting_fd; // put the request in the runqueue again runqueue_push(uwsgi.wsgi_req); } } // event queue managed, give cpu to runqueue if (!current_request) current_request = uwsgi.async_runqueue; if (uwsgi.async_runqueue_cnt) { uwsgi.wsgi_req = current_request->wsgi_req; uwsgi.schedule_to_req(); uwsgi.wsgi_req->switches++; next_async_request = current_request->next; // request ended ? if (uwsgi.wsgi_req->async_status <= UWSGI_OK) { // remove all the monitored fds and timeout while(uwsgi.wsgi_req->waiting_fds) { #ifndef UWSGI_EVENT_USE_PORT event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event); #endif tmp_uaf = uwsgi.wsgi_req->waiting_fds; uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL; uwsgi.wsgi_req->waiting_fds = tmp_uaf->next; free(tmp_uaf); } uwsgi.wsgi_req->waiting_fds = NULL; if (uwsgi.wsgi_req->async_timeout) { rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; } // remove from the list runqueue_remove(current_request); uwsgi_close_request(uwsgi.wsgi_req); // push wsgi_request in the unused stack uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; } else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) { // remove this request from suspended list runqueue_remove(current_request); } current_request = next_async_request; } } return NULL; }
void uwsgi_corerouter_loop(int id, void *data) { int i; struct uwsgi_corerouter *ucr = (struct uwsgi_corerouter *) data; ucr->cr_stats_server = -1; ucr->cr_table = uwsgi_malloc(sizeof(struct corerouter_session *) * uwsgi.max_fd); for (i = 0; i < (int) uwsgi.max_fd; i++) { ucr->cr_table[i] = NULL; } ucr->i_am_cheap = ucr->cheap; void *events = uwsgi_corerouter_setup_event_queue(ucr, id); if (ucr->has_subscription_sockets) event_queue_add_fd_read(ucr->queue, ushared->gateways[id].internal_subscription_pipe[1]); if (!ucr->socket_timeout) ucr->socket_timeout = 30; if (!ucr->static_node_gracetime) ucr->static_node_gracetime = 30; int i_am_the_first = 1; for(i=0;i<id;i++) { if (!strcmp(ushared->gateways[i].name, ucr->name)) { i_am_the_first = 0; break; } } if (ucr->stats_server && i_am_the_first) { char *tcp_port = strchr(ucr->stats_server, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; ucr->cr_stats_server = bind_to_tcp(ucr->stats_server, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { ucr->cr_stats_server = bind_to_unix(ucr->stats_server, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(ucr->queue, ucr->cr_stats_server); uwsgi_log("*** %s stats server enabled on %s fd: %d ***\n", ucr->short_name, ucr->stats_server, ucr->cr_stats_server); } if (ucr->use_socket) { ucr->to_socket = uwsgi_get_socket_by_num(ucr->socket_num); if (ucr->to_socket) { // fix socket name_len if (ucr->to_socket->name_len == 0 && ucr->to_socket->name) { ucr->to_socket->name_len = strlen(ucr->to_socket->name); } } } if (!ucr->pb_base_dir) { ucr->pb_base_dir = getenv("TMPDIR"); if (!ucr->pb_base_dir) ucr->pb_base_dir = "/tmp"; } int nevents; time_t delta; struct uwsgi_rb_timer *min_timeout; int interesting_fd; int new_connection; if (ucr->pattern) { init_magic_table(ucr->magic_table); } union uwsgi_sockaddr cr_addr; socklen_t cr_addr_len = sizeof(struct sockaddr_un); struct corerouter_session *cr_session; ucr->mapper = uwsgi_cr_map_use_void; if (ucr->use_cache) { ucr->mapper = uwsgi_cr_map_use_cache; } else if (ucr->pattern) { ucr->mapper = uwsgi_cr_map_use_pattern; } else if (ucr->has_subscription_sockets) { ucr->mapper = uwsgi_cr_map_use_subscription; } else if (ucr->base) { ucr->mapper = uwsgi_cr_map_use_base; } else if (ucr->code_string_code && ucr->code_string_function) { ucr->mapper = uwsgi_cr_map_use_cs; } else if (ucr->to_socket) { ucr->mapper = uwsgi_cr_map_use_to; } else if (ucr->static_nodes) { ucr->mapper = uwsgi_cr_map_use_static_nodes; } else if (ucr->use_cluster) { ucr->mapper = uwsgi_cr_map_use_cluster; } ucr->timeouts = uwsgi_init_rb_timer(); for (;;) { min_timeout = uwsgi_min_rb_timer(ucr->timeouts); if (min_timeout == NULL) { delta = -1; } else { delta = min_timeout->key - time(NULL); if (delta <= 0) { corerouter_expire_timeouts(ucr); delta = 0; } } if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = 0; } nevents = event_queue_wait_multi(ucr->queue, delta, events, ucr->nevents); if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = time(NULL) + ucr->harakiri; } if (nevents == 0) { corerouter_expire_timeouts(ucr); } for (i = 0; i < nevents; i++) { interesting_fd = event_queue_interesting_fd(events, i); struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets; int taken = 0; while (ugs) { if (ugs->gateway == &ushared->gateways[id] && interesting_fd == ugs->fd) { if 
(!ugs->subscription) {
						new_connection = accept(interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len);
#ifdef UWSGI_EVENT_USE_PORT
						event_queue_add_fd_read(ucr->queue, interesting_fd);
#endif
						if (new_connection < 0) {
							taken = 1;
							break;
						}
						// on non-linux platforms clients inherit the server socket mode, so fix blocking/non-blocking explicitly
#ifndef __linux__
						if (!ugs->nb) {
							uwsgi_socket_b(new_connection);
						}
#else
						if (ugs->nb) {
							uwsgi_socket_nb(new_connection);
						}
#endif
						corerouter_alloc_session(ucr, ugs, new_connection, (struct sockaddr *) &cr_addr, cr_addr_len);
					}
					else if (ugs->subscription) {
						uwsgi_corerouter_manage_subscription(ucr, id, ugs);
					}
					taken = 1;
					break;
				}
				ugs = ugs->next;
			}

			if (taken) {
				continue;
			}

			if (interesting_fd == ushared->gateways[id].internal_subscription_pipe[1]) {
				uwsgi_corerouter_manage_internal_subscription(ucr, interesting_fd);
			}
			else if (interesting_fd == ucr->cr_stats_server) {
				corerouter_send_stats(ucr);
			}
			else {
				cr_session = ucr->cr_table[interesting_fd];

				// something is going wrong...
				if (cr_session == NULL)
					continue;

				if (event_queue_interesting_fd_has_error(events, i)) {
					corerouter_close_session(ucr, cr_session);
					continue;
				}

				cr_session->timeout = corerouter_reset_timeout(ucr, cr_session);

				// implementation-specific cycle
				ucr->switch_events(ucr, cr_session, interesting_fd);
			}
		}
	}
}
void async_loop() { if (uwsgi.async < 2) { uwsgi_log("the async loop engine requires async mode (--async <n>)\n"); exit(1); } struct uwsgi_async_fd *tmp_uaf; int interesting_fd, i; struct uwsgi_rb_timer *min_timeout; int timeout; int is_a_new_connection; int proto_parser_status; uint64_t now; static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL; void *events = event_queue_alloc(64); struct uwsgi_socket *uwsgi_sock; uwsgi.async_runqueue = NULL; uwsgi.async_runqueue_cnt = 0; uwsgi.wait_write_hook = async_wait_fd_write; uwsgi.wait_read_hook = async_wait_fd_read; if (uwsgi.signal_socket > -1) { event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket); event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket); } // set a default request manager if (!uwsgi.schedule_to_req) uwsgi.schedule_to_req = async_schedule_to_req; if (!uwsgi.schedule_to_main) { uwsgi_log("*** WARNING *** async mode without coroutine/greenthread engine loaded !!!\n"); } while (uwsgi.workers[uwsgi.mywid].manage_next_request) { now = (uint64_t) uwsgi_now(); if (uwsgi.async_runqueue_cnt) { timeout = 0; } else { min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL); if (min_timeout) { timeout = min_timeout->value - now; if (timeout <= 0) { async_expire_timeouts(now); timeout = 0; } } else { timeout = -1; } } uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64); now = (uint64_t) uwsgi_now(); // timeout ??? if (uwsgi.async_nevents == 0) { async_expire_timeouts(now); } for (i = 0; i < uwsgi.async_nevents; i++) { // manage events interesting_fd = event_queue_interesting_fd(events, i); if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) { uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid); continue; } is_a_new_connection = 0; // new request coming in ? 
uwsgi_sock = uwsgi.sockets; while (uwsgi_sock) { if (interesting_fd == uwsgi_sock->fd) { is_a_new_connection = 1; uwsgi.wsgi_req = find_first_available_wsgi_req(); if (uwsgi.wsgi_req == NULL) { uwsgi_async_queue_is_full((time_t)now); break; } wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock); if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) { uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } if (wsgi_req_async_recv(uwsgi.wsgi_req)) { uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } if (uwsgi.wsgi_req->do_not_add_to_async_queue) { runqueue_push(uwsgi.wsgi_req); } break; } uwsgi_sock = uwsgi_sock->next; } if (!is_a_new_connection) { // proto event uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd); if (uwsgi.wsgi_req) { proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req); // reset timeout uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; // parsing complete if (!proto_parser_status) { // remove fd from event poll and fd proto table uwsgi.async_proto_fd_table[interesting_fd] = NULL; event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read()); // put request in the runqueue runqueue_push(uwsgi.wsgi_req); continue; } else if (proto_parser_status < 0) { if (proto_parser_status == -1) uwsgi_log("error parsing request\n"); uwsgi.async_proto_fd_table[interesting_fd] = NULL; close(interesting_fd); continue; } // re-add timer async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]); continue; } // app event uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd); // unknown fd, remove it (for safety) if (uwsgi.wsgi_req == NULL) { close(interesting_fd); continue; } // remove all the fd monitors and timeout while (uwsgi.wsgi_req->waiting_fds) { tmp_uaf = uwsgi.wsgi_req->waiting_fds; uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL; event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event); uwsgi.wsgi_req->waiting_fds = tmp_uaf->next; free(tmp_uaf); } uwsgi.wsgi_req->waiting_fds = NULL; if (uwsgi.wsgi_req->async_timeout) { uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; } uwsgi.wsgi_req->async_ready_fd = 1; uwsgi.wsgi_req->async_last_ready_fd = interesting_fd; // put the request in the runqueue again runqueue_push(uwsgi.wsgi_req); // avoid managing other enqueued events... break; } } // event queue managed, give cpu to runqueue if (!current_request) current_request = uwsgi.async_runqueue; if (uwsgi.async_runqueue_cnt) { uwsgi.wsgi_req = current_request->wsgi_req; uwsgi.schedule_to_req(); uwsgi.wsgi_req->switches++; next_async_request = current_request->next; // request ended ? 
if (uwsgi.wsgi_req->async_status <= UWSGI_OK) { // remove all the monitored fds and timeout while (uwsgi.wsgi_req->waiting_fds) { tmp_uaf = uwsgi.wsgi_req->waiting_fds; uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL; event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event); uwsgi.wsgi_req->waiting_fds = tmp_uaf->next; free(tmp_uaf); } uwsgi.wsgi_req->waiting_fds = NULL; if (uwsgi.wsgi_req->async_timeout) { uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; } // remove from the list runqueue_remove(current_request); uwsgi_close_request(uwsgi.wsgi_req); // push wsgi_request in the unused stack uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; } else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) { // remove this request from suspended list runqueue_remove(current_request); } current_request = next_async_request; } } }