struct zergpool_socket *add_zergpool_socket(char *name, char *sockets) { struct zergpool_socket *z_sock, *zps = zergpool_sockets; if (!zps) { z_sock = uwsgi_calloc(sizeof(struct zergpool_socket)); zergpool_sockets = z_sock; } else { while(zps) { if (!zps->next) { z_sock = uwsgi_calloc(sizeof(struct zergpool_socket)); zps->next = z_sock; break; } zps = zps->next; } } // do not defer accept for zergpools if (!uwsgi.no_defer_accept) { uwsgi.no_defer_accept = 1; z_sock->fd = bind_to_unix(name, uwsgi.listen_queue, uwsgi.chmod_socket, 0); uwsgi.no_defer_accept = 0; } else { z_sock->fd = bind_to_unix(name, uwsgi.listen_queue, uwsgi.chmod_socket, 0); } char *sock_list = uwsgi_str(sockets); char *p = strtok(sock_list, ","); while(p) { z_sock->num_sockets++; p = strtok(NULL, ","); } free(sock_list); z_sock->sockets = uwsgi_calloc(sizeof(int) * (z_sock->num_sockets + 1)); sock_list = uwsgi_str(sockets); int pos = 0; p = strtok(sock_list, ","); while(p) { char *port = strchr(p, ':'); if (!port) { z_sock->sockets[pos] = bind_to_unix(p, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); uwsgi_log("zergpool %s bound to UNIX socket %s (fd: %d)\n", name, uwsgi_getsockname(z_sock->sockets[pos]), z_sock->sockets[pos]); } else { char *gsn = generate_socket_name(p); z_sock->sockets[pos] = bind_to_tcp(gsn, uwsgi.listen_queue, strchr(gsn, ':')); uwsgi_log("zergpool %s bound to TCP socket %s (fd: %d)\n", name, uwsgi_getsockname(z_sock->sockets[pos]), z_sock->sockets[pos]); } pos++; p = strtok(NULL, ","); } free(sock_list); return z_sock; }
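// emperor_loop(): the Emperor event loop. It installs its signal handlers, builds the configured vassal scanners, optionally binds the Emperor stats server, then waits on the event queue: single-byte messages from vassals (17 = loyal, 26 = heartbeat, 22 = voluntary stop, 30 = Broodlord zerg request) update per-instance state, while waitpid() results drive respawning or removal of dead vassals.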
void emperor_loop() { // monitor a directory struct uwsgi_instance ui_base; struct uwsgi_instance *ui_current; pid_t diedpid; int waitpid_status; int has_children = 0; int i_am_alone = 0; int i; void *events; int nevents; int interesting_fd; char notification_message[64]; struct rlimit rl; uwsgi.emperor_stats_fd = -1; if (uwsgi.emperor_pidfile) { uwsgi_write_pidfile(uwsgi.emperor_pidfile); } signal(SIGPIPE, SIG_IGN); uwsgi_unix_signal(SIGINT, royal_death); uwsgi_unix_signal(SIGTERM, royal_death); uwsgi_unix_signal(SIGQUIT, royal_death); uwsgi_unix_signal(SIGUSR1, emperor_stats); uwsgi_unix_signal(SIGHUP, emperor_massive_reload); memset(&ui_base, 0, sizeof(struct uwsgi_instance)); if (getrlimit(RLIMIT_NOFILE, &rl)) { uwsgi_error("getrlimit()"); exit(1); } uwsgi.max_fd = rl.rlim_cur; emperor_throttle_level = uwsgi.emperor_throttle; // the queue must be initialized before adding scanners uwsgi.emperor_queue = event_queue_init(); emperor_build_scanners(); events = event_queue_alloc(64); if (uwsgi.has_emperor) { uwsgi_log("*** starting uWSGI sub-Emperor ***\n"); } else { uwsgi_log("*** starting uWSGI Emperor ***\n"); } if (uwsgi.emperor_stats) { char *tcp_port = strchr(uwsgi.emperor_stats, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; uwsgi.emperor_stats_fd = bind_to_tcp(uwsgi.emperor_stats, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { uwsgi.emperor_stats_fd = bind_to_unix(uwsgi.emperor_stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(uwsgi.emperor_queue, uwsgi.emperor_stats_fd); uwsgi_log("*** Emperor stats server enabled on %s fd: %d ***\n", uwsgi.emperor_stats, uwsgi.emperor_stats_fd); } ui = &ui_base; int freq = 0; for (;;) { if (!i_am_alone) { diedpid = waitpid(uwsgi.emperor_pid, &waitpid_status, WNOHANG); if (diedpid < 0 || diedpid > 0) { i_am_alone = 1; } } nevents = event_queue_wait_multi(uwsgi.emperor_queue, freq, events, 64); freq = uwsgi.emperor_freq; for (i = 0; i < nevents; i++) { interesting_fd = event_queue_interesting_fd(events, i); if (uwsgi.emperor_stats && uwsgi.emperor_stats_fd > -1 && interesting_fd == uwsgi.emperor_stats_fd) { emperor_send_stats(uwsgi.emperor_stats_fd); continue; } // check if a monitor is mapped to that file descriptor if (uwsgi_emperor_scanner_event(interesting_fd)) continue; ui_current = emperor_get_by_fd(interesting_fd); if (ui_current) { char byte; ssize_t rlen = read(interesting_fd, &byte, 1); if (rlen <= 0) { // SAFE if (!ui_current->config_len) { emperor_del(ui_current); } } else { if (byte == 17) { ui_current->loyal = 1; ui_current->last_loyal = uwsgi_now(); uwsgi_log("[emperor] vassal %s is now loyal\n", ui_current->name); // remove it from the blacklist uwsgi_emperor_blacklist_remove(ui_current->name); // TODO post-start hook } // heartbeat can be used for spotting blocked instances else if (byte == 26) { ui_current->last_heartbeat = uwsgi_now(); } else if (byte == 22) { emperor_stop(ui_current); } else if (byte == 30 && uwsgi.emperor_broodlord > 0 && uwsgi.emperor_broodlord_count < uwsgi.emperor_broodlord) { uwsgi_log("[emperor] going in broodlord mode: launching zergs for %s\n", ui_current->name); char *zerg_name = uwsgi_concat3(ui_current->name, ":", "zerg"); emperor_add(NULL, zerg_name, uwsgi_now(), NULL, 0, ui_current->uid, ui_current->gid); free(zerg_name); } } } else { uwsgi_log("[emperor] unrecognized vassal event on fd %d\n", interesting_fd); 
close(interesting_fd); } } uwsgi_emperor_run_scanners(); // check for heartbeat (if required) ui_current = ui->ui_next; while (ui_current) { if (ui_current->last_heartbeat > 0) { if ((ui_current->last_heartbeat + uwsgi.emperor_heartbeat) < uwsgi_now()) { uwsgi_log("[emperor] vassal %s sent no heartbeat in the last %d seconds, respawning it...\n", ui_current->name, uwsgi.emperor_heartbeat); // set last_heartbeat to 0 to avoid races ui_current->last_heartbeat = 0; emperor_respawn(ui_current, uwsgi_now()); } } ui_current = ui_current->ui_next; } // check for removed instances ui_current = ui; has_children = 0; while (ui_current->ui_next) { ui_current = ui_current->ui_next; has_children++; } if (uwsgi.notify) { if (snprintf(notification_message, 64, "The Emperor is governing %d vassals", has_children) >= 34) { uwsgi_notify(notification_message); } } if (has_children) { diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG); } else { // vacuum waitpid(WAIT_ANY, &waitpid_status, WNOHANG); diedpid = 0; } if (diedpid < 0) { // this seems to happen when the OOM killer is triggered by a Linux cgroup, but it could be a uWSGI bug :P // in any case, fall back to a clean situation... if (errno == ECHILD) { uwsgi_log("--- MUTINY DETECTED !!! IMPALING VASSALS... ---\n"); ui_current = ui->ui_next; while (ui_current) { struct uwsgi_instance *rebel_vassal = ui_current; ui_current = ui_current->ui_next; emperor_del(rebel_vassal); } } else { uwsgi_error("waitpid()"); } } ui_current = ui; while (ui_current->ui_next) { ui_current = ui_current->ui_next; if (ui_current->status == 1) { if (ui_current->config) free(ui_current->config); // SAFE emperor_del(ui_current); break; } else if (ui_current->pid == diedpid) { if (ui_current->status == 0) { // respawn an accidentally dead instance if its exit code is not UWSGI_EXILE_CODE if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXILE_CODE) { // SAFE emperor_del(ui_current); } else { // UNSAFE emperor_add(ui_current->scanner, ui_current->name, ui_current->last_mod, ui_current->config, ui_current->config_len, ui_current->uid, ui_current->gid); emperor_del(ui_current); } break; } else if (ui_current->status == 1) { // remove 'marked for death' instance if (ui_current->config) free(ui_current->config); // SAFE emperor_del(ui_current); break; } } } } }
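// master_loop(): the (legacy) master process loop. It sets up the signal routing pipes, the optional log master, the stats/zerg/UDP/SNMP sockets, spawns mules, gateways and daemons, then cycles forever: reaping dead children with waitpid(), driving the cheaper algorithm, expiring rb_timers, dispatching uwsgi signals coming from workers, spoolers and mules, and enforcing harakiri and evil memory limits.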
int master_loop(char **argv, char **environ) { uint64_t tmp_counter; struct timeval last_respawn; int last_respawn_rate = 0; int pid_found = 0; pid_t diedpid; int waitpid_status; uint8_t uwsgi_signal; time_t last_request_timecheck = 0, now = 0; uint64_t last_request_count = 0; pthread_t logger_thread; pthread_t cache_sweeper; #ifdef UWSGI_UDP int udp_fd = -1; #ifdef UWSGI_MULTICAST char *cluster_opt_buf = NULL; size_t cluster_opt_size = 4; #endif #endif #ifdef UWSGI_SNMP int snmp_fd = -1; #endif int i = 0; int rlen; int check_interval = 1; struct uwsgi_rb_timer *min_timeout; struct rb_root *rb_timers = uwsgi_init_rb_timer(); if (uwsgi.procname_master) { uwsgi_set_processname(uwsgi.procname_master); } else if (uwsgi.procname) { uwsgi_set_processname(uwsgi.procname); } else if (uwsgi.auto_procname) { uwsgi_set_processname("uWSGI master"); } uwsgi.current_time = uwsgi_now(); uwsgi_unix_signal(SIGTSTP, suspend_resume_them_all); uwsgi_unix_signal(SIGHUP, grace_them_all); if (uwsgi.die_on_term) { uwsgi_unix_signal(SIGTERM, kill_them_all); uwsgi_unix_signal(SIGQUIT, reap_them_all); } else { uwsgi_unix_signal(SIGTERM, reap_them_all); uwsgi_unix_signal(SIGQUIT, kill_them_all); } uwsgi_unix_signal(SIGINT, kill_them_all); uwsgi_unix_signal(SIGUSR1, stats); if (uwsgi.auto_snapshot) { uwsgi_unix_signal(SIGURG, uwsgi_restore_auto_snapshot); } atexit(uwsgi_master_cleanup_hooks); uwsgi.master_queue = event_queue_init(); /* route signals to workers... */ #ifdef UWSGI_DEBUG uwsgi_log("adding %d to signal poll\n", uwsgi.shared->worker_signal_pipe[0]); #endif event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_signal_pipe[0]); #ifdef UWSGI_SPOOLER if (uwsgi.spoolers) { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->spooler_signal_pipe[0]); } #endif if (uwsgi.mules_cnt > 0) { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->mule_signal_pipe[0]); } if (uwsgi.log_master) { uwsgi.log_master_buf = uwsgi_malloc(uwsgi.log_master_bufsize); if (!uwsgi.threaded_logger) { #ifdef UWSGI_DEBUG uwsgi_log("adding %d to master logging\n", uwsgi.shared->worker_log_pipe[0]); #endif event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]); } else { if (pthread_create(&logger_thread, NULL, logger_thread_loop, NULL)) { uwsgi_error("pthread_create()"); uwsgi_log("falling back to non-threaded logger...\n"); event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]); uwsgi.threaded_logger = 0; } } #ifdef UWSGI_ALARM // initialize the alarm subsystem uwsgi_alarms_init(); #endif } if (uwsgi.cache_max_items > 0 && !uwsgi.cache_no_expire) { if (pthread_create(&cache_sweeper, NULL, cache_sweeper_loop, NULL)) { uwsgi_error("pthread_create()"); uwsgi_log("unable to run the cache sweeper !!!\n"); } else { uwsgi_log("cache sweeper thread enabled\n"); } } uwsgi.wsgi_req->buffer = uwsgi.workers[0].cores[0].buffer; if (uwsgi.has_emperor) { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd); } if (uwsgi.zerg_server) { uwsgi.zerg_server_fd = bind_to_unix(uwsgi.zerg_server, uwsgi.listen_queue, 0, 0); event_queue_add_fd_read(uwsgi.master_queue, uwsgi.zerg_server_fd); uwsgi_log("*** Zerg server enabled on %s ***\n", uwsgi.zerg_server); } if (uwsgi.stats) { char *tcp_port = strchr(uwsgi.stats, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; uwsgi.stats_fd = bind_to_tcp(uwsgi.stats, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { 
uwsgi.stats_fd = bind_to_unix(uwsgi.stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(uwsgi.master_queue, uwsgi.stats_fd); uwsgi_log("*** Stats server enabled on %s fd: %d ***\n", uwsgi.stats, uwsgi.stats_fd); } #ifdef UWSGI_UDP if (uwsgi.udp_socket) { udp_fd = bind_to_udp(uwsgi.udp_socket, 0, 0); if (udp_fd < 0) { uwsgi_log("unable to bind to udp socket. SNMP and cluster management services will be disabled.\n"); } else { uwsgi_log("UDP server enabled.\n"); event_queue_add_fd_read(uwsgi.master_queue, udp_fd); } } #ifdef UWSGI_MULTICAST if (uwsgi.cluster) { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.cluster_fd); cluster_opt_buf = uwsgi_setup_clusterbuf(&cluster_opt_size); } #endif #endif #ifdef UWSGI_SNMP snmp_fd = uwsgi_setup_snmp(); #endif if (uwsgi.cheap) { uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1); for (i = 1; i <= uwsgi.numproc; i++) { uwsgi.workers[i].cheaped = 1; } uwsgi_log("cheap mode enabled: waiting for socket connection...\n"); } // spawn mules for (i = 0; i < uwsgi.mules_cnt; i++) { size_t mule_patch_size = 0; uwsgi.mules[i].patch = uwsgi_string_get_list(&uwsgi.mules_patches, i, &mule_patch_size); uwsgi_mule(i + 1); } // spawn gateways for (i = 0; i < ushared->gateways_cnt; i++) { if (ushared->gateways[i].pid == 0) { gateway_respawn(i); } } // spawn daemons uwsgi_daemons_spawn_all(); // first subscription struct uwsgi_string_list *subscriptions = uwsgi.subscriptions; while (subscriptions) { uwsgi_subscribe(subscriptions->value, 0); subscriptions = subscriptions->next; } // sync the cache store if needed if (uwsgi.cache_store && uwsgi.cache_filesize) { if (msync(uwsgi.cache_items, uwsgi.cache_filesize, MS_ASYNC)) { uwsgi_error("msync()"); } } if (uwsgi.queue_store && uwsgi.queue_filesize) { if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) { uwsgi_error("msync()"); } } // update touches timestamps uwsgi_check_touches(uwsgi.touch_reload); uwsgi_check_touches(uwsgi.touch_logrotate); uwsgi_check_touches(uwsgi.touch_logreopen); // setup cheaper algos uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare; if (uwsgi.requested_cheaper_algo) { uwsgi.cheaper_algo = NULL; struct uwsgi_cheaper_algo *uca = uwsgi.cheaper_algos; while (uca) { if (!strcmp(uca->name, uwsgi.requested_cheaper_algo)) { uwsgi.cheaper_algo = uca->func; break; } uca = uca->next; } if (!uwsgi.cheaper_algo) { uwsgi_log("unable to find requested cheaper algorithm, falling back to spare\n"); uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare; } } // here really starts the master loop for (;;) { //uwsgi_log("uwsgi.ready_to_reload %d %d\n", uwsgi.ready_to_reload, uwsgi.numproc); // run master_cycle hook for every plugin for (i = 0; i < uwsgi.gp_cnt; i++) { if (uwsgi.gp[i]->master_cycle) { uwsgi.gp[i]->master_cycle(); } } for (i = 0; i < 256; i++) { if (uwsgi.p[i]->master_cycle) { uwsgi.p[i]->master_cycle(); } } uwsgi_daemons_smart_check(); // count the number of active workers int active_workers = 0; for (i = 1; i <= uwsgi.numproc; i++) { if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) { active_workers++; } } if (uwsgi.to_outworld) { //uwsgi_log("%d/%d\n", uwsgi.lazy_respawned, uwsgi.numproc); if (uwsgi.lazy_respawned >= active_workers) { uwsgi.to_outworld = 0; uwsgi.master_mercy = 0; uwsgi.lazy_respawned = 0; } } if (uwsgi_master_check_mercy()) return 0; if (uwsgi.respawn_workers) { for (i = 1; i <= uwsgi.respawn_workers; i++) { if (uwsgi_respawn_worker(i)) return 0; } uwsgi.respawn_workers = 0; } if (uwsgi.restore_snapshot) { 
uwsgi_master_restore_snapshot(); continue; } // cheaper management if (uwsgi.cheaper && !uwsgi.cheap && !uwsgi.to_heaven && !uwsgi.to_hell && !uwsgi.to_outworld && !uwsgi.workers[0].suspended) { if (!uwsgi_calc_cheaper()) return 0; } if ((uwsgi.cheap || uwsgi.ready_to_die >= active_workers) && uwsgi.to_hell) { // call a series of waitpid to ensure all processes (gateways, mules and daemons) are dead for (i = 0; i < (ushared->gateways_cnt + uwsgi.daemons_cnt + uwsgi.mules_cnt); i++) { diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG); } uwsgi_log("goodbye to uWSGI.\n"); exit(0); } if ((uwsgi.cheap || uwsgi.ready_to_reload >= active_workers) && uwsgi.to_heaven) { uwsgi_reload(argv); // never here (unless in shared library mode) return -1; } diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG); if (diedpid == -1) { if (errno == ECHILD) { // something did not work as expected, just assume all has been cleared if (uwsgi.to_heaven) { uwsgi.ready_to_reload = uwsgi.numproc; continue; } else if (uwsgi.to_hell) { uwsgi.ready_to_die = uwsgi.numproc; continue; } else if (uwsgi.to_outworld) { uwsgi.lazy_respawned = uwsgi.numproc; uwsgi_log("*** no workers to reload found ***\n"); continue; } diedpid = 0; } else { uwsgi_error("waitpid()"); /* here is better to reload all the uWSGI stack */ uwsgi_log("something horrible happened...\n"); reap_them_all(0); exit(1); } } if (diedpid == 0) { /* all processes ok, doing status scan after N seconds */ check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL]; if (!check_interval) check_interval = 1; // add unregistered file monitors // locking is not needed as monitors can only increase for (i = 0; i < ushared->files_monitored_cnt; i++) { if (!ushared->files_monitored[i].registered) { ushared->files_monitored[i].fd = event_queue_add_file_monitor(uwsgi.master_queue, ushared->files_monitored[i].filename, &ushared->files_monitored[i].id); ushared->files_monitored[i].registered = 1; } } // add unregistered timers // locking is not needed as timers can only increase for (i = 0; i < ushared->timers_cnt; i++) { if (!ushared->timers[i].registered) { ushared->timers[i].fd = event_queue_add_timer(uwsgi.master_queue, &ushared->timers[i].id, ushared->timers[i].value); ushared->timers[i].registered = 1; } } // add unregistered rb_timers // locking is not needed as rb_timers can only increase for (i = 0; i < ushared->rb_timers_cnt; i++) { if (!ushared->rb_timers[i].registered) { ushared->rb_timers[i].uwsgi_rb_timer = uwsgi_add_rb_timer(rb_timers, uwsgi_now() + ushared->rb_timers[i].value, &ushared->rb_timers[i]); ushared->rb_timers[i].registered = 1; } } int interesting_fd = -1; if (ushared->rb_timers_cnt > 0) { min_timeout = uwsgi_min_rb_timer(rb_timers); if (min_timeout == NULL) { check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL]; } else { check_interval = min_timeout->key - uwsgi_now(); if (check_interval <= 0) { expire_rb_timeouts(rb_timers); check_interval = 0; } } } // wait for event rlen = event_queue_wait(uwsgi.master_queue, check_interval, &interesting_fd); if (rlen == 0) { if (ushared->rb_timers_cnt > 0) { expire_rb_timeouts(rb_timers); } } // check uwsgi-cron table if (ushared->cron_cnt) { uwsgi_manage_signal_cron(uwsgi_now()); } if (uwsgi.crons) { uwsgi_manage_command_cron(uwsgi_now()); } // check for probes if (ushared->probes_cnt > 0) { uwsgi_lock(uwsgi.probe_table_lock); for (i = 0; i < ushared->probes_cnt; i++) { if (interesting_fd == -1) { // increment cycles ushared->probes[i].cycles++; } if 
(ushared->probes[i].func(interesting_fd, &ushared->probes[i])) { uwsgi_route_signal(ushared->probes[i].sig); } } uwsgi_unlock(uwsgi.probe_table_lock); } if (rlen > 0) { if (uwsgi.log_master && !uwsgi.threaded_logger) { if (interesting_fd == uwsgi.shared->worker_log_pipe[0]) { uwsgi_master_log(); goto health_cycle; } } if (uwsgi.stats && uwsgi.stats_fd > -1) { if (interesting_fd == uwsgi.stats_fd) { uwsgi_send_stats(uwsgi.stats_fd); goto health_cycle; } } if (uwsgi.zerg_server) { if (interesting_fd == uwsgi.zerg_server_fd) { uwsgi_manage_zerg(uwsgi.zerg_server_fd, 0, NULL); goto health_cycle; } } if (uwsgi.has_emperor) { if (interesting_fd == uwsgi.emperor_fd) { uwsgi_master_manage_emperor(); goto health_cycle; } } if (uwsgi.cheap) { int found = 0; struct uwsgi_socket *uwsgi_sock = uwsgi.sockets; while (uwsgi_sock) { if (interesting_fd == uwsgi_sock->fd) { found = 1; uwsgi.cheap = 0; uwsgi_del_sockets_from_queue(uwsgi.master_queue); int needed = uwsgi.numproc; if (uwsgi.cheaper) { needed = uwsgi.cheaper_count; } for (i = 1; i <= needed; i++) { if (uwsgi_respawn_worker(i)) return 0; } break; } uwsgi_sock = uwsgi_sock->next; } // here it is better to continue instead of going to health_cycle if (found) continue; } #ifdef UWSGI_SNMP if (uwsgi.snmp_addr && interesting_fd == snmp_fd) { uwsgi_master_manage_snmp(snmp_fd); goto health_cycle; } #endif #ifdef UWSGI_UDP if (uwsgi.udp_socket && interesting_fd == udp_fd) { uwsgi_master_manage_udp(udp_fd); goto health_cycle; } #ifdef UWSGI_MULTICAST if (interesting_fd == uwsgi.cluster_fd) { if (uwsgi_get_dgram(uwsgi.cluster_fd, &uwsgi.workers[0].cores[0].req)) { goto health_cycle; } manage_cluster_message(cluster_opt_buf, cluster_opt_size); goto health_cycle; } #endif #endif int next_iteration = 0; uwsgi_lock(uwsgi.fmon_table_lock); for (i = 0; i < ushared->files_monitored_cnt; i++) { if (ushared->files_monitored[i].registered) { if (interesting_fd == ushared->files_monitored[i].fd) { struct uwsgi_fmon *uf = event_queue_ack_file_monitor(uwsgi.master_queue, interesting_fd); // now call the file_monitor handler if (uf) uwsgi_route_signal(uf->sig); break; } } } uwsgi_unlock(uwsgi.fmon_table_lock); if (next_iteration) goto health_cycle; next_iteration = 0; uwsgi_lock(uwsgi.timer_table_lock); for (i = 0; i < ushared->timers_cnt; i++) { if (ushared->timers[i].registered) { if (interesting_fd == ushared->timers[i].fd) { struct uwsgi_timer *ut = event_queue_ack_timer(interesting_fd); // now call the timer handler if (ut) uwsgi_route_signal(ut->sig); break; } } } uwsgi_unlock(uwsgi.timer_table_lock); if (next_iteration) goto health_cycle; // check for worker signal if (interesting_fd == uwsgi.shared->worker_signal_pipe[0]) { rlen = read(interesting_fd, &uwsgi_signal, 1); if (rlen < 0) { uwsgi_error("read()"); } else if (rlen > 0) { #ifdef UWSGI_DEBUG uwsgi_log_verbose("received uwsgi signal %d from a worker\n", uwsgi_signal); #endif uwsgi_route_signal(uwsgi_signal); } else { uwsgi_log_verbose("lost connection with worker %d\n", i); close(interesting_fd); } goto health_cycle; } #ifdef UWSGI_SPOOLER // check for spooler signal if (uwsgi.spoolers) { if (interesting_fd == uwsgi.shared->spooler_signal_pipe[0]) { rlen = read(interesting_fd, &uwsgi_signal, 1); if (rlen < 0) { uwsgi_error("read()"); } else if (rlen > 0) { #ifdef UWSGI_DEBUG uwsgi_log_verbose("received uwsgi signal %d from a spooler\n", uwsgi_signal); #endif uwsgi_route_signal(uwsgi_signal); } else { uwsgi_log_verbose("lost connection with the spooler\n"); close(interesting_fd); } goto health_cycle; } 
} #endif // check for mules signal if (uwsgi.mules_cnt > 0) { if (interesting_fd == uwsgi.shared->mule_signal_pipe[0]) { rlen = read(interesting_fd, &uwsgi_signal, 1); if (rlen < 0) { uwsgi_error("read()"); } else if (rlen > 0) { #ifdef UWSGI_DEBUG uwsgi_log_verbose("received uwsgi signal %d from a mule\n", uwsgi_signal); #endif uwsgi_route_signal(uwsgi_signal); } else { uwsgi_log_verbose("lost connection with a mule\n"); close(interesting_fd); } goto health_cycle; } } } health_cycle: now = uwsgi_now(); if (now - uwsgi.current_time < 1) { continue; } uwsgi.current_time = now; // checking logsize if (uwsgi.logfile) { uwsgi_check_logrotate(); } // this will be incremented at (more or less) regular intervals uwsgi.master_cycles++; // recalculate the requests counter on configurations at risk of race conditions // a bit of inaccuracy is better than locking ;) if (uwsgi.numproc > 1) { tmp_counter = 0; for (i = 1; i < uwsgi.numproc + 1; i++) tmp_counter += uwsgi.workers[i].requests; uwsgi.workers[0].requests = tmp_counter; } if (uwsgi.idle > 0 && !uwsgi.cheap) { uwsgi.current_time = uwsgi_now(); if (!last_request_timecheck) last_request_timecheck = uwsgi.current_time; int busy_workers = 0; for (i = 1; i <= uwsgi.numproc; i++) { if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) { if (uwsgi.workers[i].busy == 1) { busy_workers = 1; break; } } } if (last_request_count != uwsgi.workers[0].requests) { last_request_timecheck = uwsgi.current_time; last_request_count = uwsgi.workers[0].requests; } // a bit of over-engineering to avoid clock skews else if (last_request_timecheck < uwsgi.current_time && (uwsgi.current_time - last_request_timecheck > uwsgi.idle) && !busy_workers) { uwsgi_log("workers have been inactive for more than %d seconds (%llu-%llu)\n", uwsgi.idle, (unsigned long long) uwsgi.current_time, (unsigned long long) last_request_timecheck); uwsgi.cheap = 1; if (uwsgi.die_on_idle) { if (uwsgi.has_emperor) { char byte = 22; if (write(uwsgi.emperor_fd, &byte, 1) != 1) { uwsgi_error("write()"); kill_them_all(0); } } else { kill_them_all(0); } continue; } for (i = 1; i <= uwsgi.numproc; i++) { uwsgi.workers[i].cheaped = 1; if (uwsgi.workers[i].pid == 0) continue; kill(uwsgi.workers[i].pid, SIGKILL); if (waitpid(uwsgi.workers[i].pid, &waitpid_status, 0) < 0) { if (errno != ECHILD) uwsgi_error("waitpid()"); } } uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1); uwsgi_log("cheap mode enabled: waiting for socket connection...\n"); last_request_timecheck = 0; continue; } } check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL]; if (!check_interval) check_interval = 1; #ifdef __linux__ // get listen_queue status struct uwsgi_socket *uwsgi_sock = uwsgi.sockets; while (uwsgi_sock) { if (uwsgi_sock->family == AF_INET) { get_linux_tcp_info(uwsgi_sock->fd); } #ifdef SIOBKLGQ else if (uwsgi_sock->family == AF_UNIX) { get_linux_unbit_SIOBKLGQ(uwsgi_sock->fd); } #endif uwsgi_sock = uwsgi_sock->next; } #endif for (i = 1; i <= uwsgi.numproc; i++) { /* first check for harakiri */ if (uwsgi.workers[i].harakiri > 0) { if (uwsgi.workers[i].harakiri < (time_t) uwsgi.current_time) { trigger_harakiri(i); } } /* then user-defined harakiri */ if (uwsgi.workers[i].user_harakiri > 0) { if (uwsgi.workers[i].user_harakiri < (time_t) uwsgi.current_time) { trigger_harakiri(i); } } // then for evil memory checkers if (uwsgi.evil_reload_on_as) { if ((rlim_t) uwsgi.workers[i].vsz_size >= uwsgi.evil_reload_on_as) { uwsgi_log("*** EVIL RELOAD ON WORKER %d ADDRESS SPACE: %lld (pid: %d) ***\n", i, (long long) 
uwsgi.workers[i].vsz_size, uwsgi.workers[i].pid); kill(uwsgi.workers[i].pid, SIGKILL); uwsgi.workers[i].vsz_size = 0; } } if (uwsgi.evil_reload_on_rss) { if ((rlim_t) uwsgi.workers[i].rss_size >= uwsgi.evil_reload_on_rss) { uwsgi_log("*** EVIL RELOAD ON WORKER %d RSS: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].rss_size, uwsgi.workers[i].pid); kill(uwsgi.workers[i].pid, SIGKILL); uwsgi.workers[i].rss_size = 0; } } // need to find a better way //uwsgi.workers[i].last_running_time = uwsgi.workers[i].running_time; } for (i = 0; i < ushared->gateways_cnt; i++) { if (ushared->gateways_harakiri[i] > 0) { if (ushared->gateways_harakiri[i] < (time_t) uwsgi.current_time) { if (ushared->gateways[i].pid > 0) { kill(ushared->gateways[i].pid, SIGKILL); } ushared->gateways_harakiri[i] = 0; } } } for (i = 0; i < uwsgi.mules_cnt; i++) { if (uwsgi.mules[i].harakiri > 0) { if (uwsgi.mules[i].harakiri < (time_t) uwsgi.current_time) { uwsgi_log("*** HARAKIRI ON MULE %d HANDLING SIGNAL %d (pid: %d) ***\n", i + 1, uwsgi.mules[i].signum, uwsgi.mules[i].pid); kill(uwsgi.mules[i].pid, SIGKILL); uwsgi.mules[i].harakiri = 0; } } } #ifdef UWSGI_SPOOLER struct uwsgi_spooler *uspool = uwsgi.spoolers; while (uspool) { if (uspool->harakiri > 0 && uspool->harakiri < (time_t) uwsgi.current_time) { uwsgi_log("*** HARAKIRI ON THE SPOOLER (pid: %d) ***\n", uspool->pid); kill(uspool->pid, SIGKILL); uspool->harakiri = 0; } uspool = uspool->next; } #endif #ifdef __linux__ #ifdef MADV_MERGEABLE if (uwsgi.linux_ksm > 0 && (uwsgi.master_cycles % uwsgi.linux_ksm) == 0) { uwsgi_linux_ksm_map(); } #endif #endif #ifdef UWSGI_UDP // check for cluster nodes master_check_cluster_nodes(); // reannounce myself every 10 cycles if (uwsgi.cluster && uwsgi.cluster_fd >= 0 && !uwsgi.cluster_nodes && (uwsgi.master_cycles % 10) == 0) { uwsgi_cluster_add_me(); } // resubscribe every 10 cycles by default if ((uwsgi.subscriptions && ((uwsgi.master_cycles % uwsgi.subscribe_freq) == 0 || uwsgi.master_cycles == 1)) && !uwsgi.to_heaven && !uwsgi.to_hell && !uwsgi.workers[0].suspended) { struct uwsgi_string_list *subscriptions = uwsgi.subscriptions; while (subscriptions) { uwsgi_subscribe(subscriptions->value, 0); subscriptions = subscriptions->next; } } #endif if (uwsgi.cache_store && uwsgi.cache_filesize && uwsgi.cache_store_sync && ((uwsgi.master_cycles % uwsgi.cache_store_sync) == 0)) { if (msync(uwsgi.cache_items, uwsgi.cache_filesize, MS_ASYNC)) { uwsgi_error("msync()"); } } if (uwsgi.queue_store && uwsgi.queue_filesize && uwsgi.queue_store_sync && ((uwsgi.master_cycles % uwsgi.queue_store_sync) == 0)) { if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) { uwsgi_error("msync()"); } } // check touch_reload if (!uwsgi.to_heaven && !uwsgi.to_hell) { char *touched = uwsgi_check_touches(uwsgi.touch_reload); if (touched) { uwsgi_log("*** %s has been touched... grace them all !!! 
***\n", touched); uwsgi_block_signal(SIGHUP); grace_them_all(0); uwsgi_unblock_signal(SIGHUP); } } continue; } // no one died if (diedpid <= 0) continue; // check for deadlocks first uwsgi_deadlock_check(diedpid); // reload gateways and daemons only on normal workflow (+outworld status) if (!uwsgi.to_heaven && !uwsgi.to_hell) { #ifdef UWSGI_SPOOLER /* reload the spooler */ struct uwsgi_spooler *uspool = uwsgi.spoolers; pid_found = 0; while (uspool) { if (uspool->pid > 0 && diedpid == uspool->pid) { uwsgi_log("OOOPS the spooler is no more...trying respawn...\n"); uspool->respawned++; uspool->pid = spooler_start(uspool); pid_found = 1; break; } uspool = uspool->next; } if (pid_found) continue; #endif pid_found = 0; for (i = 0; i < uwsgi.mules_cnt; i++) { if (uwsgi.mules[i].pid == diedpid) { uwsgi_log("OOOPS mule %d (pid: %d) crippled...trying respawn...\n", i + 1, uwsgi.mules[i].pid); uwsgi_mule(i + 1); pid_found = 1; break; } } if (pid_found) continue; /* reload the gateways */ pid_found = 0; for (i = 0; i < ushared->gateways_cnt; i++) { if (ushared->gateways[i].pid == diedpid) { gateway_respawn(i); pid_found = 1; break; } } if (pid_found) continue; /* reload the daemons */ pid_found = uwsgi_daemon_check_pid_reload(diedpid); if (pid_found) continue; } /* What happens here ? case 1) the diedpid is not a worker, report it and continue case 2) the diedpid is a worker and we are not in a reload procedure -> reload it case 3) the diedpid is a worker and we are in graceful reload -> uwsgi.ready_to_reload++ and continue case 3) the diedpid is a worker and we are in brutal reload -> uwsgi.ready_to_die++ and continue */ uwsgi.mywid = find_worker_id(diedpid); if (uwsgi.mywid <= 0) { // check spooler, mules, gateways and daemons #ifdef UWSGI_SPOOLER struct uwsgi_spooler *uspool = uwsgi.spoolers; while (uspool) { if (uspool->pid > 0 && diedpid == uspool->pid) { uwsgi_log("spooler (pid: %d) annihilated\n", (int) diedpid); goto next; } uspool = uspool->next; } #endif for (i = 0; i < uwsgi.mules_cnt; i++) { if (uwsgi.mules[i].pid == diedpid) { uwsgi_log("mule %d (pid: %d) annihilated\n", i + 1, (int) diedpid); goto next; } } for (i = 0; i < ushared->gateways_cnt; i++) { if (ushared->gateways[i].pid == diedpid) { uwsgi_log("gateway %d (%s, pid: %d) annihilated\n", i + 1, ushared->gateways[i].fullname, (int) diedpid); goto next; } } if (uwsgi_daemon_check_pid_death(diedpid)) goto next; if (WIFEXITED(waitpid_status)) { uwsgi_log("subprocess %d exited with code %d\n", (int) diedpid, WEXITSTATUS(waitpid_status)); } else if (WIFSIGNALED(waitpid_status)) { uwsgi_log("subprocess %d exited by signal %d\n", (int) diedpid, WTERMSIG(waitpid_status)); } else if (WIFSTOPPED(waitpid_status)) { uwsgi_log("subprocess %d stopped\n", (int) diedpid); } next: continue; } // ok a worker died... if (uwsgi.to_heaven) { uwsgi.ready_to_reload++; uwsgi.workers[uwsgi.mywid].pid = 0; // only to be safe :P uwsgi.workers[uwsgi.mywid].harakiri = 0; continue; } else if (uwsgi.to_hell) { uwsgi.ready_to_die++; uwsgi.workers[uwsgi.mywid].pid = 0; // only to be safe :P uwsgi.workers[uwsgi.mywid].harakiri = 0; continue; } else if (uwsgi.to_outworld) { uwsgi.lazy_respawned++; uwsgi.workers[uwsgi.mywid].destroy = 0; uwsgi.workers[uwsgi.mywid].pid = 0; // only to be safe :P uwsgi.workers[uwsgi.mywid].harakiri = 0; } if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_FAILED_APP_CODE) { uwsgi_log("OOPS ! 
failed loading app in worker %d (pid %d) :( trying again...\n", uwsgi.mywid, (int) diedpid); } else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_DE_HIJACKED_CODE) { uwsgi_log("...restoring worker %d (pid: %d)...\n", uwsgi.mywid, (int) diedpid); } else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXCEPTION_CODE) { uwsgi_log("... monitored exception detected, respawning worker %d (pid: %d)...\n", uwsgi.mywid, (int) diedpid); } else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_QUIET_CODE) { // noop } else if (uwsgi.workers[uwsgi.mywid].manage_next_request) { if (WIFSIGNALED(waitpid_status)) { uwsgi_log("DAMN ! worker %d (pid: %d) died, killed by signal %d :( trying respawn ...\n", uwsgi.mywid, (int) diedpid, (int) WTERMSIG(waitpid_status)); } else { uwsgi_log("DAMN ! worker %d (pid: %d) died :( trying respawn ...\n", uwsgi.mywid, (int) diedpid); } } else { uwsgi_log("DAMN ! worker %d (pid: %d) MYSTERIOUSLY died :( trying respawn ...\n", uwsgi.mywid, (int) diedpid); } if (uwsgi.workers[uwsgi.mywid].cheaped == 1) { uwsgi.workers[uwsgi.mywid].pid = 0; uwsgi_log("uWSGI worker %d cheaped.\n", uwsgi.mywid); uwsgi.workers[uwsgi.mywid].harakiri = 0; continue; } gettimeofday(&last_respawn, NULL); if (last_respawn.tv_sec <= uwsgi.respawn_delta + check_interval) { last_respawn_rate++; if (last_respawn_rate > uwsgi.numproc) { if (uwsgi.forkbomb_delay > 0) { uwsgi_log("workers respawning too fast !!! I have to sleep a bit (%d seconds)...\n", uwsgi.forkbomb_delay); /* use --forkbomb-delay 0 to disable sleeping */ sleep(uwsgi.forkbomb_delay); } last_respawn_rate = 0; } } else { last_respawn_rate = 0; } gettimeofday(&last_respawn, NULL); uwsgi.respawn_delta = last_respawn.tv_sec; if (uwsgi_respawn_worker(uwsgi.mywid)) return 0; // end of the loop } // never here }
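// uwsgi_corerouter_setup_sockets(): bind every gateway socket owned by this corerouter. A name starting with '=' reuses a shared socket by number, a "fd://N" name adopts an already-opened file descriptor, names containing ':' are bound as TCP and everything else as a UNIX socket; subscription sockets are bound as UDP (when the name contains ':') or UNIX datagram sockets instead. All sockets are put in non-blocking mode.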
void uwsgi_corerouter_setup_sockets(struct uwsgi_corerouter *ucr) { struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets; while (ugs) { if (!strcmp(ucr->name, ugs->owner)) { if (!ugs->subscription) { if (ugs->name[0] == '=') { int shared_socket = atoi(ugs->name+1); if (shared_socket >= 0) { ugs->fd = uwsgi_get_shared_socket_fd_by_num(shared_socket); ugs->shared = 1; if (ugs->fd == -1) { uwsgi_log("unable to use shared socket %d\n", shared_socket); exit(1); } ugs->name = uwsgi_getsockname(ugs->fd); } } else if (!uwsgi_startswith("fd://", ugs->name, 5 )) { int fd_socket = atoi(ugs->name+5); if (fd_socket >= 0) { ugs->fd = fd_socket; ugs->name = uwsgi_getsockname(ugs->fd); if (!ugs->name) { uwsgi_log("unable to use file descriptor %d as socket\n", fd_socket); exit(1); } } } else { ugs->port = strchr(ugs->name, ':'); if (ugs->fd == -1) { if (ugs->port) { ugs->fd = bind_to_tcp(ugs->name, uwsgi.listen_queue, ugs->port); ugs->port++; ugs->port_len = strlen(ugs->port); } else { ugs->fd = bind_to_unix(ugs->name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } } } // put socket in non-blocking mode uwsgi_socket_nb(ugs->fd); uwsgi_log("%s bound on %s fd %d\n", ucr->name, ugs->name, ugs->fd); } else if (ugs->subscription) { if (ugs->fd == -1) { if (strchr(ugs->name, ':')) { ugs->fd = bind_to_udp(ugs->name, 0, 0); } else { ugs->fd = bind_to_unix_dgram(ugs->name); } uwsgi_socket_nb(ugs->fd); } uwsgi_log("%s subscription server bound on %s fd %d\n", ucr->name, ugs->name, ugs->fd); } } ugs = ugs->next; } }
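// fastrouter_loop(): the standalone fastrouter/proxy event loop. Each client connection becomes a fastrouter_session moving through FASTROUTER_STATUS_RECV_HDR (4-byte uwsgi header), FASTROUTER_STATUS_RECV_VARS (request vars, used to map the requested hostname to an instance address via the cache, a pattern, the subscription system, a base directory or a code_string), FASTROUTER_STATUS_CONNECTING and FASTROUTER_STATUS_RESPONSE, with optional file descriptor passing to local instances and rb_timer based socket timeouts.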
void fastrouter_loop() { int nevents; int interesting_fd; int new_connection; ssize_t len; int i; time_t delta; char bbuf[UMAX16]; char *tcp_port; char *tmp_socket_name; int tmp_socket_name_len; struct uwsgi_subscribe_req usr; char *magic_table[0xff]; struct uwsgi_rb_timer *min_timeout; void *events; struct msghdr msg; union { struct cmsghdr cmsg; char control [CMSG_SPACE (sizeof (int))]; } msg_control; struct cmsghdr *cmsg; struct sockaddr_un fr_addr; socklen_t fr_addr_len = sizeof(struct sockaddr_un); struct fastrouter_session *fr_session; struct fastrouter_session *fr_table[2048]; struct iovec iov[2]; int soopt; socklen_t solen = sizeof(int); int ufr_subserver = -1; for(i=0;i<2048;i++) { fr_table[i] = NULL; } ufr.queue = event_queue_init(); struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets; while(ufr_sock) { if (ufr_sock->name[0] == '=') { int shared_socket = atoi(ufr_sock->name+1); if (shared_socket >= 0) { ufr_sock->fd = uwsgi_get_shared_socket_fd_by_num(shared_socket); if (ufr_sock->fd == -1) { uwsgi_log("unable to use shared socket %d\n", shared_socket); } } } else { tcp_port = strchr(ufr_sock->name, ':'); if (tcp_port) { ufr_sock->fd = bind_to_tcp(ufr_sock->name, uwsgi.listen_queue, tcp_port); } else { ufr_sock->fd = bind_to_unix(ufr_sock->name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } } uwsgi_log("uwsgi fastrouter/proxy bound on %s\n", ufr_sock->name); if (!ufr.cheap) { event_queue_add_fd_read(ufr.queue, ufr_sock->fd); } else { uwsgi_log("[uwsgi-fastrouter] cheap mode requested. Waiting for subscriptions...\n"); ufr.i_am_cheap = 1; } ufr_sock = ufr_sock->next; } events = event_queue_alloc(ufr.nevents); ufr.timeouts = uwsgi_init_rb_timer(); if (!ufr.socket_timeout) ufr.socket_timeout = 30; if (ufr.subscription_server) { ufr_subserver = bind_to_udp(ufr.subscription_server, 0, 0); event_queue_add_fd_read(ufr.queue, ufr_subserver); if (!ufr.subscription_slot) ufr.subscription_slot = 30; // check for node status every 10 seconds //ufr.subscriptions_check = add_check_timeout(10); } if (ufr.pattern) { init_magic_table(magic_table); } for (;;) { min_timeout = uwsgi_min_rb_timer(ufr.timeouts); if (min_timeout == NULL ) { delta = -1; } else { delta = min_timeout->key - time(NULL); if (delta <= 0) { expire_timeouts(fr_table); delta = 0; } } nevents = event_queue_wait_multi(ufr.queue, delta, events, ufr.nevents); if (nevents == 0) { expire_timeouts(fr_table); } for (i=0;i<nevents;i++) { tmp_socket_name = NULL; interesting_fd = event_queue_interesting_fd(events, i); int taken = 0; struct uwsgi_fastrouter_socket *uwsgi_sock = ufr.sockets; while(uwsgi_sock) { if (interesting_fd == uwsgi_sock->fd) { new_connection = accept(interesting_fd, (struct sockaddr *) &fr_addr, &fr_addr_len); if (new_connection < 0) { continue; } fr_table[new_connection] = alloc_fr_session(); fr_table[new_connection]->fd = new_connection; fr_table[new_connection]->instance_fd = -1; fr_table[new_connection]->status = FASTROUTER_STATUS_RECV_HDR; fr_table[new_connection]->h_pos = 0; fr_table[new_connection]->pos = 0; fr_table[new_connection]->un = NULL; fr_table[new_connection]->instance_failed = 0; fr_table[new_connection]->instance_address_len = 0; fr_table[new_connection]->hostname_len = 0; fr_table[new_connection]->hostname = NULL; fr_table[new_connection]->timeout = add_timeout(fr_table[new_connection]); event_queue_add_fd_read(ufr.queue, new_connection); taken = 1; break; } uwsgi_sock = uwsgi_sock->next; } if (taken) { continue; } if (interesting_fd == ufr_subserver) { len = 
recv(ufr_subserver, bbuf, 4096, 0); #ifdef UWSGI_EVENT_USE_PORT event_queue_add_fd_read(ufr.queue, ufr_subserver); #endif if (len > 0) { memset(&usr, 0, sizeof(struct uwsgi_subscribe_req)); uwsgi_hooked_parse(bbuf+4, len-4, fastrouter_manage_subscription, &usr); if (uwsgi_add_subscribe_node(&ufr.subscriptions, &usr, ufr.subscription_regexp) && ufr.i_am_cheap) { struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets; while(ufr_sock) { event_queue_add_fd_read(ufr.queue, ufr_sock->fd); ufr_sock = ufr_sock->next; } ufr.i_am_cheap = 0; uwsgi_log("[uwsgi-fastrouter] leaving cheap mode...\n"); } } } else { fr_session = fr_table[interesting_fd]; // something is going wrong... if (fr_session == NULL) continue; if (event_queue_interesting_fd_has_error(events, i)) { close_session(fr_table, fr_session); continue; } fr_session->timeout = reset_timeout(fr_session); switch(fr_session->status) { case FASTROUTER_STATUS_RECV_HDR: len = recv(fr_session->fd, (char *)(&fr_session->uh) + fr_session->h_pos, 4-fr_session->h_pos, 0); if (len <= 0) { uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } fr_session->h_pos += len; if (fr_session->h_pos == 4) { #ifdef UWSGI_DEBUG uwsgi_log("modifier1: %d pktsize: %d modifier2: %d\n", fr_session->uh.modifier1, fr_session->uh.pktsize, fr_session->uh.modifier2); #endif fr_session->status = FASTROUTER_STATUS_RECV_VARS; } break; case FASTROUTER_STATUS_RECV_VARS: len = recv(fr_session->fd, fr_session->buffer + fr_session->pos, fr_session->uh.pktsize - fr_session->pos, 0); if (len <= 0) { uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } fr_session->pos += len; if (fr_session->pos == fr_session->uh.pktsize) { if (uwsgi_hooked_parse(fr_session->buffer, fr_session->uh.pktsize, fr_get_hostname, (void *) fr_session)) { close_session(fr_table, fr_session); break; } if (fr_session->hostname_len == 0) { close_session(fr_table, fr_session); break; } #ifdef UWSGI_DEBUG //uwsgi_log("requested domain %.*s\n", fr_session->hostname_len, fr_session->hostname); #endif if (ufr.use_cache) { fr_session->instance_address = uwsgi_cache_get(fr_session->hostname, fr_session->hostname_len, &fr_session->instance_address_len); char *cs_mod = uwsgi_str_contains(fr_session->instance_address, fr_session->instance_address_len, ','); if (cs_mod) { fr_session->modifier1 = uwsgi_str_num(cs_mod+1, (fr_session->instance_address_len - (cs_mod - fr_session->instance_address))-1); fr_session->instance_address_len = (cs_mod - fr_session->instance_address); } } else if (ufr.pattern) { magic_table['s'] = uwsgi_concat2n(fr_session->hostname, fr_session->hostname_len, "", 0); tmp_socket_name = magic_sub(ufr.pattern, ufr.pattern_len, &tmp_socket_name_len, magic_table); free(magic_table['s']); fr_session->instance_address_len = tmp_socket_name_len; fr_session->instance_address = tmp_socket_name; } else if (ufr.subscription_server) { fr_session->un = uwsgi_get_subscribe_node(&ufr.subscriptions, fr_session->hostname, fr_session->hostname_len, ufr.subscription_regexp); if (fr_session->un && fr_session->un->len) { fr_session->instance_address = fr_session->un->name; fr_session->instance_address_len = fr_session->un->len; fr_session->modifier1 = fr_session->un->modifier1; } } else if (ufr.base) { tmp_socket_name = uwsgi_concat2nn(ufr.base, ufr.base_len, fr_session->hostname, fr_session->hostname_len, &tmp_socket_name_len); fr_session->instance_address_len = tmp_socket_name_len; fr_session->instance_address = tmp_socket_name; } else if (ufr.code_string_code && ufr.code_string_function) { if 
(uwsgi.p[ufr.code_string_modifier1]->code_string) { fr_session->instance_address = uwsgi.p[ufr.code_string_modifier1]->code_string("uwsgi_fastrouter", ufr.code_string_code, ufr.code_string_function, fr_session->hostname, fr_session->hostname_len); if (fr_session->instance_address) { fr_session->instance_address_len = strlen(fr_session->instance_address); char *cs_mod = uwsgi_str_contains(fr_session->instance_address, fr_session->instance_address_len, ','); if (cs_mod) { fr_session->modifier1 = uwsgi_str_num(cs_mod+1, (fr_session->instance_address_len - (cs_mod - fr_session->instance_address))-1); fr_session->instance_address_len = (cs_mod - fr_session->instance_address); } } } } // no address found if (!fr_session->instance_address_len) { close_session(fr_table, fr_session); break; } fr_session->pass_fd = is_unix(fr_session->instance_address, fr_session->instance_address_len); fr_session->instance_fd = uwsgi_connectn(fr_session->instance_address, fr_session->instance_address_len, 0, 1); if (tmp_socket_name) free(tmp_socket_name); if (fr_session->instance_fd < 0) { /* if (ufr.subscription_server) { if (fr_session->un && fr_session->un->len > 0) { uwsgi_log("[uwsgi-fastrouter] %.*s => marking %.*s as failed\n", (int) fr_session->hostname_len, fr_session->hostname, (int) fr_session->instance_address_len,fr_session->instance_address); uwsgi_remove_subscribe_node(&ufr.subscriptions, fr_session->un); if (ufr.subscriptions == NULL && ufr.cheap && !ufr.i_am_cheap) { uwsgi_log("[uwsgi-fastrouter] no more nodes available. Going cheap...\n"); struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets; while(ufr_sock) { event_queue_del_fd(ufr.queue, ufr_sock->fd, event_queue_read()); ufr_sock = ufr_sock->next; } ufr.i_am_cheap = 1; } } } */ fr_session->instance_failed = 1; close_session(fr_table, fr_session); break; } fr_session->status = FASTROUTER_STATUS_CONNECTING; fr_table[fr_session->instance_fd] = fr_session; event_queue_add_fd_write(ufr.queue, fr_session->instance_fd); } break; case FASTROUTER_STATUS_CONNECTING: if (interesting_fd == fr_session->instance_fd) { if (getsockopt(fr_session->instance_fd, SOL_SOCKET, SO_ERROR, (void *) (&soopt), &solen) < 0) { uwsgi_error("getsockopt()"); fr_session->instance_failed = 1; close_session(fr_table, fr_session); break; } if (soopt) { uwsgi_log("unable to connect() to uwsgi instance: %s\n", strerror(soopt)); fr_session->instance_failed = 1; close_session(fr_table, fr_session); break; } fr_session->uh.modifier1 = fr_session->modifier1; iov[0].iov_base = &fr_session->uh; iov[0].iov_len = 4; iov[1].iov_base = fr_session->buffer; iov[1].iov_len = fr_session->uh.pktsize; // increment node requests counter if (fr_session->un) fr_session->un->requests++; // fd passing: PERFORMANCE EXTREME BOOST !!! 
if (fr_session->pass_fd && !uwsgi.no_fd_passing) { msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; msg.msg_iovlen = 2; msg.msg_flags = 0; msg.msg_control = &msg_control; msg.msg_controllen = sizeof (msg_control); cmsg = CMSG_FIRSTHDR (&msg); cmsg->cmsg_len = CMSG_LEN (sizeof (int)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; memcpy(CMSG_DATA(cmsg), &fr_session->fd, sizeof(int)); if (sendmsg(fr_session->instance_fd, &msg, 0) < 0) { uwsgi_error("sendmsg()"); } close_session(fr_table, fr_session); break; } if (writev(fr_session->instance_fd, iov, 2) < 0) { uwsgi_error("writev()"); close_session(fr_table, fr_session); break; } event_queue_fd_write_to_read(ufr.queue, fr_session->instance_fd); fr_session->status = FASTROUTER_STATUS_RESPONSE; } break; case FASTROUTER_STATUS_RESPONSE: // data from instance if (interesting_fd == fr_session->instance_fd) { len = recv(fr_session->instance_fd, fr_session->buffer, 0xffff, 0); if (len <= 0) { if (len < 0) uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } len = send(fr_session->fd, fr_session->buffer, len, 0); if (len <= 0) { if (len < 0) uwsgi_error("send()"); close_session(fr_table, fr_session); break; } // update transfer statistics if (fr_session->un) fr_session->un->transferred += len; } // body from client else if (interesting_fd == fr_session->fd) { //uwsgi_log("receiving body...\n"); len = recv(fr_session->fd, fr_session->buffer, 0xffff, 0); if (len <= 0) { if (len < 0) uwsgi_error("recv()"); close_session(fr_table, fr_session); break; } len = send(fr_session->instance_fd, fr_session->buffer, len, 0); if (len <= 0) { if (len < 0) uwsgi_error("send()"); close_session(fr_table, fr_session); break; } } break; // fallback to destroy !!! default: uwsgi_log("unknown event: closing session\n"); close_session(fr_table, fr_session); break; } } } } }
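// uwsgi_corerouter_loop(): the generic event loop shared by routers built on the corerouter framework. It selects a hostname-to-backend mapper (cache, pattern, subscriptions, base dir, code_string, static nodes or a fixed socket), optionally exposes a per-router stats server, accepts new connections on the gateway sockets and dispatches read/write events to the per-peer hook functions, closing peers on error or timeout.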
void uwsgi_corerouter_loop(int id, void *data) { int i; struct uwsgi_corerouter *ucr = (struct uwsgi_corerouter *) data; ucr->cr_stats_server = -1; ucr->cr_table = uwsgi_malloc(sizeof(struct corerouter_session *) * uwsgi.max_fd); for (i = 0; i < (int) uwsgi.max_fd; i++) { ucr->cr_table[i] = NULL; } ucr->i_am_cheap = ucr->cheap; void *events = uwsgi_corerouter_setup_event_queue(ucr, id); if (ucr->has_subscription_sockets) event_queue_add_fd_read(ucr->queue, ushared->gateways[id].internal_subscription_pipe[1]); if (!ucr->socket_timeout) ucr->socket_timeout = 60; if (!ucr->static_node_gracetime) ucr->static_node_gracetime = 30; int i_am_the_first = 1; for(i=0;i<id;i++) { if (!strcmp(ushared->gateways[i].name, ucr->name)) { i_am_the_first = 0; break; } } if (ucr->stats_server && i_am_the_first) { char *tcp_port = strchr(ucr->stats_server, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; ucr->cr_stats_server = bind_to_tcp(ucr->stats_server, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { ucr->cr_stats_server = bind_to_unix(ucr->stats_server, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(ucr->queue, ucr->cr_stats_server); uwsgi_log("*** %s stats server enabled on %s fd: %d ***\n", ucr->short_name, ucr->stats_server, ucr->cr_stats_server); } if (ucr->use_socket) { ucr->to_socket = uwsgi_get_socket_by_num(ucr->socket_num); if (ucr->to_socket) { // fix socket name_len if (ucr->to_socket->name_len == 0 && ucr->to_socket->name) { ucr->to_socket->name_len = strlen(ucr->to_socket->name); } } } if (!ucr->pb_base_dir) { ucr->pb_base_dir = getenv("TMPDIR"); if (!ucr->pb_base_dir) ucr->pb_base_dir = "/tmp"; } int nevents; time_t delta; struct uwsgi_rb_timer *min_timeout; int new_connection; if (ucr->pattern) { init_magic_table(ucr->magic_table); } union uwsgi_sockaddr cr_addr; socklen_t cr_addr_len = sizeof(struct sockaddr_un); ucr->mapper = uwsgi_cr_map_use_void; if (ucr->use_cache) { ucr->cache = uwsgi_cache_by_name(ucr->use_cache); if (!ucr->cache) { uwsgi_log("!!! 
unable to find cache \"%s\" !!!\n", ucr->use_cache); exit(1); } ucr->mapper = uwsgi_cr_map_use_cache; } else if (ucr->pattern) { ucr->mapper = uwsgi_cr_map_use_pattern; } else if (ucr->has_subscription_sockets) { ucr->mapper = uwsgi_cr_map_use_subscription; if (uwsgi.subscription_dotsplit) { ucr->mapper = uwsgi_cr_map_use_subscription_dotsplit; } } else if (ucr->base) { ucr->mapper = uwsgi_cr_map_use_base; } else if (ucr->code_string_code && ucr->code_string_function) { ucr->mapper = uwsgi_cr_map_use_cs; } else if (ucr->to_socket) { ucr->mapper = uwsgi_cr_map_use_to; } else if (ucr->static_nodes) { ucr->mapper = uwsgi_cr_map_use_static_nodes; } ucr->timeouts = uwsgi_init_rb_timer(); for (;;) { time_t now = uwsgi_now(); // set timeouts and harakiri min_timeout = uwsgi_min_rb_timer(ucr->timeouts, NULL); if (min_timeout == NULL) { delta = -1; } else { delta = min_timeout->value - now; if (delta <= 0) { corerouter_expire_timeouts(ucr, now); delta = 0; } } if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = 0; } // wait for events nevents = event_queue_wait_multi(ucr->queue, delta, events, ucr->nevents); now = uwsgi_now(); if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = now + ucr->harakiri; } if (nevents == 0) { corerouter_expire_timeouts(ucr, now); } for (i = 0; i < nevents; i++) { // get the interesting fd ucr->interesting_fd = event_queue_interesting_fd(events, i); // something bad happened if (ucr->interesting_fd < 0) continue; // check if the ucr->interesting_fd matches a gateway socket struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets; int taken = 0; while (ugs) { if (ugs->gateway == &ushared->gateways[id] && ucr->interesting_fd == ugs->fd) { if (!ugs->subscription) { #if defined(__linux__) && defined(SOCK_NONBLOCK) && !defined(OBSOLETE_LINUX_KERNEL) new_connection = accept4(ucr->interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len, SOCK_NONBLOCK); if (new_connection < 0) { taken = 1; break; } #else new_connection = accept(ucr->interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len); if (new_connection < 0) { taken = 1; break; } // set socket in non-blocking mode, on non-linux platforms, clients get the server mode #ifdef __linux__ uwsgi_socket_nb(new_connection); #endif #endif struct corerouter_session *cr = corerouter_alloc_session(ucr, ugs, new_connection, (struct sockaddr *) &cr_addr, cr_addr_len); //something wrong in the allocation if (!cr) break; } else if (ugs->subscription) { uwsgi_corerouter_manage_subscription(ucr, id, ugs); } taken = 1; break; } ugs = ugs->next; } if (taken) { continue; } // manage internal subscription if (ucr->interesting_fd == ushared->gateways[id].internal_subscription_pipe[1]) { uwsgi_corerouter_manage_internal_subscription(ucr, ucr->interesting_fd); } // manage a stats request else if (ucr->interesting_fd == ucr->cr_stats_server) { corerouter_send_stats(ucr); } else { struct corerouter_peer *peer = ucr->cr_table[ucr->interesting_fd]; // something is going wrong... 
if (peer == NULL) continue; // on error, destroy the session if (event_queue_interesting_fd_has_error(events, i)) { peer->failed = 1; corerouter_close_peer(ucr, peer); continue; } // set timeout (in main_peer too) peer->timeout = corerouter_reset_timeout_fast(ucr, peer, now); peer->session->main_peer->timeout = corerouter_reset_timeout_fast(ucr, peer->session->main_peer, now); ssize_t (*hook)(struct corerouter_peer *) = NULL; // call event hook if (event_queue_interesting_fd_is_read(events, i)) { hook = peer->hook_read; } else if (event_queue_interesting_fd_is_write(events, i)) { hook = peer->hook_write; } if (!hook) continue; // reset errno (as we use it for internal signalling) errno = 0; ssize_t ret = hook(peer); // connection closed if (ret == 0) { corerouter_close_peer(ucr, peer); continue; } else if (ret < 0) { if (errno == EINPROGRESS) continue; // remove keepalive on error peer->session->can_keepalive = 0; corerouter_close_peer(ucr, peer); continue; } } } } }
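// master_loop(): the refactored master process loop. Compared to the legacy version above, most per-cycle work is delegated to helpers (uwsgi_master_check_death(), uwsgi_master_check_reload(), uwsgi_master_manage_events(), the uwsgi_master_check_*_deadline() family, ...), and touch-reload, workers-reload, chain-reload, touch-exec and touch-signal handling is driven by uwsgi_check_touches().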
int master_loop(char **argv, char **environ) { struct timeval last_respawn; int last_respawn_rate = 0; pid_t diedpid; int waitpid_status; time_t now = 0; int i = 0; int rlen; int check_interval = 1; struct uwsgi_rb_timer *min_timeout; struct uwsgi_rbtree *rb_timers = uwsgi_init_rb_timer(); if (uwsgi.procname_master) { uwsgi_set_processname(uwsgi.procname_master); } else if (uwsgi.procname) { uwsgi_set_processname(uwsgi.procname); } else if (uwsgi.auto_procname) { uwsgi_set_processname("uWSGI master"); } uwsgi.current_time = uwsgi_now(); uwsgi_unix_signal(SIGTSTP, suspend_resume_them_all); uwsgi_unix_signal(SIGHUP, grace_them_all); if (uwsgi.die_on_term) { uwsgi_unix_signal(SIGTERM, kill_them_all); uwsgi_unix_signal(SIGQUIT, reap_them_all); } else { uwsgi_unix_signal(SIGTERM, reap_them_all); uwsgi_unix_signal(SIGQUIT, kill_them_all); } uwsgi_unix_signal(SIGINT, kill_them_all); uwsgi_unix_signal(SIGUSR1, stats); if (uwsgi.auto_snapshot) { uwsgi_unix_signal(SIGURG, uwsgi_restore_auto_snapshot); } atexit(uwsgi_master_cleanup_hooks); uwsgi.master_queue = event_queue_init(); /* route signals to workers... */ #ifdef UWSGI_DEBUG uwsgi_log("adding %d to signal poll\n", uwsgi.shared->worker_signal_pipe[0]); #endif event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_signal_pipe[0]); if (uwsgi.master_fifo) { uwsgi.master_fifo_fd = uwsgi_master_fifo(); event_queue_add_fd_read(uwsgi.master_queue, uwsgi.master_fifo_fd); } if (uwsgi.spoolers) { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->spooler_signal_pipe[0]); } if (uwsgi.mules_cnt > 0) { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->mule_signal_pipe[0]); } if (uwsgi.log_master) { uwsgi.log_master_buf = uwsgi_malloc(uwsgi.log_master_bufsize); if (!uwsgi.threaded_logger) { #ifdef UWSGI_DEBUG uwsgi_log("adding %d to master logging\n", uwsgi.shared->worker_log_pipe[0]); #endif event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]); if (uwsgi.req_log_master) { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_req_log_pipe[0]); } } else { uwsgi_threaded_logger_spawn(); } } #ifdef UWSGI_SSL uwsgi_start_legions(); #endif uwsgi_metrics_start_collector(); uwsgi_add_reload_fds(); uwsgi_cache_start_sweepers(); uwsgi_cache_start_sync_servers(); uwsgi.wsgi_req->buffer = uwsgi.workers[0].cores[0].buffer; if (uwsgi.has_emperor) { if (uwsgi.emperor_proxy) { uwsgi.emperor_fd_proxy = bind_to_unix(uwsgi.emperor_proxy, uwsgi.listen_queue, 0, 0); if (uwsgi.emperor_fd_proxy < 0) exit(1); if (chmod(uwsgi.emperor_proxy, S_IRUSR|S_IWUSR)) { uwsgi_error("[emperor-proxy] chmod()"); exit(1); } event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd_proxy); } else { event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd); } } if (uwsgi.zerg_server) { uwsgi.zerg_server_fd = bind_to_unix(uwsgi.zerg_server, uwsgi.listen_queue, 0, 0); event_queue_add_fd_read(uwsgi.master_queue, uwsgi.zerg_server_fd); uwsgi_log("*** Zerg server enabled on %s ***\n", uwsgi.zerg_server); } if (uwsgi.stats) { char *tcp_port = strrchr(uwsgi.stats, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; uwsgi.stats_fd = bind_to_tcp(uwsgi.stats, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { uwsgi.stats_fd = bind_to_unix(uwsgi.stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(uwsgi.master_queue, uwsgi.stats_fd); uwsgi_log("*** Stats server enabled on %s 
fd: %d ***\n", uwsgi.stats, uwsgi.stats_fd); } if (uwsgi.stats_pusher_instances) { if (!uwsgi_thread_new(uwsgi_stats_pusher_loop)) { uwsgi_log("!!! unable to spawn stats pusher thread !!!\n"); exit(1); } } if (uwsgi.udp_socket) { uwsgi.udp_fd = bind_to_udp(uwsgi.udp_socket, 0, 0); if (uwsgi.udp_fd < 0) { uwsgi_log("unable to bind to udp socket. SNMP services will be disabled.\n"); } else { uwsgi_log("UDP server enabled.\n"); event_queue_add_fd_read(uwsgi.master_queue, uwsgi.udp_fd); } } uwsgi.snmp_fd = uwsgi_setup_snmp(); if (uwsgi.status.is_cheap) { uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1); for (i = 1; i <= uwsgi.numproc; i++) { uwsgi.workers[i].cheaped = 1; } uwsgi_log("cheap mode enabled: waiting for socket connection...\n"); } // spawn mules for (i = 0; i < uwsgi.mules_cnt; i++) { size_t mule_patch_size = 0; uwsgi.mules[i].patch = uwsgi_string_get_list(&uwsgi.mules_patches, i, &mule_patch_size); uwsgi_mule(i + 1); } // spawn gateways for (i = 0; i < ushared->gateways_cnt; i++) { if (ushared->gateways[i].pid == 0) { gateway_respawn(i); } } // spawn daemons uwsgi_daemons_spawn_all(); // first subscription uwsgi_subscribe_all(0, 1); // sync the cache store if needed uwsgi_cache_sync_all(); if (uwsgi.queue_store && uwsgi.queue_filesize) { if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) { uwsgi_error("msync()"); } } // update touches timestamps uwsgi_check_touches(uwsgi.touch_reload); uwsgi_check_touches(uwsgi.touch_logrotate); uwsgi_check_touches(uwsgi.touch_logreopen); uwsgi_check_touches(uwsgi.touch_chain_reload); uwsgi_check_touches(uwsgi.touch_workers_reload); uwsgi_check_touches(uwsgi.touch_gracefully_stop); // update exec touches struct uwsgi_string_list *usl = uwsgi.touch_exec; while (usl) { char *space = strchr(usl->value, ' '); if (space) { *space = 0; usl->len = strlen(usl->value); usl->custom_ptr = space + 1; } usl = usl->next; } uwsgi_check_touches(uwsgi.touch_exec); // update signal touches usl = uwsgi.touch_signal; while (usl) { char *space = strchr(usl->value, ' '); if (space) { *space = 0; usl->len = strlen(usl->value); usl->custom_ptr = space + 1; } usl = usl->next; } uwsgi_check_touches(uwsgi.touch_signal); // fsmon uwsgi_fsmon_setup(); // setup cheaper algos (can be stacked) uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare; if (uwsgi.requested_cheaper_algo) { uwsgi.cheaper_algo = NULL; struct uwsgi_cheaper_algo *uca = uwsgi.cheaper_algos; while (uca) { if (!strcmp(uca->name, uwsgi.requested_cheaper_algo)) { uwsgi.cheaper_algo = uca->func; break; } uca = uca->next; } if (!uwsgi.cheaper_algo) { uwsgi_log("unable to find requested cheaper algorithm, falling back to spare\n"); uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare; } } // here really starts the master loop for (;;) { //uwsgi_log("uwsgi.ready_to_reload %d %d\n", uwsgi.ready_to_reload, uwsgi.numproc); // run master_cycle hook for every plugin for (i = 0; i < uwsgi.gp_cnt; i++) { if (uwsgi.gp[i]->master_cycle) { uwsgi.gp[i]->master_cycle(); } } for (i = 0; i < 256; i++) { if (uwsgi.p[i]->master_cycle) { uwsgi.p[i]->master_cycle(); } } // check for death (before reload !!!) uwsgi_master_check_death(); // check for realod if (uwsgi_master_check_reload(argv)) { return -1; } // check chain reload uwsgi_master_check_chain(); // check if some worker is taking too much to die... 
uwsgi_master_check_mercy(); // check for daemons (smart and dumb) uwsgi_daemons_smart_check(); if (uwsgi.respawn_snapshots) { for (i = 1; i <= uwsgi.respawn_snapshots; i++) { if (uwsgi_respawn_worker(i)) return 0; } uwsgi.respawn_snapshots = 0; } if (uwsgi.restore_snapshot) { uwsgi_master_restore_snapshot(); continue; } // cheaper management if (uwsgi.cheaper && !uwsgi.status.is_cheap && !uwsgi_instance_is_reloading && !uwsgi_instance_is_dying && !uwsgi.workers[0].suspended) { if (!uwsgi_calc_cheaper()) return 0; } // check if someone is dead diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG); if (diedpid == -1) { if (errno == ECHILD) { // something did not work as expected, just assume all has been cleared uwsgi_master_commit_status(); diedpid = 0; } else { uwsgi_error("waitpid()"); /* here is better to reload all the uWSGI stack */ uwsgi_log("something horrible happened...\n"); reap_them_all(0); exit(1); } } // no one died just run all of the standard master tasks if (diedpid == 0) { /* all processes ok, doing status scan after N seconds */ check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL]; if (!check_interval) check_interval = 1; // add unregistered file monitors // locking is not needed as monitors can only increase for (i = 0; i < ushared->files_monitored_cnt; i++) { if (!ushared->files_monitored[i].registered) { ushared->files_monitored[i].fd = event_queue_add_file_monitor(uwsgi.master_queue, ushared->files_monitored[i].filename, &ushared->files_monitored[i].id); ushared->files_monitored[i].registered = 1; } } // add unregistered timers // locking is not needed as timers can only increase for (i = 0; i < ushared->timers_cnt; i++) { if (!ushared->timers[i].registered) { ushared->timers[i].fd = event_queue_add_timer(uwsgi.master_queue, &ushared->timers[i].id, ushared->timers[i].value); ushared->timers[i].registered = 1; } } // add unregistered rb_timers // locking is not needed as rb_timers can only increase for (i = 0; i < ushared->rb_timers_cnt; i++) { if (!ushared->rb_timers[i].registered) { ushared->rb_timers[i].uwsgi_rb_timer = uwsgi_add_rb_timer(rb_timers, uwsgi_now() + ushared->rb_timers[i].value, &ushared->rb_timers[i]); ushared->rb_timers[i].registered = 1; } } int interesting_fd = -1; if (ushared->rb_timers_cnt > 0) { min_timeout = uwsgi_min_rb_timer(rb_timers, NULL); if (min_timeout == NULL) { check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL]; } else { check_interval = min_timeout->value - uwsgi_now(); if (check_interval <= 0) { expire_rb_timeouts(rb_timers); check_interval = 0; } } } // wait for event rlen = event_queue_wait(uwsgi.master_queue, check_interval, &interesting_fd); if (rlen == 0) { if (ushared->rb_timers_cnt > 0) { expire_rb_timeouts(rb_timers); } } // check uwsgi-cron table if (ushared->cron_cnt) { uwsgi_manage_signal_cron(uwsgi_now()); } if (uwsgi.crons) { uwsgi_manage_command_cron(uwsgi_now()); } // some event returned if (rlen > 0) { // if the following function returns -1, a new worker has just spawned if (uwsgi_master_manage_events(interesting_fd)) { return 0; } } now = uwsgi_now(); if (now - uwsgi.current_time < 1) { continue; } uwsgi.current_time = now; // checking logsize if (uwsgi.logfile) { uwsgi_check_logrotate(); } // this will be incremented at (more or less) regular intervals uwsgi.master_cycles++; // recalculate requests counter on race conditions risky configurations // a bit of inaccuracy is better than locking;) uwsgi_master_fix_request_counters(); // check for idle uwsgi_master_check_idle(); 
check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL]; if (!check_interval) check_interval = 1; // get listen_queue status struct uwsgi_socket *uwsgi_sock = uwsgi.sockets; int tmp_queue = 0; while (uwsgi_sock) { if (uwsgi_sock->family == AF_INET) { uwsgi_sock->queue = uwsgi_get_tcp_info(uwsgi_sock->fd); } #ifdef __linux__ #ifdef SIOBKLGQ else if (uwsgi_sock->family == AF_UNIX) { uwsgi_sock->queue = get_linux_unbit_SIOBKLGQ(uwsgi_sock->fd); } #endif #endif if (uwsgi_sock->queue > tmp_queue) { tmp_queue = uwsgi_sock->queue; } uwsgi_sock = uwsgi_sock->next; } // fix queue size on multiple sockets uwsgi.shared->load = tmp_queue; // check if some worker has to die (harakiri, evil checks...) uwsgi_master_check_workers_deadline(); uwsgi_master_check_gateways_deadline(); uwsgi_master_check_mules_deadline(); uwsgi_master_check_spoolers_deadline(); uwsgi_master_check_crons_deadline(); uwsgi_master_check_mountpoints(); #ifdef __linux__ #ifdef MADV_MERGEABLE if (uwsgi.linux_ksm > 0 && (uwsgi.master_cycles % uwsgi.linux_ksm) == 0) { uwsgi_linux_ksm_map(); } #endif #endif // resubscribe every 10 cycles by default if (((uwsgi.subscriptions || uwsgi.subscriptions2) && ((uwsgi.master_cycles % uwsgi.subscribe_freq) == 0 || uwsgi.master_cycles == 1)) && !uwsgi_instance_is_reloading && !uwsgi_instance_is_dying && !uwsgi.workers[0].suspended) { uwsgi_subscribe_all(0, 0); } uwsgi_cache_sync_all(); if (uwsgi.queue_store && uwsgi.queue_filesize && uwsgi.queue_store_sync && ((uwsgi.master_cycles % uwsgi.queue_store_sync) == 0)) { if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) { uwsgi_error("msync()"); } } // check touch_reload if (!uwsgi_instance_is_reloading && !uwsgi_instance_is_dying) { char *touched = uwsgi_check_touches(uwsgi.touch_reload); if (touched) { uwsgi_log_verbose("*** %s has been touched... grace them all !!! ***\n", touched); uwsgi_block_signal(SIGHUP); grace_them_all(0); uwsgi_unblock_signal(SIGHUP); continue; } touched = uwsgi_check_touches(uwsgi.touch_workers_reload); if (touched) { uwsgi_log_verbose("*** %s has been touched... workers reload !!! ***\n", touched); uwsgi_reload_workers(); continue; } touched = uwsgi_check_touches(uwsgi.touch_chain_reload); if (touched) { if (uwsgi.status.chain_reloading == 0) { uwsgi_log_verbose("*** %s has been touched... chain reload !!! ***\n", touched); uwsgi.status.chain_reloading = 1; } else { uwsgi_log_verbose("*** %s has been touched... but chain reload is already running ***\n", touched); } } // be sure to run it as the last touch check touched = uwsgi_check_touches(uwsgi.touch_exec); if (touched) { if (uwsgi_run_command(touched, NULL, -1) >= 0) { uwsgi_log_verbose("[uwsgi-touch-exec] running %s\n", touched); } } touched = uwsgi_check_touches(uwsgi.touch_signal); if (touched) { uint8_t signum = atoi(touched); uwsgi_route_signal(signum); uwsgi_log_verbose("[uwsgi-touch-signal] raising %u\n", signum); } } continue; } // no one died if (diedpid <= 0) continue; // check for deadlocks first uwsgi_deadlock_check(diedpid); // reload gateways and daemons only on normal workflow (+outworld status) if (!uwsgi_instance_is_reloading && !uwsgi_instance_is_dying) { if (uwsgi_master_check_emperor_death(diedpid)) continue; if (uwsgi_master_check_spoolers_death(diedpid)) continue; if (uwsgi_master_check_mules_death(diedpid)) continue; if (uwsgi_master_check_gateways_death(diedpid)) continue; if (uwsgi_master_check_daemons_death(diedpid)) continue; if (uwsgi_master_check_cron_death(diedpid)) continue; } /* What happens here ? 
case 1) the diedpid is not a worker, report it and continue case 2) the diedpid is a worker and we are not in a reload procedure -> reload it case 3) the diedpid is a worker and we are in graceful reload -> uwsgi.ready_to_reload++ and continue case 4) the diedpid is a worker and we are in brutal reload -> uwsgi.ready_to_die++ and continue */ int thewid = find_worker_id(diedpid); if (thewid <= 0) { // check spooler, mules, gateways and daemons struct uwsgi_spooler *uspool = uwsgi.spoolers; while (uspool) { if (uspool->pid > 0 && diedpid == uspool->pid) { uwsgi_log("spooler (pid: %d) annihilated\n", (int) diedpid); goto next; } uspool = uspool->next; } for (i = 0; i < uwsgi.mules_cnt; i++) { if (uwsgi.mules[i].pid == diedpid) { uwsgi_log("mule %d (pid: %d) annihilated\n", i + 1, (int) diedpid); goto next; } } for (i = 0; i < ushared->gateways_cnt; i++) { if (ushared->gateways[i].pid == diedpid) { uwsgi_log("gateway %d (%s, pid: %d) annihilated\n", i + 1, ushared->gateways[i].fullname, (int) diedpid); goto next; } } if (uwsgi_daemon_check_pid_death(diedpid)) goto next; if (WIFEXITED(waitpid_status)) { uwsgi_log("subprocess %d exited with code %d\n", (int) diedpid, WEXITSTATUS(waitpid_status)); } else if (WIFSIGNALED(waitpid_status)) { uwsgi_log("subprocess %d exited by signal %d\n", (int) diedpid, WTERMSIG(waitpid_status)); } else if (WIFSTOPPED(waitpid_status)) { uwsgi_log("subprocess %d stopped\n", (int) diedpid); } next: continue; } // ok a worker died... uwsgi.workers[thewid].pid = 0; // only to be safe :P uwsgi.workers[thewid].harakiri = 0; // ok, if we are reloading or dying, just continue the master loop // as soon as all of the workers have pid == 0, the action (exit, or reload) is triggered if (uwsgi_instance_is_reloading || uwsgi_instance_is_dying) { if (!uwsgi.workers[thewid].cursed_at) uwsgi.workers[thewid].cursed_at = uwsgi_now(); uwsgi_log("worker %d buried after %d seconds\n", thewid, (int) (uwsgi_now() - uwsgi.workers[thewid].cursed_at)); uwsgi.workers[thewid].cursed_at = 0; continue; } // if we are stopping workers, just end here if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_FAILED_APP_CODE) { uwsgi_log("OOPS ! failed loading app in worker %d (pid %d) :( trying again...\n", thewid, (int) diedpid); } else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_DE_HIJACKED_CODE) { uwsgi_log("...restoring worker %d (pid: %d)...\n", thewid, (int) diedpid); } else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXCEPTION_CODE) { uwsgi_log("... monitored exception detected, respawning worker %d (pid: %d)...\n", thewid, (int) diedpid); } else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_QUIET_CODE) { // noop } else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_BRUTAL_RELOAD_CODE) { uwsgi_log("!!! inconsistent state reported by worker %d (pid: %d) !!!\n", thewid, (int) diedpid); reap_them_all(0); continue; } else if (uwsgi.workers[thewid].manage_next_request) { if (WIFSIGNALED(waitpid_status)) { uwsgi_log("DAMN ! worker %d (pid: %d) died, killed by signal %d :( trying respawn ...\n", thewid, (int) diedpid, (int) WTERMSIG(waitpid_status)); } else { uwsgi_log("DAMN ! worker %d (pid: %d) died :( trying respawn ...\n", thewid, (int) diedpid); } } else if (uwsgi.workers[thewid].cursed_at > 0) { uwsgi_log("worker %d killed successfully (pid: %d)\n", thewid, (int) diedpid); } // manage_next_request is zero, but killed by signal...
else if (WIFSIGNALED(waitpid_status)) { uwsgi_log("DAMN ! worker %d (pid: %d) MYSTERIOUSLY killed by signal %d :( trying respawn ...\n", thewid, (int) diedpid, (int) WTERMSIG(waitpid_status)); } if (uwsgi.workers[thewid].cheaped == 1) { uwsgi_log("uWSGI worker %d cheaped.\n", thewid); continue; } // avoid fork bombing gettimeofday(&last_respawn, NULL); if (last_respawn.tv_sec <= uwsgi.respawn_delta + check_interval) { last_respawn_rate++; if (last_respawn_rate > uwsgi.numproc) { if (uwsgi.forkbomb_delay > 0) { uwsgi_log("worker respawning too fast !!! I have to sleep a bit (%d seconds)...\n", uwsgi.forkbomb_delay); /* use --forkbomb-delay 0 to disable sleeping */ sleep(uwsgi.forkbomb_delay); } last_respawn_rate = 0; } } else { last_respawn_rate = 0; } gettimeofday(&last_respawn, NULL); uwsgi.respawn_delta = last_respawn.tv_sec; // are we chain reloading it ? if (uwsgi.status.chain_reloading == thewid) { uwsgi.status.chain_reloading++; } // respawn the worker (if needed) if (uwsgi_respawn_worker(thewid)) return 0; // end of the loop } // never here }
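/*
 * A minimal, self-contained sketch of the child-reaping pattern used by the master
 * loop above: waitpid() with WNOHANG so the loop never blocks, and the
 * WIFEXITED/WIFSIGNALED macros to tell a clean exit from a signal death.
 * The names reap_children() and handle_dead_child() are illustrative only and
 * are not uWSGI API.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
#include <stdio.h>

static void handle_dead_child(pid_t pid, int status) {
	if (WIFEXITED(status)) {
		printf("child %d exited with code %d\n", (int) pid, WEXITSTATUS(status));
	} else if (WIFSIGNALED(status)) {
		printf("child %d killed by signal %d\n", (int) pid, WTERMSIG(status));
	}
}

/* returns the number of children reaped in this pass */
static int reap_children(void) {
	int reaped = 0;
	for (;;) {
		int status = 0;
		pid_t diedpid = waitpid(-1, &status, WNOHANG);
		if (diedpid == 0) break;            /* children exist, but none exited yet */
		if (diedpid < 0) {
			if (errno == ECHILD) break; /* no children at all */
			perror("waitpid()");
			break;
		}
		handle_dead_child(diedpid, status);
		reaped++;
	}
	return reaped;
}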
void uwsgi_corerouter_loop(int id, void *data) { int i; struct uwsgi_corerouter *ucr = (struct uwsgi_corerouter *) data; ucr->cr_stats_server = -1; ucr->cr_table = uwsgi_malloc(sizeof(struct corerouter_session *) * uwsgi.max_fd); for (i = 0; i < (int) uwsgi.max_fd; i++) { ucr->cr_table[i] = NULL; } ucr->i_am_cheap = ucr->cheap; void *events = uwsgi_corerouter_setup_event_queue(ucr, id); if (ucr->has_subscription_sockets) event_queue_add_fd_read(ucr->queue, ushared->gateways[id].internal_subscription_pipe[1]); if (!ucr->socket_timeout) ucr->socket_timeout = 30; if (!ucr->static_node_gracetime) ucr->static_node_gracetime = 30; int i_am_the_first = 1; for(i=0;i<id;i++) { if (!strcmp(ushared->gateways[i].name, ucr->name)) { i_am_the_first = 0; break; } } if (ucr->stats_server && i_am_the_first) { char *tcp_port = strchr(ucr->stats_server, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; ucr->cr_stats_server = bind_to_tcp(ucr->stats_server, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { ucr->cr_stats_server = bind_to_unix(ucr->stats_server, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } event_queue_add_fd_read(ucr->queue, ucr->cr_stats_server); uwsgi_log("*** %s stats server enabled on %s fd: %d ***\n", ucr->short_name, ucr->stats_server, ucr->cr_stats_server); } if (ucr->use_socket) { ucr->to_socket = uwsgi_get_socket_by_num(ucr->socket_num); if (ucr->to_socket) { // fix socket name_len if (ucr->to_socket->name_len == 0 && ucr->to_socket->name) { ucr->to_socket->name_len = strlen(ucr->to_socket->name); } } } if (!ucr->pb_base_dir) { ucr->pb_base_dir = getenv("TMPDIR"); if (!ucr->pb_base_dir) ucr->pb_base_dir = "/tmp"; } int nevents; time_t delta; struct uwsgi_rb_timer *min_timeout; int interesting_fd; int new_connection; if (ucr->pattern) { init_magic_table(ucr->magic_table); } union uwsgi_sockaddr cr_addr; socklen_t cr_addr_len = sizeof(struct sockaddr_un); struct corerouter_session *cr_session; ucr->mapper = uwsgi_cr_map_use_void; if (ucr->use_cache) { ucr->mapper = uwsgi_cr_map_use_cache; } else if (ucr->pattern) { ucr->mapper = uwsgi_cr_map_use_pattern; } else if (ucr->has_subscription_sockets) { ucr->mapper = uwsgi_cr_map_use_subscription; } else if (ucr->base) { ucr->mapper = uwsgi_cr_map_use_base; } else if (ucr->code_string_code && ucr->code_string_function) { ucr->mapper = uwsgi_cr_map_use_cs; } else if (ucr->to_socket) { ucr->mapper = uwsgi_cr_map_use_to; } else if (ucr->static_nodes) { ucr->mapper = uwsgi_cr_map_use_static_nodes; } else if (ucr->use_cluster) { ucr->mapper = uwsgi_cr_map_use_cluster; } ucr->timeouts = uwsgi_init_rb_timer(); for (;;) { min_timeout = uwsgi_min_rb_timer(ucr->timeouts); if (min_timeout == NULL) { delta = -1; } else { delta = min_timeout->key - time(NULL); if (delta <= 0) { corerouter_expire_timeouts(ucr); delta = 0; } } if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = 0; } nevents = event_queue_wait_multi(ucr->queue, delta, events, ucr->nevents); if (uwsgi.master_process && ucr->harakiri > 0) { ushared->gateways_harakiri[id] = time(NULL) + ucr->harakiri; } if (nevents == 0) { corerouter_expire_timeouts(ucr); } for (i = 0; i < nevents; i++) { interesting_fd = event_queue_interesting_fd(events, i); struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets; int taken = 0; while (ugs) { if (ugs->gateway == &ushared->gateways[id] && interesting_fd == ugs->fd) { if 
(!ugs->subscription) { new_connection = accept(interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len); #ifdef UWSGI_EVENT_USE_PORT event_queue_add_fd_read(ucr->queue, interesting_fd); #endif if (new_connection < 0) { taken = 1; break; } // set socket blocking mode, on non-linux platforms, clients get the server mode #ifndef __linux__ if (!ugs->nb) { uwsgi_socket_b(new_connection); } #else if (ugs->nb) { uwsgi_socket_nb(new_connection); } #endif corerouter_alloc_session(ucr, ugs, new_connection, (struct sockaddr *) &cr_addr, cr_addr_len); } else if (ugs->subscription) { uwsgi_corerouter_manage_subscription(ucr, id, ugs); } taken = 1; break; } ugs = ugs->next; } if (taken) { continue; } if (interesting_fd == ushared->gateways[id].internal_subscription_pipe[1]) { uwsgi_corerouter_manage_internal_subscription(ucr, interesting_fd); } else if (interesting_fd == ucr->cr_stats_server) { corerouter_send_stats(ucr); } else { cr_session = ucr->cr_table[interesting_fd]; // something is going wrong... if (cr_session == NULL) continue; if (event_queue_interesting_fd_has_error(events, i)) { corerouter_close_session(ucr, cr_session); continue; } cr_session->timeout = corerouter_reset_timeout(ucr, cr_session); // implementation-specific cycle ucr->switch_events(ucr, cr_session, interesting_fd); } } } }
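/*
 * A minimal sketch of the accept-and-set-nonblocking step performed by the
 * corerouter above. uwsgi_socket_nb() is essentially an fcntl(O_NONBLOCK)
 * wrapper; the helpers below (set_nonblocking, accept_client) are illustrative
 * names, not uWSGI API.
 */
#include <sys/socket.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

static int set_nonblocking(int fd) {
	int flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0) return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

/* accept one pending connection from a (possibly non-blocking) listening socket */
static int accept_client(int listen_fd) {
	struct sockaddr_storage addr;
	socklen_t addr_len = sizeof(addr);
	int client_fd = accept(listen_fd, (struct sockaddr *) &addr, &addr_len);
	if (client_fd < 0) {
		/* with a non-blocking listener, EAGAIN just means "retry on the next event" */
		if (errno != EAGAIN && errno != EWOULDBLOCK) perror("accept()");
		return -1;
	}
	if (set_nonblocking(client_fd) < 0) perror("fcntl()");
	return client_fd;
}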
void *uwsgi_python_tracebacker_thread(void *foobar) { struct iovec iov[9]; PyObject *new_thread = uwsgi_python_setup_thread("uWSGITraceBacker"); if (!new_thread) return NULL; struct sockaddr_un so_sun; socklen_t so_sun_len = 0; char *str_wid = uwsgi_num2str(uwsgi.mywid); char *sock_path = uwsgi_concat2(up.tracebacker, str_wid); int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; int fd = bind_to_unix(sock_path, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); uwsgi.no_defer_accept = current_defer_accept; PyObject *threading_module = PyImport_ImportModule("threading"); if (!threading_module) return NULL; //PyObject *threading_dict = PyModule_GetDict(threading_module); PyObject *traceback_module = PyImport_ImportModule("traceback"); if (!traceback_module) return NULL; PyObject *traceback_dict = PyModule_GetDict(traceback_module); PyObject *extract_stack = PyDict_GetItemString(traceback_dict, "extract_stack"); PyObject *sys_module = PyImport_ImportModule("sys"); PyObject *sys_dict = PyModule_GetDict(sys_module); PyObject *_current_frames = PyDict_GetItemString(sys_dict, "_current_frames"); uwsgi_log("python tracebacker for worker %d available on %s\n", uwsgi.mywid, sock_path); for(;;) { UWSGI_RELEASE_GIL; int client_fd = accept(fd, (struct sockaddr *) &so_sun, &so_sun_len); if (client_fd < 0) { uwsgi_error("accept()"); UWSGI_GET_GIL; continue; } UWSGI_GET_GIL; // here is the core of the tracebacker PyObject *current_frames = PyEval_CallObject(_current_frames, (PyObject *)NULL); if (!current_frames) goto end; uwsgi_log("current_frames = %p\n", current_frames); PyObject *current_frames_items = PyObject_GetAttrString(current_frames, "items"); if (!current_frames_items) goto end; uwsgi_log("current_frames_items = %p\n", current_frames_items); PyObject *frames_ret = PyEval_CallObject(current_frames_items, (PyObject *)NULL); if (!frames_ret) goto end; uwsgi_log("frames_ret = %p\n", frames_ret); PyObject *frames_iter = PyObject_GetIter(frames_ret); uwsgi_log("frames_iter = %p\n", frames_iter); PyObject *frame = PyIter_Next(frames_iter); while(frame) { uwsgi_log("frame = %p\n", frame); PyObject *stack = PyTuple_GetItem(frame, 1); uwsgi_log("stack = %p\n", stack); PyObject *arg_tuple = PyTuple_New(1); PyTuple_SetItem(arg_tuple, 0, stack); PyObject *stacktrace = PyEval_CallObject( extract_stack, arg_tuple); uwsgi_log("stacktrace = %p\n", stacktrace); PyObject *stacktrace_iter = PyObject_GetIter(stacktrace); PyObject *st_items = PyIter_Next(stacktrace_iter); while(st_items) { uwsgi_log("st_items = %p\n", st_items); PyObject *st_filename = PyTuple_GetItem(st_items, 0); PyObject *st_lineno = PyTuple_GetItem(st_items, 1); PyObject *st_name = PyTuple_GetItem(st_items, 2); PyObject *st_line = PyTuple_GetItem(st_items, 3); iov[0].iov_base = "filename = "; iov[0].iov_len = 11; iov[1].iov_base = PyString_AsString(st_filename); iov[1].iov_len = strlen(iov[1].iov_base); iov[2].iov_base = " lineno = "; iov[2].iov_len = 10 ; iov[3].iov_base = uwsgi_num2str(PyInt_AsLong(st_lineno)); iov[3].iov_len = strlen(iov[3].iov_base); iov[4].iov_base = " function = "; iov[4].iov_len = 12 ; iov[5].iov_base = PyString_AsString(st_name); iov[5].iov_len = strlen(iov[5].iov_base); iov[6].iov_base = "\n"; iov[6].iov_len = 1 ; if (st_line) { } if (writev(client_fd, iov, 7) < 0) { uwsgi_error("writev()"); } st_items = PyIter_Next(stacktrace_iter); } frame = PyIter_Next(frames_iter); } end: close(client_fd); } return NULL; }
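/*
 * A minimal sketch of the writev() technique the tracebacker uses above: several
 * string fragments are described by an iovec array and flushed to the client with
 * a single system call instead of being concatenated into one buffer first. The
 * function name write_record() and its fields are invented for the example.
 */
#include <sys/uio.h>
#include <string.h>
#include <stdio.h>

static int write_record(int fd, const char *filename, const char *lineno) {
	struct iovec iov[4];
	iov[0].iov_base = "filename = ";
	iov[0].iov_len = strlen("filename = ");
	iov[1].iov_base = (void *) filename;
	iov[1].iov_len = strlen(filename);
	iov[2].iov_base = " lineno = ";
	iov[2].iov_len = strlen(" lineno = ");
	iov[3].iov_base = (void *) lineno;
	iov[3].iov_len = strlen(lineno);
	/* one syscall writes all four fragments in order */
	if (writev(fd, iov, 4) < 0) {
		perror("writev()");
		return -1;
	}
	return 0;
}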
void uwsgi_corerouter_setup_sockets(struct uwsgi_corerouter *ucr) { struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets; while (ugs) { if (!strcmp(ucr->name, ugs->owner)) { #ifdef UWSGI_SCTP if (!ugs->subscription && !ugs->sctp) { #else if (!ugs->subscription) { #endif if (ugs->name[0] == '=') { int shared_socket = atoi(ugs->name+1); if (shared_socket >= 0) { ugs->fd = uwsgi_get_shared_socket_fd_by_num(shared_socket); ugs->shared = 1; if (ugs->fd == -1) { uwsgi_log("unable to use shared socket %d\n", shared_socket); exit(1); } ugs->name = uwsgi_getsockname(ugs->fd); } } else if (!uwsgi_startswith(ugs->name, "fd://", 5)) { int fd_socket = atoi(ugs->name+5); if (fd_socket >= 0) { ugs->fd = fd_socket; ugs->name = uwsgi_getsockname(ugs->fd); if (!ugs->name) { uwsgi_log("unable to use file descriptor %d as socket\n", fd_socket); exit(1); } } } else { ugs->port = strchr(ugs->name, ':'); if (ugs->fd == -1) { if (ugs->port) { ugs->fd = bind_to_tcp(ugs->name, uwsgi.listen_queue, ugs->port); ugs->port++; ugs->port_len = strlen(ugs->port); } else { ugs->fd = bind_to_unix(ugs->name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } } } // put socket in non-blocking mode uwsgi_socket_nb(ugs->fd); uwsgi_log("%s bound on %s fd %d\n", ucr->name, ugs->name, ugs->fd); } else if (ugs->subscription) { if (ugs->fd == -1) { if (strchr(ugs->name, ':')) { ugs->fd = bind_to_udp(ugs->name, 0, 0); } else { ugs->fd = bind_to_unix_dgram(ugs->name); } uwsgi_socket_nb(ugs->fd); } uwsgi_log("%s subscription server bound on %s fd %d\n", ucr->name, ugs->name, ugs->fd); } #ifdef UWSGI_SCTP else if (ugs->sctp) { if (ugs->fd == -1) { ugs->fd = bind_to_sctp(ugs->name); } uwsgi_log("%s SCTP server bound on %s fd %d\n", ucr->name, ugs->name, ugs->fd); } #endif } ugs = ugs->next; } } void *uwsgi_corerouter_setup_event_queue(struct uwsgi_corerouter *ucr, int id) { ucr->queue = event_queue_init(); struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets; while (ugs) { if (!strcmp(ucr->name, ugs->owner)) { if (!ucr->cheap || ugs->subscription) { event_queue_add_fd_read(ucr->queue, ugs->fd); } ugs->gateway = &ushared->gateways[id]; } ugs = ugs->next; } return event_queue_alloc(ucr->nevents); }
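/*
 * A minimal sketch of what a bind_to_unix()-style helper has to do in the
 * non-abstract case used above: remove a stale socket file, bind an AF_UNIX
 * stream socket and start listening. create_unix_listener() is an illustrative
 * name; the real uWSGI helper also handles chmod and abstract sockets.
 */
#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

static int create_unix_listener(const char *path, int backlog) {
	struct sockaddr_un sun;
	if (strlen(path) >= sizeof(sun.sun_path)) return -1;

	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0) { perror("socket()"); return -1; }

	unlink(path);                       /* remove a stale socket file, if any */
	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	strcpy(sun.sun_path, path);

	if (bind(fd, (struct sockaddr *) &sun, sizeof(sun)) < 0) {
		perror("bind()");
		close(fd);
		return -1;
	}
	if (listen(fd, backlog) < 0) {
		perror("listen()");
		close(fd);
		return -1;
	}
	return fd;
}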
void emperor_add(struct uwsgi_emperor_scanner *ues, char *name, time_t born, char *config, uint32_t config_size, uid_t uid, gid_t gid, char *socket_name) { struct uwsgi_instance *c_ui = ui; struct uwsgi_instance *n_ui = NULL; struct timeval tv; #ifdef UWSGI_DEBUG uwsgi_log("\n\nVASSAL %s %d %.*s %d %d\n", name, born, config_size, config, uid, gid); #endif if (strlen(name) > (0xff - 1)) { uwsgi_log("[emperor] invalid vassal name: %s\n", name); return; } gettimeofday(&tv, NULL); int now = tv.tv_sec; uint64_t micros = (tv.tv_sec * 1000 * 1000) + tv.tv_usec; // blacklist check struct uwsgi_emperor_blacklist_item *uebi = uwsgi_emperor_blacklist_check(name); if (uebi) { uint64_t i_micros = (uebi->last_attempt.tv_sec * 1000 * 1000) + uebi->last_attempt.tv_usec + uebi->throttle_level; if (i_micros > micros) { return; } } if (now - emperor_throttle < 1) { emperor_throttle_level = emperor_throttle_level * 2; } else { if (emperor_throttle_level > uwsgi.emperor_throttle) { emperor_throttle_level = emperor_throttle_level / 2; } if (emperor_throttle_level < uwsgi.emperor_throttle) { emperor_throttle_level = uwsgi.emperor_throttle; } } emperor_throttle = now; #ifdef UWSGI_DEBUG uwsgi_log("emperor throttle = %d\n", emperor_throttle_level); #endif usleep(emperor_throttle_level); if (uwsgi.emperor_tyrant) { if (uid == 0 || gid == 0) { uwsgi_log("[emperor-tyrant] invalid permissions for vassal %s\n", name); return; } } while (c_ui->ui_next) { c_ui = c_ui->ui_next; } n_ui = uwsgi_calloc(sizeof(struct uwsgi_instance)); if (config) { n_ui->use_config = 1; n_ui->config = config; n_ui->config_len = config_size; } c_ui->ui_next = n_ui; #ifdef UWSGI_DEBUG uwsgi_log("c_ui->ui_next = %p\n", c_ui->ui_next); #endif n_ui->ui_prev = c_ui; if (strchr(name, ':')) { n_ui->zerg = 1; uwsgi.emperor_broodlord_count++; } n_ui->scanner = ues; memcpy(n_ui->name, name, strlen(name)); n_ui->born = born; n_ui->uid = uid; n_ui->gid = gid; n_ui->last_mod = born; // start without loyalty n_ui->last_loyal = 0; n_ui->loyal = 0; n_ui->first_run = uwsgi_now(); n_ui->last_run = n_ui->first_run; n_ui->on_demand_fd = -1; if (socket_name) { n_ui->socket_name = uwsgi_str(socket_name); } n_ui->pid = -1; // ok here we check if we need to bind to the specified socket or continue with the activation if (socket_name) { char *tcp_port = strchr(socket_name, ':'); if (tcp_port) { // disable deferred accept for this socket int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; n_ui->on_demand_fd = bind_to_tcp(socket_name, uwsgi.listen_queue, tcp_port); uwsgi.no_defer_accept = current_defer_accept; } else { n_ui->on_demand_fd = bind_to_unix(socket_name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); } if (n_ui->on_demand_fd < 0) { uwsgi_error("emperor_add()/bind()"); free(n_ui); c_ui->ui_next = NULL; return; } event_queue_add_fd_read(uwsgi.emperor_queue, n_ui->on_demand_fd); uwsgi_log("[uwsgi-emperor] %s -> \"on demand\" instance detected, waiting for connections on socket \"%s\" ...\n", name, socket_name); return; } if (uwsgi_emperor_vassal_start(n_ui)) { // clear the vassal free(n_ui); c_ui->ui_next = NULL; } }
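/*
 * A minimal sketch of the vassal-spawn throttling logic in emperor_add() above:
 * if two spawn attempts land within the same second the delay doubles, otherwise
 * it decays back towards the configured base before being applied with usleep().
 * Variable and function names here are illustrative, not uWSGI globals.
 */
#include <time.h>
#include <unistd.h>

static int throttle_base_us = 1000;     /* configured minimum delay (like uwsgi.emperor_throttle) */
static int throttle_level_us = 1000;    /* current delay */
static time_t last_spawn = 0;

static void throttle_before_spawn(void) {
	time_t now = time(NULL);
	if (now - last_spawn < 1) {
		throttle_level_us *= 2;                 /* spawning too fast: back off exponentially */
	} else {
		if (throttle_level_us > throttle_base_us)
			throttle_level_us /= 2;         /* calm period: decay */
		if (throttle_level_us < throttle_base_us)
			throttle_level_us = throttle_base_us;
	}
	last_spawn = now;
	usleep(throttle_level_us);
}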
void uwsgi_bind_sockets() { socklen_t socket_type_len; union uwsgi_sockaddr usa; union uwsgi_sockaddr_ptr gsa; struct uwsgi_socket *uwsgi_sock = uwsgi.sockets; while (uwsgi_sock) { if (!uwsgi_sock->bound && !uwsgi_socket_is_already_bound(uwsgi_sock->name)) { char *tcp_port = strrchr(uwsgi_sock->name, ':'); if (tcp_port == NULL) { uwsgi_sock->fd = bind_to_unix(uwsgi_sock->name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); uwsgi_sock->family = AF_UNIX; if (uwsgi.chown_socket) { uwsgi_chown(uwsgi_sock->name, uwsgi.chown_socket); } uwsgi_log("uwsgi socket %d bound to UNIX address %s fd %d\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_sock->name, uwsgi_sock->fd); } else { #ifdef UWSGI_IPV6 if (uwsgi_sock->name[0] == '[' && tcp_port[-1] == ']') { uwsgi_sock->fd = bind_to_tcp6(uwsgi_sock->name, uwsgi.listen_queue, tcp_port); uwsgi_log("uwsgi socket %d bound to TCP6 address %s fd %d\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_sock->name, uwsgi_sock->fd); uwsgi_sock->family = AF_INET6; } else { #endif uwsgi_sock->fd = bind_to_tcp(uwsgi_sock->name, uwsgi.listen_queue, tcp_port); uwsgi_log("uwsgi socket %d bound to TCP address %s fd %d\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_sock->name, uwsgi_sock->fd); uwsgi_sock->family = AF_INET; #ifdef UWSGI_IPV6 } #endif } if (uwsgi_sock->fd < 0 && !uwsgi_sock->per_core) { uwsgi_log("unable to create server socket on: %s\n", uwsgi_sock->name); exit(1); } } uwsgi_sock->bound = 1; uwsgi_sock = uwsgi_sock->next; } if (uwsgi.chown_socket) { if (!uwsgi.master_as_root) { uwsgi_as_root(); } } int zero_used = 0; uwsgi_sock = uwsgi.sockets; while (uwsgi_sock) { if (uwsgi_sock->bound && uwsgi_sock->fd == 0) { zero_used = 1; break; } uwsgi_sock = uwsgi_sock->next; } if (!zero_used) { socket_type_len = sizeof(struct sockaddr_un); gsa.sa = (struct sockaddr *) &usa; if (!uwsgi.skip_zero && !getsockname(0, gsa.sa, &socket_type_len)) { if (gsa.sa->sa_family == AF_UNIX) { uwsgi_sock = uwsgi_new_socket(uwsgi_getsockname(0)); uwsgi_sock->family = AF_UNIX; uwsgi_sock->fd = 0; uwsgi_sock->bound = 1; uwsgi_log("uwsgi socket %d inherited UNIX address %s fd 0\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_sock->name); } else { uwsgi_sock = uwsgi_new_socket(uwsgi_getsockname(0)); uwsgi_sock->family = AF_INET; uwsgi_sock->fd = 0; uwsgi_sock->bound = 1; uwsgi_log("uwsgi socket %d inherited INET address %s fd 0\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_sock->name); } } else if (!uwsgi.honour_stdin) { int fd = open("/dev/null", O_RDONLY); if (fd < 0) { uwsgi_error_open("/dev/null"); exit(1); } if (fd != 0) { if (dup2(fd, 0) < 0) { uwsgi_error("dup2()"); exit(1); } close(fd); } } else if (uwsgi.honour_stdin) { if (!tcgetattr(0, &uwsgi.termios)) { uwsgi.restore_tc = 1; } } } // check for auto_port socket uwsgi_sock = uwsgi.sockets; while (uwsgi_sock) { if (uwsgi_sock->auto_port) { #ifdef UWSGI_IPV6 if (uwsgi_sock->family == AF_INET6) { uwsgi_log("uwsgi socket %d bound to TCP6 address %s (port auto-assigned) fd %d\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_sock->name, uwsgi_sock->fd); } else { #endif uwsgi_log("uwsgi socket %d bound to TCP address %s (port auto-assigned) fd %d\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_sock->name, uwsgi_sock->fd); #ifdef UWSGI_IPV6 } #endif } uwsgi_sock = uwsgi_sock->next; } }
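/*
 * A minimal sketch of the TCP branch of socket setup above: set SO_REUSEADDR,
 * bind to an IPv4 address and port, then listen. create_tcp_listener() is an
 * illustrative name, not the uWSGI bind_to_tcp() helper; IPv6 and the deferred
 * accept tuning are omitted.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

static int create_tcp_listener(const char *address, int port, int backlog) {
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0) { perror("socket()"); return -1; }

	int reuse = 1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));

	struct sockaddr_in sin;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons((unsigned short) port);
	/* an empty address means "bind on all interfaces" */
	if (!address || !*address) {
		sin.sin_addr.s_addr = htonl(INADDR_ANY);
	} else if (inet_pton(AF_INET, address, &sin.sin_addr) != 1) {
		fprintf(stderr, "invalid address: %s\n", address);
		close(fd);
		return -1;
	}

	if (bind(fd, (struct sockaddr *) &sin, sizeof(sin)) < 0) { perror("bind()"); close(fd); return -1; }
	if (listen(fd, backlog) < 0) { perror("listen()"); close(fd); return -1; }
	return fd;
}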
void uwsgi_setup_shared_sockets() { int i; struct uwsgi_socket *shared_sock = uwsgi.shared_sockets; while (shared_sock) { if (!uwsgi.is_a_reload) { char *tcp_port = strrchr(shared_sock->name, ':'); int current_defer_accept = uwsgi.no_defer_accept; if (shared_sock->no_defer) { uwsgi.no_defer_accept = 1; } if (tcp_port == NULL) { shared_sock->fd = bind_to_unix(shared_sock->name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); shared_sock->family = AF_UNIX; uwsgi_log("uwsgi shared socket %d bound to UNIX address %s fd %d\n", uwsgi_get_shared_socket_num(shared_sock), shared_sock->name, shared_sock->fd); } else { #ifdef UWSGI_IPV6 if (shared_sock->name[0] == '[' && tcp_port[-1] == ']') { shared_sock->fd = bind_to_tcp6(shared_sock->name, uwsgi.listen_queue, tcp_port); shared_sock->family = AF_INET6; // fix socket name shared_sock->name = uwsgi_getsockname(shared_sock->fd); uwsgi_log("uwsgi shared socket %d bound to TCP6 address %s fd %d\n", uwsgi_get_shared_socket_num(shared_sock), shared_sock->name, shared_sock->fd); } else { #endif shared_sock->fd = bind_to_tcp(shared_sock->name, uwsgi.listen_queue, tcp_port); shared_sock->family = AF_INET; // fix socket name shared_sock->name = uwsgi_getsockname(shared_sock->fd); uwsgi_log("uwsgi shared socket %d bound to TCP address %s fd %d\n", uwsgi_get_shared_socket_num(shared_sock), shared_sock->name, shared_sock->fd); #ifdef UWSGI_IPV6 } #endif } if (shared_sock->fd < 0) { uwsgi_log("unable to create shared socket on: %s\n", shared_sock->name); exit(1); } if (shared_sock->no_defer) { uwsgi.no_defer_accept = current_defer_accept; } } else { for (i = 3; i < (int) uwsgi.max_fd; i++) { char *sock = uwsgi_getsockname(i); if (sock) { if (!strcmp(sock, shared_sock->name)) { if (strchr(sock, ':')) { uwsgi_log("uwsgi shared socket %d inherited TCP address %s fd %d\n", uwsgi_get_shared_socket_num(shared_sock), sock, i); shared_sock->family = AF_INET; } else { uwsgi_log("uwsgi shared socket %d inherited UNIX address %s fd %d\n", uwsgi_get_shared_socket_num(shared_sock), sock, i); shared_sock->family = AF_UNIX; } shared_sock->fd = i; } else { free(sock); } } } } shared_sock->bound = 1; shared_sock = shared_sock->next; } struct uwsgi_socket *uwsgi_sock = uwsgi.sockets; while (uwsgi_sock) { if (uwsgi_sock->shared) { shared_sock = uwsgi_get_shared_socket_by_num(uwsgi_sock->from_shared); if (!shared_sock) { uwsgi_log("unable to find shared socket %d\n", uwsgi_sock->from_shared); exit(1); } uwsgi_sock->fd = shared_sock->fd; uwsgi_sock->family = shared_sock->family; uwsgi_sock->name = shared_sock->name; uwsgi_log("uwsgi socket %d mapped to shared socket %d (%s) fd %d\n", uwsgi_get_socket_num(uwsgi_sock), uwsgi_get_shared_socket_num(shared_sock), shared_sock->name, uwsgi_sock->fd); } uwsgi_sock = uwsgi_sock->next; } }
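/*
 * A minimal sketch of the reload path above: walk the file descriptors a freshly
 * re-exec'd process inherited and look for one whose bound address matches the
 * wanted socket name. find_inherited_unix_socket() is an illustrative helper and
 * only covers AF_UNIX; the real code compares against uwsgi_getsockname() output
 * for TCP sockets as well.
 */
#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>

static int find_inherited_unix_socket(const char *wanted, int max_fd) {
	int fd;
	for (fd = 3; fd < max_fd; fd++) {
		struct sockaddr_un sun;
		socklen_t len = sizeof(sun);
		memset(&sun, 0, sizeof(sun));
		if (getsockname(fd, (struct sockaddr *) &sun, &len)) continue; /* not a socket */
		if (sun.sun_family != AF_UNIX) continue;
		if (!strcmp(sun.sun_path, wanted)) return fd;                  /* already bound */
	}
	return -1;
}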
void *uwsgi_python_tracebacker_thread(void *foobar) { struct iovec iov[11]; PyObject *new_thread = uwsgi_python_setup_thread("uWSGITraceBacker"); if (!new_thread) return NULL; struct sockaddr_un so_sun; socklen_t so_sun_len = 0; char *str_wid = uwsgi_num2str(uwsgi.mywid); char *sock_path = uwsgi_concat2(up.tracebacker, str_wid); int current_defer_accept = uwsgi.no_defer_accept; uwsgi.no_defer_accept = 1; int fd = bind_to_unix(sock_path, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); if (fd < 0) { uwsgi.no_defer_accept = current_defer_accept; free(str_wid); free(sock_path); return NULL; } uwsgi.no_defer_accept = current_defer_accept; PyObject *traceback_module = PyImport_ImportModule("traceback"); if (!traceback_module) { free(str_wid); free(sock_path); close(fd); return NULL; } PyObject *traceback_dict = PyModule_GetDict(traceback_module); PyObject *extract_stack = PyDict_GetItemString(traceback_dict, "extract_stack"); PyObject *sys_module = PyImport_ImportModule("sys"); PyObject *sys_dict = PyModule_GetDict(sys_module); PyObject *_current_frames = PyDict_GetItemString(sys_dict, "_current_frames"); uwsgi_log("python tracebacker for worker %d available on %s\n", uwsgi.mywid, sock_path); for(;;) { UWSGI_RELEASE_GIL; int client_fd = accept(fd, (struct sockaddr *) &so_sun, &so_sun_len); if (client_fd < 0) { uwsgi_error("accept()"); UWSGI_GET_GIL; continue; } UWSGI_GET_GIL; // here is the core of the tracebacker PyObject *current_frames = PyEval_CallObject(_current_frames, (PyObject *)NULL); if (!current_frames) goto end2; PyObject *current_frames_items = PyObject_GetAttrString(current_frames, "items"); if (!current_frames_items) goto end; PyObject *frames_ret = PyEval_CallObject(current_frames_items, (PyObject *)NULL); if (!frames_ret) goto end3; PyObject *frames_iter = PyObject_GetIter(frames_ret); if (!frames_iter) goto end4; // we have the first frame, lets parse it if (write(client_fd, "*** uWSGI Python tracebacker output ***\n\n", 41) < 0) { uwsgi_error("write()"); } PyObject *frame = PyIter_Next(frames_iter); while(frame) { PyObject *thread_id = PyTuple_GetItem(frame, 0); if (!thread_id) goto next2; PyObject *stack = PyTuple_GetItem(frame, 1); if (!stack) goto next2; PyObject *arg_tuple = PyTuple_New(1); PyTuple_SetItem(arg_tuple, 0, stack); Py_INCREF(stack); PyObject *stacktrace = PyEval_CallObject( extract_stack, arg_tuple); Py_DECREF(arg_tuple); if (!stacktrace) goto next2; PyObject *stacktrace_iter = PyObject_GetIter(stacktrace); if (!stacktrace_iter) { Py_DECREF(stacktrace); goto next2;} PyObject *st_items = PyIter_Next(stacktrace_iter); // we have the first traceback item while(st_items) { #ifdef PYTHREE int thread_name_need_free = 0; #endif PyObject *st_filename = PyTuple_GetItem(st_items, 0); if (!st_filename) { Py_DECREF(st_items); goto next; } PyObject *st_lineno = PyTuple_GetItem(st_items, 1); if (!st_lineno) {Py_DECREF(st_items); goto next;} PyObject *st_name = PyTuple_GetItem(st_items, 2); if (!st_name) {Py_DECREF(st_items); goto next;} PyObject *st_line = PyTuple_GetItem(st_items, 3); iov[0].iov_base = "thread_id = "; iov[0].iov_len = 12; iov[1].iov_base = uwsgi_python_get_thread_name(thread_id); if (!iov[1].iov_base) { iov[1].iov_base = "<UnnamedPythonThread>"; } #ifdef PYTHREE else { thread_name_need_free = 1; } #endif iov[1].iov_len = strlen(iov[1].iov_base); iov[2].iov_base = " filename = "; iov[2].iov_len = 12; #ifdef PYTHREE PyObject *st_filename_utf8 = PyUnicode_AsUTF8String(st_filename); if (!st_filename_utf8) { if (thread_name_need_free) 
free(iov[1].iov_base); goto next; } iov[3].iov_base = PyString_AsString(st_filename_utf8); #else iov[3].iov_base = PyString_AsString(st_filename); #endif iov[3].iov_len = strlen(iov[3].iov_base); iov[4].iov_base = " lineno = "; iov[4].iov_len = 10 ; iov[5].iov_base = uwsgi_num2str(PyInt_AsLong(st_lineno)); iov[5].iov_len = strlen(iov[5].iov_base); iov[6].iov_base = " function = "; iov[6].iov_len = 12 ; #ifdef PYTHREE PyObject *st_name_utf8 = PyUnicode_AsUTF8String(st_name); if (!st_name_utf8) { if (thread_name_need_free) free(iov[1].iov_base); Py_DECREF(st_filename_utf8); goto next; } iov[7].iov_base = PyString_AsString(st_name_utf8); #else iov[7].iov_base = PyString_AsString(st_name); #endif iov[7].iov_len = strlen(iov[7].iov_base); iov[8].iov_base = ""; iov[8].iov_len = 0 ; iov[9].iov_base = ""; iov[9].iov_len = 0; iov[10].iov_base = "\n"; iov[10].iov_len = 1; #ifdef PYTHREE PyObject *st_line_utf8 = NULL; #endif if (st_line) { iov[8].iov_base = " line = "; iov[8].iov_len = 8; #ifdef PYTHREE PyObject *st_line_utf8 = PyUnicode_AsUTF8String(st_line); if (!st_line_utf8) { if (thread_name_need_free) free(iov[1].iov_base); Py_DECREF(st_filename_utf8); Py_DECREF(st_name_utf8); goto next; } iov[9].iov_base = PyString_AsString(st_line_utf8); #else iov[9].iov_base = PyString_AsString(st_line); #endif iov[9].iov_len = strlen(iov[9].iov_base); } if (writev(client_fd, iov, 11) < 0) { uwsgi_error("writev()"); } // free the line_no free(iov[5].iov_base); Py_DECREF(st_items); #ifdef PYTHREE Py_DECREF(st_filename_utf8); Py_DECREF(st_name_utf8); if (st_line_utf8) { Py_DECREF(st_line_utf8); } if (thread_name_need_free) free(iov[1].iov_base); #endif st_items = PyIter_Next(stacktrace_iter); } if (write(client_fd, "\n", 1) < 0) { uwsgi_error("write()"); } next: Py_DECREF(stacktrace_iter); Py_DECREF(stacktrace); next2: Py_DECREF(frame); frame = PyIter_Next(frames_iter); } Py_DECREF(frames_iter); end4: Py_DECREF(frames_ret); end3: Py_DECREF(current_frames_items); end: Py_DECREF(current_frames); end2: close(client_fd); } return NULL; }
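/*
 * A standalone sketch of the CPython calls the tracebacker relies on:
 * sys._current_frames() returns a dict of {thread_id: frame}, and
 * traceback.extract_stack(frame) turns a frame into printable entries. This
 * sketch assumes an embedded Python 3 interpreter, prints to stdout instead of
 * a client socket, and keeps error handling and reference cleanup to a minimum.
 */
#include <Python.h>

static void dump_all_thread_stacks(void) {
	PyObject *sys_mod = PyImport_ImportModule("sys");
	PyObject *tb_mod = PyImport_ImportModule("traceback");
	if (!sys_mod || !tb_mod) return;

	PyObject *current_frames = PyObject_GetAttrString(sys_mod, "_current_frames");
	PyObject *extract_stack = PyObject_GetAttrString(tb_mod, "extract_stack");
	if (!current_frames || !extract_stack) return;

	PyObject *frames = PyObject_CallObject(current_frames, NULL);   /* dict: thread id -> frame */
	if (!frames) return;

	PyObject *thread_id, *frame;
	Py_ssize_t pos = 0;
	while (PyDict_Next(frames, &pos, &thread_id, &frame)) {          /* borrowed references */
		PyObject *stack = PyObject_CallFunctionObjArgs(extract_stack, frame, NULL);
		if (!stack) { PyErr_Clear(); continue; }
		PyObject *repr = PyObject_Str(stack);
		if (repr) {
			const char *s = PyUnicode_AsUTF8(repr);
			if (s) printf("thread %ld: %s\n", PyLong_AsLong(thread_id), s);
			Py_DECREF(repr);
		}
		Py_DECREF(stack);
	}
	Py_DECREF(frames);
	/* module and attribute references are intentionally not released in this short sketch */
}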
void uwsgi_fork_server(char *socket) { // map fd 0 to /dev/null to avoid mess uwsgi_remap_fd(0, "/dev/null"); int fd = bind_to_unix(socket, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket); if (fd < 0) exit(1); // automatically receive credentials (TODO make something useful with them, like checking the pid is from the Emperor) if (uwsgi_socket_passcred(fd)) exit(1); // initialize the event queue int eq = event_queue_init(); if (uwsgi.has_emperor) { event_queue_add_fd_read(eq, uwsgi.emperor_fd); } event_queue_add_fd_read(eq, fd); // now start waiting for connections for(;;) { int interesting_fd = -1; int rlen = event_queue_wait(eq, -1, &interesting_fd); if (rlen <= 0) continue; if (uwsgi.has_emperor && interesting_fd == uwsgi.emperor_fd) { char byte; ssize_t rlen = read(uwsgi.emperor_fd, &byte, 1); if (rlen > 0) { uwsgi_log_verbose("received message %d from emperor\n", byte); } exit(0); } if (interesting_fd != fd) continue; struct sockaddr_un client_src; socklen_t client_src_len = 0; int client_fd = accept(fd, (struct sockaddr *) &client_src, &client_src_len); if (client_fd < 0) { uwsgi_error("uwsgi_fork_server()/accept()"); continue; } char hbuf[4]; pid_t ppid = -1; uid_t uid = -1; gid_t gid = -1; int fds_count = 8; size_t remains = 4; // we can receive up to 8 fds (generally from 1 to 3) int fds[8]; // we only read 4 bytes header ssize_t len = uwsgi_recv_cred_and_fds(client_fd, hbuf, remains, &ppid, &uid, &gid, fds, &fds_count); uwsgi_log_verbose("[uwsgi-fork-server] connection from pid: %d uid: %d gid:%d fds:%d\n", ppid, uid, gid, fds_count); if (len <= 0 || fds_count < 1) { uwsgi_error("uwsgi_fork_server()/recvmsg()"); goto end; } remains -= len; if (uwsgi_read_nb(client_fd, hbuf + (4-remains), remains, uwsgi.socket_timeout)) { uwsgi_error("uwsgi_fork_server()/uwsgi_read_nb()"); goto end; } struct uwsgi_header *uh = (struct uwsgi_header *) hbuf; // this memory area must be freed in the right place !!! char *body_argv = uwsgi_malloc(uh->_pktsize); if (uwsgi_read_nb(client_fd, body_argv, uh->_pktsize, uwsgi.socket_timeout)) { free(body_argv); uwsgi_error("uwsgi_fork_server()/uwsgi_read_nb()"); goto end; } pid_t pid = fork(); if (pid < 0) { free(body_argv); int i; for(i=0;i<fds_count;i++) close(fds[i]); // error on fork() uwsgi_error("uwsgi_fork_server()/fork()"); goto end; } else if (pid > 0) { free(body_argv); // close inherited decriptors int i; for(i=0;i<fds_count;i++) close(fds[i]); // wait for child death... 
waitpid(pid, NULL, 0); goto end; } else { // close Emperor channels // we do not close other file descriptors as a lot // of funny tricks could be accomplished with them if (uwsgi.has_emperor) { close(uwsgi.emperor_fd); if (uwsgi.emperor_fd_config > -1) close(uwsgi.emperor_fd_config); } // set EMPEROR_FD and FD_CONFIG env vars char *uef = uwsgi_num2str(fds[0]); if (setenv("UWSGI_EMPEROR_FD", uef, 1)) { uwsgi_error("uwsgi_fork_server()/setenv()"); exit(1); } free(uef); int pipe_config = -1; int on_demand = -1; if (uh->modifier2 & VASSAL_HAS_CONFIG && fds_count > 1) { pipe_config = fds[1]; char *uef = uwsgi_num2str(pipe_config); if (setenv("UWSGI_EMPEROR_FD_CONFIG", uef, 1)) { uwsgi_error("uwsgi_fork_server()/setenv()"); exit(1); } free(uef); } if (uh->modifier2 & VASSAL_HAS_ON_DEMAND && fds_count > 1) { if (pipe_config > -1) { if (fds_count > 2) { on_demand = fds[2]; } } else { on_demand = fds[1]; } } // dup the on_demand socket to 0 and close it if (on_demand > -1) { if (dup2(on_demand, 0) < 0) { uwsgi_error("uwsgi_fork_server()/dup2()"); exit(1); } close(on_demand); } // now fork again and die pid_t new_pid = fork(); if (new_pid < 0) { uwsgi_error("uwsgi_fork_server()/fork()"); exit(1); } else if (new_pid > 0) { exit(0); } else { // send the pid to the client_fd and close it struct uwsgi_buffer *ub = uwsgi_buffer_new(uwsgi.page_size); // leave space for header ub->pos = 4; if (uwsgi_buffer_append_keynum(ub, "pid", 3, getpid())) exit(1); // fix uwsgi header if (uwsgi_buffer_set_uh(ub, 35, 0)) goto end; // send_pid() if (uwsgi_write_nb(client_fd, ub->buf, ub->pos, uwsgi.socket_timeout)) exit(1); close(client_fd); uwsgi_log("double fork() and reparenting successful (new pid: %d)\n", getpid()); uwsgi_buffer_destroy(ub); // now parse the uwsgi packet array and build the argv struct uwsgi_string_list *usl = NULL, *usl_argv = NULL; uwsgi_hooked_parse_array(body_argv, uh->_pktsize, parse_argv_hook, &usl_argv); free(body_argv); // build new argc/argv uwsgi.new_argc = 0; size_t procname_len = 1; uwsgi_foreach(usl, usl_argv) { uwsgi.new_argc++; procname_len += usl->len + 1; } char *new_procname = uwsgi_calloc(procname_len); uwsgi.new_argv = uwsgi_calloc(sizeof(char *) * (uwsgi.new_argc + 1)); int counter = 0; uwsgi_foreach(usl, usl_argv) { uwsgi.new_argv[counter] = usl->value; strcat(new_procname, usl->value); strcat(new_procname, " "); counter++; } // fix process name uwsgi_set_processname(new_procname); free(new_procname); // this is the only step required to have a consistent environment uwsgi.fork_socket = NULL; // this prevents the process from re-exec'ing itself uwsgi.exit_on_reload = 1; // fixup the Emperor communication uwsgi_check_emperor(); // continue with uWSGI startup return; } }
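/*
 * A minimal sketch of the SCM_RIGHTS side of uwsgi_recv_cred_and_fds() used by the
 * fork server above: read a small payload from a UNIX socket together with any file
 * descriptors passed as ancillary data. recv_fds() is an illustrative name; the
 * SCM_CREDENTIALS handling (pid/uid/gid of the sender) is left out.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

/* returns bytes read; fills fds[] and *fds_count (capacity passed in via *fds_count) */
static ssize_t recv_fds(int sock, void *buf, size_t len, int *fds, int *fds_count) {
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	union {
		char buf[CMSG_SPACE(sizeof(int) * 8)];
		struct cmsghdr align;                  /* forces correct alignment of the control buffer */
	} u;
	struct msghdr msg;
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	ssize_t rlen = recvmsg(sock, &msg, 0);
	if (rlen <= 0) { *fds_count = 0; return rlen; }

	int found = 0;
	struct cmsghdr *cmsg;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) continue;
		int n = (int) ((cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int));
		int i;
		for (i = 0; i < n && found < *fds_count; i++) {
			memcpy(&fds[found], CMSG_DATA(cmsg) + i * sizeof(int), sizeof(int));
			found++;
		}
	}
	*fds_count = found;
	return rlen;
}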