// Fork a new mule process (1-based id). In the child: record identity,
// wire up the per-mule signal pipes, close inherited server sockets, run
// plugin hooks and hand control to uwsgi_mule_handler() (never returns).
// In the parent: record the child's pid and log the spawn.
// NOTE(review): a fork() failure (pid < 0) is silently ignored here.
void uwsgi_mule(int id) {
	int i;
	pid_t pid = uwsgi_fork(uwsgi.mules[id - 1].name);
	if (pid == 0) {
#ifdef __linux__
		// die if the master dies, so mules are never orphaned
		if (prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0)) {
			uwsgi_error("prctl()");
		}
#endif
		uwsgi.muleid = id;
		// avoid race conditions: both child and parent write id/pid, so the
		// slot is valid whichever side of the fork runs first
		uwsgi.mules[id - 1].id = id;
		uwsgi.mules[id - 1].pid = getpid();
		uwsgi_fixup_fds(0, id, NULL);
		uwsgi.my_signal_socket = uwsgi.mules[id - 1].signal_pipe[1];
		uwsgi.signal_socket = uwsgi.shared->mule_signal_pipe[1];
		// mules do not serve requests
		uwsgi_close_all_sockets();
		// run each plugin's master_fixup hook, then its post_fork hook,
		// across the full 256-slot plugin table
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->master_fixup) {
				uwsgi.p[i]->master_fixup(1);
			}
		}
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->post_fork) {
				uwsgi.p[i]->post_fork();
			}
		}
		// if a mule "patch" (script/argument) is set, offer it to each
		// plugin's mule hook; a hook returning 1 takes over and never returns
		if (uwsgi.mules[id - 1].patch) {
			for (i = 0; i < 256; i++) {
				if (uwsgi.p[i]->mule) {
					if (uwsgi.p[i]->mule(uwsgi.mules[id - 1].patch) == 1) {
						// never here
						break;
					}
				}
			}
		}
		uwsgi_mule_handler();
	}
	else if (pid > 0) {
		uwsgi.mules[id - 1].id = id;
		uwsgi.mules[id - 1].pid = pid;
		uwsgi_log("spawned uWSGI mule %d (pid: %d)\n", id, (int) pid);
	}
}
// (Re)spawn the gateway stored in slot `id` of the shared gateways table.
// On fork failure: log and return. In the child: fix up fds, install
// signal handlers and enter the gateway's event loop (never returns).
// In the parent: record pid/respawn count and log first-spawn vs respawn.
void gateway_respawn(int id) {
	pid_t gw_pid;
	struct uwsgi_gateway *ug = &ushared->gateways[id];
	// clear any pending harakiri countdown for this slot before respawning
	if (uwsgi.master_process)
		uwsgi.shared->gateways_harakiri[id] = 0;
	gw_pid = uwsgi_fork(ug->fullname);
	if (gw_pid < 0) {
		uwsgi_error("fork()");
		return;
	}
	if (gw_pid == 0) {
		// child: becomes the gateway process
		uwsgi_fixup_fds(0, 0, ug);
		if (uwsgi.master_as_root)
			uwsgi_as_root();
#ifdef __linux__
		// die together with the master so gateways are never orphaned
		if (prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0)) {
			uwsgi_error("prctl()");
		}
#endif
		uwsgi.mypid = getpid();
		// ensure a brutal cleanup runs even on unexpected exit paths
		atexit(gateway_brutal_end);
		signal(SIGALRM, SIG_IGN);
		signal(SIGHUP, SIG_IGN);
		signal(SIGINT, end_me);
		signal(SIGTERM, end_me);
		signal(SIGUSR1, SIG_IGN);
		signal(SIGUSR2, SIG_IGN);
		signal(SIGPIPE, SIG_IGN);
		// NOTE(review): SIGSTOP can be neither caught nor ignored; this
		// call always fails and is a harmless no-op
		signal(SIGSTOP, SIG_IGN);
		signal(SIGTSTP, SIG_IGN);
		ug->loop(id, ug->data);
		// never here !!! (i hope)
		exit(1);
	}
	// parent: record the new child and log
	ug->pid = gw_pid;
	ug->respawns++;
	if (ug->respawns == 1) {
		uwsgi_log("spawned %s %d (pid: %d)\n", ug->name, ug->num, (int) gw_pid);
	}
	else {
		uwsgi_log("respawned %s %d (pid: %d)\n", ug->name, ug->num, (int) gw_pid);
	}
}
// Fork a new mule process (1-based id). In the child: record identity,
// wire up the per-mule signal pipes, close inherited server sockets, run
// plugin hooks and hand control to uwsgi_mule_handler() (never returns).
// In the parent: record the child's pid and log the spawn.
// NOTE(review): a fork() failure (pid < 0) is silently ignored here.
void uwsgi_mule(int id) {
	int i;
	pid_t pid = uwsgi_fork(uwsgi.mules[id-1].name);
	if (pid == 0) {
		uwsgi.muleid = id;
		// avoid race conditions: both child and parent write id/pid, so the
		// slot is valid whichever side of the fork runs first
		uwsgi.mules[id-1].id = id;
		uwsgi.mules[id-1].pid = getpid();
		uwsgi_fixup_fds(0, id);
		uwsgi.my_signal_socket = uwsgi.mules[id-1].signal_pipe[1];
		uwsgi.signal_socket = uwsgi.shared->mule_signal_pipe[1];
		// mules do not serve requests
		uwsgi_close_all_sockets();
		// scan the full 256-slot plugin table: the previous 0xFF (255)
		// bound skipped the last slot (off-by-one), and was inconsistent
		// with the other spawn routines in this file that use 256
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->master_fixup) {
				uwsgi.p[i]->master_fixup(1);
			}
		}
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->post_fork) {
				uwsgi.p[i]->post_fork();
			}
		}
		// if a mule "patch" (script/argument) is set, offer it to each
		// plugin's mule hook; a hook returning 1 takes over and never returns
		if (uwsgi.mules[id-1].patch) {
			for (i = 0; i < 256; i++) {
				if (uwsgi.p[i]->mule) {
					if (uwsgi.p[i]->mule(uwsgi.mules[id-1].patch) == 1) {
						// never here
						break;
					}
				}
			}
		}
		uwsgi_mule_handler();
	}
	else if (pid > 0) {
		uwsgi.mules[id-1].id = id;
		uwsgi.mules[id-1].pid = pid;
		uwsgi_log("spawned uWSGI mule %d (pid: %d)\n", id, (int) pid);
	}
}
// Fork the spooler process for the given spooler struct. Child: install
// the wakeup handler, record identity, close inherited server sockets,
// run plugin post_fork and spooler_init hooks (core plugins, then generic
// plugins) and enter spooler() (never returns). Parent: log and return
// the child's pid. On fork failure the whole process exits.
pid_t spooler_start(struct uwsgi_spooler *uspool) {
	int i;
	pid_t pid = uwsgi_fork("uWSGI spooler");
	if (pid < 0) {
		uwsgi_error("fork()");
		exit(1);
	}
	else if (pid == 0) {
		// USR1 will be used to wake up the spooler
		uwsgi_unix_signal(SIGUSR1, spooler_wakeup);
		// mywid = -1 marks this process as "not a worker"
		uwsgi.mywid = -1;
		uwsgi.mypid = getpid();
		uspool->pid = uwsgi.mypid;
		// avoid race conditions !!! (the child records its own pid too)
		uwsgi.i_am_a_spooler = uspool;
		uwsgi_fixup_fds(0, 0, NULL);
		uwsgi_close_all_sockets();
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->post_fork) {
				uwsgi.p[i]->post_fork();
			}
		}
		uwsgi.signal_socket = uwsgi.shared->spooler_signal_pipe[1];
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->spooler_init) {
				uwsgi.p[i]->spooler_init();
			}
		}
		for (i = 0; i < uwsgi.gp_cnt; i++) {
			if (uwsgi.gp[i]->spooler_init) {
				uwsgi.gp[i]->spooler_init();
			}
		}
		spooler(uspool);
	}
	else if (pid > 0) {
		uwsgi_log("spawned the uWSGI spooler on dir %s with pid %d\n", uspool->dir, pid);
	}
	return pid;
}
// Fork the spooler process (variant with an explicit signal table).
// Child: install handlers, record identity, close inherited server
// sockets, run plugin post_fork hooks and enter uwsgi_spooler_run()
// (never returns). Parent: log and return the child's pid. On fork
// failure the whole process exits.
pid_t spooler_start(struct uwsgi_spooler * uspool) {
	int i;
	pid_t pid = uwsgi_fork("uWSGI spooler");
	if (pid < 0) {
		uwsgi_error("fork()");
		exit(1);
	}
	else if (pid == 0) {
		signal(SIGALRM, SIG_IGN);
		signal(SIGHUP, SIG_IGN);
		signal(SIGINT, end_me);
		signal(SIGTERM, end_me);
		// USR1 will be used to wake up the spooler
		uwsgi_unix_signal(SIGUSR1, spooler_wakeup);
		signal(SIGUSR2, SIG_IGN);
		signal(SIGPIPE, SIG_IGN);
		// NOTE(review): SIGSTOP can be neither caught nor ignored; this
		// call always fails and is a harmless no-op
		signal(SIGSTOP, SIG_IGN);
		signal(SIGTSTP, SIG_IGN);
		uwsgi.mypid = getpid();
		uspool->pid = uwsgi.mypid;
		// avoid race conditions !!! (the child records its own pid too)
		uwsgi.i_am_a_spooler = uspool;
		uwsgi_fixup_fds(0, 0, NULL);
		uwsgi_close_all_sockets();
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->post_fork) {
				uwsgi.p[i]->post_fork();
			}
		}
		uwsgi_spooler_run();
	}
	else if (pid > 0) {
		uwsgi_log("spawned the uWSGI spooler on dir %s with pid %d\n", uspool->dir, pid);
	}
	return pid;
}
// (Re)spawn worker `wid`. This variant resets the worker's shared-memory
// bookkeeping in the CHILD after fork(). Returns 1 in the freshly-forked
// worker (the caller branches into the worker loop) and 0 in the master.
// The threaded-logger mutex serializes the fork against the logger thread.
int uwsgi_respawn_worker(int wid) {
	int respawns = uwsgi.workers[wid].respawn_count;
	int i;
	if (uwsgi.threaded_logger) {
		pthread_mutex_lock(&uwsgi.threaded_logger_lock);
	}
	pid_t pid = uwsgi_fork(uwsgi.workers[wid].name);
	if (pid == 0) {
		// child: becomes the worker
		signal(SIGWINCH, worker_wakeup);
		signal(SIGTSTP, worker_wakeup);
		uwsgi.mywid = wid;
		uwsgi.mypid = getpid();
		uwsgi.workers[uwsgi.mywid].pid = uwsgi.mypid;
		uwsgi.workers[uwsgi.mywid].id = uwsgi.mywid;
		uwsgi.workers[uwsgi.mywid].harakiri = 0;
		uwsgi.workers[uwsgi.mywid].rss_size = 0;
		uwsgi.workers[uwsgi.mywid].vsz_size = 0;
		// do not reset worker counters on reload !!!
		//uwsgi.workers[uwsgi.mywid].requests = 0;
		// ...but maintain a delta counter (yes this is racy in multithread)
		uwsgi.workers[uwsgi.mywid].delta_requests = 0;
		//uwsgi.workers[uwsgi.mywid].failed_requests = 0;
		uwsgi.workers[uwsgi.mywid].respawn_count++;
		uwsgi.workers[uwsgi.mywid].last_spawn = uwsgi.current_time;
		uwsgi.workers[uwsgi.mywid].manage_next_request = 1;
		uwsgi.workers[uwsgi.mywid].cheaped = 0;
		uwsgi.workers[uwsgi.mywid].busy = 0;
		uwsgi.workers[uwsgi.mywid].suspended = 0;
		uwsgi.workers[uwsgi.mywid].sig = 0;
		// reset the apps count with a copy from the master
		uwsgi.workers[uwsgi.mywid].apps_cnt = uwsgi.workers[0].apps_cnt;
		uwsgi_fixup_fds(wid, 0, NULL);
		uwsgi.my_signal_socket = uwsgi.workers[wid].signal_pipe[1];
		if (uwsgi.master_process) {
			// on respawn (or in cheap mode) re-run plugin master fixups
			if ((uwsgi.workers[uwsgi.mywid].respawn_count || uwsgi.cheap)) {
				for (i = 0; i < 256; i++) {
					if (uwsgi.p[i]->master_fixup) {
						uwsgi.p[i]->master_fixup(1);
					}
				}
			}
		}
		// NOTE(review): the child returns while its (post-fork copy of)
		// threaded_logger_lock is still locked — presumably harmless since
		// the logger thread does not exist in the child; verify
		return 1;
	}
	else if (pid < 1) {
		uwsgi_error("fork()");
	}
	else {
		if (respawns > 0) {
			uwsgi_log("Respawned uWSGI worker %d (new pid: %d)\n", wid, (int) pid);
		}
		else {
			uwsgi_log("spawned uWSGI worker %d (pid: %d, cores: %d)\n", wid, pid, uwsgi.cores);
		}
	}
	if (uwsgi.threaded_logger) {
		pthread_mutex_unlock(&uwsgi.threaded_logger_lock);
	}
	return 0;
}
// (Re)spawn worker `wid` (variant that resets bookkeeping in the MASTER
// before fork(), so counters/timestamps are updated even when fork()
// fails; the child only fixes up what must differ per-process).
// Returns 1 in the freshly-forked worker, 0 in the master.
int uwsgi_respawn_worker(int wid) {
	int respawns = uwsgi.workers[wid].respawn_count;
	// we count the respawns before errors...
	uwsgi.workers[wid].respawn_count++;
	// ... same for update time
	uwsgi.workers[wid].last_spawn = uwsgi.current_time;
	// ... and memory/harakiri
	uwsgi.workers[wid].harakiri = 0;
	uwsgi.workers[wid].user_harakiri = 0;
	uwsgi.workers[wid].pending_harakiri = 0;
	uwsgi.workers[wid].rss_size = 0;
	uwsgi.workers[wid].vsz_size = 0;
	// ... reset stopped_at
	uwsgi.workers[wid].cursed_at = 0;
	uwsgi.workers[wid].no_mercy_at = 0;
	// internal statuses should be reset too
	uwsgi.workers[wid].cheaped = 0;
	// SUSPENSION is managed by the user, not the master...
	//uwsgi.workers[wid].suspended = 0;
	uwsgi.workers[wid].sig = 0;
	// this is required for various checks
	uwsgi.workers[wid].delta_requests = 0;
	int i;
	// serialize the fork against the logger thread
	if (uwsgi.threaded_logger) {
		pthread_mutex_lock(&uwsgi.threaded_logger_lock);
	}
	pid_t pid = uwsgi_fork(uwsgi.workers[wid].name);
	if (pid == 0) {
		// child: becomes the worker
		signal(SIGWINCH, worker_wakeup);
		signal(SIGTSTP, worker_wakeup);
		uwsgi.mywid = wid;
		uwsgi.mypid = getpid();
		// pid is updated by the master
		//uwsgi.workers[uwsgi.mywid].pid = uwsgi.mypid;
		// OVERENGINEERING (just to be safe)
		uwsgi.workers[uwsgi.mywid].id = uwsgi.mywid;
		/*
		uwsgi.workers[uwsgi.mywid].harakiri = 0;
		uwsgi.workers[uwsgi.mywid].user_harakiri = 0;
		uwsgi.workers[uwsgi.mywid].rss_size = 0;
		uwsgi.workers[uwsgi.mywid].vsz_size = 0;
		*/
		// do not reset worker counters on reload !!!
		//uwsgi.workers[uwsgi.mywid].requests = 0;
		// ...but maintain a delta counter (yes this is racy in multithread)
		//uwsgi.workers[uwsgi.mywid].delta_requests = 0;
		//uwsgi.workers[uwsgi.mywid].failed_requests = 0;
		//uwsgi.workers[uwsgi.mywid].respawn_count++;
		//uwsgi.workers[uwsgi.mywid].last_spawn = uwsgi.current_time;
		uwsgi.workers[uwsgi.mywid].manage_next_request = 1;
		/*
		uwsgi.workers[uwsgi.mywid].cheaped = 0;
		uwsgi.workers[uwsgi.mywid].suspended = 0;
		uwsgi.workers[uwsgi.mywid].sig = 0;
		*/
		// reset the apps count with a copy from the master
		uwsgi.workers[uwsgi.mywid].apps_cnt = uwsgi.workers[0].apps_cnt;
		// reset wsgi_request structures
		for(i=0;i<uwsgi.cores;i++) {
			uwsgi.workers[uwsgi.mywid].cores[i].in_request = 0;
			memset(&uwsgi.workers[uwsgi.mywid].cores[i].req, 0, sizeof(struct wsgi_request));
		}
		uwsgi_fixup_fds(wid, 0, NULL);
		uwsgi.my_signal_socket = uwsgi.workers[wid].signal_pipe[1];
		if (uwsgi.master_process) {
			// on respawn (or in cheap mode) re-run plugin master fixups
			if ((uwsgi.workers[uwsgi.mywid].respawn_count || uwsgi.status.is_cheap)) {
				for (i = 0; i < 256; i++) {
					if (uwsgi.p[i]->master_fixup) {
						uwsgi.p[i]->master_fixup(1);
					}
				}
			}
		}
		return 1;
	}
	else if (pid < 1) {
		uwsgi_error("fork()");
	}
	else {
		// the pid is set only in the master, as the worker should never use it
		uwsgi.workers[wid].pid = pid;
		if (respawns > 0) {
			uwsgi_log("Respawned uWSGI worker %d (new pid: %d)\n", wid, (int) pid);
		}
		else {
			uwsgi_log("spawned uWSGI worker %d (pid: %d, cores: %d)\n", wid, pid, uwsgi.cores);
		}
	}
	if (uwsgi.threaded_logger) {
		pthread_mutex_unlock(&uwsgi.threaded_logger_lock);
	}
	return 0;
}