Example #1
File: master.c Project: simbha/uwsgi
void suspend_resume_them_all(int signum) {

	int i;
	int suspend = 0;

	if (uwsgi.workers[0].suspended == 1) {
		uwsgi_log_verbose("*** (SIGTSTP received) resuming workers ***\n");
		uwsgi.workers[0].suspended = 0;
	}
	else {
		uwsgi_log_verbose("*** PAUSE (press start to resume, if you do not have a joypad send SIGTSTP) ***\n");
		suspend = 1;
		uwsgi.workers[0].suspended = 1;
	}

	// subscribe/unsubscribe if needed
	uwsgi_subscribe_all(suspend, 1);

	for (i = 1; i <= uwsgi.numproc; i++) {
		uwsgi.workers[i].suspended = suspend;
		if (uwsgi.workers[i].pid > 0) {
			if (kill(uwsgi.workers[i].pid, SIGTSTP)) {
				uwsgi_error("kill()");
			}
		}
	}
}
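Context, not part of this example: the handler only fires once the master registers it for SIGTSTP, which master_loop() does during its setup phase (see Example #27):

	// a SIGTSTP delivered to the master toggles suspend/resume for the whole worker pool
	uwsgi_unix_signal(SIGTSTP, suspend_resume_them_all);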
Example #2
File: gevent.c Project: suchkultur/uwsgi
PyObject *py_uwsgi_gevent_graceful(PyObject *self, PyObject *args) {

    uwsgi_log("Gracefully killing worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
    uwsgi.workers[uwsgi.mywid].manage_next_request = 0;

    uwsgi_log_verbose("stopping gevent signals watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
    PyObject_CallMethod(ugevent.my_signal_watcher, "stop", NULL);
    PyObject_CallMethod(ugevent.signal_watcher, "stop", NULL);

    uwsgi_log_verbose("stopping gevent sockets watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
    int i,count = uwsgi_count_sockets(uwsgi.sockets);
    for(i=0; i<count; i++) {
        PyObject_CallMethod(ugevent.watchers[i], "stop", NULL);
    }
    uwsgi_log_verbose("main gevent watchers stopped for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);

    int running_cores = 0;
    for(i=0; i<uwsgi.async; i++) {
        if (uwsgi.workers[uwsgi.mywid].cores[i].in_request) {
            struct wsgi_request *wsgi_req = &uwsgi.workers[uwsgi.mywid].cores[i].req;
            uwsgi_log_verbose("worker %d (pid: %d) core %d is managing \"%.*s %.*s\" for %.*s\n", uwsgi.mywid, uwsgi.mypid, i,
                              wsgi_req->method_len, wsgi_req->method, wsgi_req->uri_len, wsgi_req->uri,
                              wsgi_req->remote_addr_len, wsgi_req->remote_addr);
            running_cores++;
        }
    }

    if (running_cores > 0) {
        uwsgi_log_verbose("waiting for %d running requests on worker %d (pid: %d)...\n", running_cores, uwsgi.mywid, uwsgi.mypid);
    }

    Py_INCREF(Py_None);
    return Py_None;
}
Example #3
File: mule.c Project: dbhattar/uwsgi
void uwsgi_mule_handler() {

	ssize_t len;
	uint8_t uwsgi_signal;
	int rlen;
	int interesting_fd;

	// this must be configurable
	char message[65536];

	int mule_queue = event_queue_init();

	event_queue_add_fd_read(mule_queue, uwsgi.signal_socket);
	event_queue_add_fd_read(mule_queue, uwsgi.my_signal_socket);
	event_queue_add_fd_read(mule_queue, uwsgi.mules[uwsgi.muleid - 1].queue_pipe[1]);
	event_queue_add_fd_read(mule_queue, uwsgi.shared->mule_queue_pipe[1]);

	uwsgi_mule_add_farm_to_queue(mule_queue);

	for (;;) {
		rlen = event_queue_wait(mule_queue, -1, &interesting_fd);
		if (rlen <= 0) {
			continue;
		}

		if (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket || farm_has_signaled(interesting_fd)) {
			len = read(interesting_fd, &uwsgi_signal, 1);
			if (len <= 0) {
				uwsgi_log_verbose("uWSGI mule %d braying: my master died, i will follow him...\n", uwsgi.muleid);
				end_me(0);
			}
#ifdef UWSGI_DEBUG
			uwsgi_log_verbose("master sent signal %d to mule %d\n", uwsgi_signal, uwsgi.muleid);
#endif
			if (uwsgi_signal_handler(uwsgi_signal)) {
				uwsgi_log_verbose("error managing signal %d on mule %d\n", uwsgi_signal, uwsgi.muleid);
			}
		}
		else if (interesting_fd == uwsgi.mules[uwsgi.muleid - 1].queue_pipe[1] || interesting_fd == uwsgi.shared->mule_queue_pipe[1] || farm_has_msg(interesting_fd)) {
			len = read(interesting_fd, message, 65536);
			if (len < 0) {
				uwsgi_error("read()");
			}
			else {
				int i, found = 0;
				for (i = 0; i < 256; i++) {
					if (uwsgi.p[i]->mule_msg) {
						if (uwsgi.p[i]->mule_msg(message, len)) {
							found = 1;
							break;
						}
					}
				}
				if (!found)
					uwsgi_log("*** mule %d received a %ld bytes message ***\n", uwsgi.muleid, (long) len);
			}
		}
	}

}
Example #4
File: gevent.c Project: sashka/uwsgi
PyObject *py_uwsgi_gevent_signal_handler(PyObject * self, PyObject * args) {

	uint8_t uwsgi_signal;
	int signal_socket;

	if (!PyArg_ParseTuple(args, "i:uwsgi_gevent_signal_handler", &signal_socket)) {
        	return NULL;
	}

	if (read(signal_socket, &uwsgi_signal, 1) <= 0) {
        	if (uwsgi.no_orphans) {
                	uwsgi_log_verbose("uWSGI worker %d screams: UAAAAAAH my master died, i will follow him...\n", uwsgi.mywid);
                        end_me(0);
                }
		// close the socket to end the mess...from now on the worker is alone (no master)
                else close(signal_socket);
        }
        else {
#ifdef UWSGI_DEBUG
        	uwsgi_log_verbose("master sent signal %d to worker %d\n", uwsgi_signal, uwsgi.mywid);
#endif
		if (uwsgi_signal_handler(uwsgi_signal)) {
                	uwsgi_log_verbose("error managing signal %d on worker %d\n", uwsgi_signal, uwsgi.mywid);
                }
        }

	Py_INCREF(Py_None);
	return Py_None;
}
Example #5
void suspend_resume_them_all(int signum) {

	int i;
	int suspend = 0;

	if (uwsgi.workers[0].suspended == 1) {
		uwsgi_log_verbose("*** (SIGTSTP received) resuming workers ***\n");
		uwsgi.workers[0].suspended = 0;
	}
	else {
		uwsgi_log_verbose("*** PAUSE (press start to resume, if you do not have a joypad send SIGTSTP) ***\n");
		suspend = 1;
		uwsgi.workers[0].suspended = 1;
	}

	// subscribe/unsubscribe if needed
	struct uwsgi_string_list *subscriptions = uwsgi.subscriptions;
	while (subscriptions) {
		uwsgi_log("%s %s\n", suspend ? "unsubscribing from" : "subscribing to", subscriptions->value);
		uwsgi_subscribe(subscriptions->value, suspend);
		subscriptions = subscriptions->next;
	}


	for (i = 1; i <= uwsgi.numproc; i++) {
		uwsgi.workers[i].suspended = suspend;
		if (uwsgi.workers[i].pid > 0) {
			if (kill(uwsgi.workers[i].pid, SIGTSTP)) {
				uwsgi_error("kill()");
			}
		}
	}
}
Example #6
void uwsgi_receive_signal(int fd, char *name, int id) {

	uint8_t uwsgi_signal;

	ssize_t ret = read(fd, &uwsgi_signal, 1);

	if (ret == 0) {
		goto destroy;
	}
	else if (ret < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
		uwsgi_error("[uwsgi-signal] read()");
		goto destroy;
	}
	else if (ret > 0) {
#ifdef UWSGI_DEBUG
		uwsgi_log_verbose("master sent signal %d to %s %d\n", uwsgi_signal, name, id);
#endif
		if (uwsgi_signal_handler(uwsgi_signal)) {
			uwsgi_log_verbose("error managing signal %d on %s %d\n", uwsgi_signal, name, id);
		}
	}

	return;

destroy:
	// better to kill the whole worker...
	uwsgi_log_verbose("uWSGI %s %d screams: UAAAAAAH my master disconnected: i will kill myself !!!\n", name, id);
	end_me(0);

}
Example #7
File: master.c Project: simbha/uwsgi
void uwsgi_chain_reload() {
	if (!uwsgi.status.chain_reloading) {
		uwsgi_log_verbose("chain reload starting...\n");
		uwsgi.status.chain_reloading = 1;
	}
	else {
		uwsgi_log_verbose("chain reload already running...\n");
	}
}
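For context: the flag set here is consumed once per master cycle by uwsgi_master_check_chain() (Example #22), and the counter advances when the cursed worker is respawned, as in this fragment from the end of master_loop() (Example #27):

	// are we chain reloading it ?
	if (uwsgi.status.chain_reloading == thewid) {
		uwsgi.status.chain_reloading++;
	}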
Example #8
File: gevent.c Project: RyuaNerin/uwsgi
PyObject *py_uwsgi_gevent_graceful(PyObject *self, PyObject *args) {

	int running_cores = 0;
	int rounds = 0;

	uwsgi_log("Gracefully killing worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
	uwsgi.workers[uwsgi.mywid].manage_next_request = 0;

	uwsgi_log_verbose("stopping gevent signals watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
	PyObject_CallMethod(ugevent.my_signal_watcher, "stop", NULL);
	PyObject_CallMethod(ugevent.signal_watcher, "stop", NULL);

	uwsgi_log_verbose("stopping gevent sockets watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
	int i,count = uwsgi_count_sockets(uwsgi.sockets);
	for(i=0;i<count;i++) {
		PyObject_CallMethod(ugevent.watchers[i], "stop", NULL);
	}
	uwsgi_log_verbose("main gevent watchers stopped for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);

retry:
	running_cores = 0;
	for(i=0;i<uwsgi.async;i++) {
		if (uwsgi.workers[uwsgi.mywid].cores[i].in_request) {
			struct wsgi_request *wsgi_req = &uwsgi.workers[uwsgi.mywid].cores[i].req;
			if (!rounds) {
				uwsgi_log_verbose("worker %d (pid: %d) core %d is managing \"%.*s %.*s\" for %.*s\n", uwsgi.mywid, uwsgi.mypid, i, 
					wsgi_req->method_len, wsgi_req->method, wsgi_req->uri_len, wsgi_req->uri,
					wsgi_req->remote_addr_len, wsgi_req->remote_addr);
			}
			running_cores++;
		}
	}

	if (running_cores > 0) {
		uwsgi_log_verbose("waiting for %d running requests on worker %d (pid: %d)...\n", running_cores, uwsgi.mywid, uwsgi.mypid);
		PyObject *gevent_sleep_args = PyTuple_New(1);
		PyTuple_SetItem(gevent_sleep_args, 0, PyInt_FromLong(1));
		PyObject *gswitch = python_call(ugevent.greenlet_switch, gevent_sleep_args, 0, NULL);
		Py_DECREF(gswitch);
		Py_DECREF(gevent_sleep_args);
		rounds++;
		goto retry;
	}

	if (!ugevent.wait_for_hub) {
		PyObject_CallMethod(ugevent.ctrl_gl, "kill", NULL);
	}

	Py_INCREF(Py_None);
	return Py_None;
}
Example #9
File: greenlet.c Project: CashStar/uwsgi
static void greenlet_schedule_to_req() {

	int id = uwsgi.wsgi_req->async_id;
	uint8_t modifier1 = uwsgi.wsgi_req->uh->modifier1;

        // ensure gil
        UWSGI_GET_GIL

	if (!uwsgi.wsgi_req->suspended) {
		ugl.gl[id] = PyGreenlet_New(ugl.callable, NULL);
		PyObject_SetAttrString((PyObject *)ugl.gl[id], "uwsgi_wsgi_req", PyLong_FromLong((long) uwsgi.wsgi_req));
		uwsgi.wsgi_req->suspended = 1;
	}

	// call it in the main core
        if (is_not_python(modifier1) && uwsgi.p[modifier1]->suspend) {
                uwsgi.p[modifier1]->suspend(NULL);
        }

	PyObject *ret = PyGreenlet_Switch(ugl.gl[id], NULL, NULL);
	if (!ret) {
		PyErr_Print();
		uwsgi_log_verbose("[BUG] unable to switch greenlet !!!\n");
		exit(1);
	}
	Py_DECREF(ret);

	if (is_not_python(modifier1) && uwsgi.p[modifier1]->resume) {
                uwsgi.p[modifier1]->resume(NULL);
        }


}
Example #10
File: tornado.c Project: CashStar/uwsgi
PyObject *py_uwsgi_tornado_request(PyObject *self, PyObject *args) {
        int fd = -1;
        PyObject *events = NULL;
        if (!PyArg_ParseTuple(args, "iO:uwsgi_tornado_request", &fd, &events)) {
		uwsgi_log_verbose("[BUG] invalid arguments for tornado callback !!!\n");
		exit(1);
        }

        struct wsgi_request *wsgi_req = find_wsgi_req_proto_by_fd(fd);
	uwsgi.wsgi_req = wsgi_req;

	int status = wsgi_req->socket->proto(wsgi_req);
	if (status > 0) goto again;
	if (PyObject_CallMethod(utornado.ioloop, "remove_handler", "i", fd) == NULL) {
		PyErr_Print();
		goto end;
	}

	if (status == 0) {
		// we call this two times... overengineering :(
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		uwsgi.schedule_to_req();
		goto again;
	}

end:
	uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
	uwsgi_close_request(uwsgi.wsgi_req);	
	free_req_queue;
again:
	Py_INCREF(Py_None);
	return Py_None;
}
Example #11
File: master_utils.c Project: rebx/uwsgi
void master_check_cluster_nodes() {

	int i;

	for (i = 0; i < MAX_CLUSTER_NODES; i++) {
		struct uwsgi_cluster_node *ucn = &uwsgi.shared->nodes[i];

		if (ucn->name[0] != 0 && ucn->type == CLUSTER_NODE_STATIC && ucn->status == UWSGI_NODE_FAILED) {
			// should i retry ?
			if (uwsgi.master_cycles % ucn->errors == 0) {
				if (!uwsgi_ping_node(i, uwsgi.wsgi_req)) {
					ucn->status = UWSGI_NODE_OK;
					uwsgi_log("re-enabled cluster node %d/%s\n", i, ucn->name);
				}
				else {
					ucn->errors++;
				}
			}
		}
		else if (ucn->name[0] != 0 && ucn->type == CLUSTER_NODE_DYNAMIC) {
			// if the last_seen attr is higher than 30 secs ago, mark the node as dead
			if ((uwsgi.current_time - ucn->last_seen) > 30) {
				uwsgi_log_verbose("no presence announce in the last 30 seconds by node %s, i assume it is dead.\n", ucn->name);
				ucn->name[0] = 0;
			}
		}
	}
}
Example #12
void get_linux_tcp_info(int fd) {
	socklen_t tis = sizeof(struct tcp_info);

	if (!getsockopt(fd, IPPROTO_TCP, TCP_INFO, &uwsgi.shared->ti, &tis)) {
		// a check for older linux kernels
		if (!uwsgi.shared->ti.tcpi_sacked) {
			return;
		}

		uwsgi.shared->load = uwsgi.shared->ti.tcpi_unacked;

		uwsgi.shared->options[UWSGI_OPTION_BACKLOG_STATUS] = uwsgi.shared->ti.tcpi_unacked;
		if (uwsgi.vassal_sos_backlog > 0 && uwsgi.has_emperor) {
			if ((int) uwsgi.shared->ti.tcpi_unacked >= uwsgi.vassal_sos_backlog) {
				// ask emperor for help
				char byte = 30;
				if (write(uwsgi.emperor_fd, &byte, 1) != 1) {
					uwsgi_error("write()");
				}
				else {
					uwsgi_log("asking emperor for reinforcements (backlog: %d)...\n", (int) uwsgi.shared->ti.tcpi_unacked);
				}
			}
		}
		if (uwsgi.shared->ti.tcpi_unacked >= uwsgi.shared->ti.tcpi_sacked) {
			uwsgi_log_verbose("*** uWSGI listen queue of socket %d full !!! (%d/%d) ***\n", fd, uwsgi.shared->ti.tcpi_unacked, uwsgi.shared->ti.tcpi_sacked);
			uwsgi.shared->options[UWSGI_OPTION_BACKLOG_ERRORS]++;
		}
	}
}
Example #13
void uwsgi_master_manage_emperor() {
	char byte;
	ssize_t rlen = read(uwsgi.emperor_fd, &byte, 1);
	if (rlen > 0) {
		uwsgi_log_verbose("received message %d from emperor\n", byte);
		// remove me
		if (byte == 0) {
			close(uwsgi.emperor_fd);
			if (!uwsgi.to_hell)
				kill_them_all(0);
		}
		// reload me
		else if (byte == 1) {
			// un-lazy the stack to trigger a real reload
			uwsgi.lazy = 0;
			uwsgi_block_signal(SIGHUP);
			grace_them_all(0);
			uwsgi_unblock_signal(SIGHUP);
		}
	}
	else {
		uwsgi_log("lost connection with my emperor !!!\n");
		close(uwsgi.emperor_fd);
		if (!uwsgi.to_hell)
			kill_them_all(0);
		sleep(2);
		exit(1);
	}

}
Example #14
File: master_checks.c Project: alex/uwsgi
int uwsgi_master_check_emperor_death(int diedpid) {
	if (uwsgi.emperor_pid >= 0 && diedpid == uwsgi.emperor_pid) {
		uwsgi_log_verbose("!!! Emperor died !!!\n");
		uwsgi_emperor_start();
		return -1;
	}
	return 0;
}
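For context, master_loop() (Example #27) runs this check shortly after waitpid(), ahead of the other dead-child checks:

	if (!uwsgi_instance_is_reloading && !uwsgi_instance_is_dying) {
		if (uwsgi_master_check_emperor_death(diedpid))
			continue;
		// ... spooler, mule, gateway, daemon and cron checks follow
	}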
Example #15
File: master.c Project: simbha/uwsgi
void uwsgi_brutally_reload_workers() {
	int i;
	for (i = 1; i <= uwsgi.numproc; i++) {
		if (uwsgi.workers[i].pid > 0) {
			uwsgi_log_verbose("killing worker %d (pid: %d)\n", i, (int) uwsgi.workers[i].pid);
			uwsgi_curse(i, SIGINT);
		}
	}
}
Example #16
File: gevent.c Project: jpcollinsiii/uwsgi
PyObject *py_uwsgi_gevent_graceful(PyObject *self, PyObject *args) {

	uwsgi_log("Gracefully killing worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
	uwsgi.workers[uwsgi.mywid].manage_next_request = 0;

	uwsgi_log_verbose("stopping gevent signals watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
	PyObject_CallMethod(ugevent.my_signal_watcher, "stop", NULL);
	PyObject_CallMethod(ugevent.signal_watcher, "stop", NULL);

	uwsgi_log_verbose("stopping gevent sockets watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
	int i,count = uwsgi_count_sockets(uwsgi.sockets);
	for(i=0;i<count;i++) {
		PyObject_CallMethod(ugevent.watchers[i], "stop", NULL);
	}
	uwsgi_log_verbose("main gevent watchers stopped for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);

	int running_cores = 0;
	for(i=0;i<uwsgi.async;i++) {
		if (uwsgi.workers[uwsgi.mywid].cores[i].in_request) {
			struct wsgi_request *wsgi_req = &uwsgi.workers[uwsgi.mywid].cores[i].req;
			uwsgi_log_verbose("worker %d (pid: %d) core %d is managing \"%.*s %.*s\" for %.*s\n", uwsgi.mywid, uwsgi.mypid, i, 
				wsgi_req->method_len, wsgi_req->method, wsgi_req->uri_len, wsgi_req->uri,
				wsgi_req->remote_addr_len, wsgi_req->remote_addr);
			running_cores++;
		}
	}

	if (running_cores > 0) {
		uwsgi_log_verbose("waiting for %d running requests on worker %d (pid: %d)...\n", running_cores, uwsgi.mywid, uwsgi.mypid);
	} else {
            // no need to worry about freeing memory
            PyObject *uwsgi_dict = get_uwsgi_pydict("uwsgi");
            if (uwsgi_dict) {
              PyObject *ae = PyDict_GetItemString(uwsgi_dict, "atexit");
              if (ae) {
                python_call(ae, PyTuple_New(0), 0, NULL);
              }
            }
        }

	Py_INCREF(Py_None);
	return Py_None;
}
Example #17
File: asyncio.c Project: Algy/uwsgi
static PyObject *py_uwsgi_asyncio_request(PyObject *self, PyObject *args) {
        long wsgi_req_ptr = 0;
	int timed_out = 0;
        if (!PyArg_ParseTuple(args, "l|i:uwsgi_asyncio_request", &wsgi_req_ptr, &timed_out)) {
		uwsgi_log_verbose("[BUG] invalid arguments for asyncio callback !!!\n");
		exit(1);
        }

        struct wsgi_request *wsgi_req = (struct wsgi_request *) wsgi_req_ptr;
	uwsgi.wsgi_req = wsgi_req;

	PyObject *ob_timeout = (PyObject *) wsgi_req->async_timeout;
	if (PyObject_CallMethod(ob_timeout, "cancel", NULL) == NULL) PyErr_Print();
	Py_DECREF(ob_timeout);
	// avoid mess when closing the request
	wsgi_req->async_timeout = NULL;

	if (timed_out > 0) {
		if (PyObject_CallMethod(uasyncio.loop, "remove_reader", "i", wsgi_req->fd) == NULL) PyErr_Print();
		goto end;
	}

	int status = wsgi_req->socket->proto(wsgi_req);
	if (status > 0) {
		ob_timeout = PyObject_CallMethod(uasyncio.loop, "call_later", "iOli", uwsgi.socket_timeout, uasyncio.request, wsgi_req_ptr, 1);
        	if (!ob_timeout) {
                	if (PyObject_CallMethod(uasyncio.loop, "remove_reader", "i", wsgi_req->fd) == NULL) PyErr_Print();
			goto end;
        	}
                // trick for reference counting
                wsgi_req->async_timeout = (struct uwsgi_rb_timer *) ob_timeout;
		goto again;
	}

	if (PyObject_CallMethod(uasyncio.loop, "remove_reader", "i", wsgi_req->fd) == NULL) {
		PyErr_Print();
		goto end;
	}

	if (status == 0) {
		// we call this two times... overengineering :(
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		uwsgi.schedule_to_req();
		goto again;
	}

end:
	uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
	uwsgi_close_request(uwsgi.wsgi_req);	
	free_req_queue;
again:
	Py_INCREF(Py_None);
	return Py_None;
}
Example #18
void get_linux_unbit_SIOBKLGQ(int fd) {

	int queue = 0;
	if (ioctl(fd, SIOBKLGQ, &queue) >= 0) {
		uwsgi.shared->load = queue;
		uwsgi.shared->options[UWSGI_OPTION_BACKLOG_STATUS] = queue;
		if (queue >= uwsgi.listen_queue) {
			uwsgi_log_verbose("*** uWSGI listen queue of socket %d full !!! (%d/%d) ***\n", fd, queue, uwsgi.listen_queue);
			uwsgi.shared->options[UWSGI_OPTION_BACKLOG_ERRORS]++;
		}
	}

}
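Note (based on the guards around the same helper in Example #26): SIOBKLGQ is defined (as 0x8908) only when building on Linux with UNBIT defined, so this function is compiled conditionally:

#ifdef __linux__
#include <linux/sockios.h>

#ifdef UNBIT
#define SIOBKLGQ 0x8908
#endif

#ifdef SIOBKLGQ
/* get_linux_unbit_SIOBKLGQ() is only built in this case */
#endif
#endif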
Example #19
File: async.c Project: EasyPost/uwsgi
void async_add_timeout(struct wsgi_request *wsgi_req, int timeout) {

	if (uwsgi.async < 1 || !uwsgi.rb_async_timeouts) {
		uwsgi_log_verbose("ASYNC call without async mode !!!\n");
		return;
	}

	wsgi_req->async_ready_fd = 0;

	if (timeout > 0 && wsgi_req->async_timeout == NULL) {
		wsgi_req->async_timeout = uwsgi_add_rb_timer(uwsgi.rb_async_timeouts, uwsgi_now() + timeout, wsgi_req);
	}

}
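Usage sketch (lifted from async_add_fd_write() in Example #24): callers arm the timeout right after registering the fd, so the waiting request can be expired if the descriptor never becomes ready:

	if (timeout > 0) {
		async_add_timeout(wsgi_req, timeout);
	}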
Example #20
File: master.c Project: simbha/uwsgi
void uwsgi_master_check_mercy() {

	int i;

	for (i = 1; i <= uwsgi.numproc; i++) {
		if (uwsgi.workers[i].pid > 0 && uwsgi.workers[i].cursed_at) {
			if (uwsgi_now() > uwsgi.workers[i].no_mercy_at) {
				uwsgi_log_verbose("worker %d (pid: %d) is taking too much time to die...NO MERCY !!!\n", i, uwsgi.workers[i].pid);
				// yes, that may look strange, but we avoid calling it again if we skip the waitpid() call below
				uwsgi_curse(i, SIGKILL);
			}
		}
	}
}
Example #21
File: gevent.c Project: RyuaNerin/uwsgi
PyObject *py_uwsgi_gevent_int(PyObject *self, PyObject *args) {

        uwsgi_log("Brutally killing worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
        uwsgi.workers[uwsgi.mywid].manage_next_request = 0;

        uwsgi_log_verbose("stopping gevent signals watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
        PyObject_CallMethod(ugevent.my_signal_watcher, "stop", NULL);
        PyObject_CallMethod(ugevent.signal_watcher, "stop", NULL);

        uwsgi_log_verbose("stopping gevent sockets watchers for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);
        int i,count = uwsgi_count_sockets(uwsgi.sockets);
        for(i=0;i<count;i++) {
                PyObject_CallMethod(ugevent.watchers[i], "stop", NULL);
        }
        uwsgi_log_verbose("main gevent watchers stopped for worker %d (pid: %d)...\n", uwsgi.mywid, uwsgi.mypid);

        if (!ugevent.wait_for_hub) {
                PyObject_CallMethod(ugevent.ctrl_gl, "kill", NULL);
        }

        Py_INCREF(Py_None);
        return Py_None;
}
Example #22
File: master_checks.c Project: alex/uwsgi
// check for chain reload
void uwsgi_master_check_chain() {
	if (!uwsgi.status.chain_reloading) return;
	if (uwsgi.status.chain_reloading > uwsgi.numproc) {
		uwsgi.status.chain_reloading = 0;
                uwsgi_log_verbose("chain reloading complete\n");
	}
	int i;
	uwsgi_block_signal(SIGHUP);
	for(i=1;i<=uwsgi.numproc;i++) {
		if (uwsgi.workers[i].pid > 0 && uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].cursed_at == 0 && i == uwsgi.status.chain_reloading) {
			uwsgi_curse(i, SIGHUP);
			break;
		}
	}
	uwsgi_unblock_signal(SIGHUP);
}
Example #23
File: rados.c Project: Nikolo/uwsgi
// callback used to asynchronously signal the completion
static void uwsgi_rados_read_async_cb(rados_completion_t comp, void *data) {
	struct uwsgi_rados_cb *urcb = (struct uwsgi_rados_cb *) data;
	struct uwsgi_rados_io *urio = urcb->urio;

	pthread_mutex_lock(&urio->mutex);
	if (urcb->rid != urio->rid) {
		uwsgi_log_verbose("[uwsgi-rados] callback %llu woke up too late\n", (unsigned long long) urcb->rid);
	}
	else {
		// signal the core
		if (write(urio->fds[1], "\1", 1) <= 0) {
			uwsgi_error("uwsgi_rados_read_async_cb()/write()");
		}
	}

	pthread_mutex_unlock(&urio->mutex);

	free(urcb);
}
Example #24
File: async.c Project: EasyPost/uwsgi
int async_add_fd_write(struct wsgi_request *wsgi_req, int fd, int timeout) {

	if (uwsgi.async < 1 || !uwsgi.async_waiting_fd_table) {
		uwsgi_log_verbose("ASYNC call without async mode !!!\n");
		return -1;
	}

	struct uwsgi_async_fd *last_uad = NULL, *uad = wsgi_req->waiting_fds;

	if (fd < 0)
		return -1;

	// find last slot
	while (uad) {
		last_uad = uad;
		uad = uad->next;
	}

	uad = uwsgi_malloc(sizeof(struct uwsgi_async_fd));
	uad->fd = fd;
	uad->event = event_queue_write();
	uad->prev = last_uad;
	uad->next = NULL;

	if (last_uad) {
		last_uad->next = uad;
	}
	else {
		wsgi_req->waiting_fds = uad;
	}

	if (timeout > 0) {
		async_add_timeout(wsgi_req, timeout);
	}

	uwsgi.async_waiting_fd_table[fd] = wsgi_req;
	wsgi_req->async_force_again = 1;
	return event_queue_add_fd_write(uwsgi.async_queue, fd);
}
Example #25
File: master_utils.c Project: rebx/uwsgi
void uwsgi_manage_command_cron(time_t now) {

	struct tm *uwsgi_cron_delta;

	struct uwsgi_cron *current_cron = uwsgi.crons;
	int uc_minute, uc_hour, uc_day, uc_month, uc_week;

	uwsgi_cron_delta = localtime(&now);


	if (!uwsgi_cron_delta) {
		uwsgi_error("localtime()");
		return;
	}

	// fix month
	uwsgi_cron_delta->tm_mon++;

	while (current_cron) {

		uc_minute = current_cron->minute;
		uc_hour = current_cron->hour;
		uc_day = current_cron->day;
		uc_month = current_cron->month;
		uc_week = current_cron->week;

		// negative values are intervals: -1 = *, -5 = */5
		if (current_cron->minute < 0) {
			if ((uwsgi_cron_delta->tm_min % abs(current_cron->minute)) == 0) {
				uc_minute = uwsgi_cron_delta->tm_min;
			}
		}
		if (current_cron->hour < 0) {
			if ((uwsgi_cron_delta->tm_hour % abs(current_cron->hour)) == 0) {
				uc_hour = uwsgi_cron_delta->tm_hour;
			}
		}
		if (current_cron->month < 0) {
			if ((uwsgi_cron_delta->tm_hour % abs(current_cron->month)) == 0) {
				uc_month = uwsgi_cron_delta->tm_mon;
			}
		}
		if (current_cron->day < 0) {
			if ((uwsgi_cron_delta->tm_mday % abs(current_cron->day)) == 0) {
				uc_day = uwsgi_cron_delta->tm_mday;
			}
		}
		if (current_cron->week < 0) {
			if ((uwsgi_cron_delta->tm_wday % abs(current_cron->week)) == 0) {
				uc_week = uwsgi_cron_delta->tm_wday;
			}
		}

		int run_task = 0;
		// mday and wday are ORed
		if (current_cron->day >= 0 && current_cron->week >= 0) {
			if (uwsgi_cron_delta->tm_min == uc_minute && uwsgi_cron_delta->tm_hour == uc_hour && uwsgi_cron_delta->tm_mon == uc_month && (uwsgi_cron_delta->tm_mday == uc_day || uwsgi_cron_delta->tm_wday == uc_week)) {
				run_task = 1;
			}
		}
		else {
			if (uwsgi_cron_delta->tm_min == uc_minute && uwsgi_cron_delta->tm_hour == uc_hour && uwsgi_cron_delta->tm_mon == uc_month && uwsgi_cron_delta->tm_mday == uc_day && uwsgi_cron_delta->tm_wday == uc_week) {
				run_task = 1;
			}
		}


		if (run_task == 1) {

			// date match, run command ?
			if (now - current_cron->last_job > 60) {
				//call command
				if (current_cron->command) {
					if (uwsgi_run_command(current_cron->command, NULL, -1) >= 0) {
						uwsgi_log_verbose("[uWSGI-cron] running %s\n", current_cron->command);
					}
				}
				current_cron->last_job = now;
			}
		}



		current_cron = current_cron->next;
	}


}
Example #26
File: master.c Project: simbha/uwsgi
int uwsgi_get_tcp_info(int fd) {

#if defined(__linux__) || defined(__FreeBSD__)
	socklen_t tis = sizeof(struct tcp_info);

	if (!getsockopt(fd, IPPROTO_TCP, TCP_INFO, &uwsgi.shared->ti, &tis)) {

		// checks for older kernels
#if defined(__linux__)
		if (!uwsgi.shared->ti.tcpi_sacked) {
#elif defined(__FreeBSD__)
		if (!uwsgi.shared->ti.__tcpi_sacked) {
#endif
			return 0;
		}

#if defined(__linux__)
		uwsgi.shared->load = (uint64_t) uwsgi.shared->ti.tcpi_unacked;
		uwsgi.shared->max_load = (uint64_t) uwsgi.shared->ti.tcpi_sacked;
#elif defined(__FreeBSD__)
		uwsgi.shared->load = (uint64_t) uwsgi.shared->ti.__tcpi_unacked;
		uwsgi.shared->max_load = (uint64_t) uwsgi.shared->ti.__tcpi_sacked;
#endif

		uwsgi.shared->options[UWSGI_OPTION_BACKLOG_STATUS] = uwsgi.shared->load;
		if (uwsgi.vassal_sos_backlog > 0 && uwsgi.has_emperor) {
			if (uwsgi.shared->load >= (uint64_t) uwsgi.vassal_sos_backlog) {
				// ask emperor for help
				char byte = 30;
				if (write(uwsgi.emperor_fd, &byte, 1) != 1) {
					uwsgi_error("write()");
				}
				else {
					uwsgi_log("asking emperor for reinforcements (backlog: %llu)...\n", (unsigned long long) uwsgi.shared->load);
				}
			}
		}
		if (uwsgi.shared->load >= uwsgi.shared->max_load) {
			uwsgi_log_verbose("*** uWSGI listen queue of socket %d full !!! (%llu/%llu) ***\n", fd, (unsigned long long) uwsgi.shared->load, (unsigned long long) uwsgi.shared->max_load);
			uwsgi.shared->options[UWSGI_OPTION_BACKLOG_ERRORS]++;
		}

		return uwsgi.shared->load;
	}
#endif

	return 0;
}


#ifdef __linux__
#include <linux/sockios.h>

#ifdef UNBIT
#define SIOBKLGQ 0x8908
#endif

#ifdef SIOBKLGQ

int get_linux_unbit_SIOBKLGQ(int fd) {

	int queue = 0;
	if (ioctl(fd, SIOBKLGQ, &queue) >= 0) {
		uwsgi.shared->load = queue;
		uwsgi.shared->options[UWSGI_OPTION_BACKLOG_STATUS] = queue;
		if (queue >= uwsgi.listen_queue) {
			uwsgi_log_verbose("*** uWSGI listen queue of socket %d full !!! (%d/%d) ***\n", fd, queue, uwsgi.listen_queue);
			uwsgi.shared->options[UWSGI_OPTION_BACKLOG_ERRORS]++;
		}
		return queue;
	}

	return 0;

}
#endif
#endif
Example #27
File: master.c Project: simbha/uwsgi
int master_loop(char **argv, char **environ) {

	struct timeval last_respawn;
	int last_respawn_rate = 0;

	pid_t diedpid;
	int waitpid_status;

	time_t now = 0;

	int i = 0;
	int rlen;

	int check_interval = 1;

	struct uwsgi_rb_timer *min_timeout;
	struct uwsgi_rbtree *rb_timers = uwsgi_init_rb_timer();

	if (uwsgi.procname_master) {
		uwsgi_set_processname(uwsgi.procname_master);
	}
	else if (uwsgi.procname) {
		uwsgi_set_processname(uwsgi.procname);
	}
	else if (uwsgi.auto_procname) {
		uwsgi_set_processname("uWSGI master");
	}


	uwsgi.current_time = uwsgi_now();

	uwsgi_unix_signal(SIGTSTP, suspend_resume_them_all);
	uwsgi_unix_signal(SIGHUP, grace_them_all);
	if (uwsgi.die_on_term) {
		uwsgi_unix_signal(SIGTERM, kill_them_all);
		uwsgi_unix_signal(SIGQUIT, reap_them_all);
	}
	else {
		uwsgi_unix_signal(SIGTERM, reap_them_all);
		uwsgi_unix_signal(SIGQUIT, kill_them_all);
	}
	uwsgi_unix_signal(SIGINT, kill_them_all);
	uwsgi_unix_signal(SIGUSR1, stats);
	if (uwsgi.auto_snapshot) {
		uwsgi_unix_signal(SIGURG, uwsgi_restore_auto_snapshot);
	}

	atexit(uwsgi_master_cleanup_hooks);

	uwsgi.master_queue = event_queue_init();

	/* route signals to workers... */
#ifdef UWSGI_DEBUG
	uwsgi_log("adding %d to signal poll\n", uwsgi.shared->worker_signal_pipe[0]);
#endif
	event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_signal_pipe[0]);

	if (uwsgi.master_fifo) {
		uwsgi.master_fifo_fd = uwsgi_master_fifo();
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.master_fifo_fd);
	}

	if (uwsgi.spoolers) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->spooler_signal_pipe[0]);
	}

	if (uwsgi.mules_cnt > 0) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->mule_signal_pipe[0]);
	}

	if (uwsgi.log_master) {
		uwsgi.log_master_buf = uwsgi_malloc(uwsgi.log_master_bufsize);
		if (!uwsgi.threaded_logger) {
#ifdef UWSGI_DEBUG
			uwsgi_log("adding %d to master logging\n", uwsgi.shared->worker_log_pipe[0]);
#endif
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]);
			if (uwsgi.req_log_master) {
				event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_req_log_pipe[0]);
			}
		}
		else {
			uwsgi_threaded_logger_spawn();
		}

	}

#ifdef UWSGI_SSL
	uwsgi_start_legions();
#endif
	uwsgi_metrics_start_collector();

	uwsgi_add_reload_fds();

	uwsgi_cache_start_sweepers();
	uwsgi_cache_start_sync_servers();

	uwsgi.wsgi_req->buffer = uwsgi.workers[0].cores[0].buffer;

	if (uwsgi.has_emperor) {
		if (uwsgi.emperor_proxy) {
			uwsgi.emperor_fd_proxy = bind_to_unix(uwsgi.emperor_proxy, uwsgi.listen_queue, 0, 0);
			if (uwsgi.emperor_fd_proxy < 0) exit(1);
			if (chmod(uwsgi.emperor_proxy, S_IRUSR|S_IWUSR)) {
				uwsgi_error("[emperor-proxy] chmod()");
				exit(1);
			}
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd_proxy);
		}
		else {
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd);
		}
	}

	if (uwsgi.zerg_server) {
		uwsgi.zerg_server_fd = bind_to_unix(uwsgi.zerg_server, uwsgi.listen_queue, 0, 0);
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.zerg_server_fd);
		uwsgi_log("*** Zerg server enabled on %s ***\n", uwsgi.zerg_server);
	}

	if (uwsgi.stats) {
		char *tcp_port = strrchr(uwsgi.stats, ':');
		if (tcp_port) {
			// disable deferred accept for this socket
			int current_defer_accept = uwsgi.no_defer_accept;
			uwsgi.no_defer_accept = 1;
			uwsgi.stats_fd = bind_to_tcp(uwsgi.stats, uwsgi.listen_queue, tcp_port);
			uwsgi.no_defer_accept = current_defer_accept;
		}
		else {
			uwsgi.stats_fd = bind_to_unix(uwsgi.stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket);
		}

		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.stats_fd);
		uwsgi_log("*** Stats server enabled on %s fd: %d ***\n", uwsgi.stats, uwsgi.stats_fd);
	}


	if (uwsgi.stats_pusher_instances) {
		if (!uwsgi_thread_new(uwsgi_stats_pusher_loop)) {
			uwsgi_log("!!! unable to spawn stats pusher thread !!!\n");
			exit(1);
		}
	}

	if (uwsgi.udp_socket) {
		uwsgi.udp_fd = bind_to_udp(uwsgi.udp_socket, 0, 0);
		if (uwsgi.udp_fd < 0) {
			uwsgi_log("unable to bind to udp socket. SNMP services will be disabled.\n");
		}
		else {
			uwsgi_log("UDP server enabled.\n");
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.udp_fd);
		}
	}

	uwsgi.snmp_fd = uwsgi_setup_snmp();

	if (uwsgi.status.is_cheap) {
		uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
		for (i = 1; i <= uwsgi.numproc; i++) {
			uwsgi.workers[i].cheaped = 1;
		}
		uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
	}


	// spawn mules
	for (i = 0; i < uwsgi.mules_cnt; i++) {
		size_t mule_patch_size = 0;
		uwsgi.mules[i].patch = uwsgi_string_get_list(&uwsgi.mules_patches, i, &mule_patch_size);
		uwsgi_mule(i + 1);
	}

	// spawn gateways
	for (i = 0; i < ushared->gateways_cnt; i++) {
		if (ushared->gateways[i].pid == 0) {
			gateway_respawn(i);
		}
	}

	// spawn daemons
	uwsgi_daemons_spawn_all();

	// first subscription
	uwsgi_subscribe_all(0, 1);

	// sync the cache store if needed
	uwsgi_cache_sync_all();

	if (uwsgi.queue_store && uwsgi.queue_filesize) {
		if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
			uwsgi_error("msync()");
		}
	}

	// update touches timestamps
	uwsgi_check_touches(uwsgi.touch_reload);
	uwsgi_check_touches(uwsgi.touch_logrotate);
	uwsgi_check_touches(uwsgi.touch_logreopen);
	uwsgi_check_touches(uwsgi.touch_chain_reload);
	uwsgi_check_touches(uwsgi.touch_workers_reload);
	uwsgi_check_touches(uwsgi.touch_gracefully_stop);
	// update exec touches
	struct uwsgi_string_list *usl = uwsgi.touch_exec;
	while (usl) {
		char *space = strchr(usl->value, ' ');
		if (space) {
			*space = 0;
			usl->len = strlen(usl->value);
			usl->custom_ptr = space + 1;
		}
		usl = usl->next;
	}
	uwsgi_check_touches(uwsgi.touch_exec);
	// update signal touches
	usl = uwsgi.touch_signal;
	while (usl) {
		char *space = strchr(usl->value, ' ');
		if (space) {
			*space = 0;
			usl->len = strlen(usl->value);
			usl->custom_ptr = space + 1;
		}
		usl = usl->next;
	}
	uwsgi_check_touches(uwsgi.touch_signal);

	// fsmon
	uwsgi_fsmon_setup();

	// setup cheaper algos (can be stacked)
	uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
	if (uwsgi.requested_cheaper_algo) {
		uwsgi.cheaper_algo = NULL;
		struct uwsgi_cheaper_algo *uca = uwsgi.cheaper_algos;
		while (uca) {
			if (!strcmp(uca->name, uwsgi.requested_cheaper_algo)) {
				uwsgi.cheaper_algo = uca->func;
				break;
			}
			uca = uca->next;
		}

		if (!uwsgi.cheaper_algo) {
			uwsgi_log("unable to find requested cheaper algorithm, falling back to spare\n");
			uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
		}

	}

	// here really starts the master loop

	for (;;) {
		//uwsgi_log("uwsgi.ready_to_reload %d %d\n", uwsgi.ready_to_reload, uwsgi.numproc);

		// run master_cycle hook for every plugin
		for (i = 0; i < uwsgi.gp_cnt; i++) {
			if (uwsgi.gp[i]->master_cycle) {
				uwsgi.gp[i]->master_cycle();
			}
		}
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->master_cycle) {
				uwsgi.p[i]->master_cycle();
			}
		}

		// check for death (before reload !!!)
		uwsgi_master_check_death();
		// check for reload
		if (uwsgi_master_check_reload(argv)) {
			return -1;
		}

		// check chain reload
		uwsgi_master_check_chain();

		// check if some worker is taking too much to die...
		uwsgi_master_check_mercy();

		// check for daemons (smart and dumb)
		uwsgi_daemons_smart_check();


		if (uwsgi.respawn_snapshots) {
			for (i = 1; i <= uwsgi.respawn_snapshots; i++) {
				if (uwsgi_respawn_worker(i))
					return 0;
			}

			uwsgi.respawn_snapshots = 0;
		}

		if (uwsgi.restore_snapshot) {
			uwsgi_master_restore_snapshot();
			continue;
		}

		// cheaper management
		if (uwsgi.cheaper && !uwsgi.status.is_cheap && !uwsgi_instance_is_reloading && !uwsgi_instance_is_dying && !uwsgi.workers[0].suspended) {
			if (!uwsgi_calc_cheaper())
				return 0;
		}


		// check if someone is dead
		diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
		if (diedpid == -1) {
			if (errno == ECHILD) {
				// something did not work as expected, just assume all has been cleared
				uwsgi_master_commit_status();
				diedpid = 0;
			}
			else {
				uwsgi_error("waitpid()");
				/* here is better to reload all the uWSGI stack */
				uwsgi_log("something horrible happened...\n");
				reap_them_all(0);
				exit(1);
			}
		}

		// no one died just run all of the standard master tasks
		if (diedpid == 0) {

			/* all processes ok, doing status scan after N seconds */
			check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
			if (!check_interval)
				check_interval = 1;


			// add unregistered file monitors
			// locking is not needed as monitors can only increase
			for (i = 0; i < ushared->files_monitored_cnt; i++) {
				if (!ushared->files_monitored[i].registered) {
					ushared->files_monitored[i].fd = event_queue_add_file_monitor(uwsgi.master_queue, ushared->files_monitored[i].filename, &ushared->files_monitored[i].id);
					ushared->files_monitored[i].registered = 1;
				}
			}


			// add unregistered timers
			// locking is not needed as timers can only increase
			for (i = 0; i < ushared->timers_cnt; i++) {
				if (!ushared->timers[i].registered) {
					ushared->timers[i].fd = event_queue_add_timer(uwsgi.master_queue, &ushared->timers[i].id, ushared->timers[i].value);
					ushared->timers[i].registered = 1;
				}
			}

			// add unregistered rb_timers
			// locking is not needed as rb_timers can only increase
			for (i = 0; i < ushared->rb_timers_cnt; i++) {
				if (!ushared->rb_timers[i].registered) {
					ushared->rb_timers[i].uwsgi_rb_timer = uwsgi_add_rb_timer(rb_timers, uwsgi_now() + ushared->rb_timers[i].value, &ushared->rb_timers[i]);
					ushared->rb_timers[i].registered = 1;
				}
			}

			int interesting_fd = -1;

			if (ushared->rb_timers_cnt > 0) {
				min_timeout = uwsgi_min_rb_timer(rb_timers, NULL);
				if (min_timeout == NULL) {
					check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
				}
				else {
					check_interval = min_timeout->value - uwsgi_now();
					if (check_interval <= 0) {
						expire_rb_timeouts(rb_timers);
						check_interval = 0;
					}
				}
			}

			// wait for event
			rlen = event_queue_wait(uwsgi.master_queue, check_interval, &interesting_fd);

			if (rlen == 0) {
				if (ushared->rb_timers_cnt > 0) {
					expire_rb_timeouts(rb_timers);
				}
			}


			// check uwsgi-cron table
			if (ushared->cron_cnt) {
				uwsgi_manage_signal_cron(uwsgi_now());
			}

			if (uwsgi.crons) {
				uwsgi_manage_command_cron(uwsgi_now());
			}

			// some event returned
			if (rlen > 0) {
				// if the following function returns -1, a new worker has just spawned
				if (uwsgi_master_manage_events(interesting_fd)) {
					return 0;
				}
			}

			now = uwsgi_now();
			if (now - uwsgi.current_time < 1) {
				continue;
			}
			uwsgi.current_time = now;

			// checking logsize
			if (uwsgi.logfile) {
				uwsgi_check_logrotate();
			}

			// this will be incremented at (more or less) regular intervals
			uwsgi.master_cycles++;

			// recalculate requests counter on race conditions risky configurations
			// a bit of inaccuracy is better than locking;)
			uwsgi_master_fix_request_counters();

			// check for idle
			uwsgi_master_check_idle();

			check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
			if (!check_interval)
				check_interval = 1;


			// get listen_queue status
			struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
			int tmp_queue = 0;
			while (uwsgi_sock) {
				if (uwsgi_sock->family == AF_INET) {
					uwsgi_sock->queue = uwsgi_get_tcp_info(uwsgi_sock->fd);
				}
#ifdef __linux__
#ifdef SIOBKLGQ
				else if (uwsgi_sock->family == AF_UNIX) {
					uwsgi_sock->queue = get_linux_unbit_SIOBKLGQ(uwsgi_sock->fd);
				}
#endif
#endif

				if (uwsgi_sock->queue > tmp_queue) {
					tmp_queue = uwsgi_sock->queue;
				}
				uwsgi_sock = uwsgi_sock->next;
			}
			// fix queue size on multiple sockets
			uwsgi.shared->load = tmp_queue;

			// check if some worker has to die (harakiri, evil checks...)
			uwsgi_master_check_workers_deadline();

			uwsgi_master_check_gateways_deadline();
			uwsgi_master_check_mules_deadline();
			uwsgi_master_check_spoolers_deadline();
			uwsgi_master_check_crons_deadline();
			uwsgi_master_check_mountpoints();

#ifdef __linux__
#ifdef MADV_MERGEABLE
			if (uwsgi.linux_ksm > 0 && (uwsgi.master_cycles % uwsgi.linux_ksm) == 0) {
				uwsgi_linux_ksm_map();
			}
#endif
#endif

			// resubscribe every 10 cycles by default
			if (((uwsgi.subscriptions || uwsgi.subscriptions2) && ((uwsgi.master_cycles % uwsgi.subscribe_freq) == 0 || uwsgi.master_cycles == 1)) && !uwsgi_instance_is_reloading && !uwsgi_instance_is_dying && !uwsgi.workers[0].suspended) {
				uwsgi_subscribe_all(0, 0);
			}

			uwsgi_cache_sync_all();

			if (uwsgi.queue_store && uwsgi.queue_filesize && uwsgi.queue_store_sync && ((uwsgi.master_cycles % uwsgi.queue_store_sync) == 0)) {
				if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
					uwsgi_error("msync()");
				}
			}

			// check touch_reload
			if (!uwsgi_instance_is_reloading && !uwsgi_instance_is_dying) {
				char *touched = uwsgi_check_touches(uwsgi.touch_reload);
				if (touched) {
					uwsgi_log_verbose("*** %s has been touched... grace them all !!! ***\n", touched);
					uwsgi_block_signal(SIGHUP);
					grace_them_all(0);
					uwsgi_unblock_signal(SIGHUP);
					continue;
				}
				touched = uwsgi_check_touches(uwsgi.touch_workers_reload);
				if (touched) {
					uwsgi_log_verbose("*** %s has been touched... workers reload !!! ***\n", touched);
					uwsgi_reload_workers();
					continue;
				}
				touched = uwsgi_check_touches(uwsgi.touch_chain_reload);
				if (touched) {
					if (uwsgi.status.chain_reloading == 0) {
						uwsgi_log_verbose("*** %s has been touched... chain reload !!! ***\n", touched);
						uwsgi.status.chain_reloading = 1;
					}
					else {
						uwsgi_log_verbose("*** %s has been touched... but chain reload is already running ***\n", touched);
					}
				}

				// be sure to run it as the last touch check
				touched = uwsgi_check_touches(uwsgi.touch_exec);
				if (touched) {
					if (uwsgi_run_command(touched, NULL, -1) >= 0) {
						uwsgi_log_verbose("[uwsgi-touch-exec] running %s\n", touched);
					}
				}
				touched = uwsgi_check_touches(uwsgi.touch_signal);
				if (touched) {
					uint8_t signum = atoi(touched);
					uwsgi_route_signal(signum);
					uwsgi_log_verbose("[uwsgi-touch-signal] raising %u\n", signum);
				}
			}

			continue;

		}

		// no one died
		if (diedpid <= 0)
			continue;

		// check for deadlocks first
		uwsgi_deadlock_check(diedpid);

		// reload gateways and daemons only on normal workflow (+outworld status)
		if (!uwsgi_instance_is_reloading && !uwsgi_instance_is_dying) {

			if (uwsgi_master_check_emperor_death(diedpid))
				continue;
			if (uwsgi_master_check_spoolers_death(diedpid))
				continue;
			if (uwsgi_master_check_mules_death(diedpid))
				continue;
			if (uwsgi_master_check_gateways_death(diedpid))
				continue;
			if (uwsgi_master_check_daemons_death(diedpid))
				continue;
			if (uwsgi_master_check_cron_death(diedpid))
				continue;
		}


		/* What happens here ?

		   case 1) the diedpid is not a worker, report it and continue
		   case 2) the diedpid is a worker and we are not in a reload procedure -> reload it
		   case 3) the diedpid is a worker and we are in graceful reload -> uwsgi.ready_to_reload++ and continue
		   case 4) the diedpid is a worker and we are in brutal reload -> uwsgi.ready_to_die++ and continue


		 */

		int thewid = find_worker_id(diedpid);
		if (thewid <= 0) {
			// check spooler, mules, gateways and daemons
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			while (uspool) {
				if (uspool->pid > 0 && diedpid == uspool->pid) {
					uwsgi_log("spooler (pid: %d) annihilated\n", (int) diedpid);
					goto next;
				}
				uspool = uspool->next;
			}

			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].pid == diedpid) {
					uwsgi_log("mule %d (pid: %d) annihilated\n", i + 1, (int) diedpid);
					goto next;
				}
			}

			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways[i].pid == diedpid) {
					uwsgi_log("gateway %d (%s, pid: %d) annihilated\n", i + 1, ushared->gateways[i].fullname, (int) diedpid);
					goto next;
				}
			}

			if (uwsgi_daemon_check_pid_death(diedpid))
				goto next;

			if (WIFEXITED(waitpid_status)) {
				uwsgi_log("subprocess %d exited with code %d\n", (int) diedpid, WEXITSTATUS(waitpid_status));
			}
			else if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("subprocess %d exited by signal %d\n", (int) diedpid, WTERMSIG(waitpid_status));
			}
			else if (WIFSTOPPED(waitpid_status)) {
				uwsgi_log("subprocess %d stopped\n", (int) diedpid);
			}
next:
			continue;
		}


		// ok a worker died...
		uwsgi.workers[thewid].pid = 0;
		// only to be safe :P
		uwsgi.workers[thewid].harakiri = 0;

		// ok, if we are reloading or dying, just continue the master loop
		// as soon as all of the workers have pid == 0, the action (exit, or reload) is triggered
		if (uwsgi_instance_is_reloading || uwsgi_instance_is_dying) {
			if (!uwsgi.workers[thewid].cursed_at)
				uwsgi.workers[thewid].cursed_at = uwsgi_now();
			uwsgi_log("worker %d buried after %d seconds\n", thewid, (int) (uwsgi_now() - uwsgi.workers[thewid].cursed_at));
			uwsgi.workers[thewid].cursed_at = 0;
			continue;
		}

		// if we are stopping workers, just end here

		if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_FAILED_APP_CODE) {
			uwsgi_log("OOPS ! failed loading app in worker %d (pid %d) :( trying again...\n", thewid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_DE_HIJACKED_CODE) {
			uwsgi_log("...restoring worker %d (pid: %d)...\n", thewid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXCEPTION_CODE) {
			uwsgi_log("... monitored exception detected, respawning worker %d (pid: %d)...\n", thewid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_QUIET_CODE) {
			// noop
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_BRUTAL_RELOAD_CODE) {
			uwsgi_log("!!! inconsistent state reported by worker %d (pid: %d) !!!\n", thewid, (int) diedpid);
			reap_them_all(0);
			continue;
		}
		else if (uwsgi.workers[thewid].manage_next_request) {
			if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("DAMN ! worker %d (pid: %d) died, killed by signal %d :( trying respawn ...\n", thewid, (int) diedpid, (int) WTERMSIG(waitpid_status));
			}
			else {
				uwsgi_log("DAMN ! worker %d (pid: %d) died :( trying respawn ...\n", thewid, (int) diedpid);
			}
		}
		else if (uwsgi.workers[thewid].cursed_at > 0) {
			uwsgi_log("worker %d killed successfully (pid: %d)\n", thewid, (int) diedpid);
		}
		// manage_next_request is zero, but killed by signal...
		else if (WIFSIGNALED(waitpid_status)) {
			uwsgi_log("DAMN ! worker %d (pid: %d) MISTERIOUSLY killed by signal %d :( trying respawn ...\n", thewid, (int) diedpid, (int) WTERMSIG(waitpid_status));
		}

		if (uwsgi.workers[thewid].cheaped == 1) {
			uwsgi_log("uWSGI worker %d cheaped.\n", thewid);
			continue;
		}

		// avoid fork bombing
		gettimeofday(&last_respawn, NULL);
		if (last_respawn.tv_sec <= uwsgi.respawn_delta + check_interval) {
			last_respawn_rate++;
			if (last_respawn_rate > uwsgi.numproc) {
				if (uwsgi.forkbomb_delay > 0) {
					uwsgi_log("worker respawning too fast !!! i have to sleep a bit (%d seconds)...\n", uwsgi.forkbomb_delay);
					/* use --forkbomb-delay 0 to disable sleeping */
					sleep(uwsgi.forkbomb_delay);
				}
				last_respawn_rate = 0;
			}
		}
		else {
			last_respawn_rate = 0;
		}
		gettimeofday(&last_respawn, NULL);
		uwsgi.respawn_delta = last_respawn.tv_sec;

		// are we chain reloading it ?
		if (uwsgi.status.chain_reloading == thewid) {
			uwsgi.status.chain_reloading++;
		}

		// respawn the worker (if needed)
		if (uwsgi_respawn_worker(thewid))
			return 0;

		// end of the loop
	}

	// never here
}
Example #28
File: async.c Project: bennypk/uwsgi
// this is called whenever a new connection is ready, but there are no cores to handle it
void uwsgi_async_queue_is_full(time_t now) {
	if (now > uwsgi.async_queue_is_full) {
		uwsgi_log_verbose("[DANGER] async queue is full !!!\n");
		uwsgi.async_queue_is_full = now;
	}
}
Example #29
File: mule.c Project: dbhattar/uwsgi
ssize_t uwsgi_mule_get_msg(int manage_signals, int manage_farms, char *message, size_t buffer_size, int timeout) {

	ssize_t len = 0;
	struct pollfd *mulepoll;
	int count = 4;
	int farms_count = 0;
	uint8_t uwsgi_signal;
	int i;

	if (uwsgi.muleid == 0)
		return -1;

	if (manage_signals)
		count = 2;

	if (!manage_farms)
		goto next;

	for (i = 0; i < uwsgi.farms_cnt; i++) {
		if (uwsgi_farm_has_mule(&uwsgi.farms[i], uwsgi.muleid))
			farms_count++;
	}
next:

	if (timeout > -1)
		timeout = timeout * 1000;

	mulepoll = uwsgi_malloc(sizeof(struct pollfd) * (count + farms_count));

	mulepoll[0].fd = uwsgi.mules[uwsgi.muleid - 1].queue_pipe[1];
	mulepoll[0].events = POLLIN;
	mulepoll[1].fd = uwsgi.shared->mule_queue_pipe[1];
	mulepoll[1].events = POLLIN;
	if (count > 2) {
		mulepoll[2].fd = uwsgi.signal_socket;
		mulepoll[2].events = POLLIN;
		mulepoll[3].fd = uwsgi.my_signal_socket;
		mulepoll[3].events = POLLIN;
	}

	if (farms_count > 0) {
		int tmp_cnt = 0;
		for (i = 0; i < uwsgi.farms_cnt; i++) {
			if (uwsgi_farm_has_mule(&uwsgi.farms[i], uwsgi.muleid)) {
				mulepoll[count + tmp_cnt].fd = uwsgi.farms[i].queue_pipe[1];
				mulepoll[count + tmp_cnt].events = POLLIN;
				tmp_cnt++;
			}
		}
	}

	int ret = poll(mulepoll, count + farms_count, timeout);
	if (ret <= 0) {
		uwsgi_error("poll");
	}
	else {
		if (mulepoll[0].revents & POLLIN) {
			len = read(uwsgi.mules[uwsgi.muleid - 1].queue_pipe[1], message, buffer_size);
		}
		else if (mulepoll[1].revents & POLLIN) {
			len = read(uwsgi.shared->mule_queue_pipe[1], message, buffer_size);
		}
		else {
			if (count > 2) {
				int interesting_fd = -1;
				if (mulepoll[2].revents & POLLIN) {
					interesting_fd = mulepoll[2].fd;
				}
				else if (mulepoll[3].revents & POLLIN) {
					interesting_fd = mulepoll[3].fd;
				}

				if (interesting_fd > -1) {
					len = read(interesting_fd, &uwsgi_signal, 1);
					if (len <= 0) {
						uwsgi_log_verbose("uWSGI mule %d braying: my master died, i will follow him...\n", uwsgi.muleid);
						end_me(0);
					}
#ifdef UWSGI_DEBUG
					uwsgi_log_verbose("master sent signal %d to mule %d\n", uwsgi_signal, uwsgi.muleid);
#endif
					if (uwsgi_signal_handler(uwsgi_signal)) {
						uwsgi_log_verbose("error managing signal %d on mule %d\n", uwsgi_signal, uwsgi.mywid);
					}
					// set the error condition
					len = -1;
					goto clear;
				}
			}

			// read messages in the farm
			for (i = 0; i < farms_count; i++) {
				if (mulepoll[count + i].revents & POLLIN) {
					len = read(mulepoll[count + i].fd, message, buffer_size);
					break;
				}
			}
		}
	}

	if (len < 0) {
		uwsgi_error("read()");
		goto clear;
	}

clear:
	free(mulepoll);
	return len;
}
Example #30
int master_loop(char **argv, char **environ) {

	uint64_t tmp_counter;

	struct timeval last_respawn;
	int last_respawn_rate = 0;

	int pid_found = 0;

	pid_t diedpid;
	int waitpid_status;

	uint8_t uwsgi_signal;

	time_t last_request_timecheck = 0, now = 0;
	uint64_t last_request_count = 0;

	pthread_t logger_thread;
	pthread_t cache_sweeper;

#ifdef UWSGI_UDP
	int udp_fd = -1;
#ifdef UWSGI_MULTICAST
	char *cluster_opt_buf = NULL;
	size_t cluster_opt_size = 4;
#endif
#endif



#ifdef UWSGI_SNMP
	int snmp_fd = -1;
#endif
	int i = 0;
	int rlen;

	int check_interval = 1;

	struct uwsgi_rb_timer *min_timeout;
	struct rb_root *rb_timers = uwsgi_init_rb_timer();


	if (uwsgi.procname_master) {
		uwsgi_set_processname(uwsgi.procname_master);
	}
	else if (uwsgi.procname) {
		uwsgi_set_processname(uwsgi.procname);
	}
	else if (uwsgi.auto_procname) {
		uwsgi_set_processname("uWSGI master");
	}


	uwsgi.current_time = uwsgi_now();

	uwsgi_unix_signal(SIGTSTP, suspend_resume_them_all);
	uwsgi_unix_signal(SIGHUP, grace_them_all);
	if (uwsgi.die_on_term) {
		uwsgi_unix_signal(SIGTERM, kill_them_all);
		uwsgi_unix_signal(SIGQUIT, reap_them_all);
	}
	else {
		uwsgi_unix_signal(SIGTERM, reap_them_all);
		uwsgi_unix_signal(SIGQUIT, kill_them_all);
	}
	uwsgi_unix_signal(SIGINT, kill_them_all);
	uwsgi_unix_signal(SIGUSR1, stats);
	if (uwsgi.auto_snapshot) {
		uwsgi_unix_signal(SIGURG, uwsgi_restore_auto_snapshot);
	}

	atexit(uwsgi_master_cleanup_hooks);

	uwsgi.master_queue = event_queue_init();

	/* route signals to workers... */
#ifdef UWSGI_DEBUG
	uwsgi_log("adding %d to signal poll\n", uwsgi.shared->worker_signal_pipe[0]);
#endif
	event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_signal_pipe[0]);

#ifdef UWSGI_SPOOLER
	if (uwsgi.spoolers) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->spooler_signal_pipe[0]);
	}
#endif

	if (uwsgi.mules_cnt > 0) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->mule_signal_pipe[0]);
	}

	if (uwsgi.log_master) {
		uwsgi.log_master_buf = uwsgi_malloc(uwsgi.log_master_bufsize);
		if (!uwsgi.threaded_logger) {
#ifdef UWSGI_DEBUG
			uwsgi_log("adding %d to master logging\n", uwsgi.shared->worker_log_pipe[0]);
#endif
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]);
		}
		else {
			if (pthread_create(&logger_thread, NULL, logger_thread_loop, NULL)) {
				uwsgi_error("pthread_create()");
				uwsgi_log("falling back to non-threaded logger...\n");
				event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]);
				uwsgi.threaded_logger = 0;
			}
		}

#ifdef UWSGI_ALARM
		// initialize the alarm subsystem
		uwsgi_alarms_init();
#endif
	}

	if (uwsgi.cache_max_items > 0 && !uwsgi.cache_no_expire) {
		if (pthread_create(&cache_sweeper, NULL, cache_sweeper_loop, NULL)) {
			uwsgi_error("pthread_create()");
			uwsgi_log("unable to run the cache sweeper !!!\n");
		}
		else {
			uwsgi_log("cache sweeper thread enabled\n");
		}
	}


	uwsgi.wsgi_req->buffer = uwsgi.workers[0].cores[0].buffer;

	if (uwsgi.has_emperor) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd);
	}

	if (uwsgi.zerg_server) {
		uwsgi.zerg_server_fd = bind_to_unix(uwsgi.zerg_server, uwsgi.listen_queue, 0, 0);
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.zerg_server_fd);
		uwsgi_log("*** Zerg server enabled on %s ***\n", uwsgi.zerg_server);
	}

	if (uwsgi.stats) {
		char *tcp_port = strchr(uwsgi.stats, ':');
		if (tcp_port) {
			// disable deferred accept for this socket
			int current_defer_accept = uwsgi.no_defer_accept;
			uwsgi.no_defer_accept = 1;
			uwsgi.stats_fd = bind_to_tcp(uwsgi.stats, uwsgi.listen_queue, tcp_port);
			uwsgi.no_defer_accept = current_defer_accept;
		}
		else {
			uwsgi.stats_fd = bind_to_unix(uwsgi.stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket);
		}

		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.stats_fd);
		uwsgi_log("*** Stats server enabled on %s fd: %d ***\n", uwsgi.stats, uwsgi.stats_fd);
	}
#ifdef UWSGI_UDP
	if (uwsgi.udp_socket) {
		udp_fd = bind_to_udp(uwsgi.udp_socket, 0, 0);
		if (udp_fd < 0) {
			uwsgi_log("unable to bind to udp socket. SNMP and cluster management services will be disabled.\n");
		}
		else {
			uwsgi_log("UDP server enabled.\n");
			event_queue_add_fd_read(uwsgi.master_queue, udp_fd);
		}
	}

#ifdef UWSGI_MULTICAST
	if (uwsgi.cluster) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.cluster_fd);
		cluster_opt_buf = uwsgi_setup_clusterbuf(&cluster_opt_size);
	}
#endif
#endif

#ifdef UWSGI_SNMP
	snmp_fd = uwsgi_setup_snmp();
#endif

	if (uwsgi.cheap) {
		uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
		for (i = 1; i <= uwsgi.numproc; i++) {
			uwsgi.workers[i].cheaped = 1;
		}
		uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
	}


	// spawn mules
	for (i = 0; i < uwsgi.mules_cnt; i++) {
		size_t mule_patch_size = 0;
		uwsgi.mules[i].patch = uwsgi_string_get_list(&uwsgi.mules_patches, i, &mule_patch_size);
		uwsgi_mule(i + 1);
	}

	// spawn gateways
	for (i = 0; i < ushared->gateways_cnt; i++) {
		if (ushared->gateways[i].pid == 0) {
			gateway_respawn(i);
		}
	}

	// spawn daemons
	uwsgi_daemons_spawn_all();

	// first subscription
	struct uwsgi_string_list *subscriptions = uwsgi.subscriptions;
	while (subscriptions) {
		uwsgi_subscribe(subscriptions->value, 0);
		subscriptions = subscriptions->next;
	}

	// sync the cache store if needed
	if (uwsgi.cache_store && uwsgi.cache_filesize) {
		if (msync(uwsgi.cache_items, uwsgi.cache_filesize, MS_ASYNC)) {
			uwsgi_error("msync()");
		}
	}

	if (uwsgi.queue_store && uwsgi.queue_filesize) {
		if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
			uwsgi_error("msync()");
		}
	}

	// update touches timestamps
	uwsgi_check_touches(uwsgi.touch_reload);
	uwsgi_check_touches(uwsgi.touch_logrotate);
	uwsgi_check_touches(uwsgi.touch_logreopen);

	// setup cheaper algos
	uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
	if (uwsgi.requested_cheaper_algo) {
		uwsgi.cheaper_algo = NULL;
		struct uwsgi_cheaper_algo *uca = uwsgi.cheaper_algos;
		while (uca) {
			if (!strcmp(uca->name, uwsgi.requested_cheaper_algo)) {
				uwsgi.cheaper_algo = uca->func;
				break;
			}
			uca = uca->next;
		}

		if (!uwsgi.cheaper_algo) {
			uwsgi_log("unable to find requested cheaper algorithm, falling back to spare\n");
			uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
		}

	}

	// here really starts the master loop

	for (;;) {
		//uwsgi_log("uwsgi.ready_to_reload %d %d\n", uwsgi.ready_to_reload, uwsgi.numproc);

		// run master_cycle hook for every plugin
		for (i = 0; i < uwsgi.gp_cnt; i++) {
			if (uwsgi.gp[i]->master_cycle) {
				uwsgi.gp[i]->master_cycle();
			}
		}
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->master_cycle) {
				uwsgi.p[i]->master_cycle();
			}
		}

		uwsgi_daemons_smart_check();

		// count the number of active workers
		int active_workers = 0;
		for (i = 1; i <= uwsgi.numproc; i++) {
			if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
				active_workers++;
			}
		}


		if (uwsgi.to_outworld) {
			//uwsgi_log("%d/%d\n", uwsgi.lazy_respawned, uwsgi.numproc);
			if (uwsgi.lazy_respawned >= active_workers) {
				uwsgi.to_outworld = 0;
				uwsgi.master_mercy = 0;
				uwsgi.lazy_respawned = 0;
			}
		}


		if (uwsgi_master_check_mercy())
			return 0;

		if (uwsgi.respawn_workers) {
			for (i = 1; i <= uwsgi.respawn_workers; i++) {
				if (uwsgi_respawn_worker(i))
					return 0;
			}

			uwsgi.respawn_workers = 0;
		}

		if (uwsgi.restore_snapshot) {
			uwsgi_master_restore_snapshot();
			continue;
		}

		// cheaper management
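		// run the selected cheaper algorithm only during normal operation:
		// not in cheap mode, not while reloading or shutting down, and not
		// while the workers are suspended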
		if (uwsgi.cheaper && !uwsgi.cheap && !uwsgi.to_heaven && !uwsgi.to_hell && !uwsgi.to_outworld && !uwsgi.workers[0].suspended) {
			if (!uwsgi_calc_cheaper())
				return 0;
		}
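
		// brutal shutdown path: once every active worker has reported itself
		// ready to die (or the instance is running cheap), reap the remaining
		// gateways, mules and daemons and exit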

		if ((uwsgi.cheap || uwsgi.ready_to_die >= active_workers) && uwsgi.to_hell) {
			// call a series of waitpid to ensure all processes (gateways, mules and daemons) are dead
			for (i = 0; i < (ushared->gateways_cnt + uwsgi.daemons_cnt + uwsgi.mules_cnt); i++) {
				diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
			}

			uwsgi_log("goodbye to uWSGI.\n");
			exit(0);
		}

		if ((uwsgi.cheap || uwsgi.ready_to_reload >= active_workers) && uwsgi.to_heaven) {
			uwsgi_reload(argv);
			// never here (unless in shared library mode)
			return -1;
		}
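
		// non-blocking reap: check whether any child process (worker, spooler,
		// mule, gateway or daemon) has exited since the last cycle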

		diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
		if (diedpid == -1) {
			if (errno == ECHILD) {
				// something did not work as expected, just assume all has been cleared
				if (uwsgi.to_heaven) {
					uwsgi.ready_to_reload = uwsgi.numproc;
					continue;
				}
				else if (uwsgi.to_hell) {
					uwsgi.ready_to_die = uwsgi.numproc;
					continue;
				}
				else if (uwsgi.to_outworld) {
					uwsgi.lazy_respawned = uwsgi.numproc;
					uwsgi_log("*** no workers to reload found ***\n");
					continue;
				}
				diedpid = 0;
			}
			else {
				uwsgi_error("waitpid()");
				/* here it is better to reload the whole uWSGI stack */
				uwsgi_log("something horrible happened...\n");
				reap_them_all(0);
				exit(1);
			}
		}

		if (diedpid == 0) {

			/* all processes ok, doing status scan after N seconds */
			check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
			if (!check_interval)
				check_interval = 1;


			// add unregistered file monitors
			// locking is not needed as monitors can only increase
			for (i = 0; i < ushared->files_monitored_cnt; i++) {
				if (!ushared->files_monitored[i].registered) {
					ushared->files_monitored[i].fd = event_queue_add_file_monitor(uwsgi.master_queue, ushared->files_monitored[i].filename, &ushared->files_monitored[i].id);
					ushared->files_monitored[i].registered = 1;
				}
			}


			// add unregistered timers
			// locking is not needed as timers can only increase
			for (i = 0; i < ushared->timers_cnt; i++) {
				if (!ushared->timers[i].registered) {
					ushared->timers[i].fd = event_queue_add_timer(uwsgi.master_queue, &ushared->timers[i].id, ushared->timers[i].value);
					ushared->timers[i].registered = 1;
				}
			}

			// add unregistered rb_timers
			// locking is not needed as rb_timers can only increase
			for (i = 0; i < ushared->rb_timers_cnt; i++) {
				if (!ushared->rb_timers[i].registered) {
					ushared->rb_timers[i].uwsgi_rb_timer = uwsgi_add_rb_timer(rb_timers, uwsgi_now() + ushared->rb_timers[i].value, &ushared->rb_timers[i]);
					ushared->rb_timers[i].registered = 1;
				}
			}

			int interesting_fd = -1;
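
			// with rb_timers defined, shrink the wait timeout to the closest
			// timer deadline so expired timers are processed on time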

			if (ushared->rb_timers_cnt > 0) {
				min_timeout = uwsgi_min_rb_timer(rb_timers);
				if (min_timeout == NULL) {
					check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
				}
				else {
					check_interval = min_timeout->key - uwsgi_now();
					if (check_interval <= 0) {
						expire_rb_timeouts(rb_timers);
						check_interval = 0;
					}
				}
			}

			// wait for event
			rlen = event_queue_wait(uwsgi.master_queue, check_interval, &interesting_fd);

			if (rlen == 0) {
				if (ushared->rb_timers_cnt > 0) {
					expire_rb_timeouts(rb_timers);
				}
			}


			// check uwsgi-cron table
			if (ushared->cron_cnt) {
				uwsgi_manage_signal_cron(uwsgi_now());
			}

			if (uwsgi.crons) {
				uwsgi_manage_command_cron(uwsgi_now());
			}


			// check for probes
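			// probes are evaluated on every wake-up; when the master wakes up
			// on timeout (interesting_fd == -1) their cycle counter is bumped,
			// and a firing probe routes its associated uwsgi signal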
			if (ushared->probes_cnt > 0) {
				uwsgi_lock(uwsgi.probe_table_lock);
				for (i = 0; i < ushared->probes_cnt; i++) {
					if (interesting_fd == -1) {
						// increment cycles
						ushared->probes[i].cycles++;
					}
					if (ushared->probes[i].func(interesting_fd, &ushared->probes[i])) {
						uwsgi_route_signal(ushared->probes[i].sig);
					}
				}
				uwsgi_unlock(uwsgi.probe_table_lock);
			}

			if (rlen > 0) {
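				// a file descriptor is ready: dispatch it to the right subsystem
				// (log pipe, stats, zerg, emperor, application sockets in cheap
				// mode, SNMP/UDP, file monitors, timers, signal pipes)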

				if (uwsgi.log_master && !uwsgi.threaded_logger) {
					if (interesting_fd == uwsgi.shared->worker_log_pipe[0]) {
						uwsgi_master_log();
						goto health_cycle;
					}
				}

				if (uwsgi.stats && uwsgi.stats_fd > -1) {
					if (interesting_fd == uwsgi.stats_fd) {
						uwsgi_send_stats(uwsgi.stats_fd);
						goto health_cycle;
					}
				}

				if (uwsgi.zerg_server) {
					if (interesting_fd == uwsgi.zerg_server_fd) {
						uwsgi_manage_zerg(uwsgi.zerg_server_fd, 0, NULL);
						goto health_cycle;
					}
				}

				if (uwsgi.has_emperor) {
					if (interesting_fd == uwsgi.emperor_fd) {
						uwsgi_master_manage_emperor();
						goto health_cycle;
					}
				}


				if (uwsgi.cheap) {
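					// activity on an application socket while in cheap mode:
					// remove the sockets from the master queue and spawn the
					// workers (all of them, or cheaper_count when uwsgi.cheaper is enabled)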
					int found = 0;
					struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
					while (uwsgi_sock) {
						if (interesting_fd == uwsgi_sock->fd) {
							found = 1;
							uwsgi.cheap = 0;
							uwsgi_del_sockets_from_queue(uwsgi.master_queue);
							int needed = uwsgi.numproc;
							if (uwsgi.cheaper) {
								needed = uwsgi.cheaper_count;
							}
							for (i = 1; i <= needed; i++) {
								if (uwsgi_respawn_worker(i))
									return 0;
							}
							break;
						}
						uwsgi_sock = uwsgi_sock->next;
					}
					// here it is better to continue instead of going to health_cycle
					if (found)
						continue;
				}
#ifdef UWSGI_SNMP
				if (uwsgi.snmp_addr && interesting_fd == snmp_fd) {
					uwsgi_master_manage_snmp(snmp_fd);
					goto health_cycle;
				}
#endif

#ifdef UWSGI_UDP
				if (uwsgi.udp_socket && interesting_fd == udp_fd) {
					uwsgi_master_manage_udp(udp_fd);
					goto health_cycle;
				}

#ifdef UWSGI_MULTICAST
				if (interesting_fd == uwsgi.cluster_fd) {

					if (uwsgi_get_dgram(uwsgi.cluster_fd, &uwsgi.workers[0].cores[0].req)) {
						goto health_cycle;
					}

					manage_cluster_message(cluster_opt_buf, cluster_opt_size);

					goto health_cycle;
				}
#endif

#endif


				int next_iteration = 0;

				uwsgi_lock(uwsgi.fmon_table_lock);
				for (i = 0; i < ushared->files_monitored_cnt; i++) {
					if (ushared->files_monitored[i].registered) {
						if (interesting_fd == ushared->files_monitored[i].fd) {
							struct uwsgi_fmon *uf = event_queue_ack_file_monitor(uwsgi.master_queue, interesting_fd);
							// now call the file_monitor handler
							if (uf)
								uwsgi_route_signal(uf->sig);
							break;
						}
					}
				}

				uwsgi_unlock(uwsgi.fmon_table_lock);
				if (next_iteration)
					goto health_cycle;

				next_iteration = 0;

				uwsgi_lock(uwsgi.timer_table_lock);
				for (i = 0; i < ushared->timers_cnt; i++) {
					if (ushared->timers[i].registered) {
						if (interesting_fd == ushared->timers[i].fd) {
							struct uwsgi_timer *ut = event_queue_ack_timer(interesting_fd);
							// now call the timer handler
							if (ut)
								uwsgi_route_signal(ut->sig);
							break;
						}
					}
				}
				uwsgi_unlock(uwsgi.timer_table_lock);
				if (next_iteration)
					goto health_cycle;


				// check for worker signal
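				// a worker raised a uwsgi signal: a single byte on the pipe
				// carries the signal number, an empty read means the peer is gone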
				if (interesting_fd == uwsgi.shared->worker_signal_pipe[0]) {
					rlen = read(interesting_fd, &uwsgi_signal, 1);
					if (rlen < 0) {
						uwsgi_error("read()");
					}
					else if (rlen > 0) {
#ifdef UWSGI_DEBUG
						uwsgi_log_verbose("received uwsgi signal %d from a worker\n", uwsgi_signal);
#endif
						uwsgi_route_signal(uwsgi_signal);
					}
					else {
						uwsgi_log_verbose("lost connection with worker %d\n", i);
						close(interesting_fd);
					}
					goto health_cycle;
				}

#ifdef UWSGI_SPOOLER
				// check for spooler signal
				if (uwsgi.spoolers) {
					if (interesting_fd == uwsgi.shared->spooler_signal_pipe[0]) {
						rlen = read(interesting_fd, &uwsgi_signal, 1);
						if (rlen < 0) {
							uwsgi_error("read()");
						}
						else if (rlen > 0) {
#ifdef UWSGI_DEBUG
							uwsgi_log_verbose("received uwsgi signal %d from a spooler\n", uwsgi_signal);
#endif
							uwsgi_route_signal(uwsgi_signal);
						}
						else {
							uwsgi_log_verbose("lost connection with the spooler\n");
							close(interesting_fd);
						}
						goto health_cycle;
					}

				}
#endif

				// check for mules signal
				if (uwsgi.mules_cnt > 0) {
					if (interesting_fd == uwsgi.shared->mule_signal_pipe[0]) {
						rlen = read(interesting_fd, &uwsgi_signal, 1);
						if (rlen < 0) {
							uwsgi_error("read()");
						}
						else if (rlen > 0) {
#ifdef UWSGI_DEBUG
							uwsgi_log_verbose("received uwsgi signal %d from a mule\n", uwsgi_signal);
#endif
							uwsgi_route_signal(uwsgi_signal);
						}
						else {
							uwsgi_log_verbose("lost connection with a mule\n");
							close(interesting_fd);
						}
						goto health_cycle;
					}

				}


			}

health_cycle:
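			// everything below runs at most once per second, no matter how many
			// events woke up the master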
			now = uwsgi_now();
			if (now - uwsgi.current_time < 1) {
				continue;
			}
			uwsgi.current_time = now;

			// check the log size and rotate the log if needed
			if (uwsgi.logfile) {
				uwsgi_check_logrotate();
			}

			// this will be incremented at (more or less) regular intervals
			uwsgi.master_cycles++;

			// recalculate the requests counter on configurations at risk of race conditions
			// a bit of inaccuracy is better than locking ;)

			if (uwsgi.numproc > 1) {
				tmp_counter = 0;
				for (i = 1; i < uwsgi.numproc + 1; i++)
					tmp_counter += uwsgi.workers[i].requests;
				uwsgi.workers[0].requests = tmp_counter;
			}
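
			// idle handling: when no request has been served for more than
			// uwsgi.idle seconds and no worker is busy, either shut the whole
			// instance down (die_on_idle, notifying the emperor if there is one)
			// or kill the workers and fall back to cheap mode until a new
			// connection arrives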

			if (uwsgi.idle > 0 && !uwsgi.cheap) {
				uwsgi.current_time = uwsgi_now();
				if (!last_request_timecheck)
					last_request_timecheck = uwsgi.current_time;

				int busy_workers = 0;
				for (i = 1; i <= uwsgi.numproc; i++) {
					if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
						if (uwsgi.workers[i].busy == 1) {
							busy_workers = 1;
							break;
						}
					}
				}

				if (last_request_count != uwsgi.workers[0].requests) {
					last_request_timecheck = uwsgi.current_time;
					last_request_count = uwsgi.workers[0].requests;
				}
				// a bit of over-engineering to avoid clock skews
				else if (last_request_timecheck < uwsgi.current_time && (uwsgi.current_time - last_request_timecheck > uwsgi.idle) && !busy_workers) {
					uwsgi_log("workers have been inactive for more than %d seconds (%llu-%llu)\n", uwsgi.idle, (unsigned long long) uwsgi.current_time, (unsigned long long) last_request_timecheck);
					uwsgi.cheap = 1;
					if (uwsgi.die_on_idle) {
						if (uwsgi.has_emperor) {
							char byte = 22;
							if (write(uwsgi.emperor_fd, &byte, 1) != 1) {
								uwsgi_error("write()");
								kill_them_all(0);
							}
						}
						else {
							kill_them_all(0);
						}
						continue;
					}
					for (i = 1; i <= uwsgi.numproc; i++) {
						uwsgi.workers[i].cheaped = 1;
						if (uwsgi.workers[i].pid == 0)
							continue;
						kill(uwsgi.workers[i].pid, SIGKILL);
						if (waitpid(uwsgi.workers[i].pid, &waitpid_status, 0) < 0) {
							if (errno != ECHILD)
								uwsgi_error("waitpid()");
						}
					}
					uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
					uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
					last_request_timecheck = 0;
					continue;
				}
			}

			check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
			if (!check_interval)
				check_interval = 1;


#ifdef __linux__
			// get listen_queue status
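			// (Linux only) sample the kernel listen-queue counters of every
			// bound TCP socket, and of UNIX sockets where SIOBKLGQ is available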
			struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {
				if (uwsgi_sock->family == AF_INET) {
					get_linux_tcp_info(uwsgi_sock->fd);
				}
#ifdef SIOBKLGQ
				else if (uwsgi_sock->family == AF_UNIX) {
					get_linux_unbit_SIOBKLGQ(uwsgi_sock->fd);
				}
#endif
				uwsgi_sock = uwsgi_sock->next;
			}
#endif
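
			// per-worker checks: harakiri deadlines (request-triggered and
			// user-defined) and the "evil" address space / RSS limits that
			// SIGKILL a worker exceeding them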

			for (i = 1; i <= uwsgi.numproc; i++) {
				/* first check for harakiri */
				if (uwsgi.workers[i].harakiri > 0) {
					if (uwsgi.workers[i].harakiri < (time_t) uwsgi.current_time) {
						trigger_harakiri(i);
					}
				}
				/* then user-defined harakiri */
				if (uwsgi.workers[i].user_harakiri > 0) {
					if (uwsgi.workers[i].user_harakiri < (time_t) uwsgi.current_time) {
						trigger_harakiri(i);
					}
				}
				// then for evil memory checkers
				if (uwsgi.evil_reload_on_as) {
					if ((rlim_t) uwsgi.workers[i].vsz_size >= uwsgi.evil_reload_on_as) {
						uwsgi_log("*** EVIL RELOAD ON WORKER %d ADDRESS SPACE: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].vsz_size, uwsgi.workers[i].pid);
						kill(uwsgi.workers[i].pid, SIGKILL);
						uwsgi.workers[i].vsz_size = 0;
					}
				}
				if (uwsgi.evil_reload_on_rss) {
					if ((rlim_t) uwsgi.workers[i].rss_size >= uwsgi.evil_reload_on_rss) {
						uwsgi_log("*** EVIL RELOAD ON WORKER %d RSS: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].rss_size, uwsgi.workers[i].pid);
						kill(uwsgi.workers[i].pid, SIGKILL);
						uwsgi.workers[i].rss_size = 0;
					}
				}

				// need to find a better way
				//uwsgi.workers[i].last_running_time = uwsgi.workers[i].running_time;
			}

			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways_harakiri[i] > 0) {
					if (ushared->gateways_harakiri[i] < (time_t) uwsgi.current_time) {
						if (ushared->gateways[i].pid > 0) {
							kill(ushared->gateways[i].pid, SIGKILL);
						}
						ushared->gateways_harakiri[i] = 0;
					}
				}
			}

			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].harakiri > 0) {
					if (uwsgi.mules[i].harakiri < (time_t) uwsgi.current_time) {
						uwsgi_log("*** HARAKIRI ON MULE %d HANDLING SIGNAL %d (pid: %d) ***\n", i + 1, uwsgi.mules[i].signum, uwsgi.mules[i].pid);
						kill(uwsgi.mules[i].pid, SIGKILL);
						uwsgi.mules[i].harakiri = 0;
					}
				}
			}
#ifdef UWSGI_SPOOLER
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			while (uspool) {
				if (uspool->harakiri > 0 && uspool->harakiri < (time_t) uwsgi.current_time) {
					uwsgi_log("*** HARAKIRI ON THE SPOOLER (pid: %d) ***\n", uspool->pid);
					kill(uspool->pid, SIGKILL);
					uspool->harakiri = 0;
				}
				uspool = uspool->next;
			}
#endif

#ifdef __linux__
#ifdef MADV_MERGEABLE
			if (uwsgi.linux_ksm > 0 && (uwsgi.master_cycles % uwsgi.linux_ksm) == 0) {
				uwsgi_linux_ksm_map();
			}
#endif
#endif

#ifdef UWSGI_UDP
			// check for cluster nodes
			master_check_cluster_nodes();

			// reannounce myself every 10 cycles
			if (uwsgi.cluster && uwsgi.cluster_fd >= 0 && !uwsgi.cluster_nodes && (uwsgi.master_cycles % 10) == 0) {
				uwsgi_cluster_add_me();
			}

			// resubscribe every subscribe_freq master cycles (10 by default), plus once on the first cycle
			if ((uwsgi.subscriptions && ((uwsgi.master_cycles % uwsgi.subscribe_freq) == 0 || uwsgi.master_cycles == 1)) && !uwsgi.to_heaven && !uwsgi.to_hell && !uwsgi.workers[0].suspended) {
				struct uwsgi_string_list *subscriptions = uwsgi.subscriptions;
				while (subscriptions) {
					uwsgi_subscribe(subscriptions->value, 0);
					subscriptions = subscriptions->next;
				}
			}

#endif

			if (uwsgi.cache_store && uwsgi.cache_filesize && uwsgi.cache_store_sync && ((uwsgi.master_cycles % uwsgi.cache_store_sync) == 0)) {
				if (msync(uwsgi.cache_items, uwsgi.cache_filesize, MS_ASYNC)) {
					uwsgi_error("msync()");
				}
			}

			if (uwsgi.queue_store && uwsgi.queue_filesize && uwsgi.queue_store_sync && ((uwsgi.master_cycles % uwsgi.queue_store_sync) == 0)) {
				if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
					uwsgi_error("msync()");
				}
			}

			// check touch_reload
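			// if any file registered for touch-reload has been touched since the
			// last check, gracefully reload all the workers; SIGHUP is blocked
			// while doing it to avoid re-entering the handler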
			if (!uwsgi.to_heaven && !uwsgi.to_hell) {
				char *touched = uwsgi_check_touches(uwsgi.touch_reload);
				if (touched) {
					uwsgi_log("*** %s has been touched... grace them all !!! ***\n", touched);
					uwsgi_block_signal(SIGHUP);
					grace_them_all(0);
					uwsgi_unblock_signal(SIGHUP);
				}
			}

			continue;

		}

		// no one died
		if (diedpid <= 0)
			continue;

		// check for deadlocks first
		uwsgi_deadlock_check(diedpid);

		// respawn spoolers, mules, gateways and daemons only during normal workflow (outworld status included)
		if (!uwsgi.to_heaven && !uwsgi.to_hell) {

#ifdef UWSGI_SPOOLER
			/* reload the spooler */
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			pid_found = 0;
			while (uspool) {
				if (uspool->pid > 0 && diedpid == uspool->pid) {
					uwsgi_log("OOOPS the spooler is no more...trying respawn...\n");
					uspool->respawned++;
					uspool->pid = spooler_start(uspool);
					pid_found = 1;
					break;
				}
				uspool = uspool->next;
			}

			if (pid_found)
				continue;
#endif

			pid_found = 0;
			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].pid == diedpid) {
					uwsgi_log("OOOPS mule %d (pid: %d) crippled...trying respawn...\n", i + 1, uwsgi.mules[i].pid);
					uwsgi_mule(i + 1);
					pid_found = 1;
					break;
				}
			}

			if (pid_found)
				continue;


			/* reload the gateways */
			pid_found = 0;
			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways[i].pid == diedpid) {
					gateway_respawn(i);
					pid_found = 1;
					break;
				}
			}

			if (pid_found)
				continue;

			/* reload the daemons */
			pid_found = uwsgi_daemon_check_pid_reload(diedpid);

			if (pid_found)
				continue;

		}


		/* What happens here ?

		   case 1) the diedpid is not a worker, report it and continue
		   case 2) the diedpid is a worker and we are not in a reload procedure -> respawn it
		   case 3) the diedpid is a worker and we are in a graceful reload -> uwsgi.ready_to_reload++ and continue
		   case 4) the diedpid is a worker and we are in a brutal reload -> uwsgi.ready_to_die++ and continue
		 */

		uwsgi.mywid = find_worker_id(diedpid);
		if (uwsgi.mywid <= 0) {
			// check spooler, mules, gateways and daemons
#ifdef UWSGI_SPOOLER
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			while (uspool) {
				if (uspool->pid > 0 && diedpid == uspool->pid) {
					uwsgi_log("spooler (pid: %d) annihilated\n", (int) diedpid);
					goto next;
				}
				uspool = uspool->next;
			}
#endif

			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].pid == diedpid) {
					uwsgi_log("mule %d (pid: %d) annihilated\n", i + 1, (int) diedpid);
					goto next;
				}
			}

			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways[i].pid == diedpid) {
					uwsgi_log("gateway %d (%s, pid: %d) annihilated\n", i + 1, ushared->gateways[i].fullname, (int) diedpid);
					goto next;
				}
			}

			if (uwsgi_daemon_check_pid_death(diedpid)) goto next;

			if (WIFEXITED(waitpid_status)) {
				uwsgi_log("subprocess %d exited with code %d\n", (int) diedpid, WEXITSTATUS(waitpid_status));
			}
			else if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("subprocess %d exited by signal %d\n", (int) diedpid, WTERMSIG(waitpid_status));
			}
			else if (WIFSTOPPED(waitpid_status)) {
				uwsgi_log("subprocess %d stopped\n", (int) diedpid);
			}
next:
			continue;
		}


		// ok a worker died...
		if (uwsgi.to_heaven) {
			uwsgi.ready_to_reload++;
			uwsgi.workers[uwsgi.mywid].pid = 0;
			// only to be safe :P
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
			continue;
		}
		else if (uwsgi.to_hell) {
			uwsgi.ready_to_die++;
			uwsgi.workers[uwsgi.mywid].pid = 0;
			// only to be safe :P
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
			continue;
		}
		else if (uwsgi.to_outworld) {
			uwsgi.lazy_respawned++;
			uwsgi.workers[uwsgi.mywid].destroy = 0;
			uwsgi.workers[uwsgi.mywid].pid = 0;
			// only to be safe :P
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
		}

		if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_FAILED_APP_CODE) {
			uwsgi_log("OOPS ! failed loading app in worker %d (pid %d) :( trying again...\n", uwsgi.mywid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_DE_HIJACKED_CODE) {
			uwsgi_log("...restoring worker %d (pid: %d)...\n", uwsgi.mywid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXCEPTION_CODE) {
			uwsgi_log("... monitored exception detected, respawning worker %d (pid: %d)...\n", uwsgi.mywid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_QUIET_CODE) {
			// noop
		}
		else if (uwsgi.workers[uwsgi.mywid].manage_next_request) {
			if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("DAMN ! worker %d (pid: %d) died, killed by signal %d :( trying respawn ...\n", uwsgi.mywid, (int) diedpid, (int) WTERMSIG(waitpid_status));
			}
			else {
				uwsgi_log("DAMN ! worker %d (pid: %d) died :( trying respawn ...\n", uwsgi.mywid, (int) diedpid);
			}
		}
		else {
                	uwsgi_log("DAMN ! worker %d (pid: %d) MISTERIOUSLY died :( trying respawn ...\n", uwsgi.mywid, (int) diedpid);
		}

		if (uwsgi.workers[uwsgi.mywid].cheaped == 1) {
			uwsgi.workers[uwsgi.mywid].pid = 0;
			uwsgi_log("uWSGI worker %d cheaped.\n", uwsgi.mywid);
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
			continue;
		}
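
		// fork-bomb protection: if more than uwsgi.numproc workers have been
		// respawned within a single check interval, sleep for
		// uwsgi.forkbomb_delay seconds before respawning again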
		gettimeofday(&last_respawn, NULL);
		if (last_respawn.tv_sec <= uwsgi.respawn_delta + check_interval) {
			last_respawn_rate++;
			if (last_respawn_rate > uwsgi.numproc) {
				if (uwsgi.forkbomb_delay > 0) {
					uwsgi_log("worker respawning too fast !!! i have to sleep a bit (%d seconds)...\n", uwsgi.forkbomb_delay);
					/* use --forkbomb-delay 0 to disable sleeping */
					sleep(uwsgi.forkbomb_delay);
				}
				last_respawn_rate = 0;
			}
		}
		else {
			last_respawn_rate = 0;
		}
		gettimeofday(&last_respawn, NULL);
		uwsgi.respawn_delta = last_respawn.tv_sec;

		if (uwsgi_respawn_worker(uwsgi.mywid))
			return 0;

		// end of the loop
	}

	// never here
}