Example no. 1
void uwsgi_mule_handler() {

	ssize_t len;
	uint8_t uwsgi_signal;
	int rlen;
	int interesting_fd;

	// this must be configurable
	char message[65536];

	int mule_queue = event_queue_init();

	event_queue_add_fd_read(mule_queue, uwsgi.signal_socket);
	event_queue_add_fd_read(mule_queue, uwsgi.my_signal_socket);
	event_queue_add_fd_read(mule_queue, uwsgi.mules[uwsgi.muleid - 1].queue_pipe[1]);
	event_queue_add_fd_read(mule_queue, uwsgi.shared->mule_queue_pipe[1]);

	uwsgi_mule_add_farm_to_queue(mule_queue);

	for (;;) {
		rlen = event_queue_wait(mule_queue, -1, &interesting_fd);
		if (rlen <= 0) {
			continue;
		}

		if (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket || farm_has_signaled(interesting_fd)) {
			len = read(interesting_fd, &uwsgi_signal, 1);
			if (len <= 0) {
				uwsgi_log_verbose("uWSGI mule %d braying: my master died, i will follow him...\n", uwsgi.muleid);
				end_me(0);
			}
#ifdef UWSGI_DEBUG
			uwsgi_log_verbose("master sent signal %d to mule %d\n", uwsgi_signal, uwsgi.muleid);
#endif
			if (uwsgi_signal_handler(uwsgi_signal)) {
				uwsgi_log_verbose("error managing signal %d on mule %d\n", uwsgi_signal, uwsgi.muleid);
			}
		}
		else if (interesting_fd == uwsgi.mules[uwsgi.muleid - 1].queue_pipe[1] || interesting_fd == uwsgi.shared->mule_queue_pipe[1] || farm_has_msg(interesting_fd)) {
			len = read(interesting_fd, message, 65536);
			if (len < 0) {
				uwsgi_error("read()");
			}
			else {
				int i, found = 0;
				for (i = 0; i < 256; i++) {
					if (uwsgi.p[i]->mule_msg) {
						if (uwsgi.p[i]->mule_msg(message, len)) {
							found = 1;
							break;
						}
					}
				}
				if (!found)
					uwsgi_log("*** mule %d received a %ld bytes message ***\n", uwsgi.muleid, (long) len);
			}
		}
	}

}
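Example no. 1 above, like most of the snippets in this collection, follows the same event-queue skeleton: create a queue with event_queue_init(), register the interesting file descriptors with event_queue_add_fd_read(), then block in event_queue_wait() (or event_queue_wait_multi()) and dispatch on whichever descriptor woke the loop up. The sketch below is not taken from the uWSGI sources; it only strings together the calls shown in these examples (signatures assumed from the snippets themselves), and example_event_loop() with its two descriptors is hypothetical.

/* Minimal sketch of the wait-and-dispatch skeleton shared by these examples.
   example_event_loop() and the two descriptors are hypothetical; the
   event_queue_* signatures are assumed from the snippets in this collection. */
static void example_event_loop(int fd_a, int fd_b) {

	int queue = event_queue_init();

	event_queue_add_fd_read(queue, fd_a);
	event_queue_add_fd_read(queue, fd_b);

	for (;;) {
		int interesting_fd = -1;
		int ret = event_queue_wait(queue, -1, &interesting_fd);
		// interrupted or timed out: just wait again
		if (ret <= 0) continue;

		if (interesting_fd == fd_a) {
			// ... consume the event on fd_a (e.g. read() from it) ...
		}
		else if (interesting_fd == fd_b) {
			// ... consume the event on fd_b ...
		}
	}
}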
Example no. 2
void uwsgi_corerouter_manage_subscription(struct uwsgi_corerouter *ucr, int id, struct uwsgi_gateway_socket *ugs) {

	int i;
	struct uwsgi_subscribe_req usr;
	char bbuf[4096];

	ssize_t len = recv(ugs->fd, bbuf, 4096, 0);
#ifdef UWSGI_EVENT_USE_PORT
	event_queue_add_fd_read(ucr->queue, ugs->fd);
#endif
	if (len > 0) {
		memset(&usr, 0, sizeof(struct uwsgi_subscribe_req));
		uwsgi_hooked_parse(bbuf + 4, len - 4, corerouter_manage_subscription, &usr);

		// subscribe request ?
		if (bbuf[3] == 0) {
			if (uwsgi_add_subscribe_node(&ucr->subscriptions, &usr, ucr->subscription_regexp) && ucr->i_am_cheap) {
				struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets;
				while (ugs) {
					if (!strcmp(ugs->owner, ucr->name) && !ugs->subscription) {
						event_queue_add_fd_read(ucr->queue, ugs->fd);
					}
					ugs = ugs->next;
				}
				ucr->i_am_cheap = 0;
				uwsgi_log("[%s pid %d] leaving cheap mode...\n", ucr->name, (int) uwsgi.mypid);
			}
		}
		//unsubscribe 
		else {
			struct uwsgi_subscribe_node *node = uwsgi_get_subscribe_node_by_name(&ucr->subscriptions, usr.key, usr.keylen, usr.address, usr.address_len, ucr->subscription_regexp);
			if (node && node->len) {
				if (node->death_mark == 0)
					uwsgi_log("[%s pid %d] %.*s => marking %.*s as failed\n", ucr->name, (int) uwsgi.mypid, (int) usr.keylen, usr.key, (int) usr.address_len, usr.address);
				node->failcnt++;
				node->death_mark = 1;
				// check if i can remove the node
				if (node->reference == 0) {
					uwsgi_remove_subscribe_node(&ucr->subscriptions, node);
				}
				if (ucr->subscriptions == NULL && ucr->cheap && !ucr->i_am_cheap) {
					uwsgi_gateway_go_cheap(ucr->name, ucr->queue, &ucr->i_am_cheap);
				}
			}
		}

		// propagate the subscription to other nodes
		for (i = 0; i < ushared->gateways_cnt; i++) {
			if (i == id)
				continue;
			if (!strcmp(ushared->gateways[i].name, ucr->name)) {
				if (send(ushared->gateways[i].internal_subscription_pipe[0], bbuf, len, 0) != len) {
					uwsgi_error("send()");
				}
			}
		}
	}

}
Example no. 3
void uwsgi_mule_add_farm_to_queue(int queue) {

	int i;
	for (i = 0; i < uwsgi.farms_cnt; i++) {
		if (uwsgi_farm_has_mule(&uwsgi.farms[i], uwsgi.muleid)) {
			event_queue_add_fd_read(queue, uwsgi.farms[i].signal_pipe[1]);
			event_queue_add_fd_read(queue, uwsgi.farms[i].queue_pipe[1]);
		}
	}
}
Example no. 4
static int u_offload_pipe_do(struct uwsgi_thread *ut, struct uwsgi_offload_request *uor, int fd) {
	
	ssize_t rlen;

	// setup
	if (fd == -1) {
		event_queue_add_fd_read(ut->queue, uor->fd);
		return 0;
	}

	switch(uor->status) {
		// read event from fd
		case 0:
			if (!uor->buf) {
				uor->buf = uwsgi_malloc(4096);
			}
			rlen = read(uor->fd, uor->buf, 4096);
			if (rlen > 0) {
				uor->to_write = rlen;
				uor->pos = 0;
				if (event_queue_del_fd(ut->queue, uor->fd, event_queue_read())) return -1;
				if (event_queue_add_fd_write(ut->queue, uor->s)) return -1;
				uor->status = 1;
				return 0;
			}
			if (rlen < 0) {
				uwsgi_offload_retry
				uwsgi_error("u_offload_pipe_do() -> read()");
			}
			return -1;
		// write event on s
		case 1:
			rlen = write(uor->s, uor->buf + uor->pos, uor->to_write);
			if (rlen > 0) {
				uor->to_write -= rlen;
				uor->pos += rlen;
				if (uor->to_write == 0) {
					if (event_queue_del_fd(ut->queue, uor->s, event_queue_write())) return -1;
					if (event_queue_add_fd_read(ut->queue, uor->fd)) return -1;
					uor->status = 0;
				}
				return 0;
			}
			else if (rlen < 0) {
				uwsgi_offload_retry
				uwsgi_error("u_offload_pipe_do() -> write()");
			}
			return -1;
		default:
			break;
	}

	return -1;
}
Example no. 5
File: loop.c Project: AGoodId/uwsgi
void *simple_loop_run(void *arg1) {

	long core_id = (long) arg1;

	struct wsgi_request *wsgi_req = &uwsgi.workers[uwsgi.mywid].cores[core_id].req;

	if (uwsgi.threads > 1) {
		uwsgi_setup_thread_req(core_id, wsgi_req);
	}
	// initialize the main event queue to monitor sockets
	int main_queue = event_queue_init();

	uwsgi_add_sockets_to_queue(main_queue, core_id);

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(main_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(main_queue, uwsgi.my_signal_socket);
	}


	// ok we are ready, let's start managing requests and signals
	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		wsgi_req_setup(wsgi_req, core_id, NULL);

		if (wsgi_req_accept(main_queue, wsgi_req)) {
			continue;
		}

		if (wsgi_req_recv(main_queue, wsgi_req)) {
			uwsgi_destroy_request(wsgi_req);
			continue;
		}

		uwsgi_close_request(wsgi_req);
	}

	// end of the loop
	if (uwsgi.workers[uwsgi.mywid].destroy && uwsgi.workers[0].pid > 0) {
#ifdef __APPLE__
		kill(uwsgi.workers[0].pid, SIGTERM);
#else
		if (uwsgi.propagate_touch) {
			kill(uwsgi.workers[0].pid, SIGHUP);
		}
		else {
			gracefully_kill(0);
		}
#endif
	}
	return NULL;
}
Example no. 6
void uwsgi_add_sockets_to_queue(int queue, int async_id) {

	struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
	while (uwsgi_sock) {
		if (uwsgi_sock->fd_threads && async_id > -1) {
			event_queue_add_fd_read(queue, uwsgi_sock->fd_threads[async_id]);
		}
		else if (uwsgi_sock->fd > -1) {
			event_queue_add_fd_read(queue, uwsgi_sock->fd);
		}
		uwsgi_sock = uwsgi_sock->next;
	}

}
Example no. 7
File: event.c Project: sashka/uwsgi
int event_queue_add_file_monitor(int eq, char *filename, int *id) {

	int ifd = -1;
	int i;
	int add_to_queue = 0;

	for (i=0;i<ushared->files_monitored_cnt;i++) {
		if (ushared->files_monitored[i].registered) {
			ifd = ushared->files_monitored[0].fd;
			break;
		}
	}

	if (ifd == -1) {
		ifd = inotify_init();
		if (ifd < 0) {
			uwsgi_error("inotify_init()");
			return -1;
		}		
		add_to_queue = 1;
	}

	*id = inotify_add_watch(ifd, filename, IN_ATTRIB|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MODIFY|IN_MOVE_SELF|IN_MOVED_FROM|IN_MOVED_TO);
		
#ifdef UWSGI_DEBUG
	uwsgi_log("added watch %d for filename %s\n", *id, filename);
#endif

	if (add_to_queue) {
		return event_queue_add_fd_read(eq, ifd);
	}
	else {
		return ifd;
	}
}
Example no. 8
File: event.c Project: sashka/uwsgi
int event_queue_add_timer(int eq, int *id, int sec) {

	struct itimerspec it;
	int tfd = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC);

	if (tfd < 0) {
		uwsgi_error("timerfd_create()");
		return -1;
	}

	it.it_value.tv_sec = sec;
	it.it_value.tv_nsec = 0;

	it.it_interval.tv_sec = sec;
	it.it_interval.tv_nsec = 0;

	if (timerfd_settime(tfd, 0, &it, NULL)) {
		uwsgi_error("timerfd_settime()");
		close(tfd);
		return -1;
	}
	
	*id = tfd;
	return event_queue_add_fd_read(eq, tfd);
}
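Example no. 8 registers a timer by putting a timerfd into the event queue: the descriptor returned through *id becomes readable at every expiration. The consumer sketch below is not from the uWSGI tree; example_timer_loop() is hypothetical, the event_queue_* signatures are assumed from these snippets, and the 8-byte expiration counter is standard Linux timerfd behaviour (matching the backend shown above).

/* Hypothetical consumer for event_queue_add_timer() as implemented above
   (Linux timerfd backend): drain the 8-byte expiration counter whenever
   the timer fd becomes readable. */
#include <stdint.h>
#include <unistd.h>

static void example_timer_loop(int queue) {

	int timer_id = -1;
	// fire every 3 seconds
	if (event_queue_add_timer(queue, &timer_id, 3) < 0) return;

	for (;;) {
		int interesting_fd = -1;
		if (event_queue_wait(queue, -1, &interesting_fd) <= 0) continue;

		if (interesting_fd == timer_id) {
			uint64_t expirations;
			// reading resets readability until the next expiration
			if (read(timer_id, &expirations, sizeof(expirations)) != sizeof(expirations)) continue;
			// ... periodic work here ...
		}
	}
}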
Example no. 9
int async_add_fd_read(struct wsgi_request *wsgi_req, int fd, int timeout) {

	struct uwsgi_async_fd *last_uad = NULL, *uad = wsgi_req->waiting_fds;

	if (fd < 0)
		return -1;

	// find last slot
	while (uad) {
		last_uad = uad;
		uad = uad->next;
	}

	uad = uwsgi_malloc(sizeof(struct uwsgi_async_fd));
	uad->fd = fd;
	uad->event = event_queue_read();
	uad->prev = last_uad;
	uad->next = NULL;

	if (last_uad) {
		last_uad->next = uad;
	}
	else {
		wsgi_req->waiting_fds = uad;
	}

	if (timeout > 0) {
		async_add_timeout(wsgi_req, timeout);
	}
	uwsgi.async_waiting_fd_table[fd] = wsgi_req;
	wsgi_req->async_force_again = 1;
	return event_queue_add_fd_read(uwsgi.async_queue, fd);
}
Example no. 10
// initialize the zmq PULL socket
static void uwsgi_imperial_monitor_zeromq_init(struct uwsgi_emperor_scanner *ues) {

	void *context = uwsgi_zeromq_init();
	
	ues->data = zmq_socket(context, ZMQ_PULL);
	if (!ues->data) {
		uwsgi_error("zmq_socket()");
		exit(1);
	}

	if (zmq_bind(ues->data, ues->arg+6)) {
		uwsgi_error("zmq_socket()");
		exit(1);
	}

	size_t zmq_socket_len = sizeof(int);
	if (zmq_getsockopt(ues->data, ZMQ_FD, &ues->fd, &zmq_socket_len) < 0) {
		uwsgi_error("zmq_getsockopt()");
		exit(1);
	}

	ues->event_func = uwsgi_imperial_monitor_zeromq_event;

	event_queue_add_fd_read(uwsgi.emperor_queue, ues->fd);
}
Example no. 11
void zergpool_loop(int id) {

	int i;

	int zergpool_queue = event_queue_init();

	void *events = event_queue_alloc(ZERGPOOL_EVENTS);

	struct zergpool_socket *zps = zergpool_sockets;
	while(zps) {
		event_queue_add_fd_read(zergpool_queue, zps->fd);
		zps = zps->next;
	}

	for(;;) {
		int nevents = event_queue_wait_multi(zergpool_queue, -1, events, ZERGPOOL_EVENTS);

		for(i=0;i<nevents;i++) {

			int interesting_fd = event_queue_interesting_fd(events, i);

			zps = zergpool_sockets;
			while(zps) {
				if (zps->fd == interesting_fd) {
					uwsgi_manage_zerg(zps->fd, zps->num_sockets, zps->sockets);
				}
				zps = zps->next;
			}
		}
	}
}
Example no. 12
File: alarm.c Project: JuanS/uwsgi
static void uwsgi_alarm_thread_loop(struct uwsgi_thread *ut) {
	// add uwsgi_alarm_fd;
	struct uwsgi_alarm_fd *uafd = uwsgi.alarm_fds;
	while(uafd) {
		event_queue_add_fd_read(ut->queue, uafd->fd);
		uafd = uafd->next;
	}
	char *buf = uwsgi_malloc(uwsgi.alarm_msg_size + sizeof(long));
	for (;;) {
		int interesting_fd = -1;
		int ret = event_queue_wait(ut->queue, -1, &interesting_fd);
		if (ret > 0) {
			if (interesting_fd == ut->pipe[1]) {
				ssize_t len = read(ut->pipe[1], buf, uwsgi.alarm_msg_size + sizeof(long));
				if (len > (ssize_t)(sizeof(long) + 1)) {
					size_t msg_size = len - sizeof(long);
					char *msg = buf + sizeof(long);
					long ptr = 0;
					memcpy(&ptr, buf, sizeof(long));
					struct uwsgi_alarm_instance *uai = (struct uwsgi_alarm_instance *) ptr;
					if (!uai)
						break;
					uwsgi_alarm_run(uai, msg, msg_size);
				}
			}
			// check for alarm_fd
			else {
				uafd = uwsgi.alarm_fds;
				int fd_read = 0;
				while(uafd) {
					if (interesting_fd == uafd->fd) {
						if (fd_read) goto raise;	
						size_t remains = uafd->buf_len;
						while(remains) {
							ssize_t len = read(uafd->fd, uafd->buf + (uafd->buf_len-remains), remains);
							if (len <= 0) {
								uwsgi_error("[uwsgi-alarm-fd]/read()");
								uwsgi_log("[uwsgi-alarm-fd] i will stop monitoring fd %d\n", uafd->fd);
								event_queue_del_fd(ut->queue, uafd->fd, event_queue_read());
								break;
							}
							remains-=len;
						}
						fd_read = 1;
raise:
						uwsgi_alarm_run(uafd->alarm, uafd->msg, uafd->msg_len);
					}
					uafd = uafd->next;
				}
			}
		}
	}
	free(buf);
}
Example no. 13
void uwsgi_add_sockets_to_queue(int queue) {

	struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
	while (uwsgi_sock) {
		if (uwsgi_sock->fd > -1) {
			event_queue_add_fd_read(queue, uwsgi_sock->fd);
		}
		uwsgi_sock = uwsgi_sock->next;
	}

}
Example no. 14
int uwsgi_cr_hook_instance_read(struct corerouter_session *cs, ssize_t (*hook)(struct corerouter_session *)) {

        struct uwsgi_corerouter *ucr = cs->corerouter;

        // first check the case of event removal
        if (hook == NULL) {
                // nothing changed
                if (!cs->event_hook_instance_read) goto unchanged;
                // if there is a write event defined, let's modify it
                if (cs->event_hook_instance_write) {
#ifdef UWSGI_DEBUG
			uwsgi_log("event_queue_fd_readwrite_to_write() for %d\n", cs->instance_fd);
#endif
                        if (event_queue_fd_readwrite_to_write(ucr->queue, cs->instance_fd)) return -1;
                }
                // simply remove the read event
                else {
#ifdef UWSGI_DEBUG
			uwsgi_log("event_queue_del_fd() for %d\n", cs->instance_fd);
#endif
                        if (event_queue_del_fd(ucr->queue, cs->instance_fd, event_queue_read())) return -1;
                }
        }
        else {
                // set the hook
                // if write is not defined, simply add a single monitor
                if (cs->event_hook_instance_write == NULL) {
                        if (!cs->event_hook_instance_read) {
#ifdef UWSGI_DEBUG
				uwsgi_log("event_queue_add_fd_read() for %d\n", cs->instance_fd);
#endif
                                if (event_queue_add_fd_read(ucr->queue, cs->instance_fd)) return -1;
                        }
                }
                else {
                        if (!cs->event_hook_instance_read) {
#ifdef UWSGI_DEBUG
				uwsgi_log("event_queue_fd_write_to_readwrite() for %d\n", cs->instance_fd);
#endif
                                if (event_queue_fd_write_to_readwrite(ucr->queue, cs->instance_fd)) return -1;
                        }
                }
        }

unchanged:
#ifdef UWSGI_DEBUG
	uwsgi_log("event_hook_instance_read set to %p for %d\n", hook, cs->instance_fd);
#endif
        cs->event_hook_instance_read = hook;
        return 0;
}
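The hook setter above only touches the event queue when the read interest actually changes; whether it adds, removes, or converts the monitor depends on whether a write-side hook is currently installed. The comment below is only a reading aid (it is not part of the uWSGI sources) summarizing the four transitions in terms of the queue primitives that appear in this collection; the write-side setter is symmetric.

/* Reading aid (hypothetical, not in the uWSGI sources): transitions performed
   by uwsgi_cr_hook_instance_read() on cs->instance_fd.

     read hook        write hook   queue operation
     ---------------  -----------  --------------------------------------------
     set  -> NULL     set          event_queue_fd_readwrite_to_write()
     set  -> NULL     NULL         event_queue_del_fd(..., event_queue_read())
     NULL -> set      NULL         event_queue_add_fd_read()
     NULL -> set      set          event_queue_fd_write_to_readwrite()

   If the read hook does not actually change state (NULL -> NULL or
   set -> set), the queue is left untouched. */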
Example no. 15
static VALUE uwsgi_rb_thread_core(void *arg) {
	long core_id = (long) arg;
	struct wsgi_request *wsgi_req = &uwsgi.workers[uwsgi.mywid].cores[core_id].req;

        uwsgi_setup_thread_req(core_id, wsgi_req);

	struct uwsgi_rbthread *urbt = uwsgi_malloc(sizeof(struct uwsgi_rbthread));
        // initialize the main event queue to monitor sockets
        urbt->queue = event_queue_init();
	urbt->wsgi_req = wsgi_req;

        uwsgi_add_sockets_to_queue(urbt->queue, (int)core_id);

        if (uwsgi.signal_socket > -1) {
                event_queue_add_fd_read(urbt->queue, uwsgi.signal_socket);
                event_queue_add_fd_read(urbt->queue, uwsgi.my_signal_socket);
        }

        // ok we are ready, let's start managing requests and signals
        while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

                wsgi_req_setup(wsgi_req, (int)core_id, NULL);

		rb_thread_call_without_gvl(uwsgi_rb_thread_accept, urbt, NULL, NULL);
		// accept failed ?
		if (urbt->ret) continue;

                if (wsgi_req_recv(urbt->queue, wsgi_req)) {
                        uwsgi_destroy_request(wsgi_req);
                        continue;
                }

                uwsgi_close_request(wsgi_req);
        }

	return Qnil;
}
Example no. 16
struct corerouter_session *corerouter_alloc_session(struct uwsgi_corerouter *ucr, struct uwsgi_gateway_socket *ugs, int new_connection, struct sockaddr *cr_addr, socklen_t cr_addr_len) {

	ucr->cr_table[new_connection] = uwsgi_calloc(ucr->session_size);
        ucr->cr_table[new_connection]->fd = new_connection;
        ucr->cr_table[new_connection]->instance_fd = -1;
        ucr->cr_table[new_connection]->status = COREROUTER_STATUS_RECV_HDR;

        ucr->cr_table[new_connection]->timeout = cr_add_timeout(ucr, ucr->cr_table[new_connection]);
	ucr->cr_table[new_connection]->ugs = ugs;

	ucr->alloc_session(ucr, ugs, ucr->cr_table[new_connection], cr_addr, cr_addr_len);
	event_queue_add_fd_read(ucr->queue, new_connection);

	return ucr->cr_table[new_connection];
}
Example no. 17
void *uwsgi_corerouter_setup_event_queue(struct uwsgi_corerouter *ucr, int id) {

	ucr->queue = event_queue_init();

	struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets;
	while (ugs) {
		if (!strcmp(ucr->name, ugs->owner)) {
			if (!ucr->cheap || ugs->subscription) {
				event_queue_add_fd_read(ucr->queue, ugs->fd);
			}
			ugs->gateway = &ushared->gateways[id];
		}
		ugs = ugs->next;
	}

	return event_queue_alloc(ucr->nevents);
}
Example no. 18
void uwsgi_corerouter_manage_internal_subscription(struct uwsgi_corerouter *ucr, int fd) {


	struct uwsgi_subscribe_req usr;
	char bbuf[4096];

	ssize_t len = recv(fd, bbuf, 4096, 0);
	if (len > 0) {
		memset(&usr, 0, sizeof(struct uwsgi_subscribe_req));
		uwsgi_hooked_parse(bbuf + 4, len - 4, corerouter_manage_subscription, &usr);

		// subscribe request ?
		if (bbuf[3] == 0) {
			if (uwsgi_add_subscribe_node(ucr->subscriptions, &usr) && ucr->i_am_cheap) {
				struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets;
				while (ugs) {
					if (!strcmp(ugs->owner, ucr->name) && !ugs->subscription) {
						event_queue_add_fd_read(ucr->queue, ugs->fd);
					}
					ugs = ugs->next;
				}
				ucr->i_am_cheap = 0;
				uwsgi_log("[%s pid %d] leaving cheap mode...\n", ucr->name, (int) uwsgi.mypid);
			}
		}
		//unsubscribe 
		else {
			struct uwsgi_subscribe_node *node = uwsgi_get_subscribe_node_by_name(ucr->subscriptions, usr.key, usr.keylen, usr.address, usr.address_len);
			if (node && node->len) {
				if (node->death_mark == 0)
					uwsgi_log("[%s pid %d] %.*s => marking %.*s as failed\n", ucr->name, (int) uwsgi.mypid, (int) usr.keylen, usr.key, (int) usr.address_len, usr.address);
				node->failcnt++;
				node->death_mark = 1;
				// check if i can remove the node
				if (node->reference == 0) {
					uwsgi_remove_subscribe_node(ucr->subscriptions, node);
				}
				if (ucr->cheap && !ucr->i_am_cheap && uwsgi_no_subscriptions(ucr->subscriptions)) {
					uwsgi_gateway_go_cheap(ucr->name, ucr->queue, &ucr->i_am_cheap);
				}
			}
		}
	}

}
Example no. 19
int event_queue_interesting_fd(void *events, int id) {
    port_event_t *pe = (port_event_t *) events;
    if (pe[id].portev_source == PORT_SOURCE_FILE || pe[id].portev_source == PORT_SOURCE_TIMER) {
        return (long) pe[id].portev_user;
    }

    int fd = (int) pe[id].portev_object;
    int eq = (int) pe[id].portev_user;

    if (pe[id].portev_events == POLLOUT) {
        event_queue_add_fd_write(eq, fd);
    }
    if (pe[id].portev_events == POLLIN) {
        event_queue_add_fd_read(eq, fd);
    }

    return fd;
}
Example no. 20
File: snmp.c Project: AGoodId/uwsgi
int uwsgi_setup_snmp(void) {
	int snmp_fd = -1;
	int i;
	if (uwsgi.snmp) {
		if (uwsgi.snmp_community) {
			if (strlen(uwsgi.snmp_community) > 72) {
				uwsgi_log("*** warning the supplied SNMP community string will be truncated to 72 chars ***\n");
				memcpy(uwsgi.shared->snmp_community, uwsgi.snmp_community, 72);
			}
			else {
				memcpy(uwsgi.shared->snmp_community, uwsgi.snmp_community, strlen(uwsgi.snmp_community) + 1);
			}
		}

		uwsgi.shared->snmp_gvalue[0].type = SNMP_COUNTER64;
		uwsgi.shared->snmp_gvalue[0].val = &uwsgi.workers[0].requests;

		for (i = 0; i < uwsgi.numproc; i++) {
			uwsgi.shared->snmp_gvalue[30 + i].type = SNMP_COUNTER64;
			uwsgi.shared->snmp_gvalue[30 + i].val = &uwsgi.workers[i + 1].requests;
		}

		if (uwsgi.snmp_addr) {
			snmp_fd = bind_to_udp(uwsgi.snmp_addr, 0, 0);
			if (snmp_fd < 0) {
				uwsgi_log("unable to bind to udp socket. SNMP service will be disabled.\n");
			}
			else {
				uwsgi_log("SNMP server enabled on %s\n", uwsgi.snmp_addr);
				event_queue_add_fd_read(uwsgi.master_queue, snmp_fd);
			}
		}
		else {
			uwsgi_log("SNMP agent enabled.\n");
		}

	}

	return snmp_fd;
}
Example no. 21
void uwsgi_imperial_monitor_amqp_init(struct uwsgi_emperor_scanner *ues) {

	char *vhost = "/";
	char *username = "******";
	char *password = "******";

        ues->fd = uwsgi_connect(ues->arg+7, -1, 0);
        if (ues->fd < 0) {
                uwsgi_log("unable to connect to AMQP server\n");
                return;
        }

        if (uwsgi_amqp_consume_queue(ues->fd, vhost, username, password, "", "uwsgi.emperor", "fanout") < 0) {
                close(ues->fd);
                ues->fd = -1;
                uwsgi_log("unable to subscribe to AMQP queue\n");
                return;
        }

	ues->event_func = uwsgi_imperial_monitor_amqp_event;

        event_queue_add_fd_read(uwsgi.emperor_queue, ues->fd);
}
Example no. 22
void emperor_loop() {

	// monitor a directory

	struct uwsgi_instance ui_base;
	struct uwsgi_instance *ui_current;

	pid_t diedpid;
	int waitpid_status;
	int has_children = 0;
	int i_am_alone = 0;
	int i;

	void *events;
	int nevents;
	int interesting_fd;
	char notification_message[64];
	struct rlimit rl;

	uwsgi.emperor_stats_fd = -1;

	if (uwsgi.emperor_pidfile) {
		uwsgi_write_pidfile(uwsgi.emperor_pidfile);
	}

	signal(SIGPIPE, SIG_IGN);
	uwsgi_unix_signal(SIGINT, royal_death);
	uwsgi_unix_signal(SIGTERM, royal_death);
	uwsgi_unix_signal(SIGQUIT, royal_death);
	uwsgi_unix_signal(SIGUSR1, emperor_stats);
	uwsgi_unix_signal(SIGHUP, emperor_massive_reload);

	memset(&ui_base, 0, sizeof(struct uwsgi_instance));

	if (getrlimit(RLIMIT_NOFILE, &rl)) {
		uwsgi_error("getrlimit()");
		exit(1);
	}

	uwsgi.max_fd = rl.rlim_cur;

	emperor_throttle_level = uwsgi.emperor_throttle;

	// the queue must be initialized before adding scanners
	uwsgi.emperor_queue = event_queue_init();

	emperor_build_scanners();

	events = event_queue_alloc(64);

	if (uwsgi.has_emperor) {
		uwsgi_log("*** starting uWSGI sub-Emperor ***\n");
	}
	else {
		uwsgi_log("*** starting uWSGI Emperor ***\n");
	}

	if (uwsgi.emperor_stats) {
		char *tcp_port = strchr(uwsgi.emperor_stats, ':');
		if (tcp_port) {
			// disable deferred accept for this socket
			int current_defer_accept = uwsgi.no_defer_accept;
			uwsgi.no_defer_accept = 1;
			uwsgi.emperor_stats_fd = bind_to_tcp(uwsgi.emperor_stats, uwsgi.listen_queue, tcp_port);
			uwsgi.no_defer_accept = current_defer_accept;
		}
		else {
			uwsgi.emperor_stats_fd = bind_to_unix(uwsgi.emperor_stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket);
		}

		event_queue_add_fd_read(uwsgi.emperor_queue, uwsgi.emperor_stats_fd);
		uwsgi_log("*** Emperor stats server enabled on %s fd: %d ***\n", uwsgi.emperor_stats, uwsgi.emperor_stats_fd);
	}

	ui = &ui_base;

	int freq = 0;

	for (;;) {


		if (!i_am_alone) {
			diedpid = waitpid(uwsgi.emperor_pid, &waitpid_status, WNOHANG);
			if (diedpid < 0 || diedpid > 0) {
				i_am_alone = 1;
			}
		}

		nevents = event_queue_wait_multi(uwsgi.emperor_queue, freq, events, 64);
		freq = uwsgi.emperor_freq;

		for (i = 0; i < nevents; i++) {
			interesting_fd = event_queue_interesting_fd(events, i);

			if (uwsgi.emperor_stats && uwsgi.emperor_stats_fd > -1 && interesting_fd == uwsgi.emperor_stats_fd) {
				emperor_send_stats(uwsgi.emperor_stats_fd);
				continue;
			}

			// check if a monitor is mapped to that file descriptor
			if (uwsgi_emperor_scanner_event(interesting_fd))
				continue;


			ui_current = emperor_get_by_fd(interesting_fd);
			if (ui_current) {
				char byte;
				ssize_t rlen = read(interesting_fd, &byte, 1);
				if (rlen <= 0) {
					// SAFE
					if (!ui_current->config_len) {
						emperor_del(ui_current);
					}
				}
				else {
					if (byte == 17) {
						ui_current->loyal = 1;
						ui_current->last_loyal = uwsgi_now();
						uwsgi_log("[emperor] vassal %s is now loyal\n", ui_current->name);
						// remove it from the blacklist
						uwsgi_emperor_blacklist_remove(ui_current->name);
						// TODO post-start hook
					}
					// heartbeat can be used for spotting blocked instances
					else if (byte == 26) {
						ui_current->last_heartbeat = uwsgi_now();
					}
					else if (byte == 22) {
						emperor_stop(ui_current);
					}
					else if (byte == 30 && uwsgi.emperor_broodlord > 0 && uwsgi.emperor_broodlord_count < uwsgi.emperor_broodlord) {
						uwsgi_log("[emperor] going in broodlord mode: launching zergs for %s\n", ui_current->name);
						char *zerg_name = uwsgi_concat3(ui_current->name, ":", "zerg");
						emperor_add(NULL, zerg_name, uwsgi_now(), NULL, 0, ui_current->uid, ui_current->gid);
						free(zerg_name);
					}
				}
			}
			else {
				uwsgi_log("[emperor] unrecognized vassal event on fd %d\n", interesting_fd);
				close(interesting_fd);
			}

		}

		uwsgi_emperor_run_scanners();

		// check for heartbeat (if required)
		ui_current = ui->ui_next;
		while (ui_current) {
			if (ui_current->last_heartbeat > 0) {
				if ((ui_current->last_heartbeat + uwsgi.emperor_heartbeat) < uwsgi_now()) {
					uwsgi_log("[emperor] vassal %s sent no heartbeat in last %d seconds, respawning it...\n", ui_current->name, uwsgi.emperor_heartbeat);
					// set last_heartbeat to 0 avoiding races
					ui_current->last_heartbeat = 0;
					emperor_respawn(ui_current, uwsgi_now());
				}
			}
			ui_current = ui_current->ui_next;
		}

		// check for removed instances
		ui_current = ui;
		has_children = 0;
		while (ui_current->ui_next) {
			ui_current = ui_current->ui_next;
			has_children++;
		}

		if (uwsgi.notify) {
			if (snprintf(notification_message, 64, "The Emperor is governing %d vassals", has_children) >= 34) {
				uwsgi_notify(notification_message);
			}
		}

		if (has_children) {
			diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
		}
		else {
			// vacuum
			waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
			diedpid = 0;
		}
		if (diedpid < 0) {
			// it looks like this happens when the OOM killer is triggered by a Linux cgroup, but it could be a uWSGI bug :P
			// by the way, fallback to a clean situation...
			if (errno == ECHILD) {
				uwsgi_log("--- MUTINY DETECTED !!! IMPALING VASSALS... ---\n");
				ui_current = ui->ui_next;
				while (ui_current) {
					struct uwsgi_instance *rebel_vassal = ui_current;
					ui_current = ui_current->ui_next;
					emperor_del(rebel_vassal);
				}
			}
			else {
				uwsgi_error("waitpid()");
			}
		}
		ui_current = ui;
		while (ui_current->ui_next) {
			ui_current = ui_current->ui_next;
			if (ui_current->status == 1) {
				if (ui_current->config)
					free(ui_current->config);
				// SAFE
				emperor_del(ui_current);
				break;
			}
			else if (ui_current->pid == diedpid) {
				if (ui_current->status == 0) {
					// respawn an accidentally dead instance if its exit code is not UWSGI_EXILE_CODE
					if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXILE_CODE) {
						// SAFE
						emperor_del(ui_current);
					}
					else {
						// UNSAFE
						emperor_add(ui_current->scanner, ui_current->name, ui_current->last_mod, ui_current->config, ui_current->config_len, ui_current->uid, ui_current->gid);
						emperor_del(ui_current);
					}
					break;
				}
				else if (ui_current->status == 1) {
					// remove 'marked for dead' instance
					if (ui_current->config)
						free(ui_current->config);
					// SAFE
					emperor_del(ui_current);
					break;
				}
			}
		}


	}

}
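The loop above demultiplexes single-byte messages that each vassal writes on its command pipe. The enum below is purely illustrative (the uWSGI sources compare against the raw byte values, exactly as shown above) and only names the four bytes handled in this loop.

/* Hypothetical names for the vassal command bytes handled in emperor_loop();
   the actual code uses the raw values. */
enum emperor_vassal_byte {
	VASSAL_BYTE_LOYAL     = 17,	// vassal finished spawning and declares loyalty
	VASSAL_BYTE_STOP      = 22,	// vassal asks the Emperor to stop it
	VASSAL_BYTE_HEARTBEAT = 26,	// periodic heartbeat, used to spot blocked instances
	VASSAL_BYTE_BROODLORD = 30,	// request zerg instances (broodlord mode)
};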
Example no. 23
File: loop.c Project: 20tab/uwsgi
void *simple_loop(void *arg1) {

	long core_id = (long) arg1;

	struct wsgi_request *wsgi_req = uwsgi.wsgi_requests[core_id];

#ifdef UWSGI_THREADING
	int i;
	sigset_t smask;

	if (uwsgi.threads > 1) {

		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &i);
		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &i);
		pthread_setspecific(uwsgi.tur_key, (void *) wsgi_req);

		if (core_id > 0) {
			// block all signals on new threads
			sigfillset(&smask);
#ifdef UWSGI_DEBUG
			sigdelset(&smask, SIGSEGV);
#endif
			pthread_sigmask(SIG_BLOCK, &smask, NULL);

			// run per-thread socket hook
			struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
			while(uwsgi_sock) {
				if (uwsgi_sock->proto_thread_fixup) {
					uwsgi_sock->proto_thread_fixup(uwsgi_sock, core_id);
				}
				uwsgi_sock = uwsgi_sock->next;
			}

			for (i = 0; i < 256; i++) {
				if (uwsgi.p[i]->init_thread) {
					uwsgi.p[i]->init_thread(core_id);
				}
			}
		}
	}
#endif

	// initialize the main event queue to monitor sockets
	int main_queue = event_queue_init();

	uwsgi_add_sockets_to_queue(main_queue, core_id);

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(main_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(main_queue, uwsgi.my_signal_socket);
	}


	// ok we are ready, let's start managing requests and signals
	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		wsgi_req_setup(wsgi_req, core_id, NULL);

		if (wsgi_req_accept(main_queue, wsgi_req)) {
			continue;
		}

		if (wsgi_req_recv(wsgi_req)) {
			uwsgi_destroy_request(wsgi_req);
			continue;
		}

		uwsgi_close_request(wsgi_req);
	}

	// end of the loop
	if (uwsgi.workers[uwsgi.mywid].destroy && uwsgi.workers[0].pid > 0) {
#ifdef __APPLE__
		kill(uwsgi.workers[0].pid, SIGTERM);
#else
		if (uwsgi.propagate_touch) {
			kill(uwsgi.workers[0].pid, SIGHUP);
		}
		else {
			gracefully_kill(0);
		}
#endif
	}
	return NULL;
}
Example no. 24
void uwsgi_corerouter_loop(int id, void *data) {

	int i;

	struct uwsgi_corerouter *ucr = (struct uwsgi_corerouter *) data;

	ucr->cr_stats_server = -1;

	ucr->cr_table = uwsgi_malloc(sizeof(struct corerouter_session *) * uwsgi.max_fd);

	for (i = 0; i < (int) uwsgi.max_fd; i++) {
		ucr->cr_table[i] = NULL;
	}

	ucr->i_am_cheap = ucr->cheap;

	void *events = uwsgi_corerouter_setup_event_queue(ucr, id);

	if (ucr->has_subscription_sockets)
		event_queue_add_fd_read(ucr->queue, ushared->gateways[id].internal_subscription_pipe[1]);


	if (!ucr->socket_timeout)
		ucr->socket_timeout = 60;

	if (!ucr->static_node_gracetime)
		ucr->static_node_gracetime = 30;

	int i_am_the_first = 1;
	for(i=0;i<id;i++) {
		if (!strcmp(ushared->gateways[i].name, ucr->name)) {
			i_am_the_first = 0;
			break;
		}
	}

	if (ucr->stats_server && i_am_the_first) {
		char *tcp_port = strchr(ucr->stats_server, ':');
		if (tcp_port) {
			// disable deferred accept for this socket
			int current_defer_accept = uwsgi.no_defer_accept;
			uwsgi.no_defer_accept = 1;
			ucr->cr_stats_server = bind_to_tcp(ucr->stats_server, uwsgi.listen_queue, tcp_port);
			uwsgi.no_defer_accept = current_defer_accept;
		}
		else {
			ucr->cr_stats_server = bind_to_unix(ucr->stats_server, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket);
		}

		event_queue_add_fd_read(ucr->queue, ucr->cr_stats_server);
		uwsgi_log("*** %s stats server enabled on %s fd: %d ***\n", ucr->short_name, ucr->stats_server, ucr->cr_stats_server);
	}


	if (ucr->use_socket) {
		ucr->to_socket = uwsgi_get_socket_by_num(ucr->socket_num);
		if (ucr->to_socket) {
			// fix socket name_len
			if (ucr->to_socket->name_len == 0 && ucr->to_socket->name) {
				ucr->to_socket->name_len = strlen(ucr->to_socket->name);
			}
		}
	}

	if (!ucr->pb_base_dir) {
		ucr->pb_base_dir = getenv("TMPDIR");
		if (!ucr->pb_base_dir)
			ucr->pb_base_dir = "/tmp";
	}

	int nevents;

	time_t delta;

	struct uwsgi_rb_timer *min_timeout;

	int new_connection;


	if (ucr->pattern) {
		init_magic_table(ucr->magic_table);
	}

	union uwsgi_sockaddr cr_addr;
	socklen_t cr_addr_len = sizeof(struct sockaddr_un);

	ucr->mapper = uwsgi_cr_map_use_void;

	if (ucr->use_cache) {
		ucr->cache = uwsgi_cache_by_name(ucr->use_cache);
		if (!ucr->cache) {
			uwsgi_log("!!! unable to find cache \"%s\" !!!\n", ucr->use_cache);
			exit(1);
		}
		ucr->mapper = uwsgi_cr_map_use_cache;
	}
	else if (ucr->pattern) {
		ucr->mapper = uwsgi_cr_map_use_pattern;
	}
	else if (ucr->has_subscription_sockets) {
		ucr->mapper = uwsgi_cr_map_use_subscription;
		if (uwsgi.subscription_dotsplit) {
			ucr->mapper = uwsgi_cr_map_use_subscription_dotsplit;
		}
	}
	else if (ucr->base) {
		ucr->mapper = uwsgi_cr_map_use_base;
	}
	else if (ucr->code_string_code && ucr->code_string_function) {
		ucr->mapper = uwsgi_cr_map_use_cs;
	}
	else if (ucr->to_socket) {
		ucr->mapper = uwsgi_cr_map_use_to;
	}
	else if (ucr->static_nodes) {
		ucr->mapper = uwsgi_cr_map_use_static_nodes;
	}

	ucr->timeouts = uwsgi_init_rb_timer();

	for (;;) {

		time_t now = uwsgi_now();

		// set timeouts and harakiri
		min_timeout = uwsgi_min_rb_timer(ucr->timeouts, NULL);
		if (min_timeout == NULL) {
			delta = -1;
		}
		else {
			delta = min_timeout->value - now;
			if (delta <= 0) {
				corerouter_expire_timeouts(ucr, now);
				delta = 0;
			}
		}

		if (uwsgi.master_process && ucr->harakiri > 0) {
			ushared->gateways_harakiri[id] = 0;
		}

		// wait for events
		nevents = event_queue_wait_multi(ucr->queue, delta, events, ucr->nevents);

		now = uwsgi_now();

		if (uwsgi.master_process && ucr->harakiri > 0) {
			ushared->gateways_harakiri[id] = now + ucr->harakiri;
		}

		if (nevents == 0) {
			corerouter_expire_timeouts(ucr, now);
		}

		for (i = 0; i < nevents; i++) {

			// get the interesting fd
			ucr->interesting_fd = event_queue_interesting_fd(events, i);
			// something bad happened
			if (ucr->interesting_fd < 0) continue;

			// check if the ucr->interesting_fd matches a gateway socket
			struct uwsgi_gateway_socket *ugs = uwsgi.gateway_sockets;
			int taken = 0;
			while (ugs) {
				if (ugs->gateway == &ushared->gateways[id] && ucr->interesting_fd == ugs->fd) {
					if (!ugs->subscription) {
#if defined(__linux__) && defined(SOCK_NONBLOCK) && !defined(OBSOLETE_LINUX_KERNEL)
						new_connection = accept4(ucr->interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len, SOCK_NONBLOCK);
						if (new_connection < 0) {
							taken = 1;
							break;
						}
#else
						new_connection = accept(ucr->interesting_fd, (struct sockaddr *) &cr_addr, &cr_addr_len);
						if (new_connection < 0) {
							taken = 1;
							break;
						}
						// set socket in non-blocking mode, on non-linux platforms, clients get the server mode
#ifdef __linux__
                                                uwsgi_socket_nb(new_connection);
#endif
#endif
						struct corerouter_session *cr = corerouter_alloc_session(ucr, ugs, new_connection, (struct sockaddr *) &cr_addr, cr_addr_len);
						//something wrong in the allocation
						if (!cr) break;
					}
					else if (ugs->subscription) {
						uwsgi_corerouter_manage_subscription(ucr, id, ugs);
					}

					taken = 1;
					break;
				}


				ugs = ugs->next;
			}

			if (taken) {
				continue;
			}

			// manage internal subscription
			if (ucr->interesting_fd == ushared->gateways[id].internal_subscription_pipe[1]) {
				uwsgi_corerouter_manage_internal_subscription(ucr, ucr->interesting_fd);
			}
			// manage a stats request
			else if (ucr->interesting_fd == ucr->cr_stats_server) {
				corerouter_send_stats(ucr);
			}
			else {
				struct corerouter_peer *peer = ucr->cr_table[ucr->interesting_fd];

				// something is going wrong...
				if (peer == NULL)
					continue;

				// on error, destroy the session
				if (event_queue_interesting_fd_has_error(events, i)) {
					peer->failed = 1;
					corerouter_close_peer(ucr, peer);
					continue;
				}

				// set timeout (in main_peer too)
				peer->timeout = corerouter_reset_timeout_fast(ucr, peer, now);
				peer->session->main_peer->timeout = corerouter_reset_timeout_fast(ucr, peer->session->main_peer, now);

				ssize_t (*hook)(struct corerouter_peer *) = NULL;

				// call event hook
				if (event_queue_interesting_fd_is_read(events, i)) {
					hook = peer->hook_read;	
				}
				else if (event_queue_interesting_fd_is_write(events, i)) {
					hook = peer->hook_write;	
				}

				if (!hook) continue;
				// reset errno (as we use it for internal signalling)
				errno = 0;
				ssize_t ret = hook(peer);
				// connection closed
				if (ret == 0) {
					corerouter_close_peer(ucr, peer);
					continue;
				}
				else if (ret < 0) {
					if (errno == EINPROGRESS) continue;
					// remove keepalive on error
					peer->session->can_keepalive = 0;
					corerouter_close_peer(ucr, peer);
					continue;
				}
				
			}
		}
	}

}
Example no. 25
void emperor_add(struct uwsgi_emperor_scanner *ues, char *name, time_t born, char *config, uint32_t config_size, uid_t uid, gid_t gid) {

	struct uwsgi_instance *c_ui = ui;
	struct uwsgi_instance *n_ui = NULL;
	pid_t pid;
	char **vassal_argv;
	char *uef;
	char **uenvs;
	int counter;
	char *colon = NULL;
	int i;
	struct timeval tv;

#ifdef UWSGI_DEBUG
	uwsgi_log("\n\nVASSAL %s %d %.*s %d %d\n", name, born, config_size, config, uid, gid);
#endif

	if (strlen(name) > (0xff - 1)) {
		uwsgi_log("[emperor] invalid vassal name\n", name);
		return;
	}


	gettimeofday(&tv, NULL);
	int now = tv.tv_sec;
	uint64_t micros = (tv.tv_sec * 1000 * 1000) + tv.tv_usec;

	// blacklist check
	struct uwsgi_emperor_blacklist_item *uebi = uwsgi_emperor_blacklist_check(name);
	if (uebi) {
		uint64_t i_micros = (uebi->last_attempt.tv_sec * 1000 * 1000) + uebi->last_attempt.tv_usec + uebi->throttle_level;
		if (i_micros > micros) {
			return;
		}
	}

	if (now - emperor_throttle < 1) {
		emperor_throttle_level = emperor_throttle_level * 2;
	}
	else {
		if (emperor_throttle_level > uwsgi.emperor_throttle) {
			emperor_throttle_level = emperor_throttle_level / 2;
		}

		if (emperor_throttle_level < uwsgi.emperor_throttle) {
			emperor_throttle_level = uwsgi.emperor_throttle;
		}
	}

	emperor_throttle = now;
#ifdef UWSGI_DEBUG
	uwsgi_log("emperor throttle = %d\n", emperor_throttle_level);
#endif
	usleep(emperor_throttle_level);

	if (uwsgi.emperor_tyrant) {
		if (uid == 0 || gid == 0) {
			uwsgi_log("[emperor-tyrant] invalid permissions for vassal %s\n", name);
			return;
		}
	}

	while (c_ui->ui_next) {
		c_ui = c_ui->ui_next;
	}

	n_ui = uwsgi_malloc(sizeof(struct uwsgi_instance));
	memset(n_ui, 0, sizeof(struct uwsgi_instance));

	if (config) {
		n_ui->use_config = 1;
		n_ui->config = config;
		n_ui->config_len = config_size;
	}

	c_ui->ui_next = n_ui;
#ifdef UWSGI_DEBUG
	uwsgi_log("c_ui->ui_next = %p\n", c_ui->ui_next);
#endif
	n_ui->ui_prev = c_ui;

	if (strchr(name, ':')) {
		n_ui->zerg = 1;
		uwsgi.emperor_broodlord_count++;
	}

	n_ui->scanner = ues;
	memcpy(n_ui->name, name, strlen(name));
	n_ui->born = born;
	n_ui->uid = uid;
	n_ui->gid = gid;
	n_ui->last_mod = born;
	// start without loyalty
	n_ui->last_loyal = 0;
	n_ui->loyal = 0;

	n_ui->first_run = uwsgi_now();
	n_ui->last_run = n_ui->first_run;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, n_ui->pipe)) {
		uwsgi_error("socketpair()");
		goto clear;
	}

	event_queue_add_fd_read(uwsgi.emperor_queue, n_ui->pipe[0]);

	if (n_ui->use_config) {
		if (socketpair(AF_UNIX, SOCK_STREAM, 0, n_ui->pipe_config)) {
			uwsgi_error("socketpair()");
			goto clear;
		}
	}

	// TODO pre-start hook

	// a new uWSGI instance will start 
	pid = fork();
	if (pid < 0) {
		uwsgi_error("fork()")
	}
	else if (pid > 0) {
		n_ui->pid = pid;
		// close the right side of the pipe
		close(n_ui->pipe[1]);
		if (n_ui->use_config) {
			close(n_ui->pipe_config[1]);
		}

		if (n_ui->use_config) {
			if (write(n_ui->pipe_config[0], n_ui->config, n_ui->config_len) <= 0) {
				uwsgi_error("write()");
			}
			close(n_ui->pipe_config[0]);
		}
		return;
	}
	else {

		if (uwsgi.emperor_tyrant) {
			uwsgi_log("[emperor-tyrant] dropping privileges to %d %d for instance %s\n", (int) uid, (int) gid, name);
			if (setgid(gid)) {
				uwsgi_error("setgid()");
				exit(1);
			}
			if (setgroups(0, NULL)) {
				uwsgi_error("setgroups()");
				exit(1);
			}

			if (setuid(uid)) {
				uwsgi_error("setuid()");
				exit(1);
			}

		}

		unsetenv("UWSGI_RELOADS");
		unsetenv("NOTIFY_SOCKET");

		uef = uwsgi_num2str(n_ui->pipe[1]);
		if (setenv("UWSGI_EMPEROR_FD", uef, 1)) {
			uwsgi_error("setenv()");
			exit(1);
		}
		free(uef);

		if (n_ui->use_config) {
			uef = uwsgi_num2str(n_ui->pipe_config[1]);
			if (setenv("UWSGI_EMPEROR_FD_CONFIG", uef, 1)) {
				uwsgi_error("setenv()");
				exit(1);
			}
			free(uef);
		}

		uenvs = environ;
		while (*uenvs) {
			if (!strncmp(*uenvs, "UWSGI_VASSAL_", 13)) {
				char *ne = uwsgi_concat2("UWSGI_", *uenvs + 13);
				char *oe = uwsgi_concat2n(*uenvs, strchr(*uenvs, '=') - *uenvs, "", 0);
				if (unsetenv(oe)) {
					uwsgi_error("unsetenv()");
					break;
				}
				free(oe);
#ifdef UWSGI_DEBUG
				uwsgi_log("putenv %s\n", ne);
#endif

				if (putenv(ne)) {
					uwsgi_error("putenv()");
				}
				// do not free ne as putenv will add it to the environ
				uenvs = environ;
				continue;
			}
			uenvs++;
		}

		// close the left side of the pipe
		close(n_ui->pipe[0]);

		if (n_ui->use_config) {
			close(n_ui->pipe_config[0]);
		}

		counter = 4;
		struct uwsgi_string_list *uct = uwsgi.vassals_templates;
		while (uct) {
			counter += 2;
			uct = uct->next;
		}

		vassal_argv = uwsgi_malloc(sizeof(char *) * counter);
		// set args
		vassal_argv[0] = uwsgi.binary_path;

		if (uwsgi.emperor_broodlord) {
			colon = strchr(name, ':');
			if (colon) {
				colon[0] = 0;
			}
		}
		// initialize to a default value
		vassal_argv[1] = "--inherit";

		if (!strcmp(name + (strlen(name) - 4), ".xml"))
			vassal_argv[1] = "--xml";
		if (!strcmp(name + (strlen(name) - 4), ".ini"))
			vassal_argv[1] = "--ini";
		if (!strcmp(name + (strlen(name) - 4), ".yml"))
			vassal_argv[1] = "--yaml";
		if (!strcmp(name + (strlen(name) - 5), ".yaml"))
			vassal_argv[1] = "--yaml";
		if (!strcmp(name + (strlen(name) - 3), ".js"))
			vassal_argv[1] = "--json";
		if (!strcmp(name + (strlen(name) - 5), ".json"))
			vassal_argv[1] = "--json";

		if (colon) {
			colon[0] = ':';
		}


		vassal_argv[2] = name;
		if (uwsgi.emperor_magic_exec) {
			if (!access(name, R_OK | X_OK)) {
				vassal_argv[2] = uwsgi_concat2("exec://", name);
			}

		}

		if (n_ui->use_config) {
			vassal_argv[2] = uwsgi_concat2("emperor://", name);
		}

		counter = 3;
		uct = uwsgi.vassals_templates;
		while (uct) {
			vassal_argv[counter] = "--inherit";
			vassal_argv[counter + 1] = uct->value;
			counter += 2;
			uct = uct->next;
		}
		vassal_argv[counter] = NULL;

		// disable stdin
		int stdin_fd = open("/dev/null", O_RDONLY);
		if (stdin_fd < 0) {
			uwsgi_error_open("/dev/null");
			exit(1);
		}
		if (stdin_fd != 0) {
			if (dup2(stdin_fd, 0)) {
				uwsgi_error("dup2()");
				exit(1);
			}
			close(stdin_fd);
		}

		// close all of the unneeded fds
		for (i = 3; i < (int) uwsgi.max_fd; i++) {
			if (n_ui->use_config) {
				if (i == n_ui->pipe_config[1])
					continue;
			}
			if (i != n_ui->pipe[1]) {
				close(i);
			}
		}

		if (uwsgi.vassals_start_hook) {
			uwsgi_log("[emperor] running vassal start-hook: %s %s\n", uwsgi.vassals_start_hook, n_ui->name);
			if (uwsgi.emperor_absolute_dir) {
				if (setenv("UWSGI_VASSALS_DIR", uwsgi.emperor_absolute_dir, 1)) {
					uwsgi_error("setenv()");
				}
			}
			int start_hook_ret = uwsgi_run_command_and_wait(uwsgi.vassals_start_hook, n_ui->name);
			uwsgi_log("[emperor] %s start-hook returned %d\n", n_ui->name, start_hook_ret);
		}

		// start !!!
		if (execvp(vassal_argv[0], vassal_argv)) {
			uwsgi_error("execvp()");
		}
		uwsgi_log("[emperor] is the uwsgi binary in your system PATH ?\n");
		// never here
		exit(UWSGI_EXILE_CODE);
	}

clear:

	free(n_ui);
	c_ui->ui_next = NULL;

}
Example no. 26
/*
the offload task starts soon after the call to connect()

	status:
		0 -> waiting for connection on fd
		1 -> sending request to fd (write event)
		2 -> start waiting for read on s and fd
		3 -> write to s
		4 -> write to fd
*/
static int u_offload_transfer_do(struct uwsgi_thread *ut, struct uwsgi_offload_request *uor, int fd) {
	
	ssize_t rlen;

	// setup
	if (fd == -1) {
		event_queue_add_fd_write(ut->queue, uor->fd);
		return 0;
	}

	switch(uor->status) {
		// waiting for connection
		case 0:
			if (fd == uor->fd) {
				uor->status = 1;
				// ok try to send the request right now...
				return u_offload_transfer_do(ut, uor, fd);
			}
			return -1;
		// write event (or just connected)
		case 1:
			if (fd == uor->fd) {
				rlen = write(uor->fd, uor->ubuf->buf + uor->written, uor->ubuf->pos-uor->written);	
				if (rlen > 0) {
					uor->written += rlen;
					if (uor->written >= (size_t)uor->ubuf->pos) {
						uor->status = 2;
						if (event_queue_add_fd_read(ut->queue, uor->s)) return -1;
						if (event_queue_fd_write_to_read(ut->queue, uor->fd)) return -1;
					}
					return 0;
				}
				else if (rlen < 0) {
					uwsgi_offload_retry
					uwsgi_error("u_offload_transfer_do() -> write()");
				}
			}	
			return -1;
		// read event from s or fd
		case 2:
			if (!uor->buf) {
				uor->buf = uwsgi_malloc(4096);
			}
			if (fd == uor->fd) {
				rlen = read(uor->fd, uor->buf, 4096);
				if (rlen > 0) {
					uor->to_write = rlen;
					uor->pos = 0;
					uwsgi_offload_0r_1w(uor->fd, uor->s)
					uor->status = 3;
					return 0;
				}
				if (rlen < 0) {
					uwsgi_offload_retry
					uwsgi_error("u_offload_transfer_do() -> read()/fd");
				}
			}
			else if (fd == uor->s) {
				rlen = read(uor->s, uor->buf, 4096);
				if (rlen > 0) {
					uor->to_write = rlen;
					uor->pos = 0;
					uwsgi_offload_0r_1w(uor->s, uor->fd)
					uor->status = 4;
					return 0;
				}
				if (rlen < 0) {
					uwsgi_offload_retry
					uwsgi_error("u_offload_transfer_do() -> read()/s");
				}
			}
			return -1;
		// write event on s
		case 3:
			rlen = write(uor->s, uor->buf + uor->pos, uor->to_write);
			if (rlen > 0) {
				uor->to_write -= rlen;
				uor->pos += rlen;
				if (uor->to_write == 0) {
					if (event_queue_fd_write_to_read(ut->queue, uor->s)) return -1;
					if (event_queue_add_fd_read(ut->queue, uor->fd)) return -1;
					uor->status = 2;
				}
				return 0;
			}
			else if (rlen < 0) {
				uwsgi_offload_retry
				uwsgi_error("u_offload_transfer_do() -> write()/s");
			}
			return -1;
		// write event on fd
		case 4:
			rlen = write(uor->fd, uor->buf + uor->pos, uor->to_write);
			if (rlen > 0) {
				uor->to_write -= rlen;
				uor->pos += rlen;
				if (uor->to_write == 0) {
					if (event_queue_fd_write_to_read(ut->queue, uor->fd)) return -1;
					if (event_queue_add_fd_read(ut->queue, uor->s)) return -1;
					uor->status = 2;
				}
				return 0;
			}
			else if (rlen < 0) {
				uwsgi_offload_retry
				uwsgi_error("u_offload_transfer_do() -> write()/fd");
			}
			return -1;
		default:
			break;
	}

	return -1;
}
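The status values driving the switch above are the ones documented in the comment before u_offload_transfer_do(). The enum below is only a reading aid (the uWSGI sources store plain integers in uor->status).

/* Hypothetical names for the uor->status values used by u_offload_transfer_do();
   the actual code uses plain integers. */
enum u_offload_transfer_status {
	OFFLOAD_WAIT_CONNECT = 0,	// waiting for the connection on fd
	OFFLOAD_SEND_REQUEST = 1,	// writing the buffered request to fd
	OFFLOAD_WAIT_READ    = 2,	// waiting for read events on s and fd
	OFFLOAD_WRITE_TO_S   = 3,	// flushing data read from fd to s
	OFFLOAD_WRITE_TO_FD  = 4,	// flushing data read from s to fd
};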
Example no. 27
void fastrouter_loop() {

	int nevents;
	int interesting_fd;
	int new_connection;
	ssize_t len;
	int i;

	time_t delta;
	char bbuf[UMAX16];

	char *tcp_port;

	char *tmp_socket_name;
	int tmp_socket_name_len;

	struct uwsgi_subscribe_req usr;

	char *magic_table[0xff];

	struct uwsgi_rb_timer *min_timeout;

	void *events;
	struct msghdr msg;
	union {
                struct cmsghdr cmsg;
                char control [CMSG_SPACE (sizeof (int))];
        } msg_control;
        struct cmsghdr *cmsg;

	struct sockaddr_un fr_addr;
        socklen_t fr_addr_len = sizeof(struct sockaddr_un);
	
	struct fastrouter_session *fr_session;

	struct fastrouter_session *fr_table[2048];

	struct iovec iov[2];

	int soopt;
        socklen_t solen = sizeof(int);

	int ufr_subserver = -1;

	for(i=0;i<2048;i++) {
		fr_table[i] = NULL;
	}

	ufr.queue = event_queue_init();

	struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets;

	while(ufr_sock) {
		if (ufr_sock->name[0] == '=') {
			int shared_socket = atoi(ufr_sock->name+1);
			if (shared_socket >= 0) {
				ufr_sock->fd = uwsgi_get_shared_socket_fd_by_num(shared_socket);
				if (ufr_sock->fd == -1) {
					uwsgi_log("unable to use shared socket %d\n", shared_socket);
				}
			}
		}
		else {
			tcp_port = strchr(ufr_sock->name, ':');
			if (tcp_port) {
				ufr_sock->fd = bind_to_tcp(ufr_sock->name, uwsgi.listen_queue, tcp_port);
			}
			else {
				ufr_sock->fd = bind_to_unix(ufr_sock->name, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket);
			}
		}

		uwsgi_log("uwsgi fastrouter/proxy bound on %s\n", ufr_sock->name);

		if (!ufr.cheap) {
			event_queue_add_fd_read(ufr.queue, ufr_sock->fd);
		}
		else {
			uwsgi_log("[uwsgi-fastrouter] cheap mode requested. Waiting for subscriptions...\n");
			ufr.i_am_cheap = 1;
		}
		ufr_sock = ufr_sock->next;
	}


	events = event_queue_alloc(ufr.nevents);

	ufr.timeouts = uwsgi_init_rb_timer();
	if (!ufr.socket_timeout) ufr.socket_timeout = 30;


	if (ufr.subscription_server) {
		ufr_subserver = bind_to_udp(ufr.subscription_server, 0, 0);
		event_queue_add_fd_read(ufr.queue, ufr_subserver);
		if (!ufr.subscription_slot) ufr.subscription_slot = 30;
		// check for node status every 10 seconds
		//ufr.subscriptions_check = add_check_timeout(10);
	}

	if (ufr.pattern) {
		init_magic_table(magic_table);
	}


	for (;;) {

		min_timeout = uwsgi_min_rb_timer(ufr.timeouts);
		if (min_timeout == NULL ) {
			delta = -1;
		}
		else {
			delta = min_timeout->key - time(NULL);
			if (delta <= 0) {
				expire_timeouts(fr_table);
				delta = 0;
			}
		}

		nevents = event_queue_wait_multi(ufr.queue, delta, events, ufr.nevents);

		if (nevents == 0) {
			expire_timeouts(fr_table);
		}

		for (i=0;i<nevents;i++) {

			tmp_socket_name = NULL;
			interesting_fd = event_queue_interesting_fd(events, i);


			int taken = 0;
			struct uwsgi_fastrouter_socket *uwsgi_sock = ufr.sockets;
			while(uwsgi_sock) {
				if (interesting_fd == uwsgi_sock->fd) {
					new_connection = accept(interesting_fd, (struct sockaddr *) &fr_addr, &fr_addr_len);
					if (new_connection < 0) {
						continue;
					}

					fr_table[new_connection] = alloc_fr_session();
					fr_table[new_connection]->fd = new_connection;
					fr_table[new_connection]->instance_fd = -1; 
					fr_table[new_connection]->status = FASTROUTER_STATUS_RECV_HDR;
					fr_table[new_connection]->h_pos = 0;
					fr_table[new_connection]->pos = 0;
					fr_table[new_connection]->un = NULL;
					fr_table[new_connection]->instance_failed = 0;
					fr_table[new_connection]->instance_address_len = 0;
					fr_table[new_connection]->hostname_len = 0;
					fr_table[new_connection]->hostname = NULL;
		
					fr_table[new_connection]->timeout = add_timeout(fr_table[new_connection]);

					event_queue_add_fd_read(ufr.queue, new_connection);
					taken = 1;
					break;
				}
				
				uwsgi_sock = uwsgi_sock->next;
			}	

			if (taken) {
				continue;
			}

			if (interesting_fd == ufr_subserver) {
				len = recv(ufr_subserver, bbuf, 4096, 0);
#ifdef UWSGI_EVENT_USE_PORT
				event_queue_add_fd_read(ufr.queue, ufr_subserver);
#endif
				if (len > 0) {
					memset(&usr, 0, sizeof(struct uwsgi_subscribe_req));
					uwsgi_hooked_parse(bbuf+4, len-4, fastrouter_manage_subscription, &usr);
					if (uwsgi_add_subscribe_node(&ufr.subscriptions, &usr, ufr.subscription_regexp) && ufr.i_am_cheap) {
						struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets;
                                                while(ufr_sock) {
                                                	event_queue_add_fd_read(ufr.queue, ufr_sock->fd);
                                                        ufr_sock = ufr_sock->next;
                                                }
						ufr.i_am_cheap = 0;
						uwsgi_log("[uwsgi-fastrouter] leaving cheap mode...\n");	
					}
				}
			}
			else {
				fr_session = fr_table[interesting_fd];

				// something is going wrong...
				if (fr_session == NULL) continue;

				if (event_queue_interesting_fd_has_error(events, i)) {
					close_session(fr_table, fr_session);
					continue;
				}

				fr_session->timeout = reset_timeout(fr_session);

				switch(fr_session->status) {

					case FASTROUTER_STATUS_RECV_HDR:
						len = recv(fr_session->fd, (char *)(&fr_session->uh) + fr_session->h_pos, 4-fr_session->h_pos, 0);
						if (len <= 0) {
							uwsgi_error("recv()");
							close_session(fr_table, fr_session);
							break;
						}
						fr_session->h_pos += len;
						if (fr_session->h_pos == 4) {
#ifdef UWSGI_DEBUG
							uwsgi_log("modifier1: %d pktsize: %d modifier2: %d\n", fr_session->uh.modifier1, fr_session->uh.pktsize, fr_session->uh.modifier2);
#endif
							fr_session->status = FASTROUTER_STATUS_RECV_VARS;
						}
						break;


					case FASTROUTER_STATUS_RECV_VARS:
                                                len = recv(fr_session->fd, fr_session->buffer + fr_session->pos, fr_session->uh.pktsize - fr_session->pos, 0);
                                                if (len <= 0) {
                                                        uwsgi_error("recv()");
							close_session(fr_table, fr_session);
                                                        break;
                                                }
                                                fr_session->pos += len;
                                                if (fr_session->pos == fr_session->uh.pktsize) {
							if (uwsgi_hooked_parse(fr_session->buffer, fr_session->uh.pktsize, fr_get_hostname, (void *) fr_session)) {
								close_session(fr_table, fr_session);
                                                        	break;
							}

							if (fr_session->hostname_len == 0) {
								close_session(fr_table, fr_session);
                                                        	break;
							}

#ifdef UWSGI_DEBUG
							//uwsgi_log("requested domain %.*s\n", fr_session->hostname_len, fr_session->hostname);
#endif
							if (ufr.use_cache) {
								fr_session->instance_address = uwsgi_cache_get(fr_session->hostname, fr_session->hostname_len, &fr_session->instance_address_len);
								char *cs_mod = uwsgi_str_contains(fr_session->instance_address, fr_session->instance_address_len, ',');
                                                                if (cs_mod) {
                                                                	fr_session->modifier1 = uwsgi_str_num(cs_mod+1, (fr_session->instance_address_len - (cs_mod - fr_session->instance_address))-1);
                                                                        fr_session->instance_address_len = (cs_mod - fr_session->instance_address);
                                                                }
							}
							else if (ufr.pattern) {
								magic_table['s'] = uwsgi_concat2n(fr_session->hostname, fr_session->hostname_len, "", 0);	
								tmp_socket_name = magic_sub(ufr.pattern, ufr.pattern_len, &tmp_socket_name_len, magic_table);
								free(magic_table['s']);
								fr_session->instance_address_len = tmp_socket_name_len;
								fr_session->instance_address = tmp_socket_name;
							}
							else if (ufr.subscription_server) {
								fr_session->un = uwsgi_get_subscribe_node(&ufr.subscriptions, fr_session->hostname, fr_session->hostname_len, ufr.subscription_regexp);
								if (fr_session->un && fr_session->un->len) {
									fr_session->instance_address = fr_session->un->name;
									fr_session->instance_address_len = fr_session->un->len;
									fr_session->modifier1 = fr_session->un->modifier1;
								}
							}
							else if (ufr.base) {
								tmp_socket_name = uwsgi_concat2nn(ufr.base, ufr.base_len, fr_session->hostname, fr_session->hostname_len, &tmp_socket_name_len);
								fr_session->instance_address_len = tmp_socket_name_len;
								fr_session->instance_address = tmp_socket_name;
							}
							else if (ufr.code_string_code && ufr.code_string_function) {
								if (uwsgi.p[ufr.code_string_modifier1]->code_string) {
									fr_session->instance_address = uwsgi.p[ufr.code_string_modifier1]->code_string("uwsgi_fastrouter", ufr.code_string_code, ufr.code_string_function, fr_session->hostname, fr_session->hostname_len);
									if (fr_session->instance_address) {
										fr_session->instance_address_len = strlen(fr_session->instance_address);
										char *cs_mod = uwsgi_str_contains(fr_session->instance_address, fr_session->instance_address_len, ',');
										if (cs_mod) {
											fr_session->modifier1 = uwsgi_str_num(cs_mod+1, (fr_session->instance_address_len - (cs_mod - fr_session->instance_address))-1);
											fr_session->instance_address_len = (cs_mod - fr_session->instance_address);
										}
									}
								}
							}

							// no address found
							if (!fr_session->instance_address_len) {
								close_session(fr_table, fr_session);
                                                        	break;
							}


							fr_session->pass_fd = is_unix(fr_session->instance_address, fr_session->instance_address_len);

							fr_session->instance_fd = uwsgi_connectn(fr_session->instance_address, fr_session->instance_address_len, 0, 1);

							if (tmp_socket_name) free(tmp_socket_name);

							if (fr_session->instance_fd < 0) {
								/*
								if (ufr.subscription_server) {
									if (fr_session->un && fr_session->un->len > 0) { 
	                                                        		uwsgi_log("[uwsgi-fastrouter] %.*s => marking %.*s as failed\n", (int) fr_session->hostname_len, fr_session->hostname, (int) fr_session->instance_address_len,fr_session->instance_address);
                                						uwsgi_remove_subscribe_node(&ufr.subscriptions, fr_session->un);	 
										if (ufr.subscriptions == NULL && ufr.cheap && !ufr.i_am_cheap) {
											uwsgi_log("[uwsgi-fastrouter] no more nodes available. Going cheap...\n");
											struct uwsgi_fastrouter_socket *ufr_sock = ufr.sockets;	
											while(ufr_sock) {
												event_queue_del_fd(ufr.queue, ufr_sock->fd, event_queue_read());	
												ufr_sock = ufr_sock->next;
											}
											ufr.i_am_cheap = 1;
										}
                        						}
	                                                        }
								*/
								fr_session->instance_failed = 1;
								close_session(fr_table, fr_session);
                                                        	break;
							}


							fr_session->status = FASTROUTER_STATUS_CONNECTING;
							fr_table[fr_session->instance_fd] = fr_session;
							event_queue_add_fd_write(ufr.queue, fr_session->instance_fd);
                                                }
                                                break;



					case FASTROUTER_STATUS_CONNECTING:
						
						if (interesting_fd == fr_session->instance_fd) {

							if (getsockopt(fr_session->instance_fd, SOL_SOCKET, SO_ERROR, (void *) (&soopt), &solen) < 0) {
                                                		uwsgi_error("getsockopt()");
								fr_session->instance_failed = 1;
								close_session(fr_table, fr_session);
                                                        	break;
                                        		}

							if (soopt) {
								uwsgi_log("unable to connect() to uwsgi instance: %s\n", strerror(soopt));
								fr_session->instance_failed = 1;
								close_session(fr_table, fr_session);
                                                        	break;
							}

							fr_session->uh.modifier1 = fr_session->modifier1;

							iov[0].iov_base = &fr_session->uh;
							iov[0].iov_len = 4;
							iov[1].iov_base = fr_session->buffer;
							iov[1].iov_len = fr_session->uh.pktsize;

							// increment node requests counter
							if (fr_session->un)
								fr_session->un->requests++;

							// fd passing: PERFORMANCE EXTREME BOOST !!!
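							// the client socket itself is handed to the instance via an SCM_RIGHTS ancillary message, so no proxying is needed for unix-socket backends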
							if (fr_session->pass_fd && !uwsgi.no_fd_passing) {
								msg.msg_name    = NULL;
                						msg.msg_namelen = 0;
                						msg.msg_iov     = iov;
                						msg.msg_iovlen  = 2;
                						msg.msg_flags   = 0;
                						msg.msg_control    = &msg_control;
                						msg.msg_controllen = sizeof (msg_control);

                						cmsg = CMSG_FIRSTHDR (&msg);
                						cmsg->cmsg_len   = CMSG_LEN (sizeof (int));
                						cmsg->cmsg_level = SOL_SOCKET;
                						cmsg->cmsg_type  = SCM_RIGHTS;

								memcpy(CMSG_DATA(cmsg), &fr_session->fd, sizeof(int));

                						if (sendmsg(fr_session->instance_fd, &msg, 0) < 0) {
									uwsgi_error("sendmsg()");
								}

								close_session(fr_table, fr_session);	
                                                                break;
							}

							if (writev(fr_session->instance_fd, iov, 2) < 0) {
								uwsgi_error("writev()");
								close_session(fr_table, fr_session);
                                                        	break;
							}

							event_queue_fd_write_to_read(ufr.queue, fr_session->instance_fd);
							fr_session->status = FASTROUTER_STATUS_RESPONSE;
						}

						break;

					case FASTROUTER_STATUS_RESPONSE:
						
						// data from instance
						if (interesting_fd == fr_session->instance_fd) {
							len = recv(fr_session->instance_fd, fr_session->buffer, 0xffff, 0);
							if (len <= 0) {
								if (len < 0) uwsgi_error("recv()");
								close_session(fr_table, fr_session);
                                                        	break;
							}

							len = send(fr_session->fd, fr_session->buffer, len, 0);
							
							if (len <= 0) {
								if (len < 0) uwsgi_error("send()");
								close_session(fr_table, fr_session);
                                                        	break;
							}

							// update transfer statistics
							if (fr_session->un)
								fr_session->un->transferred += len;
						}
						// body from client
						else if (interesting_fd == fr_session->fd) {

							//uwsgi_log("receiving body...\n");
							len = recv(fr_session->fd, fr_session->buffer, 0xffff, 0);
							if (len <= 0) {
								if (len < 0) uwsgi_error("recv()");
								close_session(fr_table, fr_session);
                                                        	break;
							}


							len = send(fr_session->instance_fd, fr_session->buffer, len, 0);
							
							if (len <= 0) {
								if (len < 0) uwsgi_error("send()");
								close_session(fr_table, fr_session);
                                                        	break;
							}
						}

						break;



					// fallback to destroy !!!
					default:
						uwsgi_log("unknown event: closing session\n");
						close_session(fr_table, fr_session);
						break;
					
				}
			}

		}
	}
}
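
A note on the FASTROUTER_STATUS_CONNECTING branch above: when the backend is reachable over a local unix socket, the fastrouter does not proxy bytes at all but passes the client descriptor to the instance with sendmsg() and an SCM_RIGHTS control message, then closes its own session. A minimal standalone sketch of that ancillary-data technique follows; the function name pass_fd() is illustrative and not part of the uWSGI API.

/* sketch: send an open file descriptor over a connected AF_UNIX stream socket.
   The receiver obtains a duplicate of fd_to_send via recvmsg(). */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int pass_fd(int sock, int fd_to_send) {

	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	char dummy = 'x';                        /* at least one byte of real payload is required */
	char control[CMSG_SPACE(sizeof(int))];   /* room for exactly one descriptor */

	memset(&msg, 0, sizeof(msg));
	memset(control, 0, sizeof(control));

	iov.iov_base = &dummy;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

	return (sendmsg(sock, &msg, 0) < 0) ? -1 : 0;
}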
Example n. 28
0
int spool_request(struct uwsgi_spooler *uspool, char *filename, int rn, int core_id, char *buffer, int size, char *priority, time_t at, char *body, size_t body_len) {

    struct timeval tv;
    int fd;
    struct uwsgi_header uh;

    if (!uspool) {
        uspool = uwsgi.spoolers;
    }

    // this lock is for threads, the pid value in filename will avoid multiprocess races
    uwsgi_lock(uspool->lock);

    gettimeofday(&tv, NULL);

    if (priority) {
        if (snprintf(filename, 1024, "%s/%s", uspool->dir, priority) <= 0) {
            uwsgi_unlock(uspool->lock);
            return 0;
        }
        // no need to check for errors...
        (void) mkdir(filename, 0777);

        if (snprintf(filename, 1024, "%s/%s/uwsgi_spoolfile_on_%s_%d_%d_%d_%llu_%llu", uspool->dir, priority, uwsgi.hostname, (int) getpid(), rn, core_id, (unsigned long long) tv.tv_sec, (unsigned long long) tv.tv_usec) <= 0) {
            uwsgi_unlock(uspool->lock);
            return 0;
        }
    }
    else {
        if (snprintf(filename, 1024, "%s/uwsgi_spoolfile_on_%s_%d_%d_%d_%llu_%llu", uspool->dir, uwsgi.hostname, (int) getpid(), rn, core_id, (unsigned long long) tv.tv_sec, (unsigned long long) tv.tv_usec) <= 0) {
            uwsgi_unlock(uspool->lock);
            return 0;
        }
    }

    fd = open(filename, O_CREAT | O_EXCL | O_WRONLY, S_IRUSR | S_IWUSR);
    if (fd < 0) {
        uwsgi_error_open(filename);
        uwsgi_unlock(uspool->lock);
        return 0;
    }

    // now lock the file: it will not be runnable until the lock is removed.
    // a race could occur if the spooler grabs the file before fcntl() is called;
    // in that case the spooler will detect a zeroed file and retry later
    if (uwsgi_fcntl_lock(fd)) {
        close(fd);
        uwsgi_unlock(uspool->lock);
        return 0;
    }

    uh.modifier1 = 17;
    uh.modifier2 = 0;
    uh.pktsize = (uint16_t) size;
#ifdef __BIG_ENDIAN__
    uh.pktsize = uwsgi_swap16(uh.pktsize);
#endif

    if (write(fd, &uh, 4) != 4) {
        goto clear;
    }

    if (write(fd, buffer, size) != size) {
        goto clear;
    }

    if (body && body_len > 0) {
        if ((size_t)write(fd, body, body_len) != body_len) {
            goto clear;
        }
    }

    if (at > 0) {
        struct timeval tv[2];
        tv[0].tv_sec = at;
        tv[0].tv_usec = 0;
        tv[1].tv_sec = at;
        tv[1].tv_usec = 0;
#ifdef __sun__
        if (futimesat(fd, NULL, tv)) {
#else
        if (futimes(fd, tv)) {
#endif
            uwsgi_error("futimes()");
        }
    }

    // here the file will be unlocked too
    close(fd);

    if (!uwsgi.spooler_quiet)
        uwsgi_log("[spooler] written %lu bytes to file %s\n", (unsigned long) (size + body_len + 4), filename);

    // and here waiting threads can continue
    uwsgi_unlock(uspool->lock);

    /*	wake up the spoolers attached to the specified dir ... (HACKY)
    	no need to fear races, as USR1 is harmless on all of the uWSGI processes...
    	it could be a problem if a new process takes the old pid, but modern systems should avoid that
    */

    struct uwsgi_spooler *spoolers = uwsgi.spoolers;
    while(spoolers) {
        if (!strcmp(spoolers->dir, uspool->dir)) {
            if (spoolers->pid > 0 && spoolers->running == 0) {
                (void) kill(spoolers->pid, SIGUSR1);
            }
        }
        spoolers = spoolers->next;
    }

    return 1;


clear:
    uwsgi_unlock(uspool->lock);
    uwsgi_error("write()");
    if (unlink(filename)) {
        uwsgi_error("unlink()");
    }
    // unlock the file too
    close(fd);
    return 0;
}
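
The 4 bytes written before the packed vars above are a standard uwsgi packet header: modifier1 (17 marks a spooler request), a 16-bit pktsize stored little-endian on disk (hence the uwsgi_swap16() on big-endian builds), and modifier2. A hedged sketch of that packing, independent of the uwsgi_header struct, could look like this:

/* sketch: pack the 4-byte uwsgi header as spool_request() writes it to disk
   (layout inferred from the write(fd, &uh, 4) and the __BIG_ENDIAN__ swap above) */
#include <stdint.h>

static void pack_spool_header(unsigned char out[4], uint16_t pktsize) {
    out[0] = 17;                                       /* modifier1: spooler request */
    out[1] = (unsigned char) (pktsize & 0xff);         /* pktsize, low byte first */
    out[2] = (unsigned char) ((pktsize >> 8) & 0xff);  /* pktsize, high byte */
    out[3] = 0;                                        /* modifier2 */
}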



void spooler(struct uwsgi_spooler *uspool) {

    // prevent the process from blindly reading stdin and making a mess
    int nullfd;

    // asked by Marco Beri
#ifdef __HAIKU__
#ifdef UWSGI_DEBUG
    uwsgi_log("lowering spooler priority to %d\n", B_LOW_PRIORITY);
#endif
    set_thread_priority(find_thread(NULL), B_LOW_PRIORITY);
#else
#ifdef UWSGI_DEBUG
    uwsgi_log("lowering spooler priority to %d\n", PRIO_MAX);
#endif
    setpriority(PRIO_PROCESS, getpid(), PRIO_MAX);
#endif

    nullfd = open("/dev/null", O_RDONLY);
    if (nullfd < 0) {
        uwsgi_error_open("/dev/null");
        exit(1);
    }

    if (nullfd != 0) {
        dup2(nullfd, 0);
        close(nullfd);
    }

    int spooler_event_queue = event_queue_init();
    int interesting_fd = -1;

    if (uwsgi.master_process) {
        event_queue_add_fd_read(spooler_event_queue, uwsgi.shared->spooler_signal_pipe[1]);
    }

    // reset the tasks counter
    uspool->tasks = 0;

    for (;;) {


        if (chdir(uspool->dir)) {
            uwsgi_error("chdir()");
            exit(1);
        }

        if (uwsgi.spooler_ordered) {
#ifdef __linux__
            spooler_scandir(uspool, NULL);
#else
            spooler_readdir(uspool, NULL);
#endif
        }
        else {
            spooler_readdir(uspool, NULL);
        }

        int timeout = uwsgi.shared->spooler_frequency;
        if (wakeup > 0) {
            timeout = 0;
        }

        if (event_queue_wait(spooler_event_queue, timeout, &interesting_fd) > 0) {
            if (uwsgi.master_process) {
                if (interesting_fd == uwsgi.shared->spooler_signal_pipe[1]) {
                    uwsgi_receive_signal(interesting_fd, "spooler", (int) getpid());
                }
            }
        }

        // avoid races
        uint64_t tmp_wakeup = wakeup;
        if (tmp_wakeup > 0) {
            tmp_wakeup--;
        }
        wakeup = tmp_wakeup;

        // need to recycle ?
        if (uwsgi.spooler_max_tasks > 0 && uspool->tasks >= (uint64_t)uwsgi.spooler_max_tasks) {
            uwsgi_log("[spooler %s pid: %d] maximum number of tasks reached (%d) recycling ...\n", uspool->dir, (int) uwsgi.mypid, uwsgi.spooler_max_tasks);
            end_me(0);
        }

    }
}
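
spooler_scandir()/spooler_readdir() are not shown in this listing, but the surrounding code constrains what such a scan has to do: pick up files named uwsgi_spoolfile_on_* from uspool->dir, skip entries that are still zero-length (the lock race noted in spool_request above), and presumably defer entries whose mtime lies in the future, which would be the natural counterpart of the futimes() call used for tasks scheduled with 'at'. A hypothetical sketch of such a scan, not uWSGI's actual implementation:

/* hypothetical sketch of a spool-directory scan; uWSGI's spooler_readdir() may differ */
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>

static void scan_spool_dir(const char *dir) {

    DIR *d = opendir(dir);
    if (!d) {
        perror("opendir()");
        return;
    }

    struct dirent *de;
    time_t now = time(NULL);

    while ((de = readdir(d))) {
        // only consider files created by spool_request()
        if (strncmp(de->d_name, "uwsgi_spoolfile_on_", 19))
            continue;

        char path[4096];
        snprintf(path, sizeof(path), "%s/%s", dir, de->d_name);

        struct stat st;
        if (stat(path, &st) || !S_ISREG(st.st_mode))
            continue;

        // a zero-length file is still being written (see the fcntl() lock note in spool_request)
        if (st.st_size == 0)
            continue;

        // assumption: tasks scheduled with 'at' carry a future mtime and are deferred
        if (st.st_mtime > now)
            continue;

        printf("would run spool task %s\n", path);
    }

    closedir(d);
}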
Example n. 29
0
void async_loop() {

	if (uwsgi.async < 2) {
		uwsgi_log("the async loop engine requires async mode (--async <n>)\n");
		exit(1);
	}

	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	uint64_t now;

	struct uwsgi_async_request *current_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;

	uwsgi.wait_write_hook = async_wait_fd_write;
        uwsgi.wait_read_hook = async_wait_fd_read;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	if (!uwsgi.schedule_to_main) {
		uwsgi_log("*** DANGER *** async mode without coroutine/greenthread engine loaded !!!\n");
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		now = (uint64_t) uwsgi_now();
		if (uwsgi.async_runqueue) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL);
			if (min_timeout) {
				timeout = min_timeout->value - now;
				if (timeout <= 0) {
					async_expire_timeouts(now);
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		now = (uint64_t) uwsgi_now();
		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts(now);
		}


		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			// signals are executed in the main stack... in the future we could have dedicated stacks for them
			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}

			is_a_new_connection = 0;

			// new request coming in ?
			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {

				if (interesting_fd == uwsgi_sock->fd) {

					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t)now);
						break;
					}

					// on error re-insert the request in the queue
					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					// by default the core is in UWSGI_AGAIN mode
					uwsgi.wsgi_req->async_status = UWSGI_AGAIN;
					// some protocols (like zeromq) do not need additional parsing: just push the request into the runqueue
					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}

				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					async_reset_request(uwsgi.wsgi_req);
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table 
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app-registered event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				async_reset_request(uwsgi.wsgi_req);
				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
			}
		}


		// event queue managed, give cpu to runqueue
		current_request = uwsgi.async_runqueue;

		while(current_request) {

			// current_request could be nulled on error/end of request
			struct uwsgi_async_request *next_request = current_request->next;

			uwsgi.wsgi_req = current_request->wsgi_req;
			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK ||
				uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove from the runqueue
				runqueue_remove(current_request);
			}
			current_request = next_request;
		}

	}

}
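
The timeout handed to event_queue_wait_multi() above encodes the whole scheduling policy of the async loop: poll (0) when the runqueue already has work, block forever (-1) when no timers are armed, otherwise sleep only until the earliest red-black-tree timer expires. A condensed sketch of that policy, with a plain time_t standing in for uwsgi_min_rb_timer()->value:

/* sketch of the wait-timeout policy used by async_loop() above;
   'next_expiry' is a hypothetical stand-in for the earliest rb-timer value */
#include <time.h>

static int compute_wait_timeout(int runqueue_busy, int have_timer, time_t next_expiry) {

	if (runqueue_busy)
		return 0;		/* requests are ready to run: do not block */

	if (!have_timer)
		return -1;		/* nothing scheduled: block until an event arrives */

	time_t now = time(NULL);
	if (next_expiry <= now)
		return 0;		/* already late: expire timeouts and poll */

	return (int) (next_expiry - now);
}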
Example n. 30
0
int master_loop(char **argv, char **environ) {

	uint64_t tmp_counter;

	struct timeval last_respawn;
	int last_respawn_rate = 0;

	int pid_found = 0;

	pid_t diedpid;
	int waitpid_status;

	uint8_t uwsgi_signal;

	time_t last_request_timecheck = 0, now = 0;
	uint64_t last_request_count = 0;

	pthread_t logger_thread;
	pthread_t cache_sweeper;

#ifdef UWSGI_UDP
	int udp_fd = -1;
#ifdef UWSGI_MULTICAST
	char *cluster_opt_buf = NULL;
	size_t cluster_opt_size = 4;
#endif
#endif



#ifdef UWSGI_SNMP
	int snmp_fd = -1;
#endif
	int i = 0;
	int rlen;

	int check_interval = 1;

	struct uwsgi_rb_timer *min_timeout;
	struct rb_root *rb_timers = uwsgi_init_rb_timer();


	if (uwsgi.procname_master) {
		uwsgi_set_processname(uwsgi.procname_master);
	}
	else if (uwsgi.procname) {
		uwsgi_set_processname(uwsgi.procname);
	}
	else if (uwsgi.auto_procname) {
		uwsgi_set_processname("uWSGI master");
	}


	uwsgi.current_time = uwsgi_now();

	uwsgi_unix_signal(SIGTSTP, suspend_resume_them_all);
	uwsgi_unix_signal(SIGHUP, grace_them_all);
	if (uwsgi.die_on_term) {
		uwsgi_unix_signal(SIGTERM, kill_them_all);
		uwsgi_unix_signal(SIGQUIT, reap_them_all);
	}
	else {
		uwsgi_unix_signal(SIGTERM, reap_them_all);
		uwsgi_unix_signal(SIGQUIT, kill_them_all);
	}
	uwsgi_unix_signal(SIGINT, kill_them_all);
	uwsgi_unix_signal(SIGUSR1, stats);
	if (uwsgi.auto_snapshot) {
		uwsgi_unix_signal(SIGURG, uwsgi_restore_auto_snapshot);
	}

	atexit(uwsgi_master_cleanup_hooks);

	uwsgi.master_queue = event_queue_init();

	/* route signals to workers... */
#ifdef UWSGI_DEBUG
	uwsgi_log("adding %d to signal poll\n", uwsgi.shared->worker_signal_pipe[0]);
#endif
	event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_signal_pipe[0]);

#ifdef UWSGI_SPOOLER
	if (uwsgi.spoolers) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->spooler_signal_pipe[0]);
	}
#endif

	if (uwsgi.mules_cnt > 0) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->mule_signal_pipe[0]);
	}

	if (uwsgi.log_master) {
		uwsgi.log_master_buf = uwsgi_malloc(uwsgi.log_master_bufsize);
		if (!uwsgi.threaded_logger) {
#ifdef UWSGI_DEBUG
			uwsgi_log("adding %d to master logging\n", uwsgi.shared->worker_log_pipe[0]);
#endif
			event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]);
		}
		else {
			if (pthread_create(&logger_thread, NULL, logger_thread_loop, NULL)) {
				uwsgi_error("pthread_create()");
				uwsgi_log("falling back to non-threaded logger...\n");
				event_queue_add_fd_read(uwsgi.master_queue, uwsgi.shared->worker_log_pipe[0]);
				uwsgi.threaded_logger = 0;
			}
		}

#ifdef UWSGI_ALARM
		// initialize the alarm subsystem
		uwsgi_alarms_init();
#endif
	}

	if (uwsgi.cache_max_items > 0 && !uwsgi.cache_no_expire) {
		if (pthread_create(&cache_sweeper, NULL, cache_sweeper_loop, NULL)) {
			uwsgi_error("pthread_create()");
			uwsgi_log("unable to run the cache sweeper !!!\n");
		}
		else {
			uwsgi_log("cache sweeper thread enabled\n");
		}
	}


	uwsgi.wsgi_req->buffer = uwsgi.workers[0].cores[0].buffer;

	if (uwsgi.has_emperor) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.emperor_fd);
	}

	if (uwsgi.zerg_server) {
		uwsgi.zerg_server_fd = bind_to_unix(uwsgi.zerg_server, uwsgi.listen_queue, 0, 0);
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.zerg_server_fd);
		uwsgi_log("*** Zerg server enabled on %s ***\n", uwsgi.zerg_server);
	}

	if (uwsgi.stats) {
		char *tcp_port = strchr(uwsgi.stats, ':');
		if (tcp_port) {
			// disable deferred accept for this socket
			int current_defer_accept = uwsgi.no_defer_accept;
			uwsgi.no_defer_accept = 1;
			uwsgi.stats_fd = bind_to_tcp(uwsgi.stats, uwsgi.listen_queue, tcp_port);
			uwsgi.no_defer_accept = current_defer_accept;
		}
		else {
			uwsgi.stats_fd = bind_to_unix(uwsgi.stats, uwsgi.listen_queue, uwsgi.chmod_socket, uwsgi.abstract_socket);
		}

		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.stats_fd);
		uwsgi_log("*** Stats server enabled on %s fd: %d ***\n", uwsgi.stats, uwsgi.stats_fd);
	}
#ifdef UWSGI_UDP
	if (uwsgi.udp_socket) {
		udp_fd = bind_to_udp(uwsgi.udp_socket, 0, 0);
		if (udp_fd < 0) {
			uwsgi_log("unable to bind to udp socket. SNMP and cluster management services will be disabled.\n");
		}
		else {
			uwsgi_log("UDP server enabled.\n");
			event_queue_add_fd_read(uwsgi.master_queue, udp_fd);
		}
	}

#ifdef UWSGI_MULTICAST
	if (uwsgi.cluster) {
		event_queue_add_fd_read(uwsgi.master_queue, uwsgi.cluster_fd);
		cluster_opt_buf = uwsgi_setup_clusterbuf(&cluster_opt_size);
	}
#endif
#endif

#ifdef UWSGI_SNMP
	snmp_fd = uwsgi_setup_snmp();
#endif

	if (uwsgi.cheap) {
		uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
		for (i = 1; i <= uwsgi.numproc; i++) {
			uwsgi.workers[i].cheaped = 1;
		}
		uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
	}


	// spawn mules
	for (i = 0; i < uwsgi.mules_cnt; i++) {
		size_t mule_patch_size = 0;
		uwsgi.mules[i].patch = uwsgi_string_get_list(&uwsgi.mules_patches, i, &mule_patch_size);
		uwsgi_mule(i + 1);
	}

	// spawn gateways
	for (i = 0; i < ushared->gateways_cnt; i++) {
		if (ushared->gateways[i].pid == 0) {
			gateway_respawn(i);
		}
	}

	// spawn daemons
	uwsgi_daemons_spawn_all();

	// first subscription
	struct uwsgi_string_list *subscriptions = uwsgi.subscriptions;
	while (subscriptions) {
		uwsgi_subscribe(subscriptions->value, 0);
		subscriptions = subscriptions->next;
	}

	// sync the cache store if needed
	if (uwsgi.cache_store && uwsgi.cache_filesize) {
		if (msync(uwsgi.cache_items, uwsgi.cache_filesize, MS_ASYNC)) {
			uwsgi_error("msync()");
		}
	}

	if (uwsgi.queue_store && uwsgi.queue_filesize) {
		if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
			uwsgi_error("msync()");
		}
	}

	// update touches timestamps
	uwsgi_check_touches(uwsgi.touch_reload);
	uwsgi_check_touches(uwsgi.touch_logrotate);
	uwsgi_check_touches(uwsgi.touch_logreopen);

	// setup cheaper algos
	uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
	if (uwsgi.requested_cheaper_algo) {
		uwsgi.cheaper_algo = NULL;
		struct uwsgi_cheaper_algo *uca = uwsgi.cheaper_algos;
		while (uca) {
			if (!strcmp(uca->name, uwsgi.requested_cheaper_algo)) {
				uwsgi.cheaper_algo = uca->func;
				break;
			}
			uca = uca->next;
		}

		if (!uwsgi.cheaper_algo) {
			uwsgi_log("unable to find requested cheaper algorithm, falling back to spare\n");
			uwsgi.cheaper_algo = uwsgi_cheaper_algo_spare;
		}

	}

	// here really starts the master loop

	for (;;) {
		//uwsgi_log("uwsgi.ready_to_reload %d %d\n", uwsgi.ready_to_reload, uwsgi.numproc);

		// run master_cycle hook for every plugin
		for (i = 0; i < uwsgi.gp_cnt; i++) {
			if (uwsgi.gp[i]->master_cycle) {
				uwsgi.gp[i]->master_cycle();
			}
		}
		for (i = 0; i < 256; i++) {
			if (uwsgi.p[i]->master_cycle) {
				uwsgi.p[i]->master_cycle();
			}
		}

		uwsgi_daemons_smart_check();

		// count the number of active workers
		int active_workers = 0;
		for (i = 1; i <= uwsgi.numproc; i++) {
			if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
				active_workers++;
			}
		}


		if (uwsgi.to_outworld) {
			//uwsgi_log("%d/%d\n", uwsgi.lazy_respawned, uwsgi.numproc);
			if (uwsgi.lazy_respawned >= active_workers) {
				uwsgi.to_outworld = 0;
				uwsgi.master_mercy = 0;
				uwsgi.lazy_respawned = 0;
			}
		}


		if (uwsgi_master_check_mercy())
			return 0;

		if (uwsgi.respawn_workers) {
			for (i = 1; i <= uwsgi.respawn_workers; i++) {
				if (uwsgi_respawn_worker(i))
					return 0;
			}

			uwsgi.respawn_workers = 0;
		}

		if (uwsgi.restore_snapshot) {
			uwsgi_master_restore_snapshot();
			continue;
		}

		// cheaper management
		if (uwsgi.cheaper && !uwsgi.cheap && !uwsgi.to_heaven && !uwsgi.to_hell && !uwsgi.to_outworld && !uwsgi.workers[0].suspended) {
			if (!uwsgi_calc_cheaper())
				return 0;
		}

		if ((uwsgi.cheap || uwsgi.ready_to_die >= active_workers) && uwsgi.to_hell) {
			// call a series of waitpid to ensure all processes (gateways, mules and daemons) are dead
			for (i = 0; i < (ushared->gateways_cnt + uwsgi.daemons_cnt + uwsgi.mules_cnt); i++) {
				diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
			}

			uwsgi_log("goodbye to uWSGI.\n");
			exit(0);
		}

		if ((uwsgi.cheap || uwsgi.ready_to_reload >= active_workers) && uwsgi.to_heaven) {
			uwsgi_reload(argv);
			// never here (unless in shared library mode)
			return -1;
		}

		diedpid = waitpid(WAIT_ANY, &waitpid_status, WNOHANG);
		if (diedpid == -1) {
			if (errno == ECHILD) {
				// something did not work as expected, just assume all has been cleared
				if (uwsgi.to_heaven) {
					uwsgi.ready_to_reload = uwsgi.numproc;
					continue;
				}
				else if (uwsgi.to_hell) {
					uwsgi.ready_to_die = uwsgi.numproc;
					continue;
				}
				else if (uwsgi.to_outworld) {
					uwsgi.lazy_respawned = uwsgi.numproc;
					uwsgi_log("*** no workers to reload found ***\n");
					continue;
				}
				diedpid = 0;
			}
			else {
				uwsgi_error("waitpid()");
				/* here it is better to reload the whole uWSGI stack */
				uwsgi_log("something horrible happened...\n");
				reap_them_all(0);
				exit(1);
			}
		}

		if (diedpid == 0) {

			/* all processes ok, doing status scan after N seconds */
			check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
			if (!check_interval)
				check_interval = 1;


			// add unregistered file monitors
			// locking is not needed as monitors can only increase
			for (i = 0; i < ushared->files_monitored_cnt; i++) {
				if (!ushared->files_monitored[i].registered) {
					ushared->files_monitored[i].fd = event_queue_add_file_monitor(uwsgi.master_queue, ushared->files_monitored[i].filename, &ushared->files_monitored[i].id);
					ushared->files_monitored[i].registered = 1;
				}
			}


			// add unregistered timers
			// locking is not needed as timers can only increase
			for (i = 0; i < ushared->timers_cnt; i++) {
				if (!ushared->timers[i].registered) {
					ushared->timers[i].fd = event_queue_add_timer(uwsgi.master_queue, &ushared->timers[i].id, ushared->timers[i].value);
					ushared->timers[i].registered = 1;
				}
			}

			// add unregistered rb_timers
			// locking is not needed as rb_timers can only increase
			for (i = 0; i < ushared->rb_timers_cnt; i++) {
				if (!ushared->rb_timers[i].registered) {
					ushared->rb_timers[i].uwsgi_rb_timer = uwsgi_add_rb_timer(rb_timers, uwsgi_now() + ushared->rb_timers[i].value, &ushared->rb_timers[i]);
					ushared->rb_timers[i].registered = 1;
				}
			}

			int interesting_fd = -1;

			if (ushared->rb_timers_cnt > 0) {
				min_timeout = uwsgi_min_rb_timer(rb_timers);
				if (min_timeout == NULL) {
					check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
				}
				else {
					check_interval = min_timeout->key - uwsgi_now();
					if (check_interval <= 0) {
						expire_rb_timeouts(rb_timers);
						check_interval = 0;
					}
				}
			}

			// wait for event
			rlen = event_queue_wait(uwsgi.master_queue, check_interval, &interesting_fd);

			if (rlen == 0) {
				if (ushared->rb_timers_cnt > 0) {
					expire_rb_timeouts(rb_timers);
				}
			}


			// check uwsgi-cron table
			if (ushared->cron_cnt) {
				uwsgi_manage_signal_cron(uwsgi_now());
			}

			if (uwsgi.crons) {
				uwsgi_manage_command_cron(uwsgi_now());
			}


			// check for probes
			if (ushared->probes_cnt > 0) {
				uwsgi_lock(uwsgi.probe_table_lock);
				for (i = 0; i < ushared->probes_cnt; i++) {
					if (interesting_fd == -1) {
						// increment cycles
						ushared->probes[i].cycles++;
					}
					if (ushared->probes[i].func(interesting_fd, &ushared->probes[i])) {
						uwsgi_route_signal(ushared->probes[i].sig);
					}
				}
				uwsgi_unlock(uwsgi.probe_table_lock);
			}

			if (rlen > 0) {

				if (uwsgi.log_master && !uwsgi.threaded_logger) {
					if (interesting_fd == uwsgi.shared->worker_log_pipe[0]) {
						uwsgi_master_log();
						goto health_cycle;
					}
				}

				if (uwsgi.stats && uwsgi.stats_fd > -1) {
					if (interesting_fd == uwsgi.stats_fd) {
						uwsgi_send_stats(uwsgi.stats_fd);
						goto health_cycle;
					}
				}

				if (uwsgi.zerg_server) {
					if (interesting_fd == uwsgi.zerg_server_fd) {
						uwsgi_manage_zerg(uwsgi.zerg_server_fd, 0, NULL);
						goto health_cycle;
					}
				}

				if (uwsgi.has_emperor) {
					if (interesting_fd == uwsgi.emperor_fd) {
						uwsgi_master_manage_emperor();
						goto health_cycle;
					}
				}


				if (uwsgi.cheap) {
					int found = 0;
					struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
					while (uwsgi_sock) {
						if (interesting_fd == uwsgi_sock->fd) {
							found = 1;
							uwsgi.cheap = 0;
							uwsgi_del_sockets_from_queue(uwsgi.master_queue);
							int needed = uwsgi.numproc;
							if (uwsgi.cheaper) {
								needed = uwsgi.cheaper_count;
							}
							for (i = 1; i <= needed; i++) {
								if (uwsgi_respawn_worker(i))
									return 0;
							}
							break;
						}
						uwsgi_sock = uwsgi_sock->next;
					}
					// here it is better to continue instead of going to health_cycle
					if (found)
						continue;
				}
#ifdef UWSGI_SNMP
				if (uwsgi.snmp_addr && interesting_fd == snmp_fd) {
					uwsgi_master_manage_snmp(snmp_fd);
					goto health_cycle;
				}
#endif

#ifdef UWSGI_UDP
				if (uwsgi.udp_socket && interesting_fd == udp_fd) {
					uwsgi_master_manage_udp(udp_fd);
					goto health_cycle;
				}

#ifdef UWSGI_MULTICAST
				if (interesting_fd == uwsgi.cluster_fd) {

					if (uwsgi_get_dgram(uwsgi.cluster_fd, &uwsgi.workers[0].cores[0].req)) {
						goto health_cycle;
					}

					manage_cluster_message(cluster_opt_buf, cluster_opt_size);

					goto health_cycle;
				}
#endif

#endif


				int next_iteration = 0;

				uwsgi_lock(uwsgi.fmon_table_lock);
				for (i = 0; i < ushared->files_monitored_cnt; i++) {
					if (ushared->files_monitored[i].registered) {
						if (interesting_fd == ushared->files_monitored[i].fd) {
							struct uwsgi_fmon *uf = event_queue_ack_file_monitor(uwsgi.master_queue, interesting_fd);
							// now call the file_monitor handler
							if (uf)
								uwsgi_route_signal(uf->sig);
							break;
						}
					}
				}

				uwsgi_unlock(uwsgi.fmon_table_lock);
				if (next_iteration)
					goto health_cycle;

				next_iteration = 0;

				uwsgi_lock(uwsgi.timer_table_lock);
				for (i = 0; i < ushared->timers_cnt; i++) {
					if (ushared->timers[i].registered) {
						if (interesting_fd == ushared->timers[i].fd) {
							struct uwsgi_timer *ut = event_queue_ack_timer(interesting_fd);
							// now route the timer signal
							if (ut)
								uwsgi_route_signal(ut->sig);
							break;
						}
					}
				}
				uwsgi_unlock(uwsgi.timer_table_lock);
				if (next_iteration)
					goto health_cycle;


				// check for worker signal
				if (interesting_fd == uwsgi.shared->worker_signal_pipe[0]) {
					rlen = read(interesting_fd, &uwsgi_signal, 1);
					if (rlen < 0) {
						uwsgi_error("read()");
					}
					else if (rlen > 0) {
#ifdef UWSGI_DEBUG
						uwsgi_log_verbose("received uwsgi signal %d from a worker\n", uwsgi_signal);
#endif
						uwsgi_route_signal(uwsgi_signal);
					}
					else {
						uwsgi_log_verbose("lost connection with a worker\n");
						close(interesting_fd);
					}
					goto health_cycle;
				}

#ifdef UWSGI_SPOOLER
				// check for spooler signal
				if (uwsgi.spoolers) {
					if (interesting_fd == uwsgi.shared->spooler_signal_pipe[0]) {
						rlen = read(interesting_fd, &uwsgi_signal, 1);
						if (rlen < 0) {
							uwsgi_error("read()");
						}
						else if (rlen > 0) {
#ifdef UWSGI_DEBUG
							uwsgi_log_verbose("received uwsgi signal %d from a spooler\n", uwsgi_signal);
#endif
							uwsgi_route_signal(uwsgi_signal);
						}
						else {
							uwsgi_log_verbose("lost connection with the spooler\n");
							close(interesting_fd);
						}
						goto health_cycle;
					}

				}
#endif

				// check for mules signal
				if (uwsgi.mules_cnt > 0) {
					if (interesting_fd == uwsgi.shared->mule_signal_pipe[0]) {
						rlen = read(interesting_fd, &uwsgi_signal, 1);
						if (rlen < 0) {
							uwsgi_error("read()");
						}
						else if (rlen > 0) {
#ifdef UWSGI_DEBUG
							uwsgi_log_verbose("received uwsgi signal %d from a mule\n", uwsgi_signal);
#endif
							uwsgi_route_signal(uwsgi_signal);
						}
						else {
							uwsgi_log_verbose("lost connection with a mule\n");
							close(interesting_fd);
						}
						goto health_cycle;
					}

				}


			}

health_cycle:
			now = uwsgi_now();
			if (now - uwsgi.current_time < 1) {
				continue;
			}
			uwsgi.current_time = now;

			// checking logsize
			if (uwsgi.logfile) {
				uwsgi_check_logrotate();
			}

			// this will be incremented at (more or less) regular intervals
			uwsgi.master_cycles++;

			// recalculate the requests counter on configurations prone to race conditions
			// a bit of inaccuracy is better than locking ;)

			if (uwsgi.numproc > 1) {
				tmp_counter = 0;
				for (i = 1; i < uwsgi.numproc + 1; i++)
					tmp_counter += uwsgi.workers[i].requests;
				uwsgi.workers[0].requests = tmp_counter;
			}

			if (uwsgi.idle > 0 && !uwsgi.cheap) {
				uwsgi.current_time = uwsgi_now();
				if (!last_request_timecheck)
					last_request_timecheck = uwsgi.current_time;

				int busy_workers = 0;
				for (i = 1; i <= uwsgi.numproc; i++) {
					if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
						if (uwsgi.workers[i].busy == 1) {
							busy_workers = 1;
							break;
						}
					}
				}

				if (last_request_count != uwsgi.workers[0].requests) {
					last_request_timecheck = uwsgi.current_time;
					last_request_count = uwsgi.workers[0].requests;
				}
				// a bit of over-engineering to avoid clock skews
				else if (last_request_timecheck < uwsgi.current_time && (uwsgi.current_time - last_request_timecheck > uwsgi.idle) && !busy_workers) {
					uwsgi_log("workers have been inactive for more than %d seconds (%llu-%llu)\n", uwsgi.idle, (unsigned long long) uwsgi.current_time, (unsigned long long) last_request_timecheck);
					uwsgi.cheap = 1;
					if (uwsgi.die_on_idle) {
						if (uwsgi.has_emperor) {
							char byte = 22;
							if (write(uwsgi.emperor_fd, &byte, 1) != 1) {
								uwsgi_error("write()");
								kill_them_all(0);
							}
						}
						else {
							kill_them_all(0);
						}
						continue;
					}
					for (i = 1; i <= uwsgi.numproc; i++) {
						uwsgi.workers[i].cheaped = 1;
						if (uwsgi.workers[i].pid == 0)
							continue;
						kill(uwsgi.workers[i].pid, SIGKILL);
						if (waitpid(uwsgi.workers[i].pid, &waitpid_status, 0) < 0) {
							if (errno != ECHILD)
								uwsgi_error("waitpid()");
						}
					}
					uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
					uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
					last_request_timecheck = 0;
					continue;
				}
			}

			check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];
			if (!check_interval)
				check_interval = 1;


#ifdef __linux__
			// get listen_queue status
			struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {
				if (uwsgi_sock->family == AF_INET) {
					get_linux_tcp_info(uwsgi_sock->fd);
				}
#ifdef SIOBKLGQ
				else if (uwsgi_sock->family == AF_UNIX) {
					get_linux_unbit_SIOBKLGQ(uwsgi_sock->fd);
				}
#endif
				uwsgi_sock = uwsgi_sock->next;
			}
#endif

			for (i = 1; i <= uwsgi.numproc; i++) {
				/* first check for harakiri */
				if (uwsgi.workers[i].harakiri > 0) {
					if (uwsgi.workers[i].harakiri < (time_t) uwsgi.current_time) {
						trigger_harakiri(i);
					}
				}
				/* then user-defined harakiri */
				if (uwsgi.workers[i].user_harakiri > 0) {
					if (uwsgi.workers[i].user_harakiri < (time_t) uwsgi.current_time) {
						trigger_harakiri(i);
					}
				}
				// then for evil memory checkers
				if (uwsgi.evil_reload_on_as) {
					if ((rlim_t) uwsgi.workers[i].vsz_size >= uwsgi.evil_reload_on_as) {
						uwsgi_log("*** EVIL RELOAD ON WORKER %d ADDRESS SPACE: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].vsz_size, uwsgi.workers[i].pid);
						kill(uwsgi.workers[i].pid, SIGKILL);
						uwsgi.workers[i].vsz_size = 0;
					}
				}
				if (uwsgi.evil_reload_on_rss) {
					if ((rlim_t) uwsgi.workers[i].rss_size >= uwsgi.evil_reload_on_rss) {
						uwsgi_log("*** EVIL RELOAD ON WORKER %d RSS: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].rss_size, uwsgi.workers[i].pid);
						kill(uwsgi.workers[i].pid, SIGKILL);
						uwsgi.workers[i].rss_size = 0;
					}
				}

				// need to find a better way
				//uwsgi.workers[i].last_running_time = uwsgi.workers[i].running_time;
			}

			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways_harakiri[i] > 0) {
					if (ushared->gateways_harakiri[i] < (time_t) uwsgi.current_time) {
						if (ushared->gateways[i].pid > 0) {
							kill(ushared->gateways[i].pid, SIGKILL);
						}
						ushared->gateways_harakiri[i] = 0;
					}
				}
			}

			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].harakiri > 0) {
					if (uwsgi.mules[i].harakiri < (time_t) uwsgi.current_time) {
						uwsgi_log("*** HARAKIRI ON MULE %d HANDLING SIGNAL %d (pid: %d) ***\n", i + 1, uwsgi.mules[i].signum, uwsgi.mules[i].pid);
						kill(uwsgi.mules[i].pid, SIGKILL);
						uwsgi.mules[i].harakiri = 0;
					}
				}
			}
#ifdef UWSGI_SPOOLER
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			while (uspool) {
				if (uspool->harakiri > 0 && uspool->harakiri < (time_t) uwsgi.current_time) {
					uwsgi_log("*** HARAKIRI ON THE SPOOLER (pid: %d) ***\n", uspool->pid);
					kill(uspool->pid, SIGKILL);
					uspool->harakiri = 0;
				}
				uspool = uspool->next;
			}
#endif

#ifdef __linux__
#ifdef MADV_MERGEABLE
			if (uwsgi.linux_ksm > 0 && (uwsgi.master_cycles % uwsgi.linux_ksm) == 0) {
				uwsgi_linux_ksm_map();
			}
#endif
#endif

#ifdef UWSGI_UDP
			// check for cluster nodes
			master_check_cluster_nodes();

			// reannounce myself every 10 cycles
			if (uwsgi.cluster && uwsgi.cluster_fd >= 0 && !uwsgi.cluster_nodes && (uwsgi.master_cycles % 10) == 0) {
				uwsgi_cluster_add_me();
			}

			// resubscribe every 10 cycles by default
			if ((uwsgi.subscriptions && ((uwsgi.master_cycles % uwsgi.subscribe_freq) == 0 || uwsgi.master_cycles == 1)) && !uwsgi.to_heaven && !uwsgi.to_hell && !uwsgi.workers[0].suspended) {
				struct uwsgi_string_list *subscriptions = uwsgi.subscriptions;
				while (subscriptions) {
					uwsgi_subscribe(subscriptions->value, 0);
					subscriptions = subscriptions->next;
				}
			}

#endif

			if (uwsgi.cache_store && uwsgi.cache_filesize && uwsgi.cache_store_sync && ((uwsgi.master_cycles % uwsgi.cache_store_sync) == 0)) {
				if (msync(uwsgi.cache_items, uwsgi.cache_filesize, MS_ASYNC)) {
					uwsgi_error("msync()");
				}
			}

			if (uwsgi.queue_store && uwsgi.queue_filesize && uwsgi.queue_store_sync && ((uwsgi.master_cycles % uwsgi.queue_store_sync) == 0)) {
				if (msync(uwsgi.queue_header, uwsgi.queue_filesize, MS_ASYNC)) {
					uwsgi_error("msync()");
				}
			}

			// check touch_reload
			if (!uwsgi.to_heaven && !uwsgi.to_hell) {
				char *touched = uwsgi_check_touches(uwsgi.touch_reload);
				if (touched) {
					uwsgi_log("*** %s has been touched... grace them all !!! ***\n", touched);
					uwsgi_block_signal(SIGHUP);
					grace_them_all(0);
					uwsgi_unblock_signal(SIGHUP);
				}
			}

			continue;

		}

		// no one died
		if (diedpid <= 0)
			continue;

		// check for deadlocks first
		uwsgi_deadlock_check(diedpid);

		// reload gateways and daemons only on normal workflow (+outworld status)
		if (!uwsgi.to_heaven && !uwsgi.to_hell) {

#ifdef UWSGI_SPOOLER
			/* reload the spooler */
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			pid_found = 0;
			while (uspool) {
				if (uspool->pid > 0 && diedpid == uspool->pid) {
					uwsgi_log("OOOPS the spooler is no more...trying respawn...\n");
					uspool->respawned++;
					uspool->pid = spooler_start(uspool);
					pid_found = 1;
					break;
				}
				uspool = uspool->next;
			}

			if (pid_found)
				continue;
#endif

			pid_found = 0;
			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].pid == diedpid) {
					uwsgi_log("OOOPS mule %d (pid: %d) crippled...trying respawn...\n", i + 1, uwsgi.mules[i].pid);
					uwsgi_mule(i + 1);
					pid_found = 1;
					break;
				}
			}

			if (pid_found)
				continue;


			/* reload the gateways */
			pid_found = 0;
			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways[i].pid == diedpid) {
					gateway_respawn(i);
					pid_found = 1;
					break;
				}
			}

			if (pid_found)
				continue;

			/* reload the daemons */
			pid_found = uwsgi_daemon_check_pid_reload(diedpid);

			if (pid_found)
				continue;

		}


		/* What happens here ?

		   case 1) the diedpid is not a worker, report it and continue
		   case 2) the diedpid is a worker and we are not in a reload procedure -> respawn it
		   case 3) the diedpid is a worker and we are in graceful reload -> uwsgi.ready_to_reload++ and continue
		   case 4) the diedpid is a worker and we are in brutal reload -> uwsgi.ready_to_die++ and continue
		 */

		uwsgi.mywid = find_worker_id(diedpid);
		if (uwsgi.mywid <= 0) {
			// check spooler, mules, gateways and daemons
#ifdef UWSGI_SPOOLER
			struct uwsgi_spooler *uspool = uwsgi.spoolers;
			while (uspool) {
				if (uspool->pid > 0 && diedpid == uspool->pid) {
					uwsgi_log("spooler (pid: %d) annihilated\n", (int) diedpid);
					goto next;
				}
				uspool = uspool->next;
			}
#endif

			for (i = 0; i < uwsgi.mules_cnt; i++) {
				if (uwsgi.mules[i].pid == diedpid) {
					uwsgi_log("mule %d (pid: %d) annihilated\n", i + 1, (int) diedpid);
					goto next;
				}
			}

			for (i = 0; i < ushared->gateways_cnt; i++) {
				if (ushared->gateways[i].pid == diedpid) {
					uwsgi_log("gateway %d (%s, pid: %d) annihilated\n", i + 1, ushared->gateways[i].fullname, (int) diedpid);
					goto next;
				}
			}

			if (uwsgi_daemon_check_pid_death(diedpid)) goto next;

			if (WIFEXITED(waitpid_status)) {
				uwsgi_log("subprocess %d exited with code %d\n", (int) diedpid, WEXITSTATUS(waitpid_status));
			}
			else if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("subprocess %d exited by signal %d\n", (int) diedpid, WTERMSIG(waitpid_status));
			}
			else if (WIFSTOPPED(waitpid_status)) {
				uwsgi_log("subprocess %d stopped\n", (int) diedpid);
			}
next:
			continue;
		}


		// ok a worker died...
		if (uwsgi.to_heaven) {
			uwsgi.ready_to_reload++;
			uwsgi.workers[uwsgi.mywid].pid = 0;
			// only to be safe :P
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
			continue;
		}
		else if (uwsgi.to_hell) {
			uwsgi.ready_to_die++;
			uwsgi.workers[uwsgi.mywid].pid = 0;
			// only to be safe :P
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
			continue;
		}
		else if (uwsgi.to_outworld) {
			uwsgi.lazy_respawned++;
			uwsgi.workers[uwsgi.mywid].destroy = 0;
			uwsgi.workers[uwsgi.mywid].pid = 0;
			// only to be safe :P
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
		}

		if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_FAILED_APP_CODE) {
			uwsgi_log("OOPS ! failed loading app in worker %d (pid %d) :( trying again...\n", uwsgi.mywid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_DE_HIJACKED_CODE) {
			uwsgi_log("...restoring worker %d (pid: %d)...\n", uwsgi.mywid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_EXCEPTION_CODE) {
			uwsgi_log("... monitored exception detected, respawning worker %d (pid: %d)...\n", uwsgi.mywid, (int) diedpid);
		}
		else if (WIFEXITED(waitpid_status) && WEXITSTATUS(waitpid_status) == UWSGI_QUIET_CODE) {
			// noop
		}
		else if (uwsgi.workers[uwsgi.mywid].manage_next_request) {
			if (WIFSIGNALED(waitpid_status)) {
				uwsgi_log("DAMN ! worker %d (pid: %d) died, killed by signal %d :( trying respawn ...\n", uwsgi.mywid, (int) diedpid, (int) WTERMSIG(waitpid_status));
			}
			else {
				uwsgi_log("DAMN ! worker %d (pid: %d) died :( trying respawn ...\n", uwsgi.mywid, (int) diedpid);
			}
		}
		else {
			uwsgi_log("DAMN ! worker %d (pid: %d) MYSTERIOUSLY died :( trying respawn ...\n", uwsgi.mywid, (int) diedpid);
		}

		if (uwsgi.workers[uwsgi.mywid].cheaped == 1) {
			uwsgi.workers[uwsgi.mywid].pid = 0;
			uwsgi_log("uWSGI worker %d cheaped.\n", uwsgi.mywid);
			uwsgi.workers[uwsgi.mywid].harakiri = 0;
			continue;
		}
		gettimeofday(&last_respawn, NULL);
		if (last_respawn.tv_sec <= uwsgi.respawn_delta + check_interval) {
			last_respawn_rate++;
			if (last_respawn_rate > uwsgi.numproc) {
				if (uwsgi.forkbomb_delay > 0) {
					uwsgi_log("worker respawning too fast !!! i have to sleep a bit (%d seconds)...\n", uwsgi.forkbomb_delay);
					/* use --forkbomb-delay 0 to disable sleeping */
					sleep(uwsgi.forkbomb_delay);
				}
				last_respawn_rate = 0;
			}
		}
		else {
			last_respawn_rate = 0;
		}
		gettimeofday(&last_respawn, NULL);
		uwsgi.respawn_delta = last_respawn.tv_sec;

		if (uwsgi_respawn_worker(uwsgi.mywid))
			return 0;

		// end of the loop
	}

	// never here
}
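
Almost everything in master_loop() pivots on the value returned by the non-blocking waitpid(): 0 means every child is still alive and the master can run its periodic checks, -1 with ECHILD means there is nothing left to wait for, and a positive pid is then classified with the WIF* macros before deciding whether to respawn. A self-contained sketch of that reaping pattern (generic code, not uWSGI's):

/* standalone sketch of the non-blocking child-reaping pattern used by the master loop */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

static void reap_children(void) {

	int status;
	pid_t pid;

	for (;;) {
		pid = waitpid(-1, &status, WNOHANG);
		if (pid == 0)
			break;			/* children exist, but none has exited yet */
		if (pid < 0) {
			if (errno != ECHILD)
				perror("waitpid()");
			break;			/* no children at all (or a real error) */
		}
		if (WIFEXITED(status))
			printf("child %d exited with code %d\n", (int) pid, WEXITSTATUS(status));
		else if (WIFSIGNALED(status))
			printf("child %d killed by signal %d\n", (int) pid, WTERMSIG(status));
		else if (WIFSTOPPED(status))
			printf("child %d stopped\n", (int) pid);
	}
}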