Example #1
PyObject *py_uwsgi_gevent_signal_handler(PyObject * self, PyObject * args) {

    int signal_socket;

    if (!PyArg_ParseTuple(args, "i:uwsgi_gevent_signal_handler", &signal_socket)) {
        return NULL;
    }

    uwsgi_receive_signal(signal_socket, "worker", uwsgi.mywid);

    Py_INCREF(Py_None);
    return Py_None;
}
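The handler above is a standard PyCFunction. A minimal sketch (not part of the original listing) of how such a handler is typically exposed to Python through a method table; the table name and docstring here are hypothetical:

/* editor's sketch: hypothetical method table for the handler above */
#include <Python.h>

static PyMethodDef sketch_methods[] = {
    {"gevent_signal_handler", py_uwsgi_gevent_signal_handler, METH_VARARGS,
     "receive a uwsgi signal on the given socket (sketch)"},
    {NULL, NULL, 0, NULL}   /* sentinel */
};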
Example #2
int spool_request(struct uwsgi_spooler *uspool, char *filename, int rn, int core_id, char *buffer, int size, char *priority, time_t at, char *body, size_t body_len) {

    struct timeval tv;
    int fd;
    struct uwsgi_header uh;

    if (!uspool) {
        uspool = uwsgi.spoolers;
    }

    // this lock is for threads; the pid value in the filename avoids multiprocess races
    uwsgi_lock(uspool->lock);

    gettimeofday(&tv, NULL);

    if (priority) {
        if (snprintf(filename, 1024, "%s/%s", uspool->dir, priority) <= 0) {
            uwsgi_unlock(uspool->lock);
            return 0;
        }
        // no need to check for errors...
        (void) mkdir(filename, 0777);

        if (snprintf(filename, 1024, "%s/%s/uwsgi_spoolfile_on_%s_%d_%d_%d_%llu_%llu", uspool->dir, priority, uwsgi.hostname, (int) getpid(), rn, core_id, (unsigned long long) tv.tv_sec, (unsigned long long) tv.tv_usec) <= 0) {
            uwsgi_unlock(uspool->lock);
            return 0;
        }
    }
    else {
        if (snprintf(filename, 1024, "%s/uwsgi_spoolfile_on_%s_%d_%d_%d_%llu_%llu", uspool->dir, uwsgi.hostname, (int) getpid(), rn, core_id, (unsigned long long) tv.tv_sec, (unsigned long long) tv.tv_usec) <= 0) {
            uwsgi_unlock(uspool->lock);
            return 0;
        }
    }

    fd = open(filename, O_CREAT | O_EXCL | O_WRONLY, S_IRUSR | S_IWUSR);
    if (fd < 0) {
        uwsgi_error_open(filename);
        uwsgi_unlock(uspool->lock);
        return 0;
    }

    // now lock the file: it will not be runnable until the lock is removed.
    // a race could occur if the spooler grabs the file before fcntl is called;
    // in that case the spooler will detect a zeroed file and retry later
    if (uwsgi_fcntl_lock(fd)) {
        close(fd);
        uwsgi_unlock(uspool->lock);
        return 0;
    }

    uh.modifier1 = 17;
    uh.modifier2 = 0;
    uh.pktsize = (uint16_t) size;
#ifdef __BIG_ENDIAN__
    uh.pktsize = uwsgi_swap16(uh.pktsize);
#endif

    if (write(fd, &uh, 4) != 4) {
        goto clear;
    }

    if (write(fd, buffer, size) != size) {
        goto clear;
    }

    if (body && body_len > 0) {
        if ((size_t)write(fd, body, body_len) != body_len) {
            goto clear;
        }
    }

    if (at > 0) {
        struct timeval tv[2];
        tv[0].tv_sec = at;
        tv[0].tv_usec = 0;
        tv[1].tv_sec = at;
        tv[1].tv_usec = 0;
#ifdef __sun__
        if (futimesat(fd, NULL, tv)) {
#else
        if (futimes(fd, tv)) {
#endif
            uwsgi_error("futimes()");
        }
    }

    // here the file will be unlocked too
    close(fd);

    if (!uwsgi.spooler_quiet)
        uwsgi_log("[spooler] written %d bytes to file %s\n", size + body_len + 4, filename);

    // and here waiting threads can continue
    uwsgi_unlock(uspool->lock);

    /*	wake up the spoolers attached to the specified dir ... (HACKY)
    	no need to fear races, as USR1 is harmless to all of the uWSGI processes...
    	it could be a problem if a new process takes the old pid, but modern systems should avoid that
    */

    struct uwsgi_spooler *spoolers = uwsgi.spoolers;
    while(spoolers) {
        if (!strcmp(spoolers->dir, uspool->dir)) {
            if (spoolers->pid > 0 && spoolers->running == 0) {
                (void) kill(spoolers->pid, SIGUSR1);
            }
        }
        spoolers = spoolers->next;
    }

    return 1;


clear:
    uwsgi_unlock(uspool->lock);
    uwsgi_error("write()");
    if (unlink(filename)) {
        uwsgi_error("unlink()");
    }
    // unlock the file too
    close(fd);
    return 0;
}
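A minimal caller sketch, assuming the args buffer already holds uwsgi-packet-encoded key/value pairs; enqueue_task() and its argument choices are hypothetical, but the 1024-byte filename requirement follows from the snprintf() limits inside spool_request():

/* hypothetical helper, not in the uWSGI sources */
static int enqueue_task(char *args, int args_len) {
    char filename[1024];   /* spool_request() writes the generated path here */
    /* NULL selects the default uwsgi.spoolers; rn and core_id only feed the
       unique file name; no priority subdir, no delayed "at", no extra body */
    return spool_request(NULL, filename, 1, 0, args, args_len, NULL, 0, NULL, 0);
}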



void spooler(struct uwsgi_spooler *uspool) {

    // prevent the process from blindly reading stdin and making a mess
    int nullfd;

    // asked by Marco Beri
#ifdef __HAIKU__
#ifdef UWSGI_DEBUG
    uwsgi_log("lowering spooler priority to %d\n", B_LOW_PRIORITY);
#endif
    set_thread_priority(find_thread(NULL), B_LOW_PRIORITY);
#else
#ifdef UWSGI_DEBUG
    uwsgi_log("lowering spooler priority to %d\n", PRIO_MAX);
#endif
    setpriority(PRIO_PROCESS, getpid(), PRIO_MAX);
#endif

    nullfd = open("/dev/null", O_RDONLY);
    if (nullfd < 0) {
        uwsgi_error_open("/dev/null");
        exit(1);
    }

    if (nullfd != 0) {
        dup2(nullfd, 0);
        close(nullfd);
    }

    int spooler_event_queue = event_queue_init();
    int interesting_fd = -1;

    if (uwsgi.master_process) {
        event_queue_add_fd_read(spooler_event_queue, uwsgi.shared->spooler_signal_pipe[1]);
    }

    // reset the tasks counter
    uspool->tasks = 0;

    for (;;) {


        if (chdir(uspool->dir)) {
            uwsgi_error("chdir()");
            exit(1);
        }

        if (uwsgi.spooler_ordered) {
#ifdef __linux__
            spooler_scandir(uspool, NULL);
#else
            spooler_readdir(uspool, NULL);
#endif
        }
        else {
            spooler_readdir(uspool, NULL);
        }

        int timeout = uwsgi.shared->spooler_frequency;
        if (wakeup > 0) {
            timeout = 0;
        }

        if (event_queue_wait(spooler_event_queue, timeout, &interesting_fd) > 0) {
            if (uwsgi.master_process) {
                if (interesting_fd == uwsgi.shared->spooler_signal_pipe[1]) {
                    uwsgi_receive_signal(interesting_fd, "spooler", (int) getpid());
                }
            }
        }

        // avoid races
        uint64_t tmp_wakeup = wakeup;
        if (tmp_wakeup > 0) {
            tmp_wakeup--;
        }
        wakeup = tmp_wakeup;

        // need to recycle ?
        if (uwsgi.spooler_max_tasks > 0 && uspool->tasks >= (uint64_t)uwsgi.spooler_max_tasks) {
            uwsgi_log("[spooler %s pid: %d] maximum number of tasks reached (%d) recycling ...\n", uspool->dir, (int) uwsgi.mypid, uwsgi.spooler_max_tasks);
            end_me(0);
        }

    }
}
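The loop above reads a file-scope "wakeup" counter that is not defined in this excerpt. A sketch of the assumed mechanism (the handler and its installation are guesses, but they match the SIGUSR1 sent by spool_request() above):

#include <signal.h>
#include <stdint.h>

static uint64_t wakeup = 0;   /* polled and decremented by the spooler loop */

static void spooler_wakeup(int signum) {
    (void) signum;
    wakeup++;   /* forces the next event_queue_wait() to use a zero timeout */
}

/* presumably installed during spooler setup: signal(SIGUSR1, spooler_wakeup); */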
Example #3
void async_loop() {

	if (uwsgi.async < 2) {
		uwsgi_log("the async loop engine requires async mode (--async <n>)\n");
		exit(1);
	}

	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	uint64_t now;

	struct uwsgi_async_request *current_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;

	uwsgi.wait_write_hook = async_wait_fd_write;
	uwsgi.wait_read_hook = async_wait_fd_read;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	if (!uwsgi.schedule_to_main) {
		uwsgi_log("*** DANGER *** async mode without coroutine/greenthread engine loaded !!!\n");
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		now = (uint64_t) uwsgi_now();
		if (uwsgi.async_runqueue) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL);
			if (min_timeout) {
				timeout = min_timeout->value - now;
				if (timeout <= 0) {
					async_expire_timeouts(now);
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		now = (uint64_t) uwsgi_now();
		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts(now);
		}


		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			// signals are executed in the main stack... in the future we could have dedicated stacks for them
			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}

			is_a_new_connection = 0;

			// new request coming in ?
			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {

				if (interesting_fd == uwsgi_sock->fd) {

					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t)now);
						break;
					}

					// on error re-insert the request in the queue
					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					// by default the core is in UWSGI_AGAIN mode
					uwsgi.wsgi_req->async_status = UWSGI_AGAIN;
					// some protocols (like zeromq) do not need additional parsing; just push the request into the runqueue
					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}

				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					async_reset_request(uwsgi.wsgi_req);
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table 
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app-registered event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				async_reset_request(uwsgi.wsgi_req);
				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
			}
		}


		// event queue managed, give cpu to runqueue
		current_request = uwsgi.async_runqueue;

		while(current_request) {

			// current_request could be nulled on error/end of request
			struct uwsgi_async_request *next_request = current_request->next;

			uwsgi.wsgi_req = current_request->wsgi_req;
			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK ||
				uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove from the runqueue
				runqueue_remove(current_request);
			}
			current_request = next_request;
		}

	}

}
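runqueue_push() and runqueue_remove() are not shown in this excerpt. A self-contained sketch of the pattern the loop depends on (a linked list with O(1) removal); this is illustrative, not the uWSGI implementation:

#include <stddef.h>

struct rq_node {
    void *payload;               /* would be a struct wsgi_request * here */
    struct rq_node *prev, *next;
};

static struct rq_node *rq_head = NULL;

static void rq_push(struct rq_node *n) {
    /* push at the head; the scheduler walks the list via ->next */
    n->prev = NULL;
    n->next = rq_head;
    if (rq_head) rq_head->prev = n;
    rq_head = n;
}

static void rq_remove(struct rq_node *n) {
    /* unlink in O(1), which is why the loop saves next_request first */
    if (n->prev) n->prev->next = n->next;
    else rq_head = n->next;
    if (n->next) n->next->prev = n->prev;
}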
Example #4
void *async_loop(void *arg1) {

	struct uwsgi_async_fd *tmp_uaf;
	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	time_t now, last_now = 0;

	static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;
	uwsgi.async_runqueue_cnt = 0;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		if (uwsgi.async_runqueue_cnt) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts);
			if (min_timeout) {
				timeout = min_timeout->key - time(NULL);
				if (timeout <= 0) {
					async_expire_timeouts();
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		// timeout ???
		if (uwsgi.async_nevents == 0) {
                	async_expire_timeouts();
		}


		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}

			is_a_new_connection = 0;

			// new request coming in ?

			uwsgi_sock = uwsgi.sockets;
			while(uwsgi_sock) {

				if (interesting_fd == uwsgi_sock->fd) {

					is_a_new_connection = 1;	

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						now = time(NULL);
						if (now > last_now) {
							uwsgi_log("async queue is full !!!\n");
							last_now = now;
						}
						break;
					}

					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
#ifdef UWSGI_EVENT_USE_PORT
						event_queue_add_fd_read(uwsgi.async_queue, interesting_fd);
#endif
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}
#ifdef UWSGI_EVENT_USE_PORT
					event_queue_add_fd_read(uwsgi.async_queue, interesting_fd);
#endif

					// on linux we do not need to reset the socket to blocking state
#ifndef __linux__
					/* re-set the socket to blocking state */
					int arg = uwsgi_sock->arg;
					arg &= (~O_NONBLOCK);
					if (fcntl(uwsgi.wsgi_req->poll.fd, F_SETFL, arg) < 0) {
						uwsgi_error("fcntl()");
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}
#endif

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}

				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table	
#ifndef UWSGI_EVENT_USE_PORT
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
#endif
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						if (proto_parser_status == -1)
							uwsgi_log("error parsing request\n");
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
#ifndef UWSGI_EVENT_USE_PORT
					event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event);
#endif
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
			}
		}

		// event queue managed, give cpu to runqueue
		if (!current_request)
			current_request = uwsgi.async_runqueue;

		if (uwsgi.async_runqueue_cnt) {

			uwsgi.wsgi_req = current_request->wsgi_req;

			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			next_async_request = current_request->next;
			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK) {
				// remove all the monitored fds and timeout
				while(uwsgi.wsgi_req->waiting_fds) {
#ifndef UWSGI_EVENT_USE_PORT
					event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event);
#endif
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				// remove from the list
				runqueue_remove(current_request);

				uwsgi_close_request(uwsgi.wsgi_req);

				// push wsgi_request in the unused stack
				uwsgi.async_queue_unused_ptr++;
				uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;

			}
			else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove this request from suspended list	
				runqueue_remove(current_request);
			}

			current_request = next_async_request;

		}


	}

	return NULL;

}
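The fd-and-timeout cleanup block appears twice in the function above; a sketch (the helper name is hypothetical, the body is copied from the code above) of how it could be factored, mirroring the async_reset_request() call used in Examples #3 and #6:

static void async_reset_request_sketch(struct wsgi_request *wsgi_req) {
	struct uwsgi_async_fd *tmp_uaf;
	// drop every monitored fd and its waiting-fd table entry
	while (wsgi_req->waiting_fds) {
#ifndef UWSGI_EVENT_USE_PORT
		event_queue_del_fd(uwsgi.async_queue, wsgi_req->waiting_fds->fd, wsgi_req->waiting_fds->event);
#endif
		tmp_uaf = wsgi_req->waiting_fds;
		uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
		wsgi_req->waiting_fds = tmp_uaf->next;
		free(tmp_uaf);
	}
	// disarm the request timeout, if any
	if (wsgi_req->async_timeout) {
		rb_erase(&wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts);
		free(wsgi_req->async_timeout);
		wsgi_req->async_timeout = NULL;
	}
}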
Example #5
/*
CHANGED in 2.0.7: wsgi_req is unused!
*/
char *uwsgi_spool_request(struct wsgi_request *wsgi_req, char *buf, size_t len, char *body, size_t body_len) {

	struct timeval tv;
	static uint64_t internal_counter = 0;
	int fd = -1;
	struct spooler_req sr;

	if (len > 0xffff) {
		uwsgi_log("[uwsgi-spooler] args buffer is limited to 64k, use the 'body' for bigger values\n");
		return NULL;
	}

	// parse the request buffer
	memset(&sr, 0, sizeof(struct spooler_req));
	uwsgi_hooked_parse(buf, len, spooler_req_parser_hook, &sr);
	
	struct uwsgi_spooler *uspool = uwsgi.spoolers;
	if (!uspool) {
		uwsgi_log("[uwsgi-spooler] no spooler available\n");
		return NULL;
	}

	// if it is a number, get the spooler by id instead of by name
	if (sr.spooler && sr.spooler_len) {
		uspool = uwsgi_get_spooler_by_name(sr.spooler, sr.spooler_len);
		if (!uspool) {
			uwsgi_log("[uwsgi-spooler] unable to find spooler \"%.*s\"\n", sr.spooler_len, sr.spooler);
			return NULL;
		}
	}

	// this lock is for threads; the pid value in the filename avoids multiprocess races
	uwsgi_lock(uspool->lock);

	// we increase it even if the request fails
	internal_counter++;

	gettimeofday(&tv, NULL);

	char *filename = NULL;
	size_t filename_len = 0;

	if (sr.priority && sr.priority_len) {
		filename_len = strlen(uspool->dir) + sr.priority_len + strlen(uwsgi.hostname) + 256;	
		filename = uwsgi_malloc(filename_len);
		int ret = snprintf(filename, filename_len, "%s/%.*s", uspool->dir, (int) sr.priority_len, sr.priority);
		if (ret <= 0 || ret >= (int) filename_len) {
			uwsgi_log("[uwsgi-spooler] error generating spooler filename\n");
			free(filename);
			uwsgi_unlock(uspool->lock);
			return NULL;
		}
		// no need to check for errors...
		(void) mkdir(filename, 0777);

		ret = snprintf(filename, filename_len, "%s/%.*s/uwsgi_spoolfile_on_%s_%d_%llu_%d_%llu_%llu", uspool->dir, (int)sr.priority_len, sr.priority, uwsgi.hostname, (int) getpid(), (unsigned long long) internal_counter, rand(),
				(unsigned long long) tv.tv_sec, (unsigned long long) tv.tv_usec);
		if (ret <= 0 || ret >= (int) filename_len) {
			uwsgi_log("[uwsgi-spooler] error generating spooler filename\n");
			free(filename);
			uwsgi_unlock(uspool->lock);
			return NULL;
		}
	}
	else {
		filename_len = strlen(uspool->dir) + strlen(uwsgi.hostname) + 256;
		filename = uwsgi_malloc(filename_len);
		int ret = snprintf(filename, filename_len, "%s/uwsgi_spoolfile_on_%s_%d_%llu_%d_%llu_%llu", uspool->dir, uwsgi.hostname, (int) getpid(), (unsigned long long) internal_counter,
				rand(), (unsigned long long) tv.tv_sec, (unsigned long long) tv.tv_usec);
		if (ret <= 0 || ret >= (int) filename_len) {
			uwsgi_log("[uwsgi-spooler] error generating spooler filename\n");
			free(filename);
			uwsgi_unlock(uspool->lock);
			return NULL;
		}
	}

	fd = open(filename, O_CREAT | O_EXCL | O_WRONLY, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		uwsgi_error_open(filename);
		free(filename);
		uwsgi_unlock(uspool->lock);
		return NULL;
	}

	// now lock the file: it will not be runnable until the lock is removed.
	// a race could occur if the spooler grabs the file before fcntl is called;
	// in that case the spooler will detect a zeroed file and retry later
	if (uwsgi_fcntl_lock(fd)) {
		close(fd);
		free(filename);
		uwsgi_unlock(uspool->lock);
		return NULL;
	}

	struct uwsgi_header uh;
	uh.modifier1 = 17;
	uh.modifier2 = 0;
	uh.pktsize = (uint16_t) len;
#ifdef __BIG_ENDIAN__
	uh.pktsize = uwsgi_swap16(uh.pktsize);
#endif

	if (write(fd, &uh, 4) != 4) {
		uwsgi_log("[spooler] unable to write header for %s\n", filename);
		goto clear;
	}

	if (write(fd, buf, len) != (ssize_t) len) {
		uwsgi_log("[spooler] unable to write args for %s\n", filename);
		goto clear;
	}

	if (body && body_len > 0) {
		if ((size_t) write(fd, body, body_len) != body_len) {
			uwsgi_log("[spooler] unable to write body for %s\n", filename);
			goto clear;
		}
	}

	if (sr.at > 0) {
#ifdef __UCLIBC__
		struct timespec ts[2]; 
		ts[0].tv_sec = sr.at; 
		ts[0].tv_nsec = 0;
		ts[1].tv_sec = sr.at;
		ts[1].tv_nsec = 0; 
		if (futimens(fd, ts)) {
			uwsgi_error("uwsgi_spooler_request()/futimens()");	
		}
#else
		struct timeval tv[2];
		tv[0].tv_sec = sr.at;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = sr.at;
		tv[1].tv_usec = 0;
#ifdef __sun__
		if (futimesat(fd, NULL, tv)) {
#else
		if (futimes(fd, tv)) {
#endif
			uwsgi_error("uwsgi_spooler_request()/futimes()");
		}
#endif
	}

	// here the file will be unlocked too
	close(fd);

	if (!uwsgi.spooler_quiet)
		uwsgi_log("[spooler] written %lu bytes to file %s\n", (unsigned long) len + body_len + 4, filename);

	// and here waiting threads can continue
	uwsgi_unlock(uspool->lock);

/*	wake up the spoolers attached to the specified dir ... (HACKY)
	no need to fear races, as USR1 is harmless to all of the uWSGI processes...
	it could be a problem if a new process takes the old pid, but modern systems should avoid that
*/

	struct uwsgi_spooler *spoolers = uwsgi.spoolers;
	while (spoolers) {
		if (!strcmp(spoolers->dir, uspool->dir)) {
			if (spoolers->pid > 0 && spoolers->running == 0) {
				(void) kill(spoolers->pid, SIGUSR1);
			}
		}
		spoolers = spoolers->next;
	}

	return filename;


clear:
	uwsgi_unlock(uspool->lock);
	uwsgi_error("uwsgi_spool_request()/write()");
	if (unlink(filename)) {
		uwsgi_error("uwsgi_spool_request()/unlink()");
	}
	free(filename);
	// unlock the file too
	close(fd);
	return NULL;
}
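struct spooler_req and spooler_req_parser_hook are outside this excerpt. A sketch reconstructed from the sr.* fields the function above reads; the struct layout and the hook signature (key/value pointers with 16-bit lengths, as uwsgi_hooked_parse() is assumed to deliver them) are inferences, not the original definitions:

#include <string.h>
#include <stdint.h>
#include <time.h>

/* sketch: layout inferred from the sr.* accesses above */
struct spooler_req_sketch {
	char *spooler;   uint16_t spooler_len;
	char *priority;  uint16_t priority_len;
	time_t at;
};

/* assumed uwsgi_hooked_parse() callback shape */
static void spooler_req_hook_sketch(char *key, uint16_t kl, char *val, uint16_t vl, void *data) {
	struct spooler_req_sketch *sr = (struct spooler_req_sketch *) data;
	if (kl == 7 && !memcmp(key, "spooler", 7)) {
		sr->spooler = val; sr->spooler_len = vl;
	}
	else if (kl == 8 && !memcmp(key, "priority", 8)) {
		sr->priority = val; sr->priority_len = vl;
	}
	else if (kl == 2 && !memcmp(key, "at", 2)) {
		/* "at" assumed to be a unix timestamp as a decimal string */
		time_t at = 0; uint16_t i;
		for (i = 0; i < vl; i++) {
			if (val[i] < '0' || val[i] > '9') return;
			at = at * 10 + (val[i] - '0');
		}
		sr->at = at;
	}
}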



void spooler(struct uwsgi_spooler *uspool) {

	// prevent the process from blindly reading stdin and making a mess
	int nullfd;

	// asked by Marco Beri
#ifdef __HAIKU__
#ifdef UWSGI_DEBUG
	uwsgi_log("lowering spooler priority to %d\n", B_LOW_PRIORITY);
#endif
	set_thread_priority(find_thread(NULL), B_LOW_PRIORITY);
#else
#ifdef UWSGI_DEBUG
	uwsgi_log("lowering spooler priority to %d\n", PRIO_MAX);
#endif
	setpriority(PRIO_PROCESS, getpid(), PRIO_MAX);
#endif

	nullfd = open("/dev/null", O_RDONLY);
	if (nullfd < 0) {
		uwsgi_error_open("/dev/null");
		exit(1);
	}

	if (nullfd != 0) {
		dup2(nullfd, 0);
		close(nullfd);
	}

	int spooler_event_queue = event_queue_init();
	int interesting_fd = -1;

	if (uwsgi.master_process) {
		event_queue_add_fd_read(spooler_event_queue, uwsgi.shared->spooler_signal_pipe[1]);
	}

	// reset the tasks counter
	uspool->tasks = 0;

	for (;;) {


		if (chdir(uspool->dir)) {
			uwsgi_error("chdir()");
			exit(1);
		}

		if (uwsgi.spooler_ordered) {
#ifdef __linux__
			spooler_scandir(uspool, NULL);
#else
			spooler_readdir(uspool, NULL);
#endif
		}
		else {
			spooler_readdir(uspool, NULL);
		}

		int timeout = uwsgi.shared->spooler_frequency ? uwsgi.shared->spooler_frequency : uwsgi.spooler_frequency;
		if (wakeup > 0) {
			timeout = 0;
		}

		if (event_queue_wait(spooler_event_queue, timeout, &interesting_fd) > 0) {
			if (uwsgi.master_process) {
				if (interesting_fd == uwsgi.shared->spooler_signal_pipe[1]) {
					uwsgi_receive_signal(interesting_fd, "spooler", (int) getpid());
				}
			}
		}

		// avoid races
		uint64_t tmp_wakeup = wakeup;
		if (tmp_wakeup > 0) {
			tmp_wakeup--;
		}
		wakeup = tmp_wakeup;

	}
}
Example #6
void async_loop() {

	if (uwsgi.async < 2) {
		uwsgi_log("the async loop engine requires async mode (--async <n>)\n");
		exit(1);
	}

	struct uwsgi_async_fd *tmp_uaf;
	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	uint64_t now;

	static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;
	uwsgi.async_runqueue_cnt = 0;

	uwsgi.wait_write_hook = async_wait_fd_write;
	uwsgi.wait_read_hook = async_wait_fd_read;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	if (!uwsgi.schedule_to_main) {
		uwsgi_log("*** WARNING *** async mode without coroutine/greenthread engine loaded !!!\n");
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		now = (uint64_t) uwsgi_now();
		if (uwsgi.async_runqueue_cnt) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL);
			if (min_timeout) {
				timeout = min_timeout->value - now;
				if (timeout <= 0) {
					async_expire_timeouts(now);
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		now = (uint64_t) uwsgi_now();
		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts(now);
		}


		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}


			is_a_new_connection = 0;

			// new request coming in ?

			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {

				if (interesting_fd == uwsgi_sock->fd) {

					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t)now);
						break;
					}

					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}

				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table 
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						if (proto_parser_status == -1)
							uwsgi_log("error parsing request\n");
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event);
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
				// avoid managing other enqueued events...
				break;
			}
		}

		// event queue managed, give cpu to runqueue
		if (!current_request)
			current_request = uwsgi.async_runqueue;

		if (uwsgi.async_runqueue_cnt) {

			uwsgi.wsgi_req = current_request->wsgi_req;

			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			next_async_request = current_request->next;
			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK) {
				// remove all the monitored fds and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event);
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				// remove from the list
				runqueue_remove(current_request);

				uwsgi_close_request(uwsgi.wsgi_req);

				// push wsgi_request in the unused stack
				uwsgi.async_queue_unused_ptr++;
				uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;

			}
			else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove this request from suspended list      
				runqueue_remove(current_request);
			}

			current_request = next_async_request;

		}


	}

}
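A distilled sketch of the poll-timeout policy shared by the loops above: run immediately when the runqueue is non-empty, otherwise sleep until the earliest red-black-tree timer, or indefinitely when none is armed. Names and the flattened parameters are illustrative only:

#include <stdint.h>

static int compute_poll_timeout(int runqueue_cnt, int has_timer, uint64_t earliest, uint64_t now) {
	if (runqueue_cnt) return 0;          /* runnable requests: do not block */
	if (!has_timer) return -1;           /* nothing pending: block forever */
	if (earliest <= now) return 0;       /* already expired: fire timeouts now */
	return (int) (earliest - now);       /* sleep until the first deadline */
}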