// Stackless tasklet worker: blocks on the shared workers channel and
// serves exactly one request per wakeup, forever. The tasklet never
// returns; uwsgi terminates it externally.
PyObject *py_uwsgi_stackless_worker(PyObject * self, PyObject * args) {

	PyThreadState *tstate = PyThreadState_GET();
	// map the currently running tasklet back to its request slot
	struct wsgi_request *wsgi_req = find_request_by_tasklet(tstate->st.current);
	int async_id = wsgi_req->async_id;
	PyObject *wakeup;

	for (;;) {
		// sleep until the dispatcher pushes a token into the channel
		wakeup = PyChannel_Receive(uwsgi.workers_channel);
		wsgi_req_setup(wsgi_req, async_id);
		if (wsgi_req_accept(uwsgi.serverfd, wsgi_req))
			continue;
		if (wsgi_req_recv(wsgi_req))
			continue;
		uwsgi_close_request(&uwsgi, wsgi_req);
	}
}
// gevent hub callback: fired when a uWSGI socket is readable. Grabs a
// free request core, accepts the connection and spawns a greenlet to
// serve it. Always returns None; errors just recycle the core.
PyObject *py_uwsgi_gevent_main(PyObject * self, PyObject * args) {

	// hack to retrieve the socket address: the struct pointer is
	// smuggled through Python as a plain integer in args[0]
	PyObject *py_uwsgi_sock = PyTuple_GetItem(args, 0);
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) PyLong_AsLong(py_uwsgi_sock);

	struct wsgi_request *wsgi_req = NULL;

	// edge-triggered sockets loop back here to drain every pending
	// connection before returning to the hub
edge:
	wsgi_req = find_first_available_wsgi_req();
	if (wsgi_req == NULL) {
		uwsgi_log("async queue is full !!!\n");
		goto clear;
	}

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.shared->options[UWSGI_OPTION_HARAKIRI] > 0) {
		set_harakiri(uwsgi.shared->options[UWSGI_OPTION_HARAKIRI]);
	}

	// accept the connection
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		// NOTE(review): wsgi_req->async_id is read after free_req_queue
		// has pushed the request back onto the unused stack — works only
		// because the pool slot memory stays valid; confirm no reuse race.
		// Also, in_request is never reset to 0 on this failure path.
		free_req_queue;
		if (uwsgi_sock->retry && uwsgi_sock->retry[wsgi_req->async_id]) {
			goto edge;
		}
		goto clear;
	}

	// on linux we need to set the socket in non-blocking as it is not inherited
#ifdef __linux__
	uwsgi_socket_nb(wsgi_req->poll.fd);
#endif

	// hack to easily pass wsgi_req pointer to the greenlet (as an int)
	PyTuple_SetItem(ugevent.greenlet_args, 1, PyLong_FromLong((long)wsgi_req));

	// spawn the request greenlet; we only need the side effect, so the
	// returned greenlet object is dropped immediately
	PyObject *new_gl = python_call(ugevent.spawn, ugevent.greenlet_args, 0, NULL);
	Py_DECREF(new_gl);

	if (uwsgi_sock->edge_trigger) {
#ifdef UWSGI_DEBUG
		uwsgi_log("i am an edge triggered socket !!!\n");
#endif
		goto edge;
	}

clear:
	Py_INCREF(Py_None);
	return Py_None;
}
// asyncio glue: called by the event loop when a uWSGI socket becomes
// readable. Grabs a free request core, accepts the connection, then
// registers the protocol-parser callback (add_reader) plus a socket
// timeout (call_later). Always returns None; failures recycle the core.
static PyObject *py_uwsgi_asyncio_accept(PyObject *self, PyObject *args) {
	long uwsgi_sock_ptr = 0;
	if (!PyArg_ParseTuple(args, "l:uwsgi_asyncio_accept", &uwsgi_sock_ptr)) {
		return NULL;
	}

	struct wsgi_request *wsgi_req = find_first_available_wsgi_req();
	if (wsgi_req == NULL) {
		// no free core: report (rate-limited) and give up
		uwsgi_async_queue_is_full(uwsgi_now());
		goto end;
	}

	uwsgi.wsgi_req = wsgi_req;
	// the socket struct pointer travels through Python as a plain integer
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) uwsgi_sock_ptr;

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		// in case of errors (or thundering herd, just reset it)
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		goto end;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	uwsgi.async_proto_fd_table[wsgi_req->fd] = wsgi_req;

	// add callback for protocol
	if (PyObject_CallMethod(uasyncio.loop, "add_reader", "iOl", wsgi_req->fd, uasyncio.request, (long) wsgi_req) == NULL) {
		// FIX: undo the bookkeeping and bail out here. The original
		// fell through after free_req_queue and kept using wsgi_req
		// (registering call_later on a recycled request).
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		PyErr_Print();
		goto end;
	}

	// add timeout
	PyObject *ob_timeout = PyObject_CallMethod(uasyncio.loop, "call_later", "iOli", uwsgi.socket_timeout, uasyncio.request, (long)wsgi_req, 1);
	if (!ob_timeout) {
		if (PyObject_CallMethod(uasyncio.loop, "remove_reader", "i", wsgi_req->fd) == NULL)
			PyErr_Print();
		// FIX: also drop the stale proto-table entry and release the
		// core before recycling the request
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
	}
	else {
		// trick for reference counting: park the TimerHandle in the
		// (otherwise unused in this engine) rb-timer slot
		wsgi_req->async_timeout = (struct uwsgi_rb_timer *) ob_timeout;
	}

end:
	Py_INCREF(Py_None);
	return Py_None;
}
// gevent hub callback (watcher variant): fired when a uWSGI socket is
// readable. Grabs a free request core, accepts the connection and
// spawns a greenlet to serve it; stops the libev watcher when the
// async queue is exhausted. Always returns None.
PyObject *py_uwsgi_gevent_main(PyObject * self, PyObject * args) {

	// hack to retrieve the socket address: the struct pointer is
	// smuggled through Python as a plain integer in args[0]
	PyObject *py_uwsgi_sock = PyTuple_GetItem(args, 0);
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) PyLong_AsLong(py_uwsgi_sock);
	long watcher_index = PyInt_AsLong(PyTuple_GetItem(args, 1));

	struct wsgi_request *wsgi_req = NULL;

	// edge-triggered sockets loop back here to drain every pending
	// connection before returning to the hub
edge:
	wsgi_req = find_first_available_wsgi_req();
	if (wsgi_req == NULL) {
		uwsgi_async_queue_is_full(uwsgi_now());
		// no cores left: pause this socket's watcher
		PyObject_CallMethod(ugevent.watchers[watcher_index], "stop", NULL);
		goto clear;
	}

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		// FIX: in case of errors (or thundering herd) release the core
		// BEFORE the retry check; the original only reset in_request on
		// the non-retry path, so `goto edge` left the core marked busy.
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		if (uwsgi_sock->retry && uwsgi_sock->retry[wsgi_req->async_id]) {
			goto edge;
		}
		goto clear;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	// hack to easily pass wsgi_req pointer to the greenlet (as an int)
	PyTuple_SetItem(ugevent.greenlet_args, 1, PyLong_FromLong((long)wsgi_req));

	// spawn the request greenlet; only the side effect matters, so the
	// returned greenlet object is dropped immediately
	PyObject *new_gl = python_call(ugevent.spawn, ugevent.greenlet_args, 0, NULL);
	Py_DECREF(new_gl);

	if (uwsgi_sock->edge_trigger) {
#ifdef UWSGI_DEBUG
		uwsgi_log("i am an edge triggered socket !!!\n");
#endif
		goto edge;
	}

clear:
	Py_INCREF(Py_None);
	return Py_None;
}
void *simple_loop_run(void *arg1) { long core_id = (long) arg1; struct wsgi_request *wsgi_req = &uwsgi.workers[uwsgi.mywid].cores[core_id].req; if (uwsgi.threads > 1) { uwsgi_setup_thread_req(core_id, wsgi_req); } // initialize the main event queue to monitor sockets int main_queue = event_queue_init(); uwsgi_add_sockets_to_queue(main_queue, core_id); if (uwsgi.signal_socket > -1) { event_queue_add_fd_read(main_queue, uwsgi.signal_socket); event_queue_add_fd_read(main_queue, uwsgi.my_signal_socket); } // ok we are ready, let's start managing requests and signals while (uwsgi.workers[uwsgi.mywid].manage_next_request) { wsgi_req_setup(wsgi_req, core_id, NULL); if (wsgi_req_accept(main_queue, wsgi_req)) { continue; } if (wsgi_req_recv(main_queue, wsgi_req)) { uwsgi_destroy_request(wsgi_req); continue; } uwsgi_close_request(wsgi_req); } // end of the loop if (uwsgi.workers[uwsgi.mywid].destroy && uwsgi.workers[0].pid > 0) { #ifdef __APPLE__ kill(uwsgi.workers[0].pid, SIGTERM); #else if (uwsgi.propagate_touch) { kill(uwsgi.workers[0].pid, SIGHUP); } else { gracefully_kill(0); } #endif } return NULL; }
// gevent entry point (legacy variant, single-socket era): grabs a free
// request core, accepts a connection on the first configured socket and
// spawns a greenlet to serve it. Always returns None.
// NOTE(review): this version predates the uwsgi_sock-parameter variants
// above (gettimeofday timing, one-argument set_harakiri, uwsgi.core[]
// layout) — presumably kept for an older core ABI; confirm before reuse.
PyObject *py_uwsgi_gevent_main(PyObject * self, PyObject * args) {

	struct wsgi_request *wsgi_req = find_first_available_wsgi_req();

	if (wsgi_req == NULL) {
		uwsgi_log("async queue is full !!!\n");
		goto clear;
	}

	uwsgi.wsgi_req = wsgi_req;

	// fill wsgi_request structure (always the first socket in the list)
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi.sockets );

	// mark core as used
	uwsgi.core[wsgi_req->async_id]->in_request = 1;

	gettimeofday(&wsgi_req->start_of_request, NULL);

	// enter harakiri mode
	if (uwsgi.shared->options[UWSGI_OPTION_HARAKIRI] > 0) {
		set_harakiri(uwsgi.shared->options[UWSGI_OPTION_HARAKIRI]);
	}

	// accept the connection
	if (wsgi_req_simple_accept(wsgi_req, uwsgi.sockets->fd)) {
		// NOTE(review): close-then-recycle on a request that never
		// finished accepting; in_request is not reset here — verify
		// against the newer variants which reset it explicitly.
		uwsgi_close_request(wsgi_req);
		free_req_queue;
		goto clear;
	}

	// hack to easily pass wsgi_req pointer to the greenlet (as an int)
	PyTuple_SetItem(ugevent.greenlet_args, 1, PyLong_FromLong((long)wsgi_req));

	// spawn the request greenlet; the returned greenlet object is
	// dropped immediately (only the side effect matters)
	PyObject *new_gl = python_call(ugevent.spawn, ugevent.greenlet_args, 0, NULL);
	Py_DECREF(new_gl);

clear:
	Py_INCREF(Py_None);
	return Py_None;
}
static VALUE uwsgi_rb_thread_core(void *arg) { long core_id = (long) arg; struct wsgi_request *wsgi_req = &uwsgi.workers[uwsgi.mywid].cores[core_id].req; uwsgi_setup_thread_req(core_id, wsgi_req); struct uwsgi_rbthread *urbt = uwsgi_malloc(sizeof(struct uwsgi_rbthread)); // initialize the main event queue to monitor sockets urbt->queue = event_queue_init(); urbt->wsgi_req = wsgi_req; uwsgi_add_sockets_to_queue(urbt->queue, (int)core_id); if (uwsgi.signal_socket > -1) { event_queue_add_fd_read(urbt->queue, uwsgi.signal_socket); event_queue_add_fd_read(urbt->queue, uwsgi.my_signal_socket); } // ok we are ready, let's start managing requests and signals while (uwsgi.workers[uwsgi.mywid].manage_next_request) { wsgi_req_setup(wsgi_req, (int)core_id, NULL); rb_thread_call_without_gvl(uwsgi_rb_thread_accept, urbt, NULL, NULL); // accept failed ? if (urbt->ret) continue; if (wsgi_req_recv(urbt->queue, wsgi_req)) { uwsgi_destroy_request(wsgi_req); continue; } uwsgi_close_request(wsgi_req); } return Qnil; }
// Per-core blocking request loop (legacy threading variant): performs
// one-time pthread/plugin setup for secondary threads, then serves
// requests until the worker is told to stop, finally propagating
// shutdown/reload to the master if this worker was marked destroyed.
void *simple_loop(void *arg1) {

	long core_id = (long) arg1;

	struct wsgi_request *wsgi_req = uwsgi.wsgi_requests[core_id];

#ifdef UWSGI_THREADING
	int i;
	sigset_t smask;

	if (uwsgi.threads > 1) {

		// allow this thread to be cancelled at any point (used by the
		// harakiri machinery to kill stuck threads)
		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &i);
		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &i);
		// make the request structure reachable via thread-local storage
		pthread_setspecific(uwsgi.tur_key, (void *) wsgi_req);

		if (core_id > 0) {
			// block all signals on new threads: only the main
			// thread (core 0) handles process signals
			sigfillset(&smask);
#ifdef UWSGI_DEBUG
			// in debug builds keep SIGSEGV deliverable for backtraces
			sigdelset(&smask, SIGSEGV);
#endif
			pthread_sigmask(SIG_BLOCK, &smask, NULL);

			// run per-thread socket hook
			struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
			while(uwsgi_sock) {
				if (uwsgi_sock->proto_thread_fixup) {
					uwsgi_sock->proto_thread_fixup(uwsgi_sock, core_id);
				}
				uwsgi_sock = uwsgi_sock->next;
			}

			// give every registered plugin a chance to set up
			// per-thread state (256 plugin slots)
			for (i = 0; i < 256; i++) {
				if (uwsgi.p[i]->init_thread) {
					uwsgi.p[i]->init_thread(core_id);
				}
			}
		}
	}
#endif

	// initialize the main event queue to monitor sockets
	int main_queue = event_queue_init();

	uwsgi_add_sockets_to_queue(main_queue, core_id);

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(main_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(main_queue, uwsgi.my_signal_socket);
	}

	// ok we are ready, let's start managing requests and signals
	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		wsgi_req_setup(wsgi_req, core_id, NULL);

		if (wsgi_req_accept(main_queue, wsgi_req)) {
			continue;
		}

		if (wsgi_req_recv(wsgi_req)) {
			uwsgi_destroy_request(wsgi_req);
			continue;
		}

		uwsgi_close_request(wsgi_req);
	}

	// end of the loop: if this worker was marked for destruction,
	// notify the master (SIGTERM on OSX; SIGHUP or graceful kill elsewhere)
	if (uwsgi.workers[uwsgi.mywid].destroy && uwsgi.workers[0].pid > 0) {
#ifdef __APPLE__
		kill(uwsgi.workers[0].pid, SIGTERM);
#else
		if (uwsgi.propagate_touch) {
			kill(uwsgi.workers[0].pid, SIGHUP);
		}
		else {
			gracefully_kill(0);
		}
#endif
	}

	return NULL;
}
// Core async engine (modern variant): a single-threaded event loop that
// multiplexes all request cores. Each iteration: (1) compute the poll
// timeout from the runqueue/timer state, (2) wait for events, (3) turn
// socket events into new requests or resume parked ones, (4) give CPU
// to every request in the runqueue via schedule_to_req.
void async_loop() {

	if (uwsgi.async < 2) {
		uwsgi_log("the async loop engine requires async mode (--async <n>)\n");
		exit(1);
	}

	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;
	uint64_t now;

	struct uwsgi_async_request *current_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;

	// expose the async wait primitives so plugins suspend through us
	uwsgi.wait_write_hook = async_wait_fd_write;
	uwsgi.wait_read_hook = async_wait_fd_read;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	if (!uwsgi.schedule_to_main) {
		uwsgi_log("*** DANGER *** async mode without coroutine/greenthread engine loaded !!!\n");
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		now = (uint64_t) uwsgi_now();
		// runnable requests pending -> poll without blocking;
		// otherwise block until the nearest timer (or forever)
		if (uwsgi.async_runqueue) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL);
			if (min_timeout) {
				timeout = min_timeout->value - now;
				if (timeout <= 0) {
					async_expire_timeouts(now);
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		now = (uint64_t) uwsgi_now();
		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts(now);
		}

		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			// signals are executed in the main stack... in the future we
			// could have dedicated stacks for them
			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}

			is_a_new_connection = 0;

			// new request coming in ? walk the listening sockets
			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {
				if (interesting_fd == uwsgi_sock->fd) {
					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t)now);
						break;
					}

					// on error re-insert the request in the unused queue
					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					// by default the core is in UWSGI_AGAIN mode
					uwsgi.wsgi_req->async_status = UWSGI_AGAIN;
					// some protocol (like zeromq) do not need additional
					// parsing, just push it in the runqueue
					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}
				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event: an already-accepted connection has more
				// header data to parse
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					async_reset_request(uwsgi.wsgi_req);
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// incomplete headers: re-add timer and keep waiting
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app-registered event: a suspended request was waiting
				// on this fd
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				async_reset_request(uwsgi.wsgi_req);
				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
			}
		}

		// event queue managed, give cpu to runqueue
		current_request = uwsgi.async_runqueue;
		while(current_request) {
			// current_request could be nulled on error/end of request,
			// so grab the next pointer before scheduling
			struct uwsgi_async_request *next_request = current_request->next;
			uwsgi.wsgi_req = current_request->wsgi_req;
			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;
			// request ended, or suspended again on fds/timers ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK || uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove from the runqueue
				runqueue_remove(current_request);
			}
			current_request = next_request;
		}

	}
}
void *async_loop(void *arg1) { struct uwsgi_async_fd *tmp_uaf; int interesting_fd, i; struct uwsgi_rb_timer *min_timeout; int timeout; int is_a_new_connection; int proto_parser_status; time_t now, last_now = 0; static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL; void *events = event_queue_alloc(64); struct uwsgi_socket *uwsgi_sock; uwsgi.async_runqueue = NULL; uwsgi.async_runqueue_cnt = 0; if (uwsgi.signal_socket > -1) { event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket); event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket); } // set a default request manager if (!uwsgi.schedule_to_req) uwsgi.schedule_to_req = async_schedule_to_req; while (uwsgi.workers[uwsgi.mywid].manage_next_request) { if (uwsgi.async_runqueue_cnt) { timeout = 0; } else { min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts); if (uwsgi.async_runqueue_cnt) { timeout = 0; } if (min_timeout) { timeout = min_timeout->key - time(NULL); if (timeout <= 0) { async_expire_timeouts(); timeout = 0; } } else { timeout = -1; } } uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64); // timeout ??? if (uwsgi.async_nevents == 0) { async_expire_timeouts(); } for(i=0;i<uwsgi.async_nevents;i++) { // manage events interesting_fd = event_queue_interesting_fd(events, i); if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) { uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid); continue; } is_a_new_connection = 0; // new request coming in ? 
uwsgi_sock = uwsgi.sockets; while(uwsgi_sock) { if (interesting_fd == uwsgi_sock->fd) { is_a_new_connection = 1; uwsgi.wsgi_req = find_first_available_wsgi_req(); if (uwsgi.wsgi_req == NULL) { now = time(NULL); if (now > last_now) { uwsgi_log("async queue is full !!!\n"); last_now = now; } break; } wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock ); if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) { #ifdef UWSGI_EVENT_USE_PORT event_queue_add_fd_read(uwsgi.async_queue, interesting_fd); #endif uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } #ifdef UWSGI_EVENT_USE_PORT event_queue_add_fd_read(uwsgi.async_queue, interesting_fd); #endif // on linux we do not need to reset the socket to blocking state #ifndef __linux__ /* re-set blocking socket */ int arg = uwsgi_sock->arg; arg &= (~O_NONBLOCK); if (fcntl(uwsgi.wsgi_req->poll.fd, F_SETFL, arg) < 0) { uwsgi_error("fcntl()"); uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } #endif if (wsgi_req_async_recv(uwsgi.wsgi_req)) { uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; break; } if (uwsgi.wsgi_req->do_not_add_to_async_queue) { runqueue_push(uwsgi.wsgi_req); } break; } uwsgi_sock = uwsgi_sock->next; } if (!is_a_new_connection) { // proto event uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd); if (uwsgi.wsgi_req) { proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req); // reset timeout rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; // parsing complete if (!proto_parser_status) { // remove fd from event poll and fd proto table #ifndef UWSGI_EVENT_USE_PORT event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read()); #endif uwsgi.async_proto_fd_table[interesting_fd] = NULL; // 
put request in the runqueue runqueue_push(uwsgi.wsgi_req); continue; } else if (proto_parser_status < 0) { if (proto_parser_status == -1) uwsgi_log("error parsing request\n"); uwsgi.async_proto_fd_table[interesting_fd] = NULL; close(interesting_fd); continue; } // re-add timer async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]); continue; } // app event uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd); // unknown fd, remove it (for safety) if (uwsgi.wsgi_req == NULL) { close(interesting_fd); continue; } // remove all the fd monitors and timeout while(uwsgi.wsgi_req->waiting_fds) { #ifndef UWSGI_EVENT_USE_PORT event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event); #endif tmp_uaf = uwsgi.wsgi_req->waiting_fds; uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL; uwsgi.wsgi_req->waiting_fds = tmp_uaf->next; free(tmp_uaf); } uwsgi.wsgi_req->waiting_fds = NULL; if (uwsgi.wsgi_req->async_timeout) { rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; } uwsgi.wsgi_req->async_ready_fd = 1; uwsgi.wsgi_req->async_last_ready_fd = interesting_fd; // put the request in the runqueue again runqueue_push(uwsgi.wsgi_req); } } // event queue managed, give cpu to runqueue if (!current_request) current_request = uwsgi.async_runqueue; if (uwsgi.async_runqueue_cnt) { uwsgi.wsgi_req = current_request->wsgi_req; uwsgi.schedule_to_req(); uwsgi.wsgi_req->switches++; next_async_request = current_request->next; // request ended ? 
if (uwsgi.wsgi_req->async_status <= UWSGI_OK) { // remove all the monitored fds and timeout while(uwsgi.wsgi_req->waiting_fds) { #ifndef UWSGI_EVENT_USE_PORT event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event); #endif tmp_uaf = uwsgi.wsgi_req->waiting_fds; uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL; uwsgi.wsgi_req->waiting_fds = tmp_uaf->next; free(tmp_uaf); } uwsgi.wsgi_req->waiting_fds = NULL; if (uwsgi.wsgi_req->async_timeout) { rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts); free(uwsgi.wsgi_req->async_timeout); uwsgi.wsgi_req->async_timeout = NULL; } // remove from the list runqueue_remove(current_request); uwsgi_close_request(uwsgi.wsgi_req); // push wsgi_request in the unused stack uwsgi.async_queue_unused_ptr++; uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req; } else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) { // remove this request from suspended list runqueue_remove(current_request); } current_request = next_async_request; } } return NULL; }
// Per-core ZeroMQ request loop: secondary threads get their own PULL
// socket connected to the internal receiver, then every iteration polls
// the PULL socket plus the two signal pipes and serves one request.
void *zeromq_loop(void *arg1) {

	sigset_t smask;
	int i;
	long core_id = (long) arg1;

	struct wsgi_request *wsgi_req = uwsgi.wsgi_requests[core_id];
	uwsgi.zeromq_recv_flag = 0;

	zmq_pollitem_t zmq_poll_items[3];
	char uwsgi_signal;

	if (uwsgi.threads > 1) {
		// allow harakiri to cancel stuck threads at any point
		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &i);
		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &i);
		pthread_setspecific(uwsgi.tur_key, (void *) wsgi_req);

		if (core_id > 0) {
			// block all signals on new threads: only core 0
			// handles process signals
			sigfillset(&smask);
			pthread_sigmask(SIG_BLOCK, &smask, NULL);

			// per-thread plugin init (0xFF plugin slots)
			for (i = 0; i < 0xFF; i++) {
				if (uwsgi.p[i]->init_thread) {
					uwsgi.p[i]->init_thread(core_id);
				}
			}

			// each secondary thread owns a private PULL socket
			// connected to the in-process receiver endpoint
			void *tmp_zmq_pull = zmq_socket(uwsgi.zmq_context, ZMQ_PULL);
			if (tmp_zmq_pull == NULL) {
				uwsgi_error("zmq_socket()");
				exit(1);
			}
			if (zmq_connect(tmp_zmq_pull, uwsgi.zmq_receiver) < 0) {
				uwsgi_error("zmq_connect()");
				exit(1);
			}

			pthread_setspecific(uwsgi.zmq_pull, tmp_zmq_pull);
		}
	}

	if (uwsgi.signal_socket > -1) {
		// item 0: this thread's PULL socket (presumably set for core 0
		// elsewhere during worker init — TODO confirm)
		zmq_poll_items[0].socket = pthread_getspecific(uwsgi.zmq_pull);
		zmq_poll_items[0].fd = -1;
		zmq_poll_items[0].events = ZMQ_POLLIN;

		// item 1: master -> worker signal pipe
		zmq_poll_items[1].socket = NULL;
		zmq_poll_items[1].fd = uwsgi.signal_socket;
		zmq_poll_items[1].events = ZMQ_POLLIN;

		// item 2: worker private signal pipe
		zmq_poll_items[2].socket = NULL;
		zmq_poll_items[2].fd = uwsgi.my_signal_socket;
		zmq_poll_items[2].events = ZMQ_POLLIN;
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		wsgi_req_setup(wsgi_req, core_id, NULL);
		uwsgi.edge_triggered = 1;
		wsgi_req->socket = uwsgi.zmq_socket;

		if (uwsgi.signal_socket > -1) {

			// block until either a request or a signal arrives
			if (zmq_poll(zmq_poll_items, 3, -1) < 0) {
				uwsgi_error("zmq_poll()");
				continue;
			}

			if (zmq_poll_items[1].revents & ZMQ_POLLIN) {
				if (read(uwsgi.signal_socket, &uwsgi_signal, 1) <= 0) {
					// master pipe closed: die with it when no-orphans is set
					if (uwsgi.no_orphans) {
						uwsgi_log_verbose("uWSGI worker %d screams: UAAAAAAH my master died, i will follow him...\n", uwsgi.mywid);
						end_me(0);
					}
				}
				else {
#ifdef UWSGI_DEBUG
					uwsgi_log_verbose("master sent signal %d to worker %d\n", uwsgi_signal, uwsgi.mywid);
#endif
					if (uwsgi_signal_handler(uwsgi_signal)) {
						uwsgi_log_verbose("error managing signal %d on worker %d\n", uwsgi_signal, uwsgi.mywid);
					}
				}
				continue;
			}

			if (zmq_poll_items[2].revents & ZMQ_POLLIN) {
				if (read(uwsgi.my_signal_socket, &uwsgi_signal, 1) <= 0) {
					if (uwsgi.no_orphans) {
						uwsgi_log_verbose("uWSGI worker %d screams: UAAAAAAH my master died, i will follow him...\n", uwsgi.mywid);
						end_me(0);
					}
				}
				else {
#ifdef UWSGI_DEBUG
					uwsgi_log_verbose("master sent signal %d to worker %d\n", uwsgi_signal, uwsgi.mywid);
#endif
					if (uwsgi_signal_handler(uwsgi_signal)) {
						uwsgi_log_verbose("error managing signal %d on worker %d\n", uwsgi_signal, uwsgi.mywid);
					}
				}
				continue;
			}

			// a request is ready on the PULL socket
			if (zmq_poll_items[0].revents & ZMQ_POLLIN) {
				wsgi_req->poll.fd = wsgi_req->socket->proto_accept(wsgi_req, uwsgi.zmq_socket->fd);
			}
		}
		else {
			// no signal sockets: proto_accept blocks on the zmq socket
			wsgi_req->poll.fd = wsgi_req->socket->proto_accept(wsgi_req, uwsgi.zmq_socket->fd);
		}

		if (wsgi_req->poll.fd >= 0) {
			wsgi_req_recv(wsgi_req);
		}

		uwsgi_close_request(wsgi_req);

	}

	// end of the loop
	return NULL;
}
// tornado glue: IOLoop handler fired when a uWSGI socket is readable.
// Grabs a free request core, accepts the connection and registers the
// protocol-parser callback with the IOLoop. Always returns None;
// failures recycle the core.
PyObject *py_uwsgi_tornado_accept(PyObject *self, PyObject *args) {
	int fd = -1;
	PyObject *events = NULL;
	if (!PyArg_ParseTuple(args, "iO:uwsgi_tornado_accept", &fd, &events)) {
		return NULL;
	}

	struct wsgi_request *wsgi_req = find_first_available_wsgi_req();
	if (wsgi_req == NULL) {
		uwsgi_async_queue_is_full(uwsgi_now());
		goto end;
	}

	uwsgi.wsgi_req = wsgi_req;

	// TODO better to move it to a function api ...
	// map the fd the IOLoop reported back to its uwsgi socket
	struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
	while(uwsgi_sock) {
		if (uwsgi_sock->fd == fd) break;
		uwsgi_sock = uwsgi_sock->next;
	}

	if (!uwsgi_sock) {
		free_req_queue;
		goto end;
	}

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		// in case of errors (or thundering herd, just reset it)
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		goto end;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		// FIX: pass the request too — the two-argument form matches the
		// asyncio/gevent engines in this file; the old one-argument call
		// was inconsistent with that convention
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	uwsgi.async_proto_fd_table[wsgi_req->fd] = wsgi_req;

	// add callback for protocol
	if (PyObject_CallMethod(utornado.ioloop, "add_handler", "iOO", wsgi_req->fd, utornado.request, utornado.read) == NULL) {
		// FIX: undo the bookkeeping before recycling the request — the
		// original left a stale async_proto_fd_table entry pointing at a
		// request already pushed back to the free queue
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		PyErr_Print();
		goto end;
	}

end:
	Py_INCREF(Py_None);
	return Py_None;
}
// Core async engine (mid-era uwsgi_del_rb_timer variant): event loop
// multiplexing all request cores. Each iteration: compute the poll
// timeout from runqueue/timer state, wait for events, turn socket
// events into new requests or resume parked ones, then schedule ONE
// runqueue entry (round-robin via the static current_request cursor).
void async_loop() {

	if (uwsgi.async < 2) {
		uwsgi_log("the async loop engine requires async mode (--async <n>)\n");
		exit(1);
	}

	struct uwsgi_async_fd *tmp_uaf;
	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	uint64_t now;

	// static: the runqueue cursor survives across loop iterations
	static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;
	uwsgi.async_runqueue_cnt = 0;

	// expose the async wait primitives so plugins suspend through us
	uwsgi.wait_write_hook = async_wait_fd_write;
	uwsgi.wait_read_hook = async_wait_fd_read;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	if (!uwsgi.schedule_to_main) {
		uwsgi_log("*** WARNING *** async mode without coroutine/greenthread engine loaded !!!\n");
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		now = (uint64_t) uwsgi_now();
		// runnable requests pending -> poll without blocking;
		// otherwise block until the nearest timer (or forever)
		if (uwsgi.async_runqueue_cnt) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL);
			if (min_timeout) {
				timeout = min_timeout->value - now;
				if (timeout <= 0) {
					async_expire_timeouts(now);
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		now = (uint64_t) uwsgi_now();
		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts(now);
		}

		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}

			is_a_new_connection = 0;

			// new request coming in ? walk the listening sockets
			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {
				if (interesting_fd == uwsgi_sock->fd) {
					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t)now);
						break;
					}

					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					// some protocols do not need additional parsing
					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}
				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event: an already-accepted connection has more
				// header data to parse
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					// NOTE(review): async_timeout is not NULL-checked here
					// (the app-event path below guards it) — presumably a
					// proto-registered fd always carries a timer; confirm.
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						if (proto_parser_status == -1)
							uwsgi_log("error parsing request\n");
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// incomplete headers: re-add timer and keep waiting
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app event: a suspended request was waiting on this fd
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event);
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
				// avoid managing other enqueued events...
				break;
			}
		}

		// event queue managed, give cpu to ONE runqueue entry
		if (!current_request)
			current_request = uwsgi.async_runqueue;

		if (uwsgi.async_runqueue_cnt) {

			uwsgi.wsgi_req = current_request->wsgi_req;
			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			next_async_request = current_request->next;
			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK) {
				// remove all the monitored fds and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event);
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				// remove from the list
				runqueue_remove(current_request);

				uwsgi_close_request(uwsgi.wsgi_req);

				// push wsgi_request in the unused stack
				uwsgi.async_queue_unused_ptr++;
				uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;

			}
			else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// suspended on fds/timers: remove from the runqueue
				runqueue_remove(current_request);
			}
			current_request = next_async_request;
		}

	}
}