static void uwsgi_rados_add_mountpoint(char *arg, size_t arg_len) { char *rad_mountpoint = NULL; char *rad_config = NULL; char *rad_poolname = NULL; if (uwsgi_kvlist_parse(arg, arg_len, ',', '=', "mountpoint", &rad_mountpoint, "config", &rad_config, "pool", &rad_poolname, NULL)) { uwsgi_log("unable to parse rados mountpoint definition\n"); exit(1); } if (!rad_mountpoint|| !rad_poolname) { uwsgi_log("[rados] mount requires a mountpoint, and a pool name.\n"); exit(1); } time_t now = uwsgi_now(); uwsgi_log("[rados] mounting %s ...\n", rad_mountpoint); rados_t cluster; if (rados_create(&cluster, NULL) < 0) { uwsgi_error("Can't create Ceph cluster handle"); exit(1); } if (rad_config) uwsgi_log("Using Ceph conf:%s\n", rad_config); else uwsgi_log("Using default Ceph conf.\n"); if (rados_conf_read_file(cluster, rad_config) < 0) { uwsgi_error("Can't configure Ceph cluster handle"); exit(1); } if (rados_connect(cluster) < 0) { uwsgi_error("Can't connect with Ceph cluster"); exit(1); } rados_ioctx_t ctx; uwsgi_log("Ceph pool: %s\n", rad_poolname); if (rados_ioctx_create(cluster, rad_poolname, &ctx) < 0) { uwsgi_error("Can't open rados pool") rados_shutdown(cluster); exit(1); } int id = uwsgi_apps_cnt; struct uwsgi_app *ua = uwsgi_add_app(id, rados_plugin.modifier1, rad_mountpoint, strlen(rad_mountpoint), NULL, NULL); if (!ua) { uwsgi_log("[rados] unable to mount %s\n", rad_mountpoint); rados_shutdown(cluster); exit(1); } ua->responder0 = cluster; ua->responder1 = ctx; ua->started_at = now; ua->startup_time = uwsgi_now() - now; uwsgi_log("Rados app/mountpoint %d (%s) loaded in %d seconds at %p\n", id, rad_mountpoint, (int) ua->startup_time, ctx); }
// Fire all expired signal timers in the rbtree (legacy rb_root API).
// For each timer whose deadline has passed: remove and free it, route
// the associated uWSGI signal, and re-arm it unless its iteration
// budget is exhausted.
void expire_rb_timeouts(struct rb_root *root) {
	time_t current = uwsgi_now();
	struct uwsgi_rb_timer *urbt;
	struct uwsgi_signal_rb_timer *usrbt;

	for (;;) {
		// the tree is ordered by deadline, so the minimum is the next to fire
		urbt = uwsgi_min_rb_timer(root);
		if (urbt == NULL)
			return;

		if (urbt->key <= current) {
			// remove the timeout and add another
			usrbt = (struct uwsgi_signal_rb_timer *) urbt->data;
			rb_erase(&usrbt->uwsgi_rb_timer->rbt, root);
			free(usrbt->uwsgi_rb_timer);
			usrbt->iterations_done++;
			uwsgi_route_signal(usrbt->sig);
			// re-arm: an iterations value of 0 means "repeat forever"
			if (!usrbt->iterations || usrbt->iterations_done < usrbt->iterations) {
				usrbt->uwsgi_rb_timer = uwsgi_add_rb_timer(root, uwsgi_now() + usrbt->value, usrbt);
			}
			continue;
		}
		// earliest timer not yet due: nothing else can be due either
		break;
	}
}
// Fire all expired signal timers (uwsgi_rbtree API variant).
// For each timer whose deadline has passed: delete and free it, route
// the associated uWSGI signal, and re-arm it unless its iteration
// budget is exhausted.
void expire_rb_timeouts(struct uwsgi_rbtree *tree) {
	uint64_t current = (uint64_t) uwsgi_now();
	struct uwsgi_rb_timer *urbt;
	struct uwsgi_signal_rb_timer *usrbt;

	for (;;) {
		// the minimum key is the earliest deadline
		urbt = uwsgi_min_rb_timer(tree, NULL);
		if (urbt == NULL)
			return;

		if (urbt->value <= current) {
			// remove the timeout and add another
			usrbt = (struct uwsgi_signal_rb_timer *) urbt->data;
			uwsgi_del_rb_timer(tree, urbt);
			free(urbt);
			usrbt->iterations_done++;
			uwsgi_route_signal(usrbt->sig);
			// re-arm: an iterations value of 0 means "repeat forever"
			if (!usrbt->iterations || usrbt->iterations_done < usrbt->iterations) {
				usrbt->uwsgi_rb_timer = uwsgi_add_rb_timer(tree, uwsgi_now() + usrbt->value, usrbt);
			}
			continue;
		}
		// earliest timer not yet due: nothing else can be due either
		break;
	}
}
// Cheaper-engine tick, run by the master: asks the configured cheaper
// algorithm how many workers to change. Positive -> respawn that many
// cheaped workers; negative -> gracefully stop the oldest running
// worker. Throttled to one run per master check interval.
// Returns 0 only when a respawn fails, 1 otherwise.
int uwsgi_calc_cheaper(void) {
	int i;
	static time_t last_check = 0;
	int check_interval = uwsgi.shared->options[UWSGI_OPTION_MASTER_INTERVAL];

	if (!last_check)
		last_check = uwsgi_now();

	time_t now = uwsgi_now();
	if (!check_interval)
		check_interval = 1;

	// rate-limit: only evaluate once per interval
	if ((now - last_check) < check_interval)
		return 1;

	last_check = now;

	int needed_workers = uwsgi.cheaper_algo();

	if (needed_workers > 0) {
		// bring cheaped (sleeping) workers back to life
		for (i = 1; i <= uwsgi.numproc; i++) {
			if (uwsgi.workers[i].cheaped == 1 && uwsgi.workers[i].pid == 0) {
				if (uwsgi_respawn_worker(i))
					return 0;
				needed_workers--;
			}
			if (needed_workers == 0)
				break;
		}
	}
	else if (needed_workers < 0) {
		// find the worker with the oldest spawn time and retire it
		// NOTE(review): INT_MAX as the time_t sentinel assumes spawn
		// times fit in int — fine until 2038 on 64-bit time_t; confirm
		int oldest_worker = 0;
		time_t oldest_worker_spawn = INT_MAX;
		for (i = 1; i <= uwsgi.numproc; i++) {
			if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
				if (uwsgi.workers[i].last_spawn < oldest_worker_spawn) {
					oldest_worker_spawn = uwsgi.workers[i].last_spawn;
					oldest_worker = i;
				}
			}
		}
		if (oldest_worker > 0) {
#ifdef UWSGI_DEBUG
			uwsgi_log("worker %d should die...\n", oldest_worker);
#endif
			uwsgi.workers[oldest_worker].cheaped = 1;
			uwsgi.workers[oldest_worker].manage_next_request = 0;
			// wakeup task in case of wait
			(void) kill(uwsgi.workers[oldest_worker].pid, SIGWINCH);
		}
	}
	return 1;
}
// call the alarm func void uwsgi_alarm_run(struct uwsgi_alarm_instance *uai, char *msg, size_t len) { time_t now = uwsgi_now(); // avoid alarm storming/loop if last message is the same if (!uwsgi_strncmp(msg, len, uai->last_msg, uai->last_msg_size)) { if (now - uai->last_run < uwsgi.alarm_freq) return; } uai->alarm->func(uai, msg, len); uai->last_run = uwsgi_now(); memcpy(uai->last_msg, msg, len); uai->last_msg_size = len; }
// Parse a glusterfs mount definition (mountpoint/server(s)/volfile/volume
// key=value list), create the glfs volume handle and register it as a
// uWSGI app mounted at the requested mountpoint.
// Called only at startup: exits the process on any failure.
static void uwsgi_glusterfs_add_mountpoint(char *arg, size_t arg_len) {
	char *ugfs_mountpoint = NULL;
	char *ugfs_server = NULL;
	char *ugfs_volfile = NULL;
	char *ugfs_volume = NULL;
	if (uwsgi_kvlist_parse(arg, arg_len, ',', '=',
		"mountpoint", &ugfs_mountpoint,
		"server", &ugfs_server,
		"servers", &ugfs_server,
		"volfile", &ugfs_volfile,
		"volume", &ugfs_volume, NULL)) {
		uwsgi_log("unable to parse glusterfs mountpoint definition\n");
		exit(1);
	}

	if (!ugfs_mountpoint || (!ugfs_server && !ugfs_volfile) || !ugfs_volume) {
		uwsgi_log("[glusterfs] mount requires a mountpoint, a volume and at least one server or volfile\n");
		exit(1);
	}

	int id = uwsgi_apps_cnt;
	time_t now = uwsgi_now();

	uwsgi_log("[glusterfs] mounting %s ...\n", ugfs_mountpoint);
	// this should fail only if memory is not available
	glfs_t *volume = glfs_new(ugfs_volume);
	if (!volume) {
		uwsgi_error("unable to initialize glusterfs mountpoint: glfs_new()");
		exit(1);
	}

	if (ugfs_volfile) {
		if (glfs_set_volfile(volume, ugfs_volfile)) {
			uwsgi_error("unable to set glusterfs volfile: glfs_set_volfile\n");
			exit(1);
		}
	}

	/*
	   here we pass ugfs_server as the callable field.
	   After fork(), if this field is defined, we will start trying to
	   connect to each one of the configured nodes.
	   This is required to have fallback management
	 */
	struct uwsgi_app *ua = uwsgi_add_app(id, glusterfs_plugin.modifier1, ugfs_mountpoint, strlen(ugfs_mountpoint), volume, ugfs_server);
	if (!ua) {
		uwsgi_log("[glusterfs] unable to mount %s\n", ugfs_mountpoint);
		exit(1);
	}

	ua->started_at = now;
	ua->startup_time = uwsgi_now() - now;
	uwsgi_log("GlusterFS app/mountpoint %d (%s) loaded in %d seconds at %p\n", id, ugfs_mountpoint, (int) ua->startup_time, volume);
}
// Master tick for --idle: when no request has been served for more than
// uwsgi.idle seconds (and no worker is busy), enter "cheap" mode by
// killing all workers and waiting on the sockets — or, with
// --die-on-idle, shut the whole instance down (notifying the emperor
// when one is present).
void uwsgi_master_check_idle() {
	static time_t last_request_timecheck = 0;
	static uint64_t last_request_count = 0;
	int i;
	int waitpid_status;

	if (!uwsgi.idle || uwsgi.status.is_cheap)
		return;

	uwsgi.current_time = uwsgi_now();
	if (!last_request_timecheck)
		last_request_timecheck = uwsgi.current_time;

	// security check, stop the check if there are busy workers
	for (i = 1; i <= uwsgi.numproc; i++) {
		if (uwsgi.workers[i].cheaped == 0 && uwsgi.workers[i].pid > 0) {
			if (uwsgi_worker_is_busy(i)) {
				return;
			}
		}
	}

	// new requests arrived since last tick: reset the idle clock
	if (last_request_count != uwsgi.workers[0].requests) {
		last_request_timecheck = uwsgi.current_time;
		last_request_count = uwsgi.workers[0].requests;
	}
	// a bit of over-engeneering to avoid clock skews
	else if (last_request_timecheck < uwsgi.current_time && (uwsgi.current_time - last_request_timecheck > uwsgi.idle)) {
		uwsgi_log("workers have been inactive for more than %d seconds (%llu-%llu)\n", uwsgi.idle, (unsigned long long) uwsgi.current_time, (unsigned long long) last_request_timecheck);
		uwsgi.status.is_cheap = 1;
		if (uwsgi.die_on_idle) {
			if (uwsgi.has_emperor) {
				// byte 22 asks the emperor to shut this vassal down
				char byte = 22;
				if (write(uwsgi.emperor_fd, &byte, 1) != 1) {
					uwsgi_error("write()");
					kill_them_all(0);
				}
			}
			else {
				kill_them_all(0);
			}
			return;
		}
		// kill and reap every worker, then go back to waiting on the sockets
		for (i = 1; i <= uwsgi.numproc; i++) {
			uwsgi.workers[i].cheaped = 1;
			if (uwsgi.workers[i].pid == 0)
				continue;
			kill(uwsgi.workers[i].pid, SIGKILL);
			if (waitpid(uwsgi.workers[i].pid, &waitpid_status, 0) < 0) {
				// ECHILD just means the worker was already reaped
				if (errno != ECHILD)
					uwsgi_error("uwsgi_master_check_idle()/waitpid()");
			}
		}
		uwsgi_add_sockets_to_queue(uwsgi.master_queue, -1);
		uwsgi_log("cheap mode enabled: waiting for socket connection...\n");
		last_request_timecheck = 0;
	}
}
// Append a message to the tail of the shared queue.
// Returns 1 on success, 0 when the message is empty or does not fit in
// a queue block.
int uwsgi_queue_push(char *message, uint64_t size) {
	struct uwsgi_queue_item *uqi;
	char *ptr = (char *) uwsgi.queue;

	if (!size)
		return 0;
	// BUGFIX: each block holds a uwsgi_queue_item header followed by the
	// payload, so the payload limit is blocksize - sizeof(header). The
	// original test (size > blocksize + sizeof(header)) allowed memcpy
	// to write past the end of the block.
	if (size + sizeof(struct uwsgi_queue_item) > uwsgi.queue_blocksize)
		return 0;

	ptr = ptr + (uwsgi.queue_blocksize * uwsgi.queue_header->pos);
	uqi = (struct uwsgi_queue_item *) ptr;
	ptr += sizeof(struct uwsgi_queue_item);

	uqi->size = size;
	uqi->ts = uwsgi_now();
	memcpy(ptr, message, size);

	// advance (and wrap) the tail slot
	uwsgi.queue_header->pos++;
	if (uwsgi.queue_header->pos >= uwsgi.queue_size)
		uwsgi.queue_header->pos = 0;
	return 1;
}
// Perform the RFC 6455 server-side handshake: emit the 101 status with
// Upgrade/Connection headers, a (legacy) Sec-WebSocket-Origin header,
// and the Sec-WebSocket-Accept digest (SHA1 of client key + magic GUID,
// base64-encoded). Requires an SSL build (for the SHA1 helper).
// Returns the result of writing the headers, or -1 on any failure.
int uwsgi_websocket_handshake(struct wsgi_request *wsgi_req, char *key, uint16_t key_len, char *origin, uint16_t origin_len) {
#ifdef UWSGI_SSL
	char sha1[20];
	if (uwsgi_response_prepare_headers(wsgi_req, "101 Web Socket Protocol Handshake", 33)) return -1;
	if (uwsgi_response_add_header(wsgi_req, "Upgrade", 7, "WebSocket", 9)) return -1;
	if (uwsgi_response_add_header(wsgi_req, "Connection", 10, "Upgrade", 7)) return -1;
	// echo the client origin, or a wildcard when none was sent
	if (origin_len > 0) {
		if (uwsgi_response_add_header(wsgi_req, "Sec-WebSocket-Origin", 20, origin, origin_len)) return -1;
	}
	else {
		if (uwsgi_response_add_header(wsgi_req, "Sec-WebSocket-Origin", 20, "*", 1)) return -1;
	}
	// generate websockets sha1 and encode it to base64
	if (!uwsgi_sha1_2n(key, key_len, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11", 36, sha1)) return -1;
	size_t b64_len = 0;
	char *b64 = uwsgi_base64_encode(sha1, 20, &b64_len);
	if (!b64) return -1;
	if (uwsgi_response_add_header(wsgi_req, "Sec-WebSocket-Accept", 20, b64, b64_len)) {
		free(b64);
		return -1;
	}
	free(b64);
	// treat the completed handshake as an implicit PONG so the
	// keepalive logic starts with a fresh deadline
	wsgi_req->websocket_last_pong = uwsgi_now();
	return uwsgi_response_write_headers_do(wsgi_req);
#else
	uwsgi_log("you need to build uWSGI with SSL support to use the websocket handshake api function !!!\n");
	return -1;
#endif
}
// Overwrite the queue slot at index pos with a message.
// Returns 1 on success, 0 when the message is empty, does not fit in a
// queue block, or pos is out of range.
int uwsgi_queue_set(uint64_t pos, char *message, uint64_t size) {
	struct uwsgi_queue_item *uqi;
	char *ptr = (char *) uwsgi.queue;

	if (!size)
		return 0;
	// BUGFIX: each block holds a uwsgi_queue_item header followed by the
	// payload, so the payload limit is blocksize - sizeof(header). The
	// original test (size > blocksize + sizeof(header)) allowed memcpy
	// to write past the end of the block.
	if (size + sizeof(struct uwsgi_queue_item) > uwsgi.queue_blocksize)
		return 0;
	if (pos >= uwsgi.queue_size)
		return 0;

	ptr = ptr + (uwsgi.queue_blocksize * pos);
	uqi = (struct uwsgi_queue_item *) ptr;
	ptr += sizeof(struct uwsgi_queue_item);

	uqi->size = size;
	uqi->ts = uwsgi_now();
	memcpy(ptr, message, size);
	return 1;
}
// Send the preconfigured PING frame to the websocket peer and record
// when it was sent. Returns 0 on success, -1 on write failure.
static int uwsgi_websockets_ping(struct wsgi_request *wsgi_req) {
	int write_failed = uwsgi_response_write_body_do(wsgi_req, uwsgi.websockets_ping->buf, uwsgi.websockets_ping->pos);
	if (write_failed)
		return -1;
	wsgi_req->websocket_last_ping = uwsgi_now();
	return 0;
}
// Ask a vassal to gracefully reload itself, re-pushing its config blob
// over the dedicated config pipe when the instance was spawned from an
// emperor-provided configuration.
void emperor_respawn(struct uwsgi_instance *c_ui, time_t mod) {
	struct uwsgi_header uh;

	// reload the uWSGI instance
	if (write(c_ui->pipe[0], "\1", 1) != 1) {
		uwsgi_error("write()");
	}

	// push the config to the config pipe (if needed)
	if (c_ui->use_config) {
		// modifier1 115 marks an emperor config packet
		uh.modifier1 = 115;
		uh.pktsize = c_ui->config_len;
		uh.modifier2 = 0;
		// NOTE(review): writes the raw 4-byte uwsgi header struct;
		// assumes matching endianness/packing on both pipe ends — confirm
		if (write(c_ui->pipe_config[0], &uh, 4) != 4) {
			uwsgi_error("[uwsgi-emperor] write() header config");
		}
		else {
			if (write(c_ui->pipe_config[0], c_ui->config, c_ui->config_len) != (long) c_ui->config_len) {
				uwsgi_error("[uwsgi-emperor] write() config");
			}
		}
	}

	c_ui->respawns++;
	c_ui->last_mod = mod;
	c_ui->last_run = uwsgi_now();
	uwsgi_log("[emperor] reload the uwsgi instance %s\n", c_ui->name);
}
// asyncio accept callback, registered as a reader on a bound socket's
// fd. Grabs a free wsgi_request slot, accepts the connection, then
// installs the per-request protocol reader and a socket timeout on the
// asyncio event loop. Always returns None to Python.
static PyObject *py_uwsgi_asyncio_accept(PyObject *self, PyObject *args) {
	long uwsgi_sock_ptr = 0;
	if (!PyArg_ParseTuple(args, "l:uwsgi_asyncio_accept", &uwsgi_sock_ptr)) {
		return NULL;
	}

	struct wsgi_request *wsgi_req = find_first_available_wsgi_req();
	if (wsgi_req == NULL) {
		uwsgi_async_queue_is_full(uwsgi_now());
		goto end;
	}

	uwsgi.wsgi_req = wsgi_req;
	// the uwsgi_socket pointer is smuggled through Python as a long
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) uwsgi_sock_ptr;

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		// in case of errors (or thundering herd, just reset it)
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		goto end;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	uwsgi.async_proto_fd_table[wsgi_req->fd] = wsgi_req;

	// add callback for protocol
	if (PyObject_CallMethod(uasyncio.loop, "add_reader", "iOl", wsgi_req->fd, uasyncio.request, (long) wsgi_req) == NULL) {
		free_req_queue;
		PyErr_Print();
	}

	// add timeout
	PyObject *ob_timeout = PyObject_CallMethod(uasyncio.loop, "call_later", "iOli", uwsgi.socket_timeout, uasyncio.request, (long)wsgi_req, 1);
	if (!ob_timeout) {
		// could not arm the timeout: undo the reader registration too
		if (PyObject_CallMethod(uasyncio.loop, "remove_reader", "i", wsgi_req->fd) == NULL) PyErr_Print();
		free_req_queue;
	}
	else {
		// trick for reference counting: keep the TimerHandle alive by
		// storing it in the (otherwise unused) async_timeout pointer
		wsgi_req->async_timeout = (struct uwsgi_rb_timer *) ob_timeout;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}
// Arm a one-shot async timeout for the request, unless one is already
// pending or the requested timeout is not positive.
void async_add_timeout(struct wsgi_request *wsgi_req, int timeout) {
	if (timeout <= 0)
		return;
	if (wsgi_req->async_timeout != NULL)
		return;
	wsgi_req->async_timeout = uwsgi_add_rb_timer(uwsgi.rb_async_timeouts, uwsgi_now() + timeout, wsgi_req);
}
// Serialize the current exception state of a request into a uwsgi
// key/value buffer: raw request vars, plugin-provided backtrace /
// class / msg / repr (each only when the plugin exposes the hook),
// plus timestamp, worker id, pid, core and hostname.
// Returns a freshly allocated buffer (caller destroys) or NULL on error.
struct uwsgi_buffer *uwsgi_exception_handler_object(struct wsgi_request *wsgi_req) {
	struct uwsgi_buffer *ub = uwsgi_buffer_new(4096);
	// the raw request-vars block
	if (uwsgi_buffer_append_keyval(ub, "vars", 4, wsgi_req->buffer,wsgi_req->uh->pktsize)) goto error;

	// optional plugin hook: language-level backtrace
	if (uwsgi.p[wsgi_req->uh->modifier1]->backtrace) {
		struct uwsgi_buffer *bt = uwsgi.p[wsgi_req->uh->modifier1]->backtrace(wsgi_req);
		if (bt) {
			if (uwsgi_buffer_append_keyval(ub, "backtrace", 9, bt->buf, bt->pos)) {
				uwsgi_buffer_destroy(bt);
				goto error;
			}
			uwsgi_buffer_destroy(bt);
		}
	}

	// optional plugin hook: exception class name
	if (uwsgi.p[wsgi_req->uh->modifier1]->exception_class) {
		struct uwsgi_buffer *ec = uwsgi.p[wsgi_req->uh->modifier1]->exception_class(wsgi_req);
		if (ec) {
			if (uwsgi_buffer_append_keyval(ub, "class", 5, ec->buf, ec->pos)) {
				uwsgi_buffer_destroy(ec);
				goto error;
			}
			uwsgi_buffer_destroy(ec);
		}
	}

	// optional plugin hook: exception message
	if (uwsgi.p[wsgi_req->uh->modifier1]->exception_msg) {
		struct uwsgi_buffer *em = uwsgi.p[wsgi_req->uh->modifier1]->exception_msg(wsgi_req);
		if (em) {
			if (uwsgi_buffer_append_keyval(ub, "msg", 3, em->buf, em->pos)) {
				uwsgi_buffer_destroy(em);
				goto error;
			}
			uwsgi_buffer_destroy(em);
		}
	}

	// optional plugin hook: exception repr
	if (uwsgi.p[wsgi_req->uh->modifier1]->exception_repr) {
		struct uwsgi_buffer *er = uwsgi.p[wsgi_req->uh->modifier1]->exception_repr(wsgi_req);
		if (er) {
			if (uwsgi_buffer_append_keyval(ub, "repr", 4, er->buf, er->pos)) {
				uwsgi_buffer_destroy(er);
				goto error;
			}
			uwsgi_buffer_destroy(er);
		}
	}

	// context metadata: when/where the exception happened
	if (uwsgi_buffer_append_keynum(ub, "unix", 4, uwsgi_now())) goto error;
	if (uwsgi_buffer_append_keynum(ub, "wid", 3, uwsgi.mywid)) goto error;
	if (uwsgi_buffer_append_keynum(ub, "pid", 3, uwsgi.mypid)) goto error;
	if (uwsgi_buffer_append_keynum(ub, "core", 4, wsgi_req->async_id)) goto error;
	if (uwsgi_buffer_append_keyval(ub, "node", 4, uwsgi.hostname, uwsgi.hostname_len)) goto error;

	return ub;
error:
	uwsgi_buffer_destroy(ub);
	return NULL;
}
// gevent accept hook, fired by the watcher attached to a uWSGI socket.
// Grabs a free request slot, accepts the connection and spawns a
// greenlet to serve it; loops back to accept again on edge-triggered
// sockets or when a retry is flagged. Always returns None to Python.
PyObject *py_uwsgi_gevent_main(PyObject * self, PyObject * args) {
	// hack to retrieve the socket address
	PyObject *py_uwsgi_sock = PyTuple_GetItem(args, 0);
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) PyLong_AsLong(py_uwsgi_sock);
	long watcher_index = PyInt_AsLong(PyTuple_GetItem(args, 1));
	struct wsgi_request *wsgi_req = NULL;
edge:
	wsgi_req = find_first_available_wsgi_req();

	if (wsgi_req == NULL) {
		// no free slot: stop this watcher until one frees up
		uwsgi_async_queue_is_full(uwsgi_now());
		PyObject_CallMethod(ugevent.watchers[watcher_index], "stop", NULL);
		goto clear;
	}

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		free_req_queue;
		if (uwsgi_sock->retry && uwsgi_sock->retry[wsgi_req->async_id]) {
			goto edge;
		}
		// in case of errors (or thundering herd, just reset it)
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		goto clear;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	// hack to easily pass wsgi_req pointer to the greenlet
	PyTuple_SetItem(ugevent.greenlet_args, 1, PyLong_FromLong((long)wsgi_req));

	// spawn the request greenlet
	PyObject *new_gl = python_call(ugevent.spawn, ugevent.greenlet_args, 0, NULL);
	Py_DECREF(new_gl);

	if (uwsgi_sock->edge_trigger) {
#ifdef UWSGI_DEBUG
		uwsgi_log("i am an edge triggered socket !!!\n");
#endif
		goto edge;
	}

clear:
	Py_INCREF(Py_None);
	return Py_None;
}
// massive reload of vassals static void emperor_massive_reload(int signum) { struct uwsgi_instance *c_ui = ui->ui_next; while (c_ui) { emperor_respawn(c_ui, uwsgi_now()); c_ui = c_ui->ui_next; } }
// Mark worker wid as dying: record when the curse began, derive the
// no-mercy deadline from the reload mercy window, and (when sig is
// non-zero) deliver that signal to the worker.
void uwsgi_curse(int wid, int sig) {
	time_t cursed = uwsgi_now();
	uwsgi.workers[wid].cursed_at = cursed;
	uwsgi.workers[wid].no_mercy_at = cursed + uwsgi.worker_reload_mercy;

	if (!sig)
		return;
	(void) kill(uwsgi.workers[wid].pid, sig);
}
// Pick a backend for the request from the corerouter's static-nodes
// round-robin list, skipping nodes marked dead until their gracetime
// expires. Fills peer->instance_address/len when a node was found and
// advances the rotation cursor either way. Always returns 0
// (peer->static_node stays NULL when no usable node exists).
int uwsgi_cr_map_use_static_nodes(struct uwsgi_corerouter *ucr, struct corerouter_peer *peer) {
	if (!ucr->current_static_node) {
		ucr->current_static_node = ucr->static_nodes;
	}
	peer->static_node = ucr->current_static_node;

	// is it a dead node ?
	// (custom holds the timestamp at which the node was marked dead)
	if (peer->static_node->custom > 0) {

		// gracetime passed ?
		if (peer->static_node->custom + ucr->static_node_gracetime <= (uint64_t) uwsgi_now()) {
			peer->static_node->custom = 0;
		}
		else {
			// still in gracetime: walk the list (treated as circular)
			// looking for the next healthy node
			struct uwsgi_string_list *tmp_node = peer->static_node;
			struct uwsgi_string_list *next_node = peer->static_node->next;
			peer->static_node = NULL;
			// needed for 1-node only setups
			if (!next_node)
				next_node = ucr->static_nodes;
			while (tmp_node != next_node) {
				// wrap around at the end of the list
				if (!next_node) {
					next_node = ucr->static_nodes;
				}
				if (tmp_node == next_node)
					break;
				if (next_node->custom == 0) {
					peer->static_node = next_node;
					break;
				}
				next_node = next_node->next;
			}
		}
	}

	if (peer->static_node) {
		peer->instance_address = peer->static_node->value;
		peer->instance_address_len = peer->static_node->len;
		// set the next one
		ucr->current_static_node = peer->static_node->next;
	}
	else {
		// set the next one
		ucr->current_static_node = ucr->current_static_node->next;
	}

	return 0;
}
// Fastrouter twin of the corerouter static-nodes mapper: pick a backend
// from the round-robin list, skipping nodes marked dead until their
// gracetime expires. Fills the session's instance address when a node
// was found and advances the rotation cursor either way.
// Always returns 0 (fr_session->static_node stays NULL when no usable
// node exists).
int uwsgi_fr_map_use_static_nodes(struct fastrouter_session *fr_session, char **magic_table) {
	if (!ufr.current_static_node) {
		ufr.current_static_node = ufr.static_nodes;
	}
	fr_session->static_node = ufr.current_static_node;

	// is it a dead node ?
	// (custom holds the timestamp at which the node was marked dead)
	if (fr_session->static_node->custom > 0) {

		// gracetime passed ?
		if (fr_session->static_node->custom + ufr.static_node_gracetime <= (uint64_t) uwsgi_now()) {
			fr_session->static_node->custom = 0;
		}
		else {
			// still in gracetime: walk the list (treated as circular)
			// looking for the next healthy node
			struct uwsgi_string_list *tmp_node = fr_session->static_node;
			struct uwsgi_string_list *next_node = fr_session->static_node->next;
			fr_session->static_node = NULL;
			// needed for 1-node only setups
			if (!next_node)
				next_node = ufr.static_nodes;
			while (tmp_node != next_node) {
				// wrap around at the end of the list
				if (!next_node) {
					next_node = ufr.static_nodes;
				}
				if (tmp_node == next_node)
					break;
				if (next_node->custom == 0) {
					fr_session->static_node = next_node;
					break;
				}
				next_node = next_node->next;
			}
		}
	}

	if (fr_session->static_node) {
		fr_session->instance_address = fr_session->static_node->value;
		fr_session->instance_address_len = fr_session->static_node->len;
		// set the next one
		ufr.current_static_node = fr_session->static_node->next;
	}
	else {
		// set the next one
		ufr.current_static_node = ufr.current_static_node->next;
	}

	return 0;
}
//use this instead of fprintf to avoid buffering mess with udp logging void uwsgi_log(const char *fmt, ...) { va_list ap; char logpkt[4096]; int rlen = 0; int ret; struct timeval tv; char sftime[64]; char ctime_storage[26]; time_t now; if (uwsgi.logdate) { if (uwsgi.log_strftime) { now = uwsgi_now(); rlen = strftime(sftime, 64, uwsgi.log_strftime, localtime(&now)); memcpy(logpkt, sftime, rlen); memcpy(logpkt + rlen, " - ", 3); rlen += 3; } else { gettimeofday(&tv, NULL); #ifdef __sun__ ctime_r((const time_t *) &tv.tv_sec, ctime_storage, 26); #else ctime_r((const time_t *) &tv.tv_sec, ctime_storage); #endif memcpy(logpkt, ctime_storage, 24); memcpy(logpkt + 24, " - ", 3); rlen = 24 + 3; } } va_start(ap, fmt); ret = vsnprintf(logpkt + rlen, 4096 - rlen, fmt, ap); va_end(ap); if (ret >= 4096) { char *tmp_buf = uwsgi_malloc(rlen + ret + 1); memcpy(tmp_buf, logpkt, rlen); va_start(ap, fmt); ret = vsnprintf(tmp_buf + rlen, ret + 1, fmt, ap); va_end(ap); rlen = write(2, tmp_buf, rlen + ret); free(tmp_buf); return; } rlen += ret; // do not check for errors rlen = write(2, logpkt, rlen); }
// SIGKILL every cursed worker that has outlived its per-worker
// reload-mercy deadline.
void uwsgi_master_check_mercy() {
	int wid;
	for (wid = 1; wid <= uwsgi.numproc; wid++) {
		// only consider live workers already marked as dying
		if (uwsgi.workers[wid].pid <= 0 || !uwsgi.workers[wid].cursed_at)
			continue;
		if (uwsgi_now() <= uwsgi.workers[wid].no_mercy_at)
			continue;
		uwsgi_log_verbose("worker %d (pid: %d) is taking too much time to die...NO MERCY !!!\n", wid, uwsgi.workers[wid].pid);
		// cursing again looks odd, but it avoids re-triggering when the
		// waitpid() handling elsewhere is skipped
		uwsgi_curse(wid, SIGKILL);
	}
}
// Arm a one-shot async timeout for the request. Complains and bails out
// when async mode is not active; does nothing when a timer is already
// pending or the requested timeout is not positive.
void async_add_timeout(struct wsgi_request *wsgi_req, int timeout) {
	if (uwsgi.async < 1 || !uwsgi.rb_async_timeouts) {
		uwsgi_log_verbose("ASYNC call without async mode !!!\n");
		return;
	}

	wsgi_req->async_ready_fd = 0;

	if (timeout <= 0)
		return;
	if (wsgi_req->async_timeout != NULL)
		return;
	wsgi_req->async_timeout = uwsgi_add_rb_timer(uwsgi.rb_async_timeouts, uwsgi_now() + timeout, wsgi_req);
}
// Build and send a subscription packet (modifier1 224) to udp_address,
// announcing key -> socket_name together with cores/load/weight
// metadata. When sign is set (SSL builds), a unix timestamp (salted
// with cmd) and an RSA signature over the packet body are appended.
void uwsgi_send_subscription(char *udp_address, char *key, size_t keysize, uint8_t modifier1, uint8_t modifier2, uint8_t cmd, char *socket_name, char *sign) {

	// nothing to announce without at least one bound socket
	if (socket_name == NULL && !uwsgi.sockets)
		return;

	if (!socket_name) {
		socket_name = uwsgi.sockets->name;
	}

	struct uwsgi_buffer *ub = uwsgi_buffer_new(4096);

	// make space for uwsgi header
	ub->pos = 4;

	if (uwsgi_buffer_append_keyval(ub, "key", 3, key, keysize)) goto end;
	if (uwsgi_buffer_append_keyval(ub, "address", 7, socket_name, strlen(socket_name))) goto end;
	if (uwsgi_buffer_append_keynum(ub, "modifier1", 9, modifier1)) goto end;
	if (uwsgi_buffer_append_keynum(ub, "modifier2", 9, modifier2)) goto end;
	if (uwsgi_buffer_append_keynum(ub, "cores", 5, uwsgi.numproc * uwsgi.cores)) goto end;
	if (uwsgi_buffer_append_keynum(ub, "load", 4, uwsgi.shared->load)) goto end;
	// auto-weight derives the weight from total cores
	if (uwsgi.auto_weight) {
		if (uwsgi_buffer_append_keynum(ub, "weight", 6, uwsgi.numproc * uwsgi.cores )) goto end;
	}
	else {
		if (uwsgi_buffer_append_keynum(ub, "weight", 6, uwsgi.weight )) goto end;
	}
#ifdef UWSGI_SSL
	if (sign) {
		if (uwsgi_buffer_append_keynum(ub, "unix", 4, (uwsgi_now() + (time_t) cmd) )) goto end;
		unsigned int signature_len = 0;
		// sign everything after the 4-byte uwsgi header
		char *signature = uwsgi_rsa_sign(sign, ub->buf + 4, ub->pos - 4, &signature_len);
		if (signature && signature_len > 0) {
			if (uwsgi_buffer_append_keyval(ub, "sign", 4, signature, signature_len)) {
				free(signature);
				goto end;
			}
			free(signature);
		}
	}
#endif
	send_udp_message(224, cmd, udp_address, ub->buf, ub->pos - 4);
end:
	uwsgi_buffer_destroy(ub);
}
// Post-init hook of the carbon plugin: build the runtime server list
// from the configured addresses, apply defaults for frequency/timeout/
// retry settings, derive the metric id from the first socket name
// (dots replaced with underscores to keep graphite paths intact) and
// allocate the per-worker busyness accumulators.
void carbon_post_init() {

	int i;
	struct uwsgi_string_list *usl = u_carbon.servers;
	if (!uwsgi.sockets) return;
	if (!u_carbon.servers) return;

	// prepend each configured server to the runtime list, marked healthy
	while(usl) {
		struct carbon_server_list *u_server = uwsgi_calloc(sizeof(struct carbon_server_list));
		u_server->value = usl->value;
		u_server->healthy = 1;
		u_server->errors = 0;
		if (u_carbon.servers_data) {
			u_server->next = u_carbon.servers_data;
		}
		u_carbon.servers_data = u_server;
		uwsgi_log("[carbon] added server %s\n", usl->value);
		usl = usl->next;
	}

	// defaults for unset (or nonsensical) tunables
	if (u_carbon.freq < 1) u_carbon.freq = 60;
	if (u_carbon.timeout < 1) u_carbon.timeout = 3;
	if (u_carbon.max_retries <= 0) u_carbon.max_retries = 1;
	if (u_carbon.retry_delay <= 0) u_carbon.retry_delay = 7;
	if (!u_carbon.id) {
		u_carbon.id = uwsgi_str(uwsgi.sockets->name);
		// graphite uses '.' as its path separator
		for(i=0;i<(int)strlen(u_carbon.id);i++) {
			if (u_carbon.id[i] == '.') u_carbon.id[i] = '_';
		}
	}

	if (!u_carbon.last_busyness_values) {
		u_carbon.last_busyness_values = uwsgi_calloc(sizeof(unsigned long long) * uwsgi.numproc);
	}

	if (!u_carbon.current_busyness_values) {
		u_carbon.current_busyness_values = uwsgi_calloc(sizeof(unsigned long long) * uwsgi.numproc);
	}

	// set next update to now()+retry_delay, this way we will have first flush just after start
	u_carbon.last_update = uwsgi_now() - u_carbon.freq + u_carbon.retry_delay;

	uwsgi_log("[carbon] carbon plugin started, %is frequency, %is timeout, max retries %i, retry delay %is\n", u_carbon.freq, u_carbon.timeout, u_carbon.max_retries, u_carbon.retry_delay);
}
// Master tick enforcing per-worker deadlines: harakiri (request
// timeout), user-defined harakiri, the "evil" address-space / RSS
// limits (hard SIGKILL), and the configured maximum worker lifetime
// (graceful SIGWINCH goodbye).
void uwsgi_master_check_workers_deadline() {
	int i;
	for (i = 1; i <= uwsgi.numproc; i++) {
		/* first check for harakiri */
		if (uwsgi.workers[i].harakiri > 0) {
			if (uwsgi.workers[i].harakiri < (time_t) uwsgi.current_time) {
				trigger_harakiri(i);
			}
		}
		/* then user-defined harakiri */
		if (uwsgi.workers[i].user_harakiri > 0) {
			if (uwsgi.workers[i].user_harakiri < (time_t) uwsgi.current_time) {
				trigger_harakiri(i);
			}
		}
		// then for evil memory checkers
		if (uwsgi.evil_reload_on_as) {
			if ((rlim_t) uwsgi.workers[i].vsz_size >= uwsgi.evil_reload_on_as) {
				uwsgi_log("*** EVIL RELOAD ON WORKER %d ADDRESS SPACE: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].vsz_size, uwsgi.workers[i].pid);
				kill(uwsgi.workers[i].pid, SIGKILL);
				uwsgi.workers[i].vsz_size = 0;
			}
		}
		if (uwsgi.evil_reload_on_rss) {
			if ((rlim_t) uwsgi.workers[i].rss_size >= uwsgi.evil_reload_on_rss) {
				uwsgi_log("*** EVIL RELOAD ON WORKER %d RSS: %lld (pid: %d) ***\n", i, (long long) uwsgi.workers[i].rss_size, uwsgi.workers[i].pid);
				kill(uwsgi.workers[i].pid, SIGKILL);
				uwsgi.workers[i].rss_size = 0;
			}
		}
		// check if worker was running longer than allowed lifetime
		if (uwsgi.workers[i].pid > 0 && uwsgi.workers[i].cheaped == 0 && uwsgi.shared->options[UWSGI_OPTION_MAX_WORKER_LIFETIME] > 0) {
			uint64_t lifetime = uwsgi_now() - uwsgi.workers[i].last_spawn;
			if (lifetime > uwsgi.shared->options[UWSGI_OPTION_MAX_WORKER_LIFETIME] && uwsgi.workers[i].manage_next_request == 1) {
				uwsgi_log("worker %d lifetime reached, it was running for %llu second(s)\n", i, (unsigned long long) lifetime);
				uwsgi.workers[i].manage_next_request = 0;
				// SIGWINCH asks the worker to exit after the current request
				kill(uwsgi.workers[i].pid, SIGWINCH);
			}
		}

		// need to find a better way
		//uwsgi.workers[i].last_running_time = uwsgi.workers[i].running_time;
	}
}
// Find a usable node for key: locate its subscription slot, mark nodes
// that stopped announcing themselves as failed, reap death-marked nodes
// with no in-flight references, and let the configured subscription
// algorithm pick among the survivors.
// Returns NULL when the key is unknown (keys longer than 255 bytes are
// rejected) or the whole slot was removed during reaping.
struct uwsgi_subscribe_node *uwsgi_get_subscribe_node(struct uwsgi_subscribe_slot **slot, char *key, uint16_t keylen) {

	if (keylen > 0xff)
		return NULL;

	struct uwsgi_subscribe_slot *current_slot = uwsgi_get_subscribe_slot(slot, key, keylen);
	if (!current_slot)
		return NULL;

	// slot found, move up in the list increasing hits
	current_slot->hits++;
	time_t now = uwsgi_now();
	struct uwsgi_subscribe_node *node = current_slot->nodes;
	while (node) {
		// is the node alive ?
		if (now - node->last_check > uwsgi.subscription_tolerance) {
			if (node->death_mark == 0)
				uwsgi_log("[uwsgi-subscription for pid %d] %.*s => marking %.*s as failed (no announce received in %d seconds)\n", (int) uwsgi.mypid, (int) keylen, key, (int) node->len, node->name, uwsgi.subscription_tolerance);
			node->failcnt++;
			node->death_mark = 1;
		}
		// do i need to remove the node ?
		if (node->death_mark && node->reference == 0) {
			// remove the node and move to next
			struct uwsgi_subscribe_node *dead_node = node;
			node = node->next;
			// if the slot has been removed, return NULL;
			if (uwsgi_remove_subscribe_node(slot, dead_node) == 1) {
				return NULL;
			}
			continue;
		}

		// let the algorithm inspect the list starting at this node
		struct uwsgi_subscribe_node *choosen_node = uwsgi.subscription_algo(current_slot, node);
		if (choosen_node)
			return choosen_node;

		node = node->next;
	}

	// list exhausted: give the algorithm a last chance (node is NULL here)
	return uwsgi.subscription_algo(current_slot, node);
}
void emperor_respawn(struct uwsgi_instance *c_ui, time_t mod) { // reload the uWSGI instance if (c_ui->use_config) { if (write(c_ui->pipe[0], "\0", 1) != 1) { uwsgi_error("write()"); } } else { if (write(c_ui->pipe[0], "\1", 1) != 1) { uwsgi_error("write()"); } } c_ui->respawns++; c_ui->last_mod = mod; c_ui->last_run = uwsgi_now(); uwsgi_log("[emperor] reload the uwsgi instance %s\n", c_ui->name); }
static int uwsgi_websockets_check_pingpong(struct wsgi_request *wsgi_req) { time_t now = uwsgi_now(); // first round if (wsgi_req->websocket_last_ping == 0) { return uwsgi_websockets_ping(wsgi_req); } // pong not received ? if (wsgi_req->websocket_last_pong < wsgi_req->websocket_last_ping) { if (wsgi_req->websocket_last_ping - wsgi_req->websocket_last_pong > uwsgi.websockets_pong_tolerance) { uwsgi_log("[uwsgi-websocket] no PONG received in %d seconds !!!\n", uwsgi.websockets_pong_tolerance); return -1; } return 0; } // pong received, send another ping if (now - wsgi_req->websocket_last_ping >= uwsgi.websockets_ping_freq) { return uwsgi_websockets_ping(wsgi_req); } return 0; }
// Global-mercy enforcement during full shutdown/reload: once the
// master_mercy deadline passes, SIGKILL and reap every worker that is
// still alive (in lazy mode only those flagged for destruction), then
// update the shutdown/reload/respawn accounting.
// Returns 0 on success, -1 when a lazy respawn fails.
int uwsgi_master_check_mercy() {

	int i, waitpid_status;

	if (uwsgi.master_mercy) {
		if (uwsgi.master_mercy < uwsgi_now()) {
			for (i = 1; i <= uwsgi.numproc; i++) {
				if (uwsgi.workers[i].pid > 0) {
					// in lazy mode only workers marked for destruction are reaped
					if (uwsgi.lazy && uwsgi.workers[i].destroy == 0)
						continue;
					uwsgi_log("worker %d (pid: %d) is taking too much time to die...NO MERCY !!!\n", i, uwsgi.workers[i].pid);
					if (!kill(uwsgi.workers[i].pid, SIGKILL)) {
						if (waitpid(uwsgi.workers[i].pid, &waitpid_status, 0) < 0) {
							uwsgi_error("waitpid()");
						}
						uwsgi.workers[i].pid = 0;
						// account for why workers are being killed
						if (uwsgi.to_hell) {
							// full shutdown
							uwsgi.ready_to_die++;
						}
						else if (uwsgi.to_heaven) {
							// full reload
							uwsgi.ready_to_reload++;
						}
						else if (uwsgi.to_outworld) {
							// lazy respawn of single workers
							uwsgi.lazy_respawned++;
							if (uwsgi_respawn_worker(i))
								return -1;
						}
					}
					else {
						uwsgi_error("kill()");
					}
				}
			}
			uwsgi.master_mercy = 0;
		}
	}
	return 0;
}