/*
 * gevent hub callback for an accept()-ready server socket (legacy variant:
 * pre-harakiri_options API). Grabs a free async core, accepts the connection
 * and spawns a greenlet running the request. Always returns None.
 *
 * args[0] carries the struct uwsgi_socket* smuggled through Python as a
 * plain integer.
 */
PyObject *py_uwsgi_gevent_main(PyObject * self, PyObject * args) {
	// hack to retrieve the socket address
	PyObject *py_uwsgi_sock = PyTuple_GetItem(args, 0);
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) PyLong_AsLong(py_uwsgi_sock);

	struct wsgi_request *wsgi_req = NULL;
edge:
	// take a free request slot from the async queue
	wsgi_req = find_first_available_wsgi_req();

	if (wsgi_req == NULL) {
		uwsgi_log("async queue is full !!!\n");
		goto clear;
	}

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	// NOTE(review): harakiri is armed BEFORE the accept here, and with the
	// legacy one-argument set_harakiri(); later loop engines in this file
	// arm it after a successful accept via set_harakiri(wsgi_req, ...).
	if (uwsgi.shared->options[UWSGI_OPTION_HARAKIRI] > 0) {
		set_harakiri(uwsgi.shared->options[UWSGI_OPTION_HARAKIRI]);
	}

	// accept the connection
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		free_req_queue;
		// NOTE(review): wsgi_req->async_id is read after free_req_queue;
		// this assumes the macro only marks the slot free without
		// invalidating the structure — confirm against its definition.
		// Also note the in_request flag set above is not cleared on this
		// error path.
		if (uwsgi_sock->retry && uwsgi_sock->retry[wsgi_req->async_id]) {
			goto edge;
		}
		goto clear;
	}

	// on linux we need to set the socket in non-blocking as it is not inherited
#ifdef __linux__
	uwsgi_socket_nb(wsgi_req->poll.fd);
#endif

	// hack to easily pass wsgi_req pointer to the greenlet
	PyTuple_SetItem(ugevent.greenlet_args, 1, PyLong_FromLong((long)wsgi_req));

	// spawn the request greenlet
	PyObject *new_gl = python_call(ugevent.spawn, ugevent.greenlet_args, 0, NULL);
	Py_DECREF(new_gl);

	if (uwsgi_sock->edge_trigger) {
#ifdef UWSGI_DEBUG
		uwsgi_log("i am an edge triggered socket !!!\n");
#endif
		// edge-triggered sockets must be drained: loop back and try to
		// accept again until the accept fails
		goto edge;
	}

clear:
	Py_INCREF(Py_None);
	return Py_None;
}
/*
 * Profiler hook installed via the CPython profiling API.
 * Logs Python-level and C-level calls together with the microseconds
 * elapsed since the previous logged call event. Always returns 0.
 * (No-op on PyPy, which lacks the required frame internals.)
 */
int uwsgi_python_profiler_call(PyObject *obj, PyFrameObject *frame, int what, PyObject *arg) {
#ifndef UWSGI_PYPY
	// timestamp of the previous CALL/C_CALL event (0 = none seen yet)
	static uint64_t prev_event_ts = 0;
	uint64_t current = uwsgi_micros();

	if (what == PyTrace_CALL || what == PyTrace_C_CALL) {
		uint64_t elapsed = (prev_event_ts == 0) ? 0 : current - prev_event_ts;
		prev_event_ts = current;

		if (what == PyTrace_CALL) {
			uwsgi_log("[uWSGI Python profiler %llu] CALL: %s (line %d) -> %s %d args, stacksize %d\n",
				(unsigned long long) elapsed,
				PyString_AsString(frame->f_code->co_filename),
				PyFrame_GetLineNumber(frame),
				PyString_AsString(frame->f_code->co_name),
				frame->f_code->co_argcount,
				frame->f_code->co_stacksize);
		}
		else {
			// for C calls the callee name comes from the called object, not the frame
			uwsgi_log("[uWSGI Python profiler %llu] C CALL: %s (line %d) -> %s %d args, stacksize %d\n",
				(unsigned long long) elapsed,
				PyString_AsString(frame->f_code->co_filename),
				PyFrame_GetLineNumber(frame),
				PyEval_GetFuncName(arg),
				frame->f_code->co_argcount,
				frame->f_code->co_stacksize);
		}
	}
#endif
	return 0;
}
/*
 * asyncio reader callback registered on a bound server socket: accepts a
 * pending connection, binds it to a free async core, then registers the
 * per-request protocol callback (add_reader) and a socket timeout
 * (call_later) on the loop. Always returns None; NULL only on arg-parse
 * failure.
 */
static PyObject *py_uwsgi_asyncio_accept(PyObject *self, PyObject *args) {
	long uwsgi_sock_ptr = 0;
	if (!PyArg_ParseTuple(args, "l:uwsgi_asyncio_accept", &uwsgi_sock_ptr)) {
		return NULL;
	}

	// take a free request slot from the async queue
	struct wsgi_request *wsgi_req = find_first_available_wsgi_req();
	if (wsgi_req == NULL) {
		uwsgi_async_queue_is_full(uwsgi_now());
		goto end;
	}

	uwsgi.wsgi_req = wsgi_req;
	// the socket pointer is smuggled through Python as a plain integer
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) uwsgi_sock_ptr;

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		// in case of errors (or thundering herd, just reset it)
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		goto end;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	uwsgi.async_proto_fd_table[wsgi_req->fd] = wsgi_req;

	// add callback for protocol
	if (PyObject_CallMethod(uasyncio.loop, "add_reader", "iOl", wsgi_req->fd, uasyncio.request, (long) wsgi_req) == NULL) {
		// BUGFIX: the original code fell through here and still registered a
		// call_later timeout for a request slot that was just returned to the
		// free queue. Drop the stale fd-table entry and bail out instead.
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		free_req_queue;
		PyErr_Print();
		goto end;
	}

	// add timeout
	PyObject *ob_timeout = PyObject_CallMethod(uasyncio.loop, "call_later", "iOli", uwsgi.socket_timeout, uasyncio.request, (long)wsgi_req, 1);
	if (!ob_timeout) {
		if (PyObject_CallMethod(uasyncio.loop, "remove_reader", "i", wsgi_req->fd) == NULL) PyErr_Print();
		// keep the fd table consistent with the slot release
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		free_req_queue;
	}
	else {
		// trick for reference counting: the TimerHandle is stashed in the
		// rb-timer pointer so it can be cancelled/DECREFed later
		wsgi_req->async_timeout = (struct uwsgi_rb_timer *) ob_timeout;
	}

end:
	Py_INCREF(Py_None);
	return Py_None;
}
/*
 * gevent hub callback for an accept()-ready server socket: accepts a
 * pending connection, binds it to a free async core and spawns a greenlet
 * running the request. Always returns None.
 *
 * args[0] carries the struct uwsgi_socket* and args[1] the index of the
 * gevent watcher monitoring that socket, both smuggled through Python as
 * plain integers.
 */
PyObject *py_uwsgi_gevent_main(PyObject * self, PyObject * args) {
	// hack to retrieve the socket address
	PyObject *py_uwsgi_sock = PyTuple_GetItem(args, 0);
	struct uwsgi_socket *uwsgi_sock = (struct uwsgi_socket *) PyLong_AsLong(py_uwsgi_sock);
	long watcher_index = PyInt_AsLong(PyTuple_GetItem(args, 1));

	struct wsgi_request *wsgi_req = NULL;
edge:
	// take a free request slot from the async queue
	wsgi_req = find_first_available_wsgi_req();

	if (wsgi_req == NULL) {
		uwsgi_async_queue_is_full(uwsgi_now());
		// no free cores: stop the watcher so this socket does not keep
		// firing while the queue is saturated
		PyObject_CallMethod(ugevent.watchers[watcher_index], "stop", NULL);
		goto clear;
	}

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		free_req_queue;
		// NOTE(review): wsgi_req->async_id is read after free_req_queue;
		// this assumes the macro only marks the slot free without
		// invalidating the structure — confirm against its definition.
		if (uwsgi_sock->retry && uwsgi_sock->retry[wsgi_req->async_id]) {
			goto edge;
		}
		// in case of errors (or thundering herd, just reset it)
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		goto clear;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	// hack to easily pass wsgi_req pointer to the greenlet
	PyTuple_SetItem(ugevent.greenlet_args, 1, PyLong_FromLong((long)wsgi_req));

	// spawn the request greenlet
	PyObject *new_gl = python_call(ugevent.spawn, ugevent.greenlet_args, 0, NULL);
	Py_DECREF(new_gl);

	if (uwsgi_sock->edge_trigger) {
#ifdef UWSGI_DEBUG
		uwsgi_log("i am an edge triggered socket !!!\n");
#endif
		// edge-triggered sockets must be drained: loop back and accept
		// again until the accept fails
		goto edge;
	}

clear:
	Py_INCREF(Py_None);
	return Py_None;
}
// used to set time after when we allow to cheap workers void set_next_cheap_time(void) { uint64_t now = uwsgi_micros(); #ifdef __linux__ if (uwsgi_cheaper_busyness_global.emergency_workers > 0) { // we have some emergency workers running, we will use minimum delay (2 cycles) to cheap workers // to have quicker recovery from big but short load spikes // otherwise we might wait a lot before cheaping all emergency workers if (uwsgi_cheaper_busyness_global.verbose) uwsgi_log("[busyness] %d emergency worker(s) running, using %d seconds cheaper timer\n", uwsgi_cheaper_busyness_global.emergency_workers, uwsgi.cheaper_overload*uwsgi_cheaper_busyness_global.backlog_multi); uwsgi_cheaper_busyness_global.next_cheap = now + uwsgi.cheaper_overload*uwsgi_cheaper_busyness_global.backlog_multi*1000000; } else { #endif // no emergency workers running, we use normal math for setting timer uwsgi_cheaper_busyness_global.next_cheap = now + uwsgi.cheaper_overload*uwsgi_cheaper_busyness_global.cheap_multi*1000000; #ifdef __linux__ } #endif }
/*
 * Line tracer installed via the CPython tracing API.
 * Logs every executed Python line together with the microseconds elapsed
 * since the previous logged line. Always returns 0.
 * (No-op on PyPy, which lacks the required frame internals.)
 */
int uwsgi_python_tracer(PyObject *obj, PyFrameObject *frame, int what, PyObject *arg) {
#ifndef UWSGI_PYPY
	// timestamp of the previous LINE event (0 = none seen yet)
	static uint64_t prev_line_ts = 0;
	uint64_t current = uwsgi_micros();

	if (what == PyTrace_LINE) {
		uint64_t elapsed = (prev_line_ts == 0) ? 0 : current - prev_line_ts;
		prev_line_ts = current;

		uwsgi_log("[uWSGI Python profiler %llu] file %s line %d: %s argc:%d\n",
			(unsigned long long) elapsed,
			PyString_AsString(frame->f_code->co_filename),
			PyFrame_GetLineNumber(frame),
			PyString_AsString(frame->f_code->co_name),
			frame->f_code->co_argcount);
	}
#endif
	return 0;
}
int cheaper_busyness_algo(void) { int i; // we use microseconds uint64_t t = uwsgi.cheaper_overload*1000000; // this happens on the first run, the required memory is allocated if (!uwsgi_cheaper_busyness_global.last_values) { uwsgi_cheaper_busyness_global.last_values = uwsgi_calloc(sizeof(uint64_t) * uwsgi.numproc); } // set defaults if (!uwsgi_cheaper_busyness_global.busyness_max) uwsgi_cheaper_busyness_global.busyness_max = 50; if (!uwsgi_cheaper_busyness_global.busyness_min) uwsgi_cheaper_busyness_global.busyness_min = 25; if (!uwsgi_cheaper_busyness_global.cheap_multi) uwsgi_cheaper_busyness_global.cheap_multi = 10; if (!uwsgi_cheaper_busyness_global.penalty) uwsgi_cheaper_busyness_global.penalty = 2; #ifdef __linux__ if (!uwsgi_cheaper_busyness_global.backlog_alert) uwsgi_cheaper_busyness_global.backlog_alert = 33; if (!uwsgi_cheaper_busyness_global.backlog_multi) uwsgi_cheaper_busyness_global.backlog_multi = 3; if (!uwsgi_cheaper_busyness_global.backlog_step) uwsgi_cheaper_busyness_global.backlog_step = 1; #endif if (!uwsgi_cheaper_busyness_global.min_multi) { // store initial multiplier so we don't loose its initial value uwsgi_cheaper_busyness_global.min_multi = uwsgi_cheaper_busyness_global.cheap_multi; // since this is first run we will print current values uwsgi_log("[busyness] settings: min=%d%%, max=%d%%, overload=%d, multiplier=%d, respawn penalty=%d\n", uwsgi_cheaper_busyness_global.busyness_min, uwsgi_cheaper_busyness_global.busyness_max, uwsgi.cheaper_overload, uwsgi_cheaper_busyness_global.cheap_multi, uwsgi_cheaper_busyness_global.penalty); #ifdef __linux__ uwsgi_log("[busyness] backlog alert is set to %d request(s), step is %d\n", uwsgi_cheaper_busyness_global.backlog_alert, uwsgi_cheaper_busyness_global.backlog_step); #endif } // initialize with current time if (uwsgi_cheaper_busyness_global.tcheck == 0) uwsgi_cheaper_busyness_global.tcheck = uwsgi_micros(); if (uwsgi_cheaper_busyness_global.next_cheap == 0) set_next_cheap_time(); int64_t 
active_workers = 0; uint64_t total_busyness = 0; uint64_t avg_busyness = 0; for (i = 0; i < uwsgi.numproc; i++) { if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) { active_workers++; } } #ifdef __linux__ int backlog = uwsgi.shared->options[UWSGI_OPTION_BACKLOG_STATUS]; #endif uint64_t now = uwsgi_micros(); if (now - uwsgi_cheaper_busyness_global.tcheck >= t) { uwsgi_cheaper_busyness_global.tcheck = now; for (i = 0; i < uwsgi.numproc; i++) { if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) { uint64_t percent = (( (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i])*100)/t); if (percent > 100) percent = 100; total_busyness += percent; if (uwsgi_cheaper_busyness_global.verbose && active_workers > 1) uwsgi_log("[busyness] worker nr %d %ds average busyness is at %d%%\n", i+1, uwsgi.cheaper_overload, percent); } uwsgi_cheaper_busyness_global.last_values[i] = uwsgi.workers[i+1].running_time; } avg_busyness = (active_workers ? total_busyness / active_workers : 0); if (uwsgi_cheaper_busyness_global.verbose) uwsgi_log("[busyness] %ds average busyness of %d worker(s) is at %d%%\n", uwsgi.cheaper_overload, active_workers, avg_busyness); if (avg_busyness > uwsgi_cheaper_busyness_global.busyness_max) { // we need to reset this to 0 since this is not idle cycle uwsgi_cheaper_busyness_global.tolerance_counter = 0; int decheaped = 0; for (i = 1; i <= uwsgi.numproc; i++) { if (uwsgi.workers[i].cheaped == 1 && uwsgi.workers[i].pid == 0) { decheaped++; if (decheaped >= uwsgi.cheaper_step) break; } } if (decheaped > 0) { // store information that we just spawned new workers uwsgi_cheaper_busyness_global.last_action = 1; // calculate number of seconds since last worker was cheaped if ((now - uwsgi_cheaper_busyness_global.last_cheaped)/uwsgi.cheaper_overload/1000000 <= uwsgi_cheaper_busyness_global.cheap_multi) { // worker was cheaped and then spawned back in less than current multiplier*cheaper_overload seconds // we will 
increase the multiplier so that next time worker will need to wait longer before being cheaped uwsgi_cheaper_busyness_global.cheap_multi += uwsgi_cheaper_busyness_global.penalty; uwsgi_log("[busyness] worker(s) respawned to fast, increasing chpeaper multiplier to %d (+%d)\n", uwsgi_cheaper_busyness_global.cheap_multi, uwsgi_cheaper_busyness_global.penalty); } else { decrease_multi(); } set_next_cheap_time(); uwsgi_log("[busyness] %ds average busyness is at %d%%, will spawn %d new worker(s)\n", uwsgi.cheaper_overload, avg_busyness, decheaped); } else { uwsgi_log("[busyness] %ds average busyness is at %d%% but we already started maximum number of workers (%d)\n", uwsgi.cheaper_overload, avg_busyness, uwsgi.numproc); } // return the maximum number of workers to spawn return decheaped; #ifdef __linux__ } else if (backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) { return spawn_emergency_worker(backlog); #endif } else if (avg_busyness < uwsgi_cheaper_busyness_global.busyness_min) { // with only 1 worker running there is no point in doing all that magic if (active_workers == 1) return 0; // we need to reset this to 0 since this is not idle cycle uwsgi_cheaper_busyness_global.tolerance_counter = 0; if (active_workers > uwsgi.cheaper_count) { // cheap a worker if too much are running if (now >= uwsgi_cheaper_busyness_global.next_cheap) { // lower cheaper multiplier if this is subsequent cheap if (uwsgi_cheaper_busyness_global.last_action == 2) decrease_multi(); set_next_cheap_time(); uwsgi_log("[busyness] %ds average busyness is at %d%%, cheap one of %d running workers\n", uwsgi.cheaper_overload, avg_busyness, active_workers); // store timestamp uwsgi_cheaper_busyness_global.last_cheaped = uwsgi_micros(); // store information that last action performed was cheaping worker uwsgi_cheaper_busyness_global.last_action = 2; if (uwsgi_cheaper_busyness_global.emergency_workers > 0) uwsgi_cheaper_busyness_global.emergency_workers--; return -1; 
} else if (uwsgi_cheaper_busyness_global.verbose) uwsgi_log("[busyness] need to wait %d more second(s) to cheap worker\n", (uwsgi_cheaper_busyness_global.next_cheap - now)/1000000); } } else { // with only 1 worker running there is no point in doing all that magic if (active_workers == 1) return 0; if (uwsgi_cheaper_busyness_global.emergency_workers > 0) // we had emergency workers running and we went down to the busyness // level that is high enough to slow down cheaping workers at extra speed uwsgi_cheaper_busyness_global.emergency_workers--; // we have min <= busyness <= max we need to check what happened before uwsgi_cheaper_busyness_global.tolerance_counter++; if (uwsgi_cheaper_busyness_global.tolerance_counter >= 3) { // we had three or more cycles when min <= busyness <= max, lets reset the cheaper timer // this is to prevent workers from being cheaped if we had idle cycles for almost all // time needed to cheap them, than a lot min<busy<max when we do not reset timer // and then another idle cycle than would trigger cheaping if (uwsgi_cheaper_busyness_global.verbose) uwsgi_log("[busyness] %ds average busyness is at %d%%, %d non-idle cycle(s), reseting cheaper timer\n", uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter); set_next_cheap_time(); } else { // we had < 3 idle cycles in a row so we won't reset idle timer yet since this might be just short load spike // but we need to add cheaper-overload seconds to the cheaper timer so this cycle isn't counted as idle if (uwsgi_cheaper_busyness_global.verbose) uwsgi_log("[busyness] %ds average busyness is at %d%%, %d non-idle cycle(s), adjusting cheaper timer\n", uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter); uwsgi_cheaper_busyness_global.next_cheap += uwsgi.cheaper_overload*1000000; } } #ifdef __linux__ } else if (backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) { // we check for backlog overload 
every cycle return spawn_emergency_worker(backlog); #endif } return 0; }
/*
 * One-time initialization of the busyness cheaper algorithm.
 * Returns 0 immediately when another cheaper algorithm is selected;
 * otherwise allocates the per-worker bookkeeping arrays, applies default
 * tunables, optionally registers metrics, and primes the timers.
 */
static int uwsgi_cheaper_busyness_init(void) {

	// bail out unless "busyness" is the requested cheaper algorithm
	if (!uwsgi.requested_cheaper_algo || strcmp(uwsgi.requested_cheaper_algo, "busyness")) return 0;

	// this happens on the first run, the required memory is allocated
	// (uwsgi_calloc is assumed to abort on failure — results are not checked)
	uwsgi_cheaper_busyness_global.last_values = uwsgi_calloc(sizeof(uint64_t) * uwsgi.numproc);
	uwsgi_cheaper_busyness_global.was_busy = uwsgi_calloc(sizeof(int) * uwsgi.numproc);

	if (uwsgi.has_metrics) {
		// allocate metrics memory
		uwsgi_cheaper_busyness_global.current_busyness = uwsgi_calloc(sizeof(uint64_t) * uwsgi.numproc);
	}

	// set defaults (only where the user did not configure a value)
	if (!uwsgi_cheaper_busyness_global.busyness_max) uwsgi_cheaper_busyness_global.busyness_max = 50;
	if (!uwsgi_cheaper_busyness_global.busyness_min) uwsgi_cheaper_busyness_global.busyness_min = 25;
	if (!uwsgi_cheaper_busyness_global.cheap_multi) uwsgi_cheaper_busyness_global.cheap_multi = 10;
	if (!uwsgi_cheaper_busyness_global.penalty) uwsgi_cheaper_busyness_global.penalty = 2;
#ifdef __linux__
	if (!uwsgi_cheaper_busyness_global.backlog_alert) uwsgi_cheaper_busyness_global.backlog_alert = 33;
	if (!uwsgi_cheaper_busyness_global.backlog_multi) uwsgi_cheaper_busyness_global.backlog_multi = 3;
	if (!uwsgi_cheaper_busyness_global.backlog_step) uwsgi_cheaper_busyness_global.backlog_step = 1;
	if (!uwsgi_cheaper_busyness_global.backlog_nonzero_alert) uwsgi_cheaper_busyness_global.backlog_nonzero_alert = 60;
#endif

	// store initial multiplier so we don't lose that value
	uwsgi_cheaper_busyness_global.min_multi = uwsgi_cheaper_busyness_global.cheap_multi;

	// since this is first run we will print current values
	uwsgi_log("[busyness] settings: min=%llu%%, max=%llu%%, overload=%llu, multiplier=%llu, respawn penalty=%llu\n",
		uwsgi_cheaper_busyness_global.busyness_min,
		uwsgi_cheaper_busyness_global.busyness_max,
		uwsgi.cheaper_overload,
		uwsgi_cheaper_busyness_global.cheap_multi,
		uwsgi_cheaper_busyness_global.penalty);
#ifdef __linux__
	uwsgi_log("[busyness] backlog alert is set to %d request(s), step is %d\n",
		uwsgi_cheaper_busyness_global.backlog_alert, uwsgi_cheaper_busyness_global.backlog_step);
	uwsgi_log("[busyness] backlog non-zero alert is set to %llu second(s)\n",
		uwsgi_cheaper_busyness_global.backlog_nonzero_alert);
#endif

	// register metrics if enabled
	if (uwsgi.has_metrics) {
		int i;
		char buf[4096];
		char buf2[4096];
		for (i = 0; i < uwsgi.numproc; i++) {
			// per-worker gauge: worker.<N>.plugin.cheaper_busyness.busyness
			if (snprintf(buf, 4096, "worker.%d.plugin.cheaper_busyness.busyness", i+1) <= 0) {
				uwsgi_log("[busyness] unable to register busyness metric for worker %d\n", i+1);
				exit(1);
			}
			// matching OID 3.<N>.100.1
			if (snprintf(buf2, 4096, "3.%d.100.1", i+1) <= 0) {
				uwsgi_log("[busyness] unable to register busyness metric oid for worker %d\n", i+1);
				exit(1);
			}
			// "ptr" metrics read their value straight from the given address
			uwsgi_register_metric(buf, buf2, UWSGI_METRIC_GAUGE, "ptr", &uwsgi_cheaper_busyness_global.current_busyness[i], 0, NULL);
		}
		uwsgi_register_metric("plugin.cheaper_busyness.total_avg_busyness", "4.100.1", UWSGI_METRIC_GAUGE, "ptr", &uwsgi_cheaper_busyness_global.total_avg_busyness, 0, NULL);
		uwsgi_log("[busyness] metrics registered\n");
	}

	// initialize timers
	uwsgi_cheaper_busyness_global.tcheck = uwsgi_micros();
	set_next_cheap_time();

	return 0;
}
/*
 * Busyness cheaper algorithm (current variant).
 * Called every cheaper cycle; computes per-worker busyness over the
 * cheaper_overload window from running_time deltas and decides to spawn
 * workers (returns N > 0), cheap one (returns -1) or do nothing (0).
 * can_spawn gates every path that would start a new worker.
 */
int cheaper_busyness_algo(int can_spawn) {

	int i;
	// we use microseconds
	uint64_t t = uwsgi.cheaper_overload*1000000;

	int active_workers = 0;
	uint64_t total_busyness = 0;
	uint64_t avg_busyness = 0;

	// count live workers and accumulate "was busy" samples between checks
	for (i = 0; i < uwsgi.numproc; i++) {
		if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
			active_workers++;
			uwsgi_cheaper_busyness_global.was_busy[i] += uwsgi_worker_is_busy(i+1);
		} else {
			uwsgi_cheaper_busyness_global.was_busy[i] = 0;
		}
	}

#ifdef __linux__
	int backlog = uwsgi.shared->backlog;
#endif

	uint64_t now = uwsgi_micros();
	// only evaluate busyness once a full cheaper_overload window has elapsed
	if (now - uwsgi_cheaper_busyness_global.tcheck >= t) {
		uwsgi_cheaper_busyness_global.tcheck = now;
		for (i = 0; i < uwsgi.numproc; i++) {
			if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
				// busyness = share of the window spent running requests
				uint64_t percent = (( (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i])*100)/t);
				if (percent > 100) {
					percent = 100;
				}
				else if (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i] == 0 && percent == 0 && uwsgi_cheaper_busyness_global.was_busy[i] > 0) {
					// running_time did not change but workers were busy
					// this means that workers had response times > busyness check interval
					if (uwsgi_cheaper_busyness_global.verbose) {
						uwsgi_log("[busyness] worker %d was busy %d time(s) in last cycle but no request was completed during this time, marking as 100%% busy\n", i+1, uwsgi_cheaper_busyness_global.was_busy[i]);
					}
					percent = 100;
				}
				uwsgi_cheaper_busyness_global.was_busy[i] = 0;
				total_busyness += percent;
				if (uwsgi_cheaper_busyness_global.verbose && active_workers > 1)
					uwsgi_log("[busyness] worker nr %d %llus average busyness is at %llu%%\n", i+1, uwsgi.cheaper_overload, percent);
				if (uwsgi.has_metrics) {
					// update metrics
					uwsgi_wlock(uwsgi.metrics_lock);
					uwsgi_cheaper_busyness_global.current_busyness[i] = percent;
					uwsgi_rwunlock(uwsgi.metrics_lock);
				}
			}
			uwsgi_cheaper_busyness_global.last_values[i] = uwsgi.workers[i+1].running_time;
		}

		avg_busyness = (active_workers ? total_busyness / active_workers : 0);

		if (uwsgi.has_metrics) {
			uwsgi_wlock(uwsgi.metrics_lock);
			uwsgi_cheaper_busyness_global.total_avg_busyness = avg_busyness;
			uwsgi_rwunlock(uwsgi.metrics_lock);
		}

		if (uwsgi_cheaper_busyness_global.verbose)
			uwsgi_log("[busyness] %ds average busyness of %d worker(s) is at %d%%\n", (int) uwsgi.cheaper_overload, (int) active_workers, (int) avg_busyness);

		if (avg_busyness > uwsgi_cheaper_busyness_global.busyness_max) {
			// overloaded: try to bring back cheaped workers
			// we need to reset this to 0 since this is not idle cycle
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

			int decheaped = 0;
			if (can_spawn) {
				// count cheaped workers we may revive, capped by cheaper_step
				for (i = 1; i <= uwsgi.numproc; i++) {
					if (uwsgi.workers[i].cheaped == 1 && uwsgi.workers[i].pid == 0) {
						decheaped++;
						if (decheaped >= uwsgi.cheaper_step) break;
					}
				}
			}
			if (decheaped > 0) {
				// store information that we just spawned new workers
				uwsgi_cheaper_busyness_global.last_action = 1;

				// calculate number of seconds since last worker was cheaped
				if ((now - uwsgi_cheaper_busyness_global.last_cheaped)/uwsgi.cheaper_overload/1000000 <= uwsgi_cheaper_busyness_global.cheap_multi) {
					// worker was cheaped and then spawned back in less than current multiplier*cheaper_overload seconds
					// we will increase the multiplier so that next time worker will need to wait longer before being cheaped
					uwsgi_cheaper_busyness_global.cheap_multi += uwsgi_cheaper_busyness_global.penalty;
					uwsgi_log("[busyness] worker(s) respawned to fast, increasing cheaper multiplier to %llu (+%llu)\n", uwsgi_cheaper_busyness_global.cheap_multi, uwsgi_cheaper_busyness_global.penalty);
				} else {
					decrease_multi();
				}
				set_next_cheap_time();
				uwsgi_log("[busyness] %llus average busyness is at %llu%%, will spawn %d new worker(s)\n", uwsgi.cheaper_overload, avg_busyness, decheaped);
			} else {
				uwsgi_log("[busyness] %llus average busyness is at %llu%% but we already started maximum number of workers available with current limits (%d)\n", uwsgi.cheaper_overload, avg_busyness, active_workers);
			}
			// return the maximum number of workers to spawn
			return decheaped;
#ifdef __linux__
		} else if (can_spawn && backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
			// busyness above max but backlog is alarming: emergency spawn
			return spawn_emergency_worker(backlog);
#endif
		} else if (avg_busyness < uwsgi_cheaper_busyness_global.busyness_min) {
			// idle cycle: consider cheaping one worker
			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			// we need to reset this to 0 since this is not idle cycle
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

			if (active_workers > uwsgi.cheaper_count) {
				// cheap a worker if too much are running
				if (now >= uwsgi_cheaper_busyness_global.next_cheap) {
					// lower cheaper multiplier if this is subsequent cheap
					if (uwsgi_cheaper_busyness_global.last_action == 2) decrease_multi();
					set_next_cheap_time();
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, cheap one of %d running workers\n", uwsgi.cheaper_overload, avg_busyness, (int) active_workers);
					// store timestamp
					uwsgi_cheaper_busyness_global.last_cheaped = uwsgi_micros();
					// store information that last action performed was cheaping worker
					uwsgi_cheaper_busyness_global.last_action = 2;
					if (uwsgi_cheaper_busyness_global.emergency_workers > 0) uwsgi_cheaper_busyness_global.emergency_workers--;
					return -1;
				} else if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] need to wait %llu more second(s) to cheap worker\n", (uwsgi_cheaper_busyness_global.next_cheap - now)/1000000);
			}
		} else {
			// min <= busyness <= max: neither spawn nor cheap
			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			if (uwsgi_cheaper_busyness_global.emergency_workers > 0)
				// we had emergency workers running and we went down to the busyness
				// level that is high enough to slow down cheaping workers at extra speed
				uwsgi_cheaper_busyness_global.emergency_workers--;

			// we have min <= busyness <= max we need to check what happened before
			uwsgi_cheaper_busyness_global.tolerance_counter++;
			if (uwsgi_cheaper_busyness_global.tolerance_counter >= 3) {
				// we had three or more cycles when min <= busyness <= max, lets reset the cheaper timer
				// this is to prevent workers from being cheaped if we had idle cycles for almost all
				// time needed to cheap them, than a lot min<busy<max when we do not reset timer
				// and then another idle cycle than would trigger cheaping
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, %llu non-idle cycle(s), resetting cheaper timer\n", uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				set_next_cheap_time();
			} else {
				// we had < 3 idle cycles in a row so we won't reset idle timer yet since this might be just short load spike
				// but we need to add cheaper-overload seconds to the cheaper timer so this cycle isn't counted as idle
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, %llu non-idle cycle(s), adjusting cheaper timer\n", uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				uwsgi_cheaper_busyness_global.next_cheap += uwsgi.cheaper_overload*1000000;
			}
		}
	}
#ifdef __linux__
	else if (can_spawn && backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
		// we check for backlog overload every cycle
		return spawn_emergency_worker(backlog);
	}
	else if (backlog > 0) {
		if (uwsgi_cheaper_busyness_global.backlog_is_nonzero) {
			// backlog was > 0 last time, check timestamp and spawn workers if needed
			if (can_spawn && (now - uwsgi_cheaper_busyness_global.backlog_nonzero_since)/1000000 >= uwsgi_cheaper_busyness_global.backlog_nonzero_alert) {
				uwsgi_log("[busyness] backlog was non-zero for %llu second(s), spawning new worker(s)\n", (now - uwsgi_cheaper_busyness_global.backlog_nonzero_since)/1000000);
				uwsgi_cheaper_busyness_global.backlog_nonzero_since = now;
				return spawn_emergency_worker(backlog);
			}
		} else {
			// this is first > 0 pass, setup timer
			if (uwsgi_cheaper_busyness_global.verbose)
				uwsgi_log("[busyness] backlog is starting to fill (%d)\n", backlog);
			uwsgi_cheaper_busyness_global.backlog_is_nonzero = 1;
			uwsgi_cheaper_busyness_global.backlog_nonzero_since = now;
		}
	}
	else if (uwsgi_cheaper_busyness_global.backlog_is_nonzero) {
		// backlog drained: clear the non-zero tracking state
		if (uwsgi_cheaper_busyness_global.verbose)
			uwsgi_log("[busyness] backlog is now empty\n");
		uwsgi_cheaper_busyness_global.backlog_is_nonzero = 0;
	}
#endif

	return 0;
}
/*
 * Encrypted UDP logger plugin entry point.
 * On first call (ul->configured == 0) it parses the logger argument
 * key/value list, resolves the UDP destination, creates the datagram
 * socket and initializes the cipher context; configuration errors are
 * fatal (exit(1)). On every call it builds
 * "<micros> [prefix ]<message>", encrypts it with the configured EVP
 * cipher and sends it as a single datagram.
 * Returns sendto()'s result, or -1 on buffer/encryption failure.
 */
static ssize_t uwsgi_crypto_logger(struct uwsgi_logger *ul, char *message, size_t len) {

	struct uwsgi_crypto_logger_conf *uclc = (struct uwsgi_crypto_logger_conf *) ul->data;

	if (!ul->configured) {

		// lazy one-time setup; uclc is re-pointed at a fresh config blob
		uclc = uwsgi_calloc(sizeof(struct uwsgi_crypto_logger_conf));

		if (uwsgi_kvlist_parse(ul->arg, strlen(ul->arg), ',', '=',
			"addr", &uclc->addr,
			"algo", &uclc->algo,
			"secret", &uclc->secret,
			"iv", &uclc->iv,
			"prefix", &uclc->prefix,
			NULL)) {
			uwsgi_log_safe("[uwsgi-logcrypto] unable to parse options\n");
			exit(1);
		}

		if (!uclc->addr || !uclc->algo || !uclc->secret) {
			uwsgi_log_safe("[uwsgi-logcrypto] you have to specify at least addr,algo and secret options\n");
			exit(1);
		}

		if (uclc->prefix) {
			// cache the prefix length so it is not recomputed per message
			uclc->prefix_len = strlen(uclc->prefix);
		}

		// destination must be host:port
		char *colon = strchr(uclc->addr, ':');
		if (!colon) {
			uwsgi_log_safe("[uwsgi-logcrypto] invalid UDP address\n");
			exit(1);
		}
		ul->addr_len = socket_to_in_addr(uclc->addr, colon, 0, &ul->addr.sa_in);

		ul->fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (ul->fd < 0) {
			uwsgi_error_safe("uwsgi_crypto_logger()/socket()");
			exit(1);
		}

		uwsgi_crypto_logger_setup_encryption(uclc);

		ul->data = uclc;
		ul->configured = 1;
	}

	// plaintext layout: "<micros> [prefix ]<message>"
	struct uwsgi_buffer *ub = uwsgi_buffer_new(uwsgi.page_size);
	if (uwsgi_buffer_num64(ub, uwsgi_micros())) goto error;
	if (uwsgi_buffer_append(ub, " ", 1)) goto error;
	if (uclc->prefix) {
		if (uwsgi_buffer_append(ub, uclc->prefix, uclc->prefix_len)) goto error;
		if (uwsgi_buffer_append(ub, " ", 1)) goto error;
	}
	if (uwsgi_buffer_append(ub, message, len)) goto error;

	// let's encrypt the message
	// output may grow by up to one cipher block over the plaintext
	unsigned char *encrypted = uwsgi_malloc(ub->pos + EVP_MAX_BLOCK_LENGTH);
	// re-init keeps the previously configured cipher/key/iv
	if (EVP_EncryptInit_ex(uclc->encrypt_ctx, NULL, NULL, NULL, NULL) <= 0) {
		uwsgi_error_safe("[uwsgi-logcrypto] EVP_EncryptInit_ex()");
		free(encrypted);
		goto error;
	}

	int e_len = 0;
	// NOTE(review): the two calls below report via uwsgi_error while the
	// init path uses uwsgi_error_safe — confirm whether the non-safe
	// variant is acceptable in this logging context.
	if (EVP_EncryptUpdate(uclc->encrypt_ctx, encrypted, &e_len, (unsigned char *) ub->buf, ub->pos) <= 0) {
		uwsgi_error("[uwsgi-logcrypto] EVP_EncryptUpdate()");
		free(encrypted);
		goto error;
	}

	int tmplen = 0;
	if (EVP_EncryptFinal_ex(uclc->encrypt_ctx, encrypted + e_len, &tmplen) <= 0) {
		uwsgi_error("[uwsgi-logcrypto] EVP_EncryptFinal_ex()");
		free(encrypted);
		goto error;
	}

	uwsgi_buffer_destroy(ub);
	// fire-and-forget datagram; caller gets sendto()'s return value
	ssize_t rlen = sendto(ul->fd, encrypted, e_len + tmplen, 0, (struct sockaddr *) &ul->addr.sa_in, ul->addr_len);
	free(encrypted);
	return rlen;

error:
	uwsgi_buffer_destroy(ub);
	return -1;
}
/*
 * tornado IOLoop handler for an accept()-ready server socket: accepts a
 * pending connection, binds it to a free async core and registers the
 * per-request protocol handler on the IOLoop. Always returns None; NULL
 * only on arg-parse failure.
 */
PyObject *py_uwsgi_tornado_accept(PyObject *self, PyObject *args) {
	int fd = -1;
	PyObject *events = NULL;
	if (!PyArg_ParseTuple(args, "iO:uwsgi_tornado_accept", &fd, &events)) {
		return NULL;
	}

	// take a free request slot from the async queue
	struct wsgi_request *wsgi_req = find_first_available_wsgi_req();
	if (wsgi_req == NULL) {
		uwsgi_async_queue_is_full(uwsgi_now());
		goto end;
	}

	uwsgi.wsgi_req = wsgi_req;

	// TODO better to move it to a function api ...
	// map the fd back to its uwsgi_socket
	struct uwsgi_socket *uwsgi_sock = uwsgi.sockets;
	while(uwsgi_sock) {
		if (uwsgi_sock->fd == fd) break;
		uwsgi_sock = uwsgi_sock->next;
	}

	if (!uwsgi_sock) {
		free_req_queue;
		goto end;
	}

	// fill wsgi_request structure
	wsgi_req_setup(wsgi_req, wsgi_req->async_id, uwsgi_sock );

	// mark core as used
	uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 1;

	// accept the connection (since uWSGI 1.5 all of the sockets are non-blocking)
	if (wsgi_req_simple_accept(wsgi_req, uwsgi_sock->fd)) {
		// in case of errors (or thundering herd, just reset it)
		uwsgi.workers[uwsgi.mywid].cores[wsgi_req->async_id].in_request = 0;
		free_req_queue;
		goto end;
	}

	wsgi_req->start_of_request = uwsgi_micros();
	wsgi_req->start_of_request_in_sec = wsgi_req->start_of_request/1000000;

	// enter harakiri mode
	if (uwsgi.harakiri_options.workers > 0) {
		// BUGFIX: set_harakiri() takes the request as its first argument in
		// the harakiri_options API (as in the asyncio/gevent loop engines);
		// the old one-argument call passed the timeout where the request
		// pointer belongs.
		set_harakiri(wsgi_req, uwsgi.harakiri_options.workers);
	}

	uwsgi.async_proto_fd_table[wsgi_req->fd] = wsgi_req;

	// add callback for protocol
	if (PyObject_CallMethod(utornado.ioloop, "add_handler", "iOO", wsgi_req->fd, utornado.request, utornado.read) == NULL) {
		// drop the stale fd-table entry before releasing the slot
		uwsgi.async_proto_fd_table[wsgi_req->fd] = NULL;
		free_req_queue;
		PyErr_Print();
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}