Example #1
int cheaper_busyness_algo(void) {

	int i;
	// we use microseconds
	uint64_t t = uwsgi.cheaper_overload*1000000;

	// this happens on the first run, the required memory is allocated
	if (!uwsgi_cheaper_busyness_global.last_values) {
		uwsgi_cheaper_busyness_global.last_values = uwsgi_calloc(sizeof(uint64_t) * uwsgi.numproc);
	}

	// set defaults
	if (!uwsgi_cheaper_busyness_global.busyness_max) uwsgi_cheaper_busyness_global.busyness_max = 50;
	if (!uwsgi_cheaper_busyness_global.busyness_min) uwsgi_cheaper_busyness_global.busyness_min = 25;
	if (!uwsgi_cheaper_busyness_global.cheap_multi) uwsgi_cheaper_busyness_global.cheap_multi = 10;
	if (!uwsgi_cheaper_busyness_global.penalty) uwsgi_cheaper_busyness_global.penalty = 2;

#ifdef __linux__
	if (!uwsgi_cheaper_busyness_global.backlog_alert) uwsgi_cheaper_busyness_global.backlog_alert = 33;
	if (!uwsgi_cheaper_busyness_global.backlog_multi) uwsgi_cheaper_busyness_global.backlog_multi = 3;
	if (!uwsgi_cheaper_busyness_global.backlog_step) uwsgi_cheaper_busyness_global.backlog_step = 1;
#endif

	if (!uwsgi_cheaper_busyness_global.min_multi) {
		// store the initial multiplier so we don't lose its original value
		uwsgi_cheaper_busyness_global.min_multi = uwsgi_cheaper_busyness_global.cheap_multi;
		// since this is the first run, print the current settings
		uwsgi_log("[busyness] settings: min=%d%%, max=%d%%, overload=%d, multiplier=%d, respawn penalty=%d\n",
			uwsgi_cheaper_busyness_global.busyness_min, uwsgi_cheaper_busyness_global.busyness_max,
			uwsgi.cheaper_overload, uwsgi_cheaper_busyness_global.cheap_multi, uwsgi_cheaper_busyness_global.penalty);
#ifdef __linux__
		uwsgi_log("[busyness] backlog alert is set to %d request(s), step is %d\n",
			uwsgi_cheaper_busyness_global.backlog_alert, uwsgi_cheaper_busyness_global.backlog_step);
#endif
	}

	// initialize with current time
	if (uwsgi_cheaper_busyness_global.tcheck == 0) uwsgi_cheaper_busyness_global.tcheck = uwsgi_micros();

	if (uwsgi_cheaper_busyness_global.next_cheap == 0) set_next_cheap_time();

	int64_t active_workers = 0;
	uint64_t total_busyness = 0;
	uint64_t avg_busyness = 0;

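	// count workers that are currently up (not cheaped)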
	for (i = 0; i < uwsgi.numproc; i++) {
		
		if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
			active_workers++;
		}
	}

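	// current number of requests waiting in the listen queue (Linux only)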
#ifdef __linux__
	int backlog = uwsgi.shared->options[UWSGI_OPTION_BACKLOG_STATUS];
#endif

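	// evaluate busyness only once every cheaper_overload seconds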
	uint64_t now = uwsgi_micros();
	if (now - uwsgi_cheaper_busyness_global.tcheck >= t) {
		uwsgi_cheaper_busyness_global.tcheck = now;
		for (i = 0; i < uwsgi.numproc; i++) {
			if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
				uint64_t percent = (( (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i])*100)/t);
				if (percent > 100) percent = 100;
				total_busyness += percent;
				if (uwsgi_cheaper_busyness_global.verbose && active_workers > 1)
					uwsgi_log("[busyness] worker nr %d %ds average busyness is at %d%%\n",
						i+1, uwsgi.cheaper_overload, percent);
			}
			uwsgi_cheaper_busyness_global.last_values[i] = uwsgi.workers[i+1].running_time;
		}

		avg_busyness = (active_workers ? total_busyness / active_workers : 0);
		if (uwsgi_cheaper_busyness_global.verbose)
			uwsgi_log("[busyness] %ds average busyness of %d worker(s) is at %d%%\n",
				uwsgi.cheaper_overload, active_workers, avg_busyness);

		if (avg_busyness > uwsgi_cheaper_busyness_global.busyness_max) {

			// reset the counter of consecutive min <= busyness <= max cycles, this cycle is outside that band
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

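			// count how many cheaped workers could be brought back, at most cheaper_step at a time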
			int decheaped = 0;
			for (i = 1; i <= uwsgi.numproc; i++) {
				
				if (uwsgi.workers[i].cheaped == 1 && uwsgi.workers[i].pid == 0) {
					decheaped++;
					if (decheaped >= uwsgi.cheaper_step) break;
				}
			}

			if (decheaped > 0) {
				// store information that we just spawned new workers
				uwsgi_cheaper_busyness_global.last_action = 1;

				// calculate number of seconds since last worker was cheaped
				if ((now - uwsgi_cheaper_busyness_global.last_cheaped)/uwsgi.cheaper_overload/1000000 <= uwsgi_cheaper_busyness_global.cheap_multi) {
					// worker was cheaped and then spawned back in less than current multiplier*cheaper_overload seconds
					// we will increase the multiplier so that next time worker will need to wait longer before being cheaped
					uwsgi_cheaper_busyness_global.cheap_multi += uwsgi_cheaper_busyness_global.penalty;
					uwsgi_log("[busyness] worker(s) respawned to fast, increasing chpeaper multiplier to %d (+%d)\n",
						uwsgi_cheaper_busyness_global.cheap_multi, uwsgi_cheaper_busyness_global.penalty);
				} else {
					decrease_multi();
				}

				set_next_cheap_time();

				uwsgi_log("[busyness] %ds average busyness is at %d%%, will spawn %d new worker(s)\n",
					uwsgi.cheaper_overload, avg_busyness, decheaped);
			} else {
				uwsgi_log("[busyness] %ds average busyness is at %d%% but we already started maximum number of workers (%d)\n",
					uwsgi.cheaper_overload, avg_busyness, uwsgi.numproc);
			}

			// return the maximum number of workers to spawn
			return decheaped;

#ifdef __linux__
		} else if (backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
			return spawn_emergency_worker(backlog);
#endif

		} else if (avg_busyness < uwsgi_cheaper_busyness_global.busyness_min) {

			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			// reset the counter of consecutive min <= busyness <= max cycles, this cycle is outside that band
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

			if (active_workers > uwsgi.cheaper_count) {
				// cheap a worker if too many are running
				if (now >= uwsgi_cheaper_busyness_global.next_cheap) {
					// lower cheaper multiplier if this is subsequent cheap
					if (uwsgi_cheaper_busyness_global.last_action == 2) decrease_multi();
					set_next_cheap_time();

					uwsgi_log("[busyness] %ds average busyness is at %d%%, cheap one of %d running workers\n",
						uwsgi.cheaper_overload, avg_busyness, active_workers);
					// store timestamp
					uwsgi_cheaper_busyness_global.last_cheaped = uwsgi_micros();

					// store information that last action performed was cheaping worker
					uwsgi_cheaper_busyness_global.last_action = 2;

					if (uwsgi_cheaper_busyness_global.emergency_workers > 0)
						uwsgi_cheaper_busyness_global.emergency_workers--;

					return -1;
				} else if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] need to wait %d more second(s) to cheap worker\n", (uwsgi_cheaper_busyness_global.next_cheap - now)/1000000);
			}

		} else {
			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			if (uwsgi_cheaper_busyness_global.emergency_workers > 0)
				// we had emergency workers running and busyness dropped back into a range that is
				// high enough to stop cheaping workers at the accelerated (emergency) rate
				uwsgi_cheaper_busyness_global.emergency_workers--;

			// we have min <= busyness <= max, so check what happened in the previous cycles

			uwsgi_cheaper_busyness_global.tolerance_counter++;
			if (uwsgi_cheaper_busyness_global.tolerance_counter >= 3) {
				// we had three or more cycles with min <= busyness <= max, let's reset the cheaper timer
				// this prevents workers from being cheaped when we had idle cycles for almost all of the
				// time needed to cheap them, then a run of min <= busyness <= max cycles during which the
				// timer is not reset, and then one more idle cycle that would finally trigger cheaping
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %ds average busyness is at %d%%, %d non-idle cycle(s), reseting cheaper timer\n",
						uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				set_next_cheap_time();
			} else {
				// we had < 3 non-idle cycles in a row, so don't reset the cheaper timer yet since this might be just a short load spike
				// but we need to add cheaper-overload seconds to the cheaper timer so this cycle isn't counted as idle
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %ds average busyness is at %d%%, %d non-idle cycle(s), adjusting cheaper timer\n",
						uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				uwsgi_cheaper_busyness_global.next_cheap += uwsgi.cheaper_overload*1000000;
			}
		}

#ifdef __linux__
	} else if (backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
		// we check for backlog overload every cycle
		return spawn_emergency_worker(backlog);
#endif
	}

	return 0;
}
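
The heart of the algorithm above is the per-worker busyness figure: the increase of a worker's running_time (microseconds spent handling requests) over one cheaper_overload window, clamped at 100% and then averaged over the active workers. The following standalone sketch only illustrates that arithmetic; the window length and the sample deltas are made-up values, not uWSGI data.

#include <stdio.h>
#include <stdint.h>

int main(void) {
	// check window: cheaper_overload (here 10 s) expressed in microseconds
	uint64_t t = 10 * 1000000ULL;

	// per-worker running_time deltas, i.e. microseconds spent handling
	// requests since the previous check, for three hypothetical workers
	uint64_t deltas[] = { 2500000ULL, 9000000ULL, 12000000ULL };
	int active_workers = 3;
	uint64_t total_busyness = 0;

	for (int i = 0; i < active_workers; i++) {
		// same formula as above: busy time as a share of the check window
		uint64_t percent = (deltas[i] * 100) / t;
		if (percent > 100) percent = 100;	// a worker cannot be more than 100% busy
		total_busyness += percent;
		printf("worker %d busyness: %llu%%\n", i + 1, (unsigned long long) percent);
	}

	uint64_t avg_busyness = active_workers ? total_busyness / active_workers : 0;
	printf("average busyness: %llu%%\n", (unsigned long long) avg_busyness);
	// with busyness_max at its default of 50% this average (71%) would make
	// the algorithm ask for new workers
	return 0;
}
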
Example #2
int cheaper_busyness_algo(int can_spawn) {

	int i;
	// we use microseconds
	uint64_t t = uwsgi.cheaper_overload*1000000;

	int active_workers = 0;
	uint64_t total_busyness = 0;
	uint64_t avg_busyness = 0;

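	// count active workers and accumulate how many times each was observed busy since the last check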
	for (i = 0; i < uwsgi.numproc; i++) {
		if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
			active_workers++;
			uwsgi_cheaper_busyness_global.was_busy[i] += uwsgi_worker_is_busy(i+1);
		} else {
			uwsgi_cheaper_busyness_global.was_busy[i] = 0;
		}
	}

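	// current number of requests waiting in the listen queue (Linux only)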
#ifdef __linux__
	int backlog = uwsgi.shared->backlog;
#endif

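	// evaluate busyness only once every cheaper_overload seconds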
	uint64_t now = uwsgi_micros();
	if (now - uwsgi_cheaper_busyness_global.tcheck >= t) {
		uwsgi_cheaper_busyness_global.tcheck = now;
		for (i = 0; i < uwsgi.numproc; i++) {
			if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
				uint64_t percent = (( (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i])*100)/t);
				if (percent > 100) {
					percent = 100;
				}
				else if (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i] == 0 && percent == 0 && uwsgi_cheaper_busyness_global.was_busy[i] > 0) {
					// running_time did not change but workers were busy
					// this means that workers had response times > busyness check interval
					if (uwsgi_cheaper_busyness_global.verbose) {
						uwsgi_log("[busyness] worker %d was busy %d time(s) in last cycle but no request was completed during this time, marking as 100%% busy\n",
							i+1, uwsgi_cheaper_busyness_global.was_busy[i]);
					}
					percent = 100;
				}
				uwsgi_cheaper_busyness_global.was_busy[i] = 0;
				total_busyness += percent;
				if (uwsgi_cheaper_busyness_global.verbose && active_workers > 1)
					uwsgi_log("[busyness] worker nr %d %llus average busyness is at %llu%%\n",
						i+1, uwsgi.cheaper_overload, percent);
				if (uwsgi.has_metrics) {
					// update metrics
					uwsgi_wlock(uwsgi.metrics_lock);
					uwsgi_cheaper_busyness_global.current_busyness[i] = percent;
					uwsgi_rwunlock(uwsgi.metrics_lock);
				}
			}
			uwsgi_cheaper_busyness_global.last_values[i] = uwsgi.workers[i+1].running_time;
		}

		avg_busyness = (active_workers ? total_busyness / active_workers : 0);
		if (uwsgi.has_metrics) {
			uwsgi_wlock(uwsgi.metrics_lock);
			uwsgi_cheaper_busyness_global.total_avg_busyness = avg_busyness;
			uwsgi_rwunlock(uwsgi.metrics_lock);
		}

		if (uwsgi_cheaper_busyness_global.verbose)
			uwsgi_log("[busyness] %ds average busyness of %d worker(s) is at %d%%\n",
				(int) uwsgi.cheaper_overload, (int) active_workers, (int) avg_busyness);

		if (avg_busyness > uwsgi_cheaper_busyness_global.busyness_max) {

			// reset the counter of consecutive min <= busyness <= max cycles, this cycle is outside that band
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

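			// when spawning is allowed, count how many cheaped workers could be brought back (at most cheaper_step)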
			int decheaped = 0;
			if (can_spawn) {
				for (i = 1; i <= uwsgi.numproc; i++) {
					if (uwsgi.workers[i].cheaped == 1 && uwsgi.workers[i].pid == 0) {
						decheaped++;
						if (decheaped >= uwsgi.cheaper_step) break;
					}
				}
			}

			if (decheaped > 0) {
				// store information that we just spawned new workers
				uwsgi_cheaper_busyness_global.last_action = 1;

				// calculate number of seconds since last worker was cheaped
				if ((now - uwsgi_cheaper_busyness_global.last_cheaped)/uwsgi.cheaper_overload/1000000 <= uwsgi_cheaper_busyness_global.cheap_multi) {
					// worker was cheaped and then spawned back in less than current multiplier*cheaper_overload seconds
					// we will increase the multiplier so that next time worker will need to wait longer before being cheaped
					uwsgi_cheaper_busyness_global.cheap_multi += uwsgi_cheaper_busyness_global.penalty;
					uwsgi_log("[busyness] worker(s) respawned to fast, increasing cheaper multiplier to %llu (+%llu)\n",
						uwsgi_cheaper_busyness_global.cheap_multi, uwsgi_cheaper_busyness_global.penalty);
				} else {
					decrease_multi();
				}

				set_next_cheap_time();

				uwsgi_log("[busyness] %llus average busyness is at %llu%%, will spawn %d new worker(s)\n",
					uwsgi.cheaper_overload, avg_busyness, decheaped);
			} else {
				uwsgi_log("[busyness] %llus average busyness is at %llu%% but we already started maximum number of workers available with current limits (%d)\n",
					uwsgi.cheaper_overload, avg_busyness, active_workers);
			}

			// return the maximum number of workers to spawn
			return decheaped;

#ifdef __linux__
		} else if (can_spawn && backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
			return spawn_emergency_worker(backlog);
#endif

		} else if (avg_busyness < uwsgi_cheaper_busyness_global.busyness_min) {

			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			// reset the counter of consecutive min <= busyness <= max cycles, this cycle is outside that band
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

			if (active_workers > uwsgi.cheaper_count) {
				// cheap a worker if too many are running
				if (now >= uwsgi_cheaper_busyness_global.next_cheap) {
					// lower cheaper multiplier if this is subsequent cheap
					if (uwsgi_cheaper_busyness_global.last_action == 2) decrease_multi();
					set_next_cheap_time();

					uwsgi_log("[busyness] %llus average busyness is at %llu%%, cheap one of %d running workers\n",
						uwsgi.cheaper_overload, avg_busyness, (int) active_workers);
					// store timestamp
					uwsgi_cheaper_busyness_global.last_cheaped = uwsgi_micros();

					// store information that last action performed was cheaping worker
					uwsgi_cheaper_busyness_global.last_action = 2;

					if (uwsgi_cheaper_busyness_global.emergency_workers > 0)
						uwsgi_cheaper_busyness_global.emergency_workers--;

					return -1;
				} else if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] need to wait %llu more second(s) to cheap worker\n", (uwsgi_cheaper_busyness_global.next_cheap - now)/1000000);
			}

		} else {
			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			if (uwsgi_cheaper_busyness_global.emergency_workers > 0)
				// we had emergency workers running and busyness dropped back into a range that is
				// high enough to stop cheaping workers at the accelerated (emergency) rate
				uwsgi_cheaper_busyness_global.emergency_workers--;

			// we have min <= busyness <= max, so check what happened in the previous cycles

			uwsgi_cheaper_busyness_global.tolerance_counter++;
			if (uwsgi_cheaper_busyness_global.tolerance_counter >= 3) {
				// we had three or more cycles with min <= busyness <= max, let's reset the cheaper timer
				// this prevents workers from being cheaped when we had idle cycles for almost all of the
				// time needed to cheap them, then a run of min <= busyness <= max cycles during which the
				// timer is not reset, and then one more idle cycle that would finally trigger cheaping
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, %llu non-idle cycle(s), resetting cheaper timer\n",
						uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				set_next_cheap_time();
			} else {
				// we had < 3 non-idle cycles in a row, so don't reset the cheaper timer yet since this might be just a short load spike
				// but we need to add cheaper-overload seconds to the cheaper timer so this cycle isn't counted as idle
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, %llu non-idle cycle(s), adjusting cheaper timer\n",
						uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				uwsgi_cheaper_busyness_global.next_cheap += uwsgi.cheaper_overload*1000000;
			}
		}
	}

#ifdef __linux__
	else if (can_spawn && backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
		// we check for backlog overload every cycle
		return spawn_emergency_worker(backlog);
	}
	else if (backlog > 0) {
		if (uwsgi_cheaper_busyness_global.backlog_is_nonzero) {
			// backlog was > 0 last time, check timestamp and spawn workers if needed
			if (can_spawn && (now - uwsgi_cheaper_busyness_global.backlog_nonzero_since)/1000000 >= uwsgi_cheaper_busyness_global.backlog_nonzero_alert) {
				uwsgi_log("[busyness] backlog was non-zero for %llu second(s), spawning new worker(s)\n", (now - uwsgi_cheaper_busyness_global.backlog_nonzero_since)/1000000);
				uwsgi_cheaper_busyness_global.backlog_nonzero_since = now;
				return spawn_emergency_worker(backlog);
			}
		}
		else {
			// this is the first pass with backlog > 0, set up the timer
			if (uwsgi_cheaper_busyness_global.verbose)
				uwsgi_log("[busyness] backlog is starting to fill (%d)\n", backlog);
			uwsgi_cheaper_busyness_global.backlog_is_nonzero = 1;
			uwsgi_cheaper_busyness_global.backlog_nonzero_since = now;
		}
	}
	else if (uwsgi_cheaper_busyness_global.backlog_is_nonzero) {
		if (uwsgi_cheaper_busyness_global.verbose)
			uwsgi_log("[busyness] backlog is now empty\n");
		uwsgi_cheaper_busyness_global.backlog_is_nonzero = 0;
	}
#endif

	return 0;
}
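
Both listings also rely on the respawn penalty: if a worker that was cheaped has to be spawned back within cheaper_overload * cheap_multi seconds, cheap_multi is increased by penalty, so the next cheap requires a longer idle period; decrease_multi() (not shown here) presumably lowers it again over time, and the min_multi field saved in Example #1 suggests it never drops below its initial value. The standalone sketch below only illustrates that mechanism; the numbers and the decay step are assumptions, not the actual uWSGI helpers.

#include <stdio.h>
#include <stdint.h>

int main(void) {
	uint64_t cheaper_overload = 10;	// seconds between busyness checks
	uint64_t penalty = 2;		// added to the multiplier after a too-fast respawn
	uint64_t min_multi = 10;	// initial multiplier, assumed lower bound
	uint64_t cheap_multi = 10;	// current multiplier

	// a worker is only cheaped after roughly cheaper_overload * cheap_multi
	// seconds of low busyness (next_cheap is pushed forward on non-idle cycles)
	printf("idle time required before cheaping: %llu s\n",
		(unsigned long long) (cheaper_overload * cheap_multi));

	// suppose the cheaped worker had to be respawned only 40 seconds later;
	// 40 / cheaper_overload = 4 <= cheap_multi, so the penalty applies,
	// mirroring the last_cheaped check in the listings above
	uint64_t seconds_since_cheap = 40;
	if (seconds_since_cheap / cheaper_overload <= cheap_multi) {
		cheap_multi += penalty;
		printf("respawned too fast, multiplier raised to %llu\n",
			(unsigned long long) cheap_multi);
	}

	// assumed decay step: after a cheap that is not followed by a quick
	// respawn the multiplier goes down again, but never below min_multi
	if (cheap_multi > min_multi) cheap_multi--;
	printf("multiplier lowered to %llu\n", (unsigned long long) cheap_multi);
	return 0;
}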