Example #1
static void hangcheck_fire(unsigned long data)
{
	unsigned long long cur_tsc, tsc_diff;

	cur_tsc = monotonic_clock();

	if (cur_tsc > hangcheck_tsc)
		tsc_diff = cur_tsc - hangcheck_tsc;
	else
		tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); 

	if (tsc_diff > hangcheck_tsc_margin) {
		if (hangcheck_dump_tasks) {
			printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
			handle_sysrq('t', NULL);
#endif  
		}
		if (hangcheck_reboot) {
			printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
			emergency_restart();
		} else {
			printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
		}
	}
	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
	hangcheck_tsc = monotonic_clock();
}
Example #2
static void garbage_collect_waiting_processes(uint64_t alloted_ns)
{
	int nr_timed = wait_list_len(&queues.on_timed_receive);
	int nr_infinite = proc_list_len(&queues.on_infinite_receive);
	int nr_waiting = nr_timed + nr_infinite;
	if (nr_waiting == 0)
		return;

	int nr_scanned = 0;
	while (nr_scanned < nr_waiting)
	{
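		// The static counter below keeps its value across calls, so the
		// round-robin scan resumes where the previous invocation stopped
		// and GC work is spread over all waiting processes.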
		static int64_t counter = 0;
		int index = counter % nr_waiting;
		proc_t *fatty = (index >= nr_timed)
				? proc_list_at(&queues.on_infinite_receive, index - nr_timed)
				: wait_list_at(&queues.on_timed_receive, index);
		heap_t *hp = &fatty->hp;
		if (!gc_skip_idle(hp))
		{
			uint64_t gc_started_ns = monotonic_clock();
			proc_burn_fat(GC_LOC_IDLE, fatty, fatty->cap.regs, fatty->cap.live);
			uint64_t consumed_ns = monotonic_clock() - gc_started_ns;
			if (consumed_ns > alloted_ns)
				break;
			alloted_ns -= consumed_ns;
		}
		counter++;
		nr_scanned++;
	}
}
Example #3
static void hangcheck_fire(unsigned long data)
{
	unsigned long long cur_tsc, tsc_diff;

	cur_tsc = monotonic_clock();

	if (cur_tsc > hangcheck_tsc)
		tsc_diff = cur_tsc - hangcheck_tsc;
	else
		tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* or something */

	if (tsc_diff > hangcheck_tsc_margin) {
		if (hangcheck_dump_tasks) {
			printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
			handle_sysrq('t');
#endif  /* CONFIG_MAGIC_SYSRQ */
		}
		if (hangcheck_reboot) {
			printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
			emergency_restart();
		} else {
			printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
		}
	}
#if 0
	/*
	 * Enable to investigate delays in detail
	 */
	printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
			tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
#endif
	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
	hangcheck_tsc = monotonic_clock();
}
Example #4
int clock_gettime(clockid_t clk_id, struct timespec *tp)
{
    switch (clk_id) {
	case CLOCK_MONOTONIC:
	{
	    uint64_t nsec = monotonic_clock();

	    tp->tv_sec = nsec / 1000000000ULL;
	    tp->tv_nsec = nsec % 1000000000ULL;

	    break;
	}
	case CLOCK_REALTIME:
	{
	    struct timeval tv;

	    gettimeofday(&tv, NULL);

	    tp->tv_sec = tv.tv_sec;
	    tp->tv_nsec = tv.tv_usec * 1000;

	    break;
	}
	default:
	    print_unsupported("clock_gettime(%ld)", (long) clk_id);
	    errno = EINVAL;
	    return -1;
    }

    return 0;
}
Example #5
int etimer_cancel(uint64_t ref_id, int64_t *left_ns)
{
	etimer_t **ref = &active_timers;
	etimer_t *tm = active_timers;
	while (tm != 0 && tm->ref_id != ref_id)
	{
		ref = &tm->next;
		tm = tm->next;
	}

	if (tm == 0)
		return -NOT_FOUND;

	*ref = tm->next;

	if (tm->sender != 0)
	{
		assert(tm->sender->pending_timers > 0);
		tm->sender->pending_timers--;
		if (tm->sender->pending_timers == 0 &&
				tm->sender->my_queue == MY_QUEUE_PENDING_TIMERS)
			proc_destroy(tm->sender);	// destroy a zombie process
	}

	*left_ns = tm->timeout - monotonic_clock();

	//printk("*** etimer_cancel: ref_id %ld left_ms %ld\n", tm->ref_id, *left_ns /1000000);

	tm->next = free_timers;
	free_timers = tm;
	return 0;
}
Example #6
void block_domain(u32 millisecs)
{
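    /* Program a one-shot timer 'millisecs' into the future, then block the
     * domain until that timer (or any other event) fires. */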
    struct timeval tv;
    gettimeofday(&tv);
    HYPERVISOR_set_timer_op(monotonic_clock() + 1000000LL * (s64) millisecs);
    HYPERVISOR_sched_op(SCHEDOP_block, 0);
}
Example #7
File: time.c Project: Geal/mini-os
int gettimeofday(struct timeval *tv, void *tz)
{
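    /* Wall-clock time is the time recorded in shadow_ts plus the nanoseconds
     * reported by the monotonic clock. */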
    uint64_t nsec = monotonic_clock();
    nsec += shadow_ts.tv_nsec;

    tv->tv_sec = shadow_ts.tv_sec;
    tv->tv_sec += NSEC_TO_SEC(nsec);
    tv->tv_usec = NSEC_TO_USEC(nsec % 1000000000UL);

    return 0;
}
Example #8
void gettimeofday(struct timeval *tv)
{
    u64 nsec = monotonic_clock();
    nsec += shadow_ts.ts_nsec;
    
    update_wallclock();
    
    tv->tv_sec = shadow_ts.ts_sec;
    tv->tv_sec += NSEC_TO_SEC(nsec);
    tv->tv_usec = NSEC_TO_USEC(nsec % 1000000000UL);
}
Example #9
int etimer_read(uint64_t ref_id, int64_t *left_ns)
{
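	// Scan the active timer list for the matching reference id.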
	etimer_t *tm = active_timers;
	while (tm != 0 && tm->ref_id != ref_id)
		tm = tm->next;

	if (tm == 0)
		return -NOT_FOUND;

	*left_ns = tm->timeout - monotonic_clock();
	return 0;
}
Example #10
/* Use monotonic_clock() by default. It is faster and available on older
 * kernels, but only a few architectures provide it, so we must fall back to
 * do_posix_clock_monotonic_gettime() everywhere else.
 */
hrtime_t
__gethrtime(void) {
#ifdef HAVE_MONOTONIC_CLOCK
        unsigned long long res = monotonic_clock();

        /* Deal with signed/unsigned mismatch */
        return (hrtime_t)(res & ~(1ULL << 63));
#else
        struct timespec ts;

        do_posix_clock_monotonic_gettime(&ts);
        return (((hrtime_t)ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec);
#endif
}
Example #11
term_t cbif_profile1(proc_t *proc, term_t *regs)
{
	term_t Flag = regs[0];
	if (!is_bool(Flag))
		badarg(Flag);

#ifdef PROFILE_HARNESS
	if (Flag == A_TRUE)
		prof_restart();
	else
	{
		uint64_t now = monotonic_clock();
		prof_stop(now);
	}
#endif
	return A_OK;
}
Example #12
static int __init hangcheck_init(void)
{
	printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
	       VERSION_STR, hangcheck_tick, hangcheck_margin);
#if defined (HAVE_MONOTONIC)
	printk("Hangcheck: Using monotonic_clock().\n");
#else
	printk("Hangcheck: Using get_cycles().\n");
#endif  
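	/* The hang margin is (hangcheck_margin + hangcheck_tick) seconds
	 * expressed in TIMER_FREQ units. */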
	hangcheck_tsc_margin =
		(unsigned long long)(hangcheck_margin + hangcheck_tick);
	hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ;

	hangcheck_tsc = monotonic_clock();
	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));

	return 0;
}
static void hangcheck_fire(unsigned long data)
{
	unsigned long long cur_tsc, tsc_diff;

	cur_tsc = monotonic_clock();

	if (cur_tsc > hangcheck_tsc)
		tsc_diff = cur_tsc - hangcheck_tsc;
	else
		tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* or something */

	if (tsc_diff > hangcheck_tsc_margin) {
		if (hangcheck_dump_tasks) {
			printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
			handle_sysrq('t');
#endif  /* CONFIG_MAGIC_SYSRQ */
		}
		if (hangcheck_reboot) {
			printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
			emergency_restart();
		} else {
			printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
		}
	}
	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
	hangcheck_tsc = monotonic_clock();
}
Example #14
void scheduler_init(void)
{
	next_proc_id = 0;

	registry = hash_make();
	named_processes = hash_make();
	proc_queue_init(&queues.high_prio);
	proc_queue_init(&queues.normal_prio);
	proc_queue_init(&queues.low_prio);

	wait_list_init(&queues.on_timed_receive);
	proc_list_init(&queues.on_infinite_receive);

	//runtime = 0;
	scheduler_runtime_start();

	last_event_fired_ns = monotonic_clock();
	avg_event_gap_ns = MANUAL_POLLING_THRESHOLD;
	expect_event_in_ns = MANUAL_POLLING_THRESHOLD;

	memset(purgatory, 0, sizeof(purgatory));
	num_purged = 0;
}
Example #15
void scheduler_runtime_start(void)
{
	rt_start = monotonic_clock();
}
Example #16
uint64_t wall_clock(void)
{
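	// Wall time is the monotonic clock shifted by the wall_clock_base offset.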
	return wall_clock_base + monotonic_clock();
}
Example #17
CAMLprim value caml_sys_random_seed (value unit)
{
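  /* Use the current monotonic clock reading as the random seed. */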
  intnat seed;
  seed = monotonic_clock ();
  return Val_long(seed);
}
Example #18
void server::run()
{
	network::connection sock = 0;

	time_t last_ts = monotonic_clock();

	for(;;)
	{
		if(need_reload) {
			load_config(); // TODO: handle port number config changes

			need_reload = 0;
			last_ts = 0;

			LOG_CS << "Reloaded configuration\n";
		}

		try {
			bool force_flush = false;
			std::string admin_cmd;

			if(input_ && input_->read_line(admin_cmd)) {
				control_line ctl = admin_cmd;

				if(ctl == "shut_down") {
					LOG_CS << "Shut down requested by admin, shutting down...\n";
					break;
				} else if(ctl == "readonly") {
					if(ctl.args_count()) {
						cfg_["read_only"] = read_only_ = utils::string_bool(ctl[1], true);
					}

					LOG_CS << "Read only mode: " << (read_only_ ? "enabled" : "disabled") << '\n';
				} else if(ctl == "flush") {
					force_flush = true;
					LOG_CS << "Flushing config to disk...\n";
				} else if(ctl == "reload") {
					if(ctl.args_count()) {
						if(ctl[1] == "blacklist") {
							LOG_CS << "Reloading blacklist...\n";
							load_blacklist();
						} else {
							ERR_CS << "Unrecognized admin reload argument: " << ctl[1] << '\n';
						}
					} else {
						LOG_CS << "Reloading all configuration...\n";
						need_reload = 1;
						// Avoid flush timer elapsing
						continue;
					}
				} else if(ctl == "setpass") {
					if(ctl.args_count() != 2) {
						ERR_CS << "Incorrect number of arguments for 'setpass'\n";
					} else {
						const std::string& addon_id = ctl[1];
						const std::string& newpass = ctl[2];
						config& campaign = get_campaign(addon_id);

						if(!campaign) {
							ERR_CS << "Add-on '" << addon_id << "' not found, cannot set passphrase\n";
						} else if(newpass.empty()) {
							// Shouldn't happen!
							ERR_CS << "Add-on passphrases may not be empty!\n";
						} else {
							campaign["passphrase"] = newpass;
							write_config();

							LOG_CS << "New passphrase set for '" << addon_id << "'\n";
						}
					}
				} else {
					ERR_CS << "Unrecognized admin command: " << ctl.full() << '\n';
				}
			}

			const time_t cur_ts = monotonic_clock();
			// Write config to disk every ten minutes.
			if(force_flush || labs(cur_ts - last_ts) >= 10*60) {
				write_config();
				last_ts = cur_ts;
			}

			network::process_send_queue();

			sock = network::accept_connection();
			if(sock) {
				LOG_CS << "received connection from " << network::ip_address(sock) << "\n";
			}

			config data;

			while((sock = network::receive_data(data, 0)) != network::null_connection)
			{
				config::all_children_iterator i = data.ordered_begin();

				if(i != data.ordered_end()) {
					// We only handle the first child.
					const config::any_child& c = *i;

					request_handlers_table::const_iterator j
							= handlers_.find(c.key);

					if(j != handlers_.end()) {
						// Call the handler.
						j->second(this, request(c.key, c.cfg, sock));
					} else {
						send_error("Unrecognized [" + c.key + "] request.",
								   sock);
					}
				}
			}
		} catch(network::error& e) {
			if(!e.socket) {
				ERR_CS << "fatal network error: " << e.message << "\n";
				throw;
			} else {
				LOG_CS << "client disconnect: " << e.message << " " << network::ip_address(e.socket) << "\n";
				e.disconnect();
			}
		} catch(const config::error& e) {
			network::connection err_sock = 0;
			network::connection const * err_connection = boost::get_error_info<network::connection_info>(e);

			if(err_connection != NULL) {
				err_sock = *err_connection;
			}

			if(err_sock == 0 && sock > 0) {
				err_sock = sock;
			}

			if(err_sock) {
				ERR_CS << "client disconnect due to exception: " << e.what() << " " << network::ip_address(err_sock) << "\n";
				network::disconnect(err_sock);
			} else {
				throw;
			}
		}

		SDL_Delay(20);
	}
}
Example #19
void server::run()
{
	network::connection sock = 0;

	time_t last_ts = monotonic_clock();

	for(;;)
	{
		try {
			std::string admin_cmd;

			if(input_ && input_->read_line(admin_cmd)) {
				// process command
				if(admin_cmd == "shut_down") {
					break;
				}
			}

			const time_t cur_ts = monotonic_clock();
			// Write config to disk every ten minutes.
			if(labs(cur_ts - last_ts) >= 10*60) {
				write_config();
				last_ts = cur_ts;
			}

			network::process_send_queue();

			sock = network::accept_connection();
			if(sock) {
				LOG_CS << "received connection from " << network::ip_address(sock) << "\n";
			}

			config data;

			while((sock = network::receive_data(data, 0)) != network::null_connection)
			{
				config::all_children_iterator i = data.ordered_begin();

				if(i != data.ordered_end()) {
					// We only handle the first child.
					const config::any_child& c = *i;

					request_handlers_table::const_iterator j
							= handlers_.find(c.key);

					if(j != handlers_.end()) {
						// Call the handler.
						j->second(this, request(c.key, c.cfg, sock));
					} else {
						send_error("Unrecognized [" + c.key + "] request.",
								   sock);
					}
				}
			}
		} catch(network::error& e) {
			if(!e.socket) {
				ERR_CS << "fatal network error: " << e.message << "\n";
				throw;
			} else {
				LOG_CS << "client disconnect: " << e.message << " " << network::ip_address(e.socket) << "\n";
				e.disconnect();
			}
		} catch(const config::error& e) {
			network::connection err_sock = 0;
			network::connection const * err_connection = boost::get_error_info<network::connection_info>(e);

			if(err_connection != NULL) {
				err_sock = *err_connection;
			}

			if(err_sock == 0 && sock > 0) {
				err_sock = sock;
			}

			if(err_sock) {
				ERR_CS << "client disconnect due to exception: " << e.what() << " " << network::ip_address(err_sock) << "\n";
				network::disconnect(err_sock);
			} else {
				throw;
			}
		}

		SDL_Delay(20);
	}
}
Example #20
proc_t *scheduler_next(proc_t *current, int reds_left)
{
	set_phase(PHASE_NEXT);
	uint32_t reds_used = SLICE_REDUCTIONS - reds_left;
	ssi(SYS_STATS_CTX_SWITCHES);
	ssa(SYS_STATS_REDUCTIONS, reds_used);
	current->total_reds += reds_used;
	proc_t *next_proc = 0;
	uint64_t ticks = monotonic_clock(); // freeze time

	assert(current->my_queue == MY_QUEUE_NONE);

#ifdef PROFILE_HARNESS
	static uint64_t proc_started_ns = 0;
	if (proc_started_ns != 0)
		prof_slice_complete(current->pid,
			current->result.what, current->cap.ip, proc_started_ns, ticks);
#endif

	proc_t *expired;
	while ((expired = wait_list_expired(&queues.on_timed_receive, ticks)) != 0)
	{
		expired->cap.ip = expired->result.jump_to;
		if (scheduler_park_runnable_N(expired) < 0)
			scheduler_exit_process(expired, A_NO_MEMORY);
	}

	int memory_exhausted = 0;
	switch (current->result.what)
	{
	case SLICE_RESULT_YIELD:
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		break;
	case SLICE_RESULT_WAIT:
		if (current->result.until_when == LING_INFINITY)
		{
			if (proc_list_put_N(&queues.on_infinite_receive, current) < 0)
				memory_exhausted = 1;
			else
				current->my_queue = MY_QUEUE_INF_WAIT;
		}
		else
		{
			if (wait_list_put_N(&queues.on_timed_receive,
					current, current->result.until_when) < 0)
				memory_exhausted = 1;
			else
				current->my_queue = MY_QUEUE_TIMED_WAIT;
		}
		break;
	case SLICE_RESULT_DONE:
		scheduler_exit_process(current, A_NORMAL);
		break;

	case SLICE_RESULT_PURGE_PROCS:
		// purge_module() call may have detected processes lingering on the old
		// code - terminate them
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		for (int i = 0; i < num_purged; i++)
			if (scheduler_signal_exit_N(purgatory[i], current->pid, A_KILL) < 0)
				memory_exhausted = 1;
		num_purged = 0;
		break;

	case SLICE_RESULT_EXIT:
		scheduler_exit_process(current, current->result.reason);
		// what about the returned value when main function just returns?
		break;
	case SLICE_RESULT_EXIT2:
		// only needed to implement erlang:exit/2
		if (scheduler_park_runnable_N(current) < 0 ||
				(scheduler_signal_exit_N(current->result.victim,
								         current->pid, 
								         current->result.reason2) < 0))
			memory_exhausted = 1;
		break;
	case SLICE_RESULT_ERROR:
		scheduler_exit_process(current, current->result.reason);
		// how is this different from SLICE_RESULT_EXIT?
		break;
	case SLICE_RESULT_THROW:
		scheduler_exit_process(current, current->result.reason);
		// how is this different from SLICE_RESULT_EXIT?
		break;
	default:
	{
		assert(current->result.what == SLICE_RESULT_OUTLET_CLOSE);
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		outlet_t *closing = current->result.closing;
		//assert(is_atom(current->result.why));
		outlet_close(closing, current->result.why);
		break;
	}
	}

	if (memory_exhausted)
		scheduler_exit_process(current, A_NO_MEMORY);

do_pending:

	ticks = monotonic_clock();
	while ((expired = wait_list_expired(&queues.on_timed_receive, ticks)) != 0)
	{
		expired->cap.ip = expired->result.jump_to;
		if (scheduler_park_runnable_N(expired) < 0)
			scheduler_exit_process(expired, A_NO_MEMORY);
	}

	set_phase(PHASE_EVENTS);
	// software events/timeouts
	net_check_timeouts();
	etimer_expired(ticks);
	// 'hardware' events
	int nr_fired = events_do_pending();
	update_event_times(nr_fired, ticks);
	set_phase(PHASE_NEXT);

	// select_runnable
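	// High-priority processes always run first; the normal-priority queue is
	// preferred for NORMAL_ADVANTAGE consecutive picks before the low-priority
	// queue gets a turn.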
	if (!proc_queue_is_empty(&queues.high_prio))
		next_proc = proc_queue_get(&queues.high_prio);
	else if (normal_count < NORMAL_ADVANTAGE)
	{
		if (!proc_queue_is_empty(&queues.normal_prio))
			next_proc = proc_queue_get(&queues.normal_prio);
		else if (!proc_queue_is_empty(&queues.low_prio))
			next_proc = proc_queue_get(&queues.low_prio);
		normal_count++;
	}
	else
	{
		if (!proc_queue_is_empty(&queues.low_prio))
			next_proc = proc_queue_get(&queues.low_prio);
		else if (!proc_queue_is_empty(&queues.normal_prio))
			next_proc = proc_queue_get(&queues.normal_prio);
		normal_count = 0;
	}

	if (next_proc == 0)
	{
		// no runnable processes; poll for events from all three sources

		// Beware that events_poll() reports events 5us after they occur. If
		// a new event is expected very soon we are better off polling event
		// bits manually (using events_do_pending())

		// Devote a portion of time until the next event to gc waiting processes
		garbage_collect_waiting_processes(expect_event_in_ns / 2);

		if (expect_event_in_ns < MANUAL_POLLING_THRESHOLD)
			goto do_pending;

		uint64_t next_ticks = wait_list_timeout(&queues.on_timed_receive);
		uint64_t closest_timeout = etimer_closest_timeout();
		if (closest_timeout < next_ticks)
			next_ticks = closest_timeout;

		closest_timeout = lwip_closest_timeout();
		if (closest_timeout < next_ticks)
			next_ticks = closest_timeout;

		scheduler_runtime_update();
		events_poll(next_ticks);		// LING_INFINITY is big enough
		scheduler_runtime_start();

		goto do_pending;
	}

	next_proc->my_queue = MY_QUEUE_NONE;
	
	//TODO: update stats

#ifdef PROFILE_HARNESS
	proc_started_ns = ticks;
#endif

	set_phase(PHASE_ERLANG);
	return next_proc;
}
Example #21
uint64_t scheduler_runtime_get(void)
{
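	// Accumulated runtime plus the time elapsed since the current
	// measurement interval started.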
	return runtime + (monotonic_clock() - rt_start);
}
Example #22
void scheduler_runtime_update(void)
{
	runtime += (monotonic_clock() - rt_start);
}