Example #1
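worker_scheduleEvent() schedules an event for delivery to a receiver node: it stamps the event with its execution time, enforces the engine's minimum inter-node delay, and pushes the event to the receiver's queue (multi-threaded) or the engine's master queue (single-threaded).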
void worker_scheduleEvent(Event* event, SimulationTime nano_delay, GQuark receiver_node_id) {
	/* TODO create accessors, or better yet refactor the work to event class */
	MAGIC_ASSERT(event);
	MAGIC_ASSERT(&(event->super));

	/* get our thread-private worker */
	Worker* worker = worker_getPrivate();
	Engine* engine = worker->cached_engine;

	/* when the event will execute */
	event->time = worker->clock_now + nano_delay;

	/* parties involved. sender may be NULL, receiver may not! */
	Node* sender = worker->cached_node;

	/* we MAY NOT OWN the receiver, so do not write to it! */
	Node* receiver = receiver_node_id == 0 ? sender : internetwork_getNode(worker_getInternet(), receiver_node_id);
	g_assert(receiver);

	/* the NodeEvent needs a pointer to the correct node */
	event->node = receiver;

	/* if we are not going to execute any more events, free it and return */
	if(engine_isKilled(engine)) {
		shadowevent_free(event);
		return;
	}

	/* engine is not killed, assert accurate worker clock */
	g_assert(worker->clock_now != SIMTIME_INVALID);

	/* non-local events must be properly delayed */
	SimulationTime jump = engine_getMinTimeJump(engine);
	if(!node_isEqual(receiver, sender)) {
		SimulationTime minTime = worker->clock_now + jump;

		/* warn and adjust time if needed */
		if(event->time < minTime) {
			debug("Inter-node event time %lu changed to %lu due to minimum delay %lu",
					event->time, minTime, jump);
			event->time = minTime;
		}
	}

	/* figure out where to push the event */
	if(engine_getNumThreads(engine) > 1) {
		/* multi-threaded, push event to receiver node */
		EventQueue* eventq = node_getEvents(receiver);
		eventqueue_push(eventq, event);
	} else {
		/* single-threaded, push to master queue */
		engine_pushEvent(engine, event);
	}
}
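
A minimal usage sketch follows. The event constructor example_event_new() and the constant SIMTIME_ONE_MILLISECOND are hypothetical stand-ins for illustration; only worker_scheduleEvent() itself comes from the example above.

/* a minimal usage sketch: example_event_new() and SIMTIME_ONE_MILLISECOND
 * are hypothetical stand-ins; only worker_scheduleEvent() is real here */
static void _example_scheduleEvents(GQuark peer_node_id) {
	/* deliver an event to ourselves 1 ms from now
	 * (a receiver id of 0 means "deliver to the sender") */
	Event* local = example_event_new();
	worker_scheduleEvent(local, SIMTIME_ONE_MILLISECOND, 0);

	/* deliver an event to a remote peer; the scheduler clamps the
	 * delivery time up to at least the engine's minimum time jump */
	Event* remote = example_event_new();
	worker_scheduleEvent(remote, SIMTIME_ONE_MILLISECOND, peer_node_id);
}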
Example #2
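_worker_processNode() drains one node's event queue up to the barrier time: it locks the node, pops and runs each ready event while asserting that the node's clock never moves backward, and returns the number of completed events.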
static guint _worker_processNode(Worker* worker, Node* node, SimulationTime barrier) {
	/* update cache, reset clocks */
	worker->cached_node = node;
	worker->clock_last = SIMTIME_INVALID;
	worker->clock_now = SIMTIME_INVALID;
	worker->clock_barrier = barrier;

	/* lock the node */
	node_lock(worker->cached_node);

	EventQueue* eventq = node_getEvents(worker->cached_node);
	Event* nextEvent = eventqueue_peek(eventq);

	/* process all events in the node's local queue */
	guint nEventsProcessed = 0;
	while(nextEvent && (nextEvent->time < worker->clock_barrier)) {
		worker->cached_event = eventqueue_pop(eventq);
		MAGIC_ASSERT(worker->cached_event);

		/* make sure we don't jump backward in time */
		worker->clock_now = worker->cached_event->time;
		if(worker->clock_last != SIMTIME_INVALID) {
			g_assert(worker->clock_now >= worker->clock_last);
		}

		/* do the local task */
		gboolean complete = shadowevent_run(worker->cached_event);

		/* update times */
		worker->clock_last = worker->clock_now;
		worker->clock_now = SIMTIME_INVALID;

		/* finished event can now be destroyed */
		if(complete) {
			shadowevent_free(worker->cached_event);
			nEventsProcessed++;
		}

		/* get the next event, or NULL will tell us to break */
		nextEvent = eventqueue_peek(eventq);
	}

	/* unlock, clear cache */
	node_unlock(worker->cached_node);
	worker->cached_node = NULL;
	worker->cached_event = NULL;

	return nEventsProcessed;
}
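
For context, the sketch below shows how a worker thread might drive _worker_processNode() over its assigned nodes each round. The Worker fields processingLatch and barrierLatch are hypothetical; they mirror the engine-side latch protocol in Example #3.

/* a sketch of one worker round; worker->processingLatch and
 * worker->barrierLatch are hypothetical fields mirroring Example #3 */
static guint _worker_runRound(Worker* worker, GSList* assignedNodes, SimulationTime barrier) {
	guint nEvents = 0;
	for(GSList* item = assignedNodes; item; item = g_slist_next(item)) {
		nEvents += _worker_processNode(worker, (Node*)item->data, barrier);
	}

	/* tell the engine this round is finished, waiting until all workers
	 * and the engine have arrived */
	countdownlatch_countDownAwait(worker->processingLatch);

	/* the engine now advances the execution window; wait at the barrier
	 * until it releases us for the next round */
	countdownlatch_countDownAwait(worker->barrierLatch);

	return nEvents;
}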
Example #3
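_engine_distributeEvents() partitions the nodes round-robin across worker threads, then repeatedly advances the execution window, using two countdown latches to alternate control between the engine and the workers each round.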
static gint _engine_distributeEvents(Engine* engine) {
	MAGIC_ASSERT(engine);

	GList* nodeList = internetwork_getAllNodes(engine->internet);

	/* assign nodes to the worker threads so they get processed */
	GSList* listArray[engine->config->nWorkerThreads];
	memset(listArray, 0, engine->config->nWorkerThreads * sizeof(GSList*));
	gint counter = 0;

	GList* item = g_list_first(nodeList);
	while(item) {
		Node* node = item->data;

		gint i = counter % engine->config->nWorkerThreads;
		listArray[i] = g_slist_append(listArray[i], node);

		counter++;
		item = g_list_next(item);
	}

	/* we will track when workers finish processing their nodes */
	engine->processingLatch = countdownlatch_new(engine->config->nWorkerThreads + 1);
	/* after the workers finish processing, wait for barrier update */
	engine->barrierLatch = countdownlatch_new(engine->config->nWorkerThreads + 1);

	/* start up the workers */
	GSList* workerThreads = NULL;
	for(gint i = 0; i < engine->config->nWorkerThreads; i++) {
		GString* name = g_string_new(NULL);
		g_string_printf(name, "worker-%i", (i+1));
		GThread* t = g_thread_new(name->str, (GThreadFunc)worker_run, (gpointer)listArray[i]);
		workerThreads = g_slist_append(workerThreads, t);
		g_string_free(name, TRUE);
	}

	/* process all events in the priority queue */
	while(engine->executeWindowStart < engine->endTime) {
		/* wait for the workers to finish processing nodes before we touch them */
		countdownlatch_countDownAwait(engine->processingLatch);

		/* we are in control now, the workers are waiting at barrierLatch */
		message("execution window [%lu--%lu] ran %u events from %u active nodes",
				engine->executeWindowStart, engine->executeWindowEnd,
				engine->numEventsCurrentInterval,
				engine->numNodesWithEventsCurrentInterval);

		/* check if we should take 1 step ahead or fast-forward our execute window.
		 * since looping through all the nodes to find the minimum event is
		 * potentially expensive, we use a heuristic of only trying to jump ahead
		 * if the last interval had only a few events in it. */
		if(engine->numEventsCurrentInterval < 10) {
			/* the last interval had few events, so try to fast-forward */
			SimulationTime minNextEventTime = SIMTIME_INVALID;

			item = g_list_first(nodeList);
			while(item) {
				Node* node = item->data;
				EventQueue* eventq = node_getEvents(node);
				Event* nextEvent = eventqueue_peek(eventq);
				if(nextEvent && (nextEvent->time < minNextEventTime)) {
					minNextEventTime = nextEvent->time;
				}
				item = g_list_next(item);
			}

			/* fast forward to the next event; if no node has a pending
			 * event, this stays SIMTIME_INVALID and the loop terminates */
			engine->executeWindowStart = minNextEventTime;
		} else {
			/* we still have events, so just step forward one interval */
			engine->executeWindowStart = engine->executeWindowEnd;
		}

	/* make sure we don't run past the end of the simulation */
		engine->executeWindowEnd = engine->executeWindowStart + engine->minTimeJump;
		if(engine->executeWindowEnd > engine->endTime) {
			engine->executeWindowEnd = engine->endTime;
		}

		/* reset for next round */
		countdownlatch_reset(engine->processingLatch);
		engine->numEventsCurrentInterval = 0;
		engine->numNodesWithEventsCurrentInterval = 0;

		/* if we are done, make sure the workers know about it */
		if(engine->executeWindowStart >= engine->endTime) {
			engine->killed = TRUE;
		}

		/* release the workers for the next round, or to exit */
		countdownlatch_countDownAwait(engine->barrierLatch);
		countdownlatch_reset(engine->barrierLatch);
	}

	/* wait for the threads to finish their cleanup;
	 * g_thread_join() also releases our reference to each thread,
	 * so a separate g_thread_unref() would be a double-unref */
	GSList* threadItem = workerThreads;
	while(threadItem) {
		GThread* t = threadItem->data;
		g_thread_join(t);
		threadItem = g_slist_next(threadItem);
	}
	g_slist_free(workerThreads);

	for(gint i = 0; i < engine->config->nWorkerThreads; i++) {
		g_slist_free(listArray[i]);
	}

	countdownlatch_free(engine->processingLatch);
	countdownlatch_free(engine->barrierLatch);

	/* frees the list struct we own, but not the nodes it holds (those were
	 * taken care of by the workers) */
	g_list_free(nodeList);

	return 0;
}
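
The engine above relies on countdownlatch_countDownAwait() meaning "decrement the count, then block until it reaches zero" and on countdownlatch_reset() re-arming the latch for the next round. The following is an illustrative GLib-based sketch with those semantics, not the implementation the engine actually uses.

#include <glib.h>

/* an illustrative countdown latch with the semantics assumed above;
 * NOT the actual implementation behind countdownlatch_* */
typedef struct {
	GMutex lock;
	GCond arrived;
	guint initialCount;
	guint remaining;
	guint generation; /* guards waiters against wakeups from later rounds */
} CountDownLatch;

CountDownLatch* countdownlatch_new(guint count) {
	CountDownLatch* latch = g_new0(CountDownLatch, 1);
	g_mutex_init(&(latch->lock));
	g_cond_init(&(latch->arrived));
	latch->initialCount = count;
	latch->remaining = count;
	return latch;
}

void countdownlatch_countDownAwait(CountDownLatch* latch) {
	g_mutex_lock(&(latch->lock));
	g_assert(latch->remaining > 0);
	guint myGeneration = latch->generation;

	latch->remaining--;
	if(latch->remaining == 0) {
		/* the last arrival opens the latch and releases all waiters */
		latch->generation++;
		g_cond_broadcast(&(latch->arrived));
	} else {
		/* block until the latch opens for this generation */
		while(latch->generation == myGeneration) {
			g_cond_wait(&(latch->arrived), &(latch->lock));
		}
	}
	g_mutex_unlock(&(latch->lock));
}

void countdownlatch_reset(CountDownLatch* latch) {
	g_mutex_lock(&(latch->lock));
	g_assert(latch->remaining == 0);
	latch->remaining = latch->initialCount;
	g_mutex_unlock(&(latch->lock));
}

void countdownlatch_free(CountDownLatch* latch) {
	g_assert(latch);
	g_mutex_clear(&(latch->lock));
	g_cond_clear(&(latch->arrived));
	g_free(latch);
}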