Example #1
/* Wake up access waiting in a stack's wait list. */
void mod_stack_wakeup_stack(struct mod_stack_t *master_stack)
{
	struct mod_stack_t *stack;
	int event;

	/* No access to wake up */
	if (!master_stack->waiting_list_count)
		return;

	/* Debug */
	mem_debug("  %lld %lld 0x%x wake up accesses:", esim_time,
		master_stack->id, master_stack->addr);

	/* Wake up all coalesced accesses */
	while (master_stack->waiting_list_head)
	{
		stack = master_stack->waiting_list_head;
		event = stack->waiting_list_event;
		DOUBLE_LINKED_LIST_REMOVE(master_stack, waiting, stack);
		stack->master_stack = NULL;
		esim_schedule_event(event, stack, 0);
		/* Disabled debug trace for a specific address range:
		if (stack->addr >= 0x2F20 && stack->addr <= 0x2F2C)
			fprintf(stderr, "      wake master %x, %x, %d, %lld\n",
				master_stack->addr, stack->addr, event, esim_time);
		*/
		mem_debug(" %lld", stack->id);
	}

	/* Debug */
	mem_debug("\n");
}
Example #2
/* Wake up access waiting in a stack's wait list. */
void mod_stack_wakeup_stack(struct mod_stack_t *master_stack)
{
	struct mod_stack_t *stack;
	int event;

	/* No access to wake up */
	if (!master_stack->waiting_list_count)
		return;

	/* Debug */
	mem_debug("  %lld %lld 0x%x wake up accesses:", esim_time,
		master_stack->id, master_stack->addr);

	/* Reset coalesced access counter */
	master_stack->coalesced_count = 0;

	/* Wake up all coalesced accesses */
	while (master_stack->waiting_list_head)
	{
		stack = master_stack->waiting_list_head;
		event = stack->waiting_list_event;
		stack->waiting_list_event = 0;
		DOUBLE_LINKED_LIST_REMOVE(master_stack, waiting, stack);
		stack->state = master_stack->state;
		esim_schedule_event(event, stack, 0);
		mem_debug(" %lld", stack->id);
	}

	/* Debug */
	mem_debug("\n");
}
Example #3
void moesi_stack_return(struct moesi_stack_t *stack)
{
	int retevent = stack->retevent;
	void *retstack = stack->retstack;

	repos_free_object(moesi_stack_repos, stack);
	esim_schedule_event(retevent, retstack, 0);
}
Example #4
void net_command_stack_return(struct net_command_stack_t *stack)
{
	int retevent = stack->ret_event;
	struct net_command_stack_t *retstack = stack->ret_stack;

	free(stack);
	esim_schedule_event(retevent, retstack, 0);
}
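Examples #3 and #4 (and mod_stack_return() in Example #6 below) show the same return idiom used throughout these handlers: read the parent's return event and stack out of the finishing child stack, release the stack, and hand control back through the event engine with zero delay. The sketch below is a minimal, generic version of that pattern; the names my_stack_t and my_stack_return are hypothetical stand-ins, and it assumes the header declaring esim_schedule_event() is included.

#include <stdlib.h>

/* Hypothetical stack that records where control goes when this
 * sub-operation completes (same role as retevent/retstack above). */
struct my_stack_t
{
	int ret_event;    /* event to schedule on completion */
	void *ret_stack;  /* data passed to that event's handler */
};

/* Return idiom: capture the return information first, because the
 * stack is freed before the parent event is scheduled. */
static void my_stack_return(struct my_stack_t *stack)
{
	int ret_event = stack->ret_event;
	void *ret_stack = stack->ret_stack;

	free(stack);
	esim_schedule_event(ret_event, ret_stack, 0);
}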
Example #5
void dram_request_handler (int event, void *data)
{
	struct list_t *token_list;

	struct request_stack_t *req_stack = data;
	struct dram_system_t *system = req_stack->system;
	char *request_line = req_stack->request_line;
	fprintf(stderr, "request line : %s \n", request_line);
	char request[MAX_STRING_SIZE];

	long long cycle = esim_domain_cycle(dram_domain_index);

	/* Split command in tokens, skip command */
	token_list = str_token_list_create(request_line, " ");
	assert(list_count(token_list));

	long long request_cycle = dram_request_get_llint(token_list,
			request_line,"cycle value");

	if (request_cycle > cycle)
	{
		str_token_list_free(token_list);
		esim_schedule_event(event, req_stack, request_cycle - cycle);
		return;
	}

	struct dram_request_t *dram_request;

	dram_request = dram_request_create();

	dram_request_get_type(token_list, request_line, request,
					sizeof request);

	if (!strcasecmp(request, "READ"))
	{
		dram_request->type = request_type_read;
	}
	else if (!strcasecmp(request, "WRITE"))
	{
		dram_request->type = request_type_write;
	}
	else
		fatal("%s: invalid access type %s.\n\t> %s",
			__FUNCTION__, request, request_line);


	dram_request->addr = dram_request_get_hex_address(token_list, request_line);

	dram_request->system = system;
	dram_request->id = system->request_count;
	system->request_count++;

	list_enqueue(system->dram_request_list, dram_request);

	str_token_list_free(token_list);
	dram_request_stack_free(req_stack);
}
Example #6
void mod_stack_return(struct mod_stack_t *stack)
{
	int ret_event = stack->ret_event;
	void *ret_stack = stack->ret_stack;

	/* Wake up dependent accesses */
	mod_stack_wakeup_stack(stack);

	/* Free */
	free(stack);
	esim_schedule_event(ret_event, ret_stack, 0);
}
Example #7
/* Wake up accesses waiting in module wait list. */
void mod_stack_wakeup_mod(struct mod_t *mod)
{
	struct mod_stack_t *stack;
	int event;

	while (mod->waiting_list_head)
	{
		stack = mod->waiting_list_head;
		event = stack->waiting_list_event;
		DOUBLE_LINKED_LIST_REMOVE(mod, waiting, stack);
		esim_schedule_event(event, stack, 0);
	}
}
Example #8
/* Wake up accesses waiting in a port wait list. */
void mod_stack_wakeup_port(struct mod_port_t *port)
{
	struct mod_stack_t *stack;
	int event;

	while (port->waiting_list_head)
	{
		stack = port->waiting_list_head;
		event = stack->waiting_list_event;
		DOUBLE_LINKED_LIST_REMOVE(port, waiting, stack);
		esim_schedule_event(event, stack, 0);
	}
}
Example #9
void dir_lock_unlock(struct dir_lock_t *dir_lock)
{
	cache_debug("  dir_lock %p - unlock\n", dir_lock);

	/* Wake up all waiters */
	while (dir_lock->lock_queue) {
		esim_schedule_event(dir_lock->lock_queue->lock_event, dir_lock->lock_queue, 1);
		cache_debug("    0x%x access resumed\n", dir_lock->lock_queue->tag);
		dir_lock->lock_queue = dir_lock->lock_queue->lock_next;
	}

	/* Unlock entry */
	dir_lock->lock = 0;
}
Example #10
static void net_config_command_create(struct net_t *net, struct config_t *config, char *section)
{
	char *command_line;
	char command_var[MAX_STRING_SIZE];

	int command_var_id;

	/* Checks */
	if (net_injection_rate > 0.001)
		fatal("Network %s:%s: Using Command section; \n"
				"\t option --net-injection-rate should not be used \n",
				net->name,section);
	/* Read commands */
	net_injection_rate = 0;
	if (strcmp(net_traffic_pattern, "") &&
			(strcmp(net_traffic_pattern, "command")))
		fatal("Network %s: Command option doesn't comply with other "
				"traffic pattern\n (%s)", net->name,
				net_traffic_pattern);
	net_traffic_pattern = "command";
	command_var_id = 0;

	/* Register events for command handler*/
	EV_NET_COMMAND = esim_register_event_with_name(net_command_handler,
			net_domain_index, "net_command");
	EV_NET_COMMAND_RCV = esim_register_event_with_name(net_command_handler,
			net_domain_index, "net_command_receive");


	while (1)
	{
		/* Get command */
		snprintf(command_var, sizeof command_var, "Command[%d]", command_var_id);
		command_line = config_read_string(config, section, command_var, NULL);
		if (!command_line)
			break;

		/* Schedule event to process command */
		struct net_stack_t *stack;
		stack = net_stack_create(net, ESIM_EV_NONE, NULL);
		stack->net = net;
		stack->command = xstrdup(command_line);
		esim_schedule_event(EV_NET_COMMAND, stack, 0);

		/* Next command */
		command_var_id++;
	}
}
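The loop above keeps reading Command[<n>] keys from the given configuration section until one is missing, and schedules an EV_NET_COMMAND event for each line. Judging by the parsing in net_command_handler() (Example #16 below), each command starts with a cycle number followed by a command name and its arguments. The fragment below is only an illustrative guess at such a section; the section name, node names, and values are assumptions, not taken from a real Multi2Sim configuration.

[Network.mynet.Commands]
Command[0] = 1 Send n0 n1 64 1
Command[1] = 10 Receive n1 1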
Example #11
/* Schedule all events waiting in the wakeup list */
void net_buffer_wakeup(struct net_buffer_t *buffer)
{
	struct net_buffer_wakeup_t *wakeup;

	while (linked_list_count(buffer->wakeup_list))
	{
		/* Get event/stack */
		linked_list_head(buffer->wakeup_list);
		wakeup = linked_list_get(buffer->wakeup_list);
		linked_list_remove(buffer->wakeup_list);

		/* Schedule event */
		esim_schedule_event(wakeup->event, wakeup->stack, 0);
		free(wakeup);
	}
}
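net_buffer_wakeup() drains the buffer's wakeup list and re-schedules every recorded event. Its counterpart net_buffer_wait(), called from Examples #12 and #22 on this page, is not listed here; the sketch below shows what it plausibly does, based only on the fields visible above (wakeup->event, wakeup->stack, buffer->wakeup_list). The allocation call, the NULL check, and the exact signature are assumptions, so treat this as an illustration rather than the real Multi2Sim implementation.

/* Sketch: record an event/stack pair so net_buffer_wakeup() can
 * re-schedule it once space becomes available in the buffer. */
static void net_buffer_wait_sketch(struct net_buffer_t *buffer,
	int event, void *stack)
{
	struct net_buffer_wakeup_t *wakeup;

	wakeup = calloc(1, sizeof(struct net_buffer_wakeup_t));
	if (!wakeup)
		return;  /* a real implementation would likely abort here */
	wakeup->event = event;
	wakeup->stack = stack;
	linked_list_add(buffer->wakeup_list, wakeup);
}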
Example #12
/* Return TRUE if a message can be sent to the network. If it cannot be sent, 
 * return FALSE, and schedule 'retry_event' for the cycle when the check
 * should be performed again. This function should not be called if the
 * reason why a message cannot be sent is permanent (e.g., no route to
 * destination). */
int net_can_send_ev(struct net_t *net, struct net_node_t *src_node,
		struct net_node_t *dst_node, int size,
		int retry_event, void *retry_stack)
{
	struct net_routing_table_t *routing_table = net->routing_table;
	struct net_routing_table_entry_t *entry;
	struct net_buffer_t *output_buffer;
	long long cycle;

	/* Get current cycle */
	cycle = esim_domain_cycle(net_domain_index);

	/* Get output buffer */
	entry = net_routing_table_lookup(routing_table, src_node, dst_node);
	output_buffer = entry->output_buffer;

	/* No route to destination */
	if (!output_buffer)
		fatal("%s: no route between %s and %s.\n%s",
				net->name, src_node->name, dst_node->name,
				net_err_no_route);

	/* Message is too long */
	if (size > output_buffer->size)
		fatal("%s: message too long.\n%s", net->name,
				net_err_large_message);

	/* Output buffer is busy */
	if (output_buffer->write_busy >= cycle)
	{
		esim_schedule_event(retry_event, retry_stack,
				output_buffer->write_busy - cycle + 1);
		return 0;
	}

	/* Message does not fit in output buffer */
	if (output_buffer->count + size > output_buffer->size)
	{
		net_buffer_wait(output_buffer, retry_event, retry_stack);
		return 0;
	}

	/* All conditions satisfied, can send */
	return 1;
}
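A caller normally retries its own event when this check fails: when net_can_send_ev() returns 0 it has already scheduled retry_event, either directly or through net_buffer_wait(), so the handler just returns. The fragment below sketches that usage inside a hypothetical handler; EV_MY_SEND, EV_MY_RECEIVE, and the stack fields are assumptions, while net_can_send_ev() and net_send_ev() are used with the signatures that appear on this page.

	/* Inside a hypothetical handler for EV_MY_SEND */
	if (!net_can_send_ev(net, src_node, dst_node, msg_size,
			EV_MY_SEND, stack))
		return;  /* retry already scheduled by net_can_send_ev() */

	/* Safe to inject now; EV_MY_RECEIVE fires with this stack when
	 * the message reaches dst_node. */
	stack->msg = net_send_ev(net, src_node, dst_node, msg_size,
			EV_MY_RECEIVE, stack);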
Example #13
void dir_entry_unlock(struct dir_t *dir, int x, int y)
{
	struct dir_lock_t *dir_lock;
	struct mod_stack_t *stack;
	FILE *f;

	/* Get lock */
	assert(x < dir->xsize && y < dir->ysize);
	dir_lock = &dir->dir_lock[x * dir->ysize + y];

	/* Wake up first waiter */
	if (dir_lock->lock_queue)
	{
		/* Debug */
		f = debug_file(mem_debug_category);
		if (f)
		{
			mem_debug("    A-%lld resumed", dir_lock->lock_queue->id);
			if (dir_lock->lock_queue->dir_lock_next)
			{
				mem_debug(" - {");
				for (stack = dir_lock->lock_queue->dir_lock_next; stack;
						stack = stack->dir_lock_next)
					mem_debug(" A-%lld", stack->id);
				mem_debug(" } still waiting");
			}
			mem_debug("\n");
		}

		/* Wake up access */
		esim_schedule_event(dir_lock->lock_queue->dir_lock_event, dir_lock->lock_queue, 1);
		dir_lock->lock_queue = dir_lock->lock_queue->dir_lock_next;
	}

	/* Trace */
	mem_trace("mem.end_access_block cache=\"%s\" access=\"A-%lld\" set=%d way=%d\n",
		dir->name, dir_lock->stack_id, x, y);

	/* Unlock entry */
	dir_lock->lock = 0;
}
Example #14
static void dram_config_request_create(struct dram_system_t *system, struct config_t *config,
		char *section)
{
	char *request_line;
	char request_var[MAX_STRING_SIZE];

	int request_var_id;

	/* Read Requests */
	request_var_id = 0;

	/* Register events for request handler*/
	EV_DRAM_REQUEST = esim_register_event_with_name(dram_request_handler,
			dram_domain_index, "dram_request");

	while (1)
	{
		/* Get request */
		snprintf(request_var, sizeof request_var, "Request[%d]", request_var_id);
		request_line = config_read_string(config, section, request_var, NULL);
		if (!request_line)
			break;

		/* Schedule event to process request */
		struct request_stack_t *stack;
		stack = dram_request_stack_create();

		request_line = xstrdup(request_line);
		stack->request_line = request_line;
		stack->system = system;

		esim_schedule_event(EV_DRAM_REQUEST, stack, 0);

		/* Next command */
		request_var_id++;
	}
}
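As with the network commands in Example #10, this loop reads consecutive Request[<n>] keys until one is missing, duplicates each line, and schedules an EV_DRAM_REQUEST event for it. Based on the parsing in dram_request_handler() (Example #5), each request line carries a cycle, a READ/WRITE type, and a hexadecimal address. The fragment below is an illustrative guess at such a section; the section name and the concrete values are assumptions.

[Requests]
Request[0] = 100 READ 0x1000
Request[1] = 120 WRITE 0x1040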
Example #15
void mod_handler_local_mem_load(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *new_stack;

	struct mod_t *mod = stack->mod;


	if (event == EV_MOD_LOCAL_MEM_LOAD)
	{
		struct mod_stack_t *master_stack;

		mem_debug("  %lld %lld 0x%x %s load\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.new_access name=\"A-%lld\" type=\"load\" "
			"state=\"%s:load\" addr=0x%x\n",
			stack->id, mod->name, stack->addr);

		/* Record access */
		mod_access_start(mod, stack, mod_access_load);

		/* Coalesce access */
		master_stack = mod_can_coalesce(mod, mod_access_load, stack->addr, stack);
		if (master_stack)
		{
			mod->reads++;
			mod_coalesce(mod, master_stack, stack);
			mod_stack_wait_in_stack(stack, master_stack, EV_MOD_LOCAL_MEM_LOAD_FINISH);
			return;
		}

		esim_schedule_event(EV_MOD_LOCAL_MEM_LOAD_LOCK, stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_LOAD_LOCK)
	{
		struct mod_stack_t *older_stack;

		mem_debug("  %lld %lld 0x%x %s load lock\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:load_lock\"\n",
			stack->id, mod->name);

		/* If there is any older write, wait for it */
		older_stack = mod_in_flight_write(mod, stack);
		if (older_stack)
		{
			mem_debug("    %lld wait for write %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_LOAD_LOCK);
			return;
		}

		/* If there is any older access to the same address that this access could not
		 * be coalesced with, wait for it. */
		older_stack = mod_in_flight_address(mod, stack->addr, stack);
		if (older_stack)
		{
			mem_debug("    %lld wait for access %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_LOAD_LOCK);
			return;
		}

		/* Call find and lock to lock the port */
		new_stack = mod_stack_create(stack->id, mod, stack->addr,
			EV_MOD_LOCAL_MEM_LOAD_FINISH, stack);
		new_stack->read = 1;
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK, new_stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_LOAD_FINISH)
	{
		mem_debug("%lld %lld 0x%x %s load finish\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:load_finish\"\n",
			stack->id, mod->name);
		mem_trace("mem.end_access name=\"A-%lld\"\n", stack->id);

		/* Increment witness variable */
		if (stack->witness_ptr)
			(*stack->witness_ptr)++;

		/* Return event queue element into event queue */
		if (stack->event_queue && stack->event_queue_item)
			linked_list_add(stack->event_queue, stack->event_queue_item);

		/* Finish access */
		mod_access_finish(mod, stack);

		/* Return */
		mod_stack_return(stack);
		return;
	}

	abort();
}
Example #16
void net_command_handler(int event, void *data)
{

	struct net_command_stack_t *stack = data;
	struct net_t *net = stack->net;

	char out_msg[MAX_STRING_SIZE];
	char msg_detail[MAX_STRING_SIZE];

	char *msg_str = out_msg;
	int msg_size = sizeof out_msg;

	char *msg_detail_str = msg_detail;
	int msg_detail_size = sizeof msg_detail;

	int test_failed;

	test_failed = 0;
	*msg_str = '\0';
	*msg_detail_str = '\0';

	long long cycle = esim_domain_cycle(net_domain_index);

	if (event == EV_NET_COMMAND)
	{
		char *command_line = stack->command;
		char command[MAX_STRING_SIZE];

		struct list_t *token_list;

		/* Split command in tokens, skip command */
		token_list = str_token_list_create(command_line, " ");
		assert(list_count(token_list));

		long long command_cycle = net_command_get_llint(token_list,
				command_line,"cycle value");

		if (command_cycle > cycle)
		{
			str_token_list_free(token_list);
			esim_schedule_event(event, stack, command_cycle - cycle);
			return;
		}

		net_command_get_string(token_list, command_line, command,
				sizeof command );

		if (!strcasecmp(command, "Send"))
		{
			int msg_size;
			long long int msg_id;

			struct net_node_t *src_node;
			struct net_node_t *dst_node;


			/* Getting the Source Node from the command */
			src_node = net_command_get_node(net,token_list,
					command_line, net_node_end);

			/* Getting the Destination Node from the command */
			dst_node = net_command_get_node(net, token_list,
					command_line, net_node_end);

			/* Getting the Message Size; or Default values */
			msg_size = (int) net_command_get_def(token_list,
					command_line,"Message size value",
					(long long) net_msg_size);

			/* Getting the Message ID; or default value */
			msg_id = net_command_get_def(token_list, command_line,
					"Message id value",
					net->msg_id_counter + 1);

			if (msg_id != net->msg_id_counter + 1)
			{
				fatal("%s: The message id is out of order. "
					"\n\t\"You are out o' order. "
					"The whole Trial is out o' order \" \n"
					"\tBear with us. We are being "
					"cute here \n\t> %s",
					__FUNCTION__, command_line);
			}

			stack->msg = net_send_ev(net, src_node, dst_node, msg_size,
					EV_NET_COMMAND_RCV, stack);
			fprintf(stderr, "\n Message %lld sent at %lld \n\n", msg_id,
					cycle);

		}

		else if (!strcasecmp(command, "Receive"))
		{
			long long msg_id;
			struct net_node_t *dst_node;
			struct net_msg_t *msg;

			dst_node = net_command_get_node(net, token_list,
					command_line, net_node_end);

			msg_id = net_command_get_llint(token_list, command_line,
					"Message id value");

			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: Receive in node %s for message"
				" %lld ",cycle, dst_node->name, msg_id);

			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}

			/* Checking : the node indicated in the receive command
			 * is the destination node for the message*/
			else
			{
				if (dst_node != msg->dst_node)
					warning("%s: The node %s in the receive "
						"command is \n\tnot intended node "
						"%s in the send command\n > %s"
						,__FUNCTION__, dst_node->name,
						msg->dst_node->name, command_line);

				if (msg->node != dst_node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\t Message expected to be "
						"in node %s, but found in %s\n",
						dst_node->name,	msg->node->name);
				}

				else if (msg != list_get(msg->buffer->msg_list, 0))
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage expected to be "
						"ready for receive in node %s \n\tbut "
						"not in the buffer head\n",
						dst_node->name);
				}
			}

			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg, test_failed ?
				"failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);

		}

		else if (!strcasecmp(command, "InBufferCheck"))
		{
			long long int msg_id;
			struct net_msg_t * msg;
			struct net_node_t * node;
			struct net_buffer_t *buffer;

			node = net_command_get_node(net, token_list,
					command_line, 0);

			msg_id = net_command_get_llint(token_list, command_line,
					"invalid message id");
			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in input buffer of "
				"node %s", cycle, msg_id, node->name);


			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				buffer = msg->buffer;
				if (msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage expected to be in node %s"
						"but found in node %s \n",
						node->name, msg->node->name);
				}
				else if (buffer != list_get(node->input_buffer_list,
						buffer->index))
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\t Message is not in any of input "
						"buffers of the node %s\n",
						node->name);
				}
			}
			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg, test_failed ?
				"failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}

		else if (!strcasecmp(command, "OutBufferCheck"))
		{
			long long int msg_id;
			struct net_msg_t * msg;
			struct net_node_t * node;
			struct net_buffer_t *buffer;

			node = net_command_get_node(net, token_list,
					command_line, 0);

			msg_id = net_command_get_llint(token_list, command_line,
					"invalid message id");
			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in one of output buffers"
				" of node %s", cycle, msg_id, node->name);


			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				buffer = msg->buffer;
				if (msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage expected to be in node %s"
						"but found in node %s\n",
						node->name, msg->node->name);
				}
				else if (buffer != list_get(node->output_buffer_list,
						buffer->index))
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\t Message is not in any of output"
						"buffers of the node %s\n", node->name);
				}
			}
			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg, test_failed ?
				"failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}

		else if (!strcasecmp(command, "NodeCheck"))
		{

			long long int msg_id;
			struct net_msg_t * msg;
			struct net_node_t * node;

			node = net_command_get_node(net, token_list,
					command_line, 0);

			msg_id = net_command_get_llint(token_list, command_line,
					"invalid message id");
			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in node %s", cycle,
				msg_id, node->name);


			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				if (msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage is not in the node %s\n",
						node->name);
				}
			}
			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg, test_failed ?
				"failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}

		else if (!strcasecmp(command, "ExactPosCheck"))
		{
			long long int msg_id;
			struct net_msg_t * msg;
			struct net_node_t * node;
			struct net_buffer_t *buffer;

			node = net_command_get_node(net, token_list,
				command_line, 0);

			buffer = net_command_get_buffer(node, token_list,
					command_line);

			msg_id = net_command_get_llint(token_list, command_line,
				"invalid message id");
			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in buffer %s"
				" of node %s", cycle, msg_id, buffer->name, node->name);

			/* Checks */
			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				if (msg->buffer != buffer || msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"Message expected to be in buffer %s"
						"(node %s) \n\tbut found in buffer %s"
						"(node %s)\n",buffer->name, node->name,
						msg->buffer->name, msg->node->name);
				}
			}
			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg, test_failed ?
				"failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}
		else
			fatal("%s: %s: invalid command.\n\t> %s",
					__FUNCTION__, command, command_line);
		free(command_line);
		str_token_list_free(token_list);
	}

	else if (event == EV_NET_COMMAND_RCV)
	{
		struct net_msg_t *msg;
		struct net_node_t *dst_node;
		msg = stack->msg;
		dst_node = msg->dst_node;

		assert(dst_node == msg->node);

		fprintf(stderr, "\n Message %lld received at %lld \n\n", msg->id, cycle);
		net_receive(net, dst_node, msg);
		net_command_stack_return(stack);
	}
}
Example #17
void mod_handler_local_mem_store(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *new_stack;

	struct mod_t *mod = stack->mod;


	if (event == EV_MOD_LOCAL_MEM_STORE)
	{
		struct mod_stack_t *master_stack;

		mem_debug("%lld %lld 0x%x %s store\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.new_access name=\"A-%lld\" type=\"store\" "
			"state=\"%s:store\" addr=0x%x\n",
			stack->id, mod->name, stack->addr);

		/* Record access */
		mod_access_start(mod, stack, mod_access_store);

		/* Coalesce access */
		master_stack = mod_can_coalesce(mod, mod_access_store, stack->addr, stack);
		if (master_stack)
		{
			mod->writes++;
			mod_coalesce(mod, master_stack, stack);
			mod_stack_wait_in_stack(stack, master_stack, EV_MOD_LOCAL_MEM_STORE_FINISH);

			/* Increment witness variable */
			if (stack->witness_ptr)
				(*stack->witness_ptr)++;

			return;
		}

		/* Continue */
		esim_schedule_event(EV_MOD_LOCAL_MEM_STORE_LOCK, stack, 0);
		return;
	}


	if (event == EV_MOD_LOCAL_MEM_STORE_LOCK)
	{
		struct mod_stack_t *older_stack;

		mem_debug("  %lld %lld 0x%x %s store lock\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:store_lock\"\n",
			stack->id, mod->name);

		/* If there is any older access, wait for it */
		older_stack = stack->access_list_prev;
		if (older_stack)
		{
			mem_debug("    %lld wait for access %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_STORE_LOCK);
			return;
		}

		/* Call find and lock */
		new_stack = mod_stack_create(stack->id, mod, stack->addr,
			EV_MOD_LOCAL_MEM_STORE_FINISH, stack);
		new_stack->read = 0;
		new_stack->witness_ptr = stack->witness_ptr;
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK, new_stack, 0);

		/* Set witness variable to NULL so that retries from the same
		 * stack do not increment it multiple times */
		stack->witness_ptr = NULL;

		return;
	}

	if (event == EV_MOD_LOCAL_MEM_STORE_FINISH)
	{
		mem_debug("%lld %lld 0x%x %s store finish\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:store_finish\"\n",
			stack->id, mod->name);
		mem_trace("mem.end_access name=\"A-%lld\"\n", stack->id);

		/* Return event queue element into event queue */
		if (stack->event_queue && stack->event_queue_item)
			linked_list_add(stack->event_queue, stack->event_queue_item);

		/* Finish access */
		mod_access_finish(mod, stack);

		/* Return */
		mod_stack_return(stack);
		return;
	}

	abort();
}
Example #18
void mod_handler_local_mem_find_and_lock(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *ret = stack->ret_stack;

	struct mod_t *mod = stack->mod;


	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK)
	{
		mem_debug("  %lld %lld 0x%x %s find and lock\n",
			esim_cycle, stack->id, stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock\"\n",
			stack->id, mod->name);

		/* Get a port */
		mod_lock_port(mod, stack, EV_MOD_LOCAL_MEM_FIND_AND_LOCK_PORT);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_PORT)
	{
		mem_debug("  %lld %lld 0x%x %s find and lock port\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_port\"\n",
			stack->id, mod->name);

		/* Set parent stack flag expressing that port has already been locked.
		 * This flag is checked by new writes to find out if it is already too
		 * late to coalesce. */
		ret->port_locked = 1;

		/* Statistics */
		mod->accesses++;
		if (stack->read)
		{
			mod->reads++;
			mod->effective_reads++;
		}
		else
		{
			mod->writes++;
			mod->effective_writes++;

			/* Increment witness variable when port is locked */
			if (stack->witness_ptr)
			{
				(*stack->witness_ptr)++;
				stack->witness_ptr = NULL;
			}
		}

		/* Access latency */
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK_ACTION, stack, mod->latency);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_ACTION)
	{
		struct mod_port_t *port = stack->port;

		assert(port);
		mem_debug("  %lld %lld 0x%x %s find and lock action\n", esim_cycle, stack->id,
			stack->tag, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_action\"\n",
			stack->id, mod->name);

		/* Release port */
		mod_unlock_port(mod, port, stack);

		/* Continue */
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_FINISH)
	{
		mem_debug("  %lld %lld 0x%x %s find and lock finish (err=%d)\n", esim_cycle, stack->id,
			stack->tag, mod->name, stack->err);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_finish\"\n",
			stack->id, mod->name);

		mod_stack_return(stack);
		return;
	}

	abort();
}
Example #19
void moesi_handler_read_request(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	uint32_t dir_entry_tag, z;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;

	if (event == EV_MOESI_READ_REQUEST)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s read request\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Default return values*/
		ret->shared = 0;
		ret->err = 0;

		/* Send request to target */
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? ccache->loid : 0;
		dest = ccache->next == target ? 0 : target->loid;
		net_send_ev(net, src, dest, 8, EV_MOESI_READ_REQUEST_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_RECEIVE)
	{
		cache_debug("  %lld %lld 0x%x %s read request receive\n", CYCLE, ID,
			stack->addr, target->name);
		
		/* Find and lock */
		newstack = moesi_stack_create(stack->id, target, stack->addr,
			EV_MOESI_READ_REQUEST_ACTION, stack);
		newstack->blocking = target->next == ccache;
		newstack->read = 1;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_ACTION)
	{
		cache_debug("  %lld %lld 0x%x %s read request action\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check block locking error. If read request is down-up, there should not
		 * have been any error while locking. */
		if (stack->err) {
			assert(ccache->next == target);
			ret->err = 1;
			stack->response = 8;
			esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
			return;
		}
		esim_schedule_event(ccache->next == target ? EV_MOESI_READ_REQUEST_UPDOWN :
			EV_MOESI_READ_REQUEST_DOWNUP, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN)
	{
		struct ccache_t *owner;

		cache_debug("  %lld %lld 0x%x %s read request updown\n", CYCLE, ID,
			stack->tag, target->name);
		stack->pending = 1;
		
		if (stack->status) {
			
			/* Status = M/O/E/S
			 * Check: addr multiple of requester's bsize
			 * Check: no subblock requested by ccache is already owned by ccache */
			assert(stack->addr % ccache->bsize == 0);
			dir = ccache_get_dir(target, stack->tag);
			for (z = 0; z < dir->zsize; z++) {
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
					continue;
				dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
				assert(dir_entry->owner != ccache->loid);
			}

			/* Send read request to owners other than ccache for all subblocks. */
			for (z = 0; z < dir->zsize; z++) {
				dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (!dir_entry->owner) /* no owner */
					continue;
				if (dir_entry->owner == ccache->loid) /* owner is ccache */
					continue;
				owner = net_get_node_data(target->hinet, dir_entry->owner);
				if (dir_entry_tag % owner->bsize) /* not the first owner subblock */
					continue;

				/* Send read request */
				stack->pending++;
				newstack = moesi_stack_create(stack->id, target, dir_entry_tag,
					EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack);
				newstack->target = owner;
				esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
			}
			esim_schedule_event(EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack, 0);

		} else {
			
			/* Status = I */
			assert(!dir_entry_group_shared_or_owned(target->dir,
				stack->set, stack->way));
			newstack = moesi_stack_create(stack->id, target, stack->tag,
				EV_MOESI_READ_REQUEST_UPDOWN_MISS, stack);
			newstack->target = target->next;
			esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
		}
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN_MISS)
	{
		cache_debug("  %lld %lld 0x%x %s read request updown miss\n", CYCLE, ID,
			stack->tag, target->name);
		
		/* Check error */
		if (stack->err) {
			dir_lock_unlock(stack->dir_lock);
			ret->err = 1;
			stack->response = 8;
			esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Set block state to excl/shared depending on the return value 'shared'
		 * that comes from a read request into the next cache level.
		 * Also set the tag of the block. */
		cache_set_block(target->cache, stack->set, stack->way, stack->tag,
			stack->shared ? moesi_status_shared : moesi_status_exclusive);
		esim_schedule_event(EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN_FINISH)
	{
		int shared;

		/* Ignore while pending requests */
		assert(stack->pending > 0);
		stack->pending--;
		if (stack->pending)
			return;
		cache_debug("  %lld %lld 0x%x %s read request updown finish\n", CYCLE, ID,
			stack->tag, target->name);

		/* Set owner to 0 for all directory entries not owned by ccache. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			if (dir_entry->owner != ccache->loid)
				dir_entry->owner = 0;
		}

		/* For each subblock requested by ccache, set ccache as sharer, and
		 * check whether there is other cache sharing it. */
		shared = 0;
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			dir_entry_set_sharer(dir, dir_entry, ccache->loid);
			if (dir_entry->sharers > 1)
				shared = 1;
		}

		/* If no subblock requested by ccache is shared by other cache, set ccache
		 * as owner of all of them. Otherwise, notify requester that the block is
		 * shared by setting the 'shared' return value to true. */
		ret->shared = shared;
		if (!shared) {
			for (z = 0; z < dir->zsize; z++) {
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
					continue;
				dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
				dir_entry->owner = ccache->loid;
			}
		}

		/* Respond with data, update LRU, unlock */
		stack->response = ccache->bsize + 8;
		if (target->cache)
			cache_access_block(target->cache, stack->set, stack->way);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_DOWNUP)
	{
		struct ccache_t *owner;

		cache_debug("  %lld %lld 0x%x %s read request downup\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check: status must not be invalid.
		 * By default, only one pending request.
		 * Response depends on status */
		assert(stack->status != moesi_status_invalid);
		stack->pending = 1;
		stack->response = stack->status == moesi_status_exclusive ||
			stack->status == moesi_status_shared ?
			8 : target->bsize + 8;

		/* Send a read request to the owner of each subblock. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			if (!dir_entry->owner)  /* no owner */
				continue;
			owner = net_get_node_data(target->hinet, dir_entry->owner);
			if (dir_entry_tag % owner->bsize)  /* not the first subblock */
				continue;
			stack->pending++;
			stack->response = target->bsize + 8;
			newstack = moesi_stack_create(stack->id, target, dir_entry_tag,
				EV_MOESI_READ_REQUEST_DOWNUP_FINISH, stack);
			newstack->target = owner;
			esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
		}

		esim_schedule_event(EV_MOESI_READ_REQUEST_DOWNUP_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_DOWNUP_FINISH)
	{
		/* Ignore while pending requests */
		assert(stack->pending > 0);
		stack->pending--;
		if (stack->pending)
			return;
		cache_debug("  %lld %lld 0x%x %s read request downup finish\n", CYCLE, ID,
			stack->tag, target->name);

		/* Set owner of subblocks to 0. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			dir_entry->owner = 0;
		}

		/* Set status to S, update LRU, unlock */
		cache_set_block(target->cache, stack->set, stack->way, stack->tag,
			moesi_status_shared);
		cache_access_block(target->cache, stack->set, stack->way);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_REPLY)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s read request reply\n", CYCLE, ID,
			stack->tag, target->name);

		assert(stack->response);
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? 0 : target->loid;
		dest = ccache->next == target ? ccache->loid : 0;
		net_send_ev(net, src, dest, stack->response,
			EV_MOESI_READ_REQUEST_FINISH, stack);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s read request finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		moesi_stack_return(stack);
		return;
	}

	abort();
}
Example #20
void dram_controller_schedule_command(struct dram_controller_t *controller)
{
	int i, j, k, num_info_per_scheduler, valid;
	struct dram_command_t *command;
	struct dram_bank_info_t *info;
	struct dram_command_scheduler_t *scheduler;

	num_info_per_scheduler = controller->dram_num_ranks * controller->dram_num_banks_per_device;

	for (i = 0; i < controller->num_physical_channels; i++)
	{
		/* Get scheduler */
		scheduler = list_get(controller->dram_command_scheduler_list, i);

		for (j = 0; j < num_info_per_scheduler; j++)
		{

			switch (controller->scheduling_policy)
			{

				case rank_bank_round_robin:

					/* Locate bank info */
					scheduler->last_scheduled_rank_id++;
					if (scheduler->last_scheduled_rank_id == controller->dram_num_ranks)
					{
						scheduler->last_scheduled_rank_id = 0;
						scheduler->last_scheduled_bank_id ++;
						if (scheduler->last_scheduled_bank_id == controller->dram_num_banks_per_device)
							scheduler->last_scheduled_bank_id = 0;
					}

					break;

				case bank_rank_round_robin:

					/* Locate bank info */
					scheduler->last_scheduled_bank_id++;
					if (scheduler->last_scheduled_bank_id == controller->dram_num_banks_per_device)
					{
						scheduler->last_scheduled_bank_id = 0;
						scheduler->last_scheduled_rank_id ++;
						if (scheduler->last_scheduled_rank_id == controller->dram_num_ranks)
							scheduler->last_scheduled_rank_id = 0;
					}

					break;

				default:

					break;
			}

			info = list_get(controller->dram_bank_info_list, scheduler->last_scheduled_bank_id + scheduler->last_scheduled_rank_id * controller->dram_num_banks_per_device + scheduler->channel_id * controller->dram_num_ranks * controller->dram_num_banks_per_device);

			/* Fetch a command from command queue */
			command = list_head(info->command_queue);
			if (command)
			{
				/* Check timing */
				valid = 1;
				for (k = 0; k < DRAM_TIMING_MATRIX_SIZE; k++)
				{
					if (esim_cycle - info->dram_bank_info_last_scheduled_time_matrix[k] < controller->dram_timing_matrix[command->type][k])
						valid = 0;
				}

				/* If timing is valid, schedule the command */
				if (valid)
				{
					/* Dequeue command */
					command = list_dequeue(info->command_queue);

					/* Schedule command receive */
					esim_schedule_event(EV_DRAM_COMMAND_RECEIVE, command, 0);

					/* Update last scheduled time matrix */
					info->dram_bank_info_last_scheduled_time_matrix[command->type] = esim_cycle;
				}
			}
		}
	}
}
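A worked example of the flat index passed to list_get() above: the bank info entry sits at bank_id + rank_id * num_banks_per_device + channel_id * num_ranks * num_banks_per_device. With, say, 2 ranks and 8 banks per device (illustrative values), the scheduler of channel 1 visiting rank 1, bank 3 reads entry 3 + 1*8 + 1*2*8 = 27.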
Example #21
void moesi_handler_evict(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;
	uint32_t dir_entry_tag, z;

	if (event == EV_MOESI_EVICT)
	{
		/* Default ret value */
		ret->err = 0;

		/* Get block info */
		ccache_get_block(ccache, stack->set, stack->way, &stack->tag, &stack->status);
		assert(stack->status || !dir_entry_group_shared_or_owned(ccache->dir,
			stack->set, stack->way));
		cache_debug("  %lld %lld 0x%x %s evict (set=%d, way=%d, status=%d)\n", CYCLE, ID,
			stack->tag, ccache->name, stack->set, stack->way, stack->status);
	
		/* Save some data */
		stack->src_set = stack->set;
		stack->src_way = stack->way;
		stack->src_tag = stack->tag;
		stack->target = target = ccache->next;

		/* Send write request to all sharers */
		newstack = moesi_stack_create(stack->id, ccache, 0,
			EV_MOESI_EVICT_ACTION, stack);
		newstack->except = NULL;
		newstack->set = stack->set;
		newstack->way = stack->way;
		esim_schedule_event(EV_MOESI_INVALIDATE, newstack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_ACTION)
	{
		cache_debug("  %lld %lld 0x%x %s evict action\n", CYCLE, ID,
			stack->tag, ccache->name);
		
		/* status = I */
		if (stack->status == moesi_status_invalid) {
			esim_schedule_event(EV_MOESI_EVICT_FINISH, stack, 0);
			return;
		}

		/* status = M/O */
		if (stack->status == moesi_status_modified ||
			stack->status == moesi_status_owned) {
			net_send_ev(ccache->lonet, ccache->loid, 0,
				ccache->bsize + 8, EV_MOESI_EVICT_RECEIVE, stack);
			stack->writeback = 1;
			return;
		}

		/* status = S/E */
		net_send_ev(ccache->lonet, ccache->loid, 0, 8,
			EV_MOESI_EVICT_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_EVICT_RECEIVE) {
		cache_debug("  %lld %lld 0x%x %s evict receive\n", CYCLE, ID,
			stack->tag, target->name);
		
		/* Find and lock */
		newstack = moesi_stack_create(stack->id, target, stack->src_tag,
			EV_MOESI_EVICT_WRITEBACK, stack);
		newstack->blocking = 0;
		newstack->read = 0;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_WRITEBACK)
	{
		cache_debug("  %lld %lld 0x%x %s evict writeback\n", CYCLE, ID,
			stack->tag, target->name);

		/* Error locking block */
		if (stack->err) {
			ret->err = 1;
			esim_schedule_event(EV_MOESI_EVICT_REPLY, stack, 0);
			return;
		}

		/* No writeback */
		if (!stack->writeback) {
			esim_schedule_event(EV_MOESI_EVICT_PROCESS, stack, 0);
			return;
		}

		/* Writeback */
		newstack = moesi_stack_create(stack->id, target, 0,
			EV_MOESI_EVICT_WRITEBACK_EXCLUSIVE, stack);
		newstack->except = ccache;
		newstack->set = stack->set;
		newstack->way = stack->way;
		esim_schedule_event(EV_MOESI_INVALIDATE, newstack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_WRITEBACK_EXCLUSIVE)
	{
		cache_debug("  %lld %lld 0x%x %s evict writeback exclusive\n", CYCLE, ID,
			stack->tag, target->name);

		/* Status = O/S/I */
		assert(stack->status != moesi_status_invalid);
		if (stack->status == moesi_status_owned || stack->status ==
			moesi_status_shared)
		{
			newstack = moesi_stack_create(stack->id, target, stack->tag,
				EV_MOESI_EVICT_WRITEBACK_FINISH, stack);
			newstack->target = target->next;
			esim_schedule_event(EV_MOESI_WRITE_REQUEST, newstack, 0);
			return;
		}

		/* Status = M/E */
		esim_schedule_event(EV_MOESI_EVICT_WRITEBACK_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_WRITEBACK_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s evict writeback finish\n", CYCLE, ID,
			stack->tag, target->name);

		/* Error in write request */
		if (stack->err) {
			ret->err = 1;
			dir_lock_unlock(stack->dir_lock);
			esim_schedule_event(EV_MOESI_EVICT_REPLY, stack, 0);
			return;
		}

		/* Set tag, status and lru */
		if (target->cache) {
			cache_set_block(target->cache, stack->set, stack->way, stack->tag,
				moesi_status_modified);
			cache_access_block(target->cache, stack->set, stack->way);
		}
		esim_schedule_event(EV_MOESI_EVICT_PROCESS, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_PROCESS)
	{

		cache_debug("  %lld %lld 0x%x %s evict process\n", CYCLE, ID,
			stack->tag, target->name);

		/* Remove sharer, owner, and unlock */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->src_tag || dir_entry_tag >= stack->src_tag + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			dir_entry_clear_sharer(dir, dir_entry, ccache->loid);
			if (dir_entry->owner == ccache->loid)
				dir_entry->owner = 0;
		}
		dir_lock_unlock(stack->dir_lock);

		esim_schedule_event(EV_MOESI_EVICT_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_REPLY)
	{
		cache_debug("  %lld %lld 0x%x %s evict reply\n", CYCLE, ID,
			stack->tag, target->name);
		
		net_send_ev(target->hinet, 0, ccache->loid, 8,
			EV_MOESI_EVICT_REPLY_RECEIVE, stack);
		return;

	}

	if (event == EV_MOESI_EVICT_REPLY_RECEIVE)
	{
		cache_debug("  %lld %lld 0x%x %s evict reply receive\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Invalidate block if there was no error. */
		if (!stack->err)
			cache_set_block(ccache->cache, stack->src_set, stack->src_way,
				0, moesi_status_invalid);
		assert(!dir_entry_group_shared_or_owned(ccache->dir,
			stack->src_set, stack->src_way));
		esim_schedule_event(EV_MOESI_EVICT_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s evict finish\n", CYCLE, ID,
			stack->tag, ccache->name);
		
		moesi_stack_return(stack);
		return;
	}

	abort();
}
Example #22
void net_event_handler(int event, void *data)
{
	struct net_stack_t *stack = data;
	struct net_t *net = stack->net;
	struct net_routing_table_t *routing_table = net->routing_table;
	struct net_msg_t *msg = stack->msg;

	struct net_node_t *src_node = msg->src_node;
	struct net_node_t *dst_node = msg->dst_node;

	struct net_node_t *node = msg->node;
	struct net_buffer_t *buffer = msg->buffer;

	if (event == EV_NET_SEND)
	{
		struct net_routing_table_entry_t *entry;
		struct net_buffer_t *output_buffer;

		/* Debug */
		net_debug("msg "
			"a=\"send\" "
			"net=\"%s\" "
			"msg=%lld "
			"size=%d "
			"src=\"%s\" "
			"dst=\"%s\"\n",
			net->name,
			msg->id,
			msg->size,
			src_node->name,
			dst_node->name);

		/* Get output buffer */
		entry = net_routing_table_lookup(routing_table, src_node, dst_node);
		output_buffer = entry->output_buffer;
		if (!output_buffer)
			fatal("%s: no route from %s to %s.\n%s", net->name, src_node->name,
				dst_node->name, net_err_no_route);
		if (output_buffer->write_busy >= esim_cycle)
			panic("%s: output buffer busy.\n%s", __FUNCTION__, net_err_can_send);
		if (msg->size > output_buffer->size)
			panic("%s: message does not fit in buffer.\n%s", __FUNCTION__, net_err_can_send);
		if (output_buffer->count + msg->size > output_buffer->size)
			panic("%s: output buffer full.\n%s", __FUNCTION__, net_err_can_send);

		/* Insert in output buffer (1 cycle latency) */
		net_buffer_insert(output_buffer, msg);
		output_buffer->write_busy = esim_cycle;
		msg->node = src_node;
		msg->buffer = output_buffer;
		msg->busy = esim_cycle;

		/* Schedule next event */
		esim_schedule_event(EV_NET_OUTPUT_BUFFER, stack, 1);
	}

	else if (event == EV_NET_OUTPUT_BUFFER)
	{
		struct net_link_t *link;
		struct net_buffer_t *input_buffer;
		int lat;

		/* Debug */
		net_debug("msg "
			"a=\"obuf\" "
			"net=\"%s\" "
			"msg=%lld "
			"node=\"%s\" "
			"buf=\"%s\"\n",
			net->name,
			msg->id,
			node->name,
			buffer->name);

		/* If message is not at buffer head, process later */
		assert(list_count(buffer->msg_list));
		if (list_get(buffer->msg_list, 0) != msg)
		{
			net_buffer_wait(buffer, event, stack);
			return;
		}

		/* If source output buffer is busy, wait */
		if (buffer->read_busy >= esim_cycle)
		{
			esim_schedule_event(event, stack, buffer->read_busy - esim_cycle + 1);
			return;
		}
		
		/* If link is busy, wait */
		link = buffer->link;
		if (link->busy >= esim_cycle)
		{
			esim_schedule_event(event, stack, link->busy - esim_cycle + 1);
			return;
		}

		/* If the buffer contains the message but does not currently control the shared link, wait */
		if (link->virtual_channel > 1)
		{
			struct net_buffer_t *temp_buffer;
			temp_buffer = net_link_arbitrator_vc(link, node);
			if (temp_buffer != buffer)
			{
				net_debug("msg "
					"a=\"arbitrator stall\" "
					"net=\"%s\" "
					"msg=%lld "
					"why=\"sched\"\n",
					net->name,
					msg->id);
				esim_schedule_event(event, stack, 1);
				return;
			}
		}

		/* If destination input buffer is busy, wait */
		assert(buffer == link->src_buffer);
		input_buffer = link->dst_buffer;
		if (input_buffer->write_busy >= esim_cycle)
		{
			esim_schedule_event(event, stack, input_buffer->write_busy - esim_cycle + 1);
			return;
		}

		/* If destination input buffer is full, wait */
		if (msg->size > input_buffer->size)
			fatal("%s: message does not fit in buffer.\n%s",
				net->name, net_err_large_message);
		if (input_buffer->count + msg->size > input_buffer->size)
		{
			net_buffer_wait(input_buffer, event, stack);
			return;
		}

		/* Calculate latency and occupy resources */
		lat = (msg->size - 1) / link->bandwidth + 1;
		assert(lat > 0);
		buffer->read_busy = esim_cycle + lat - 1;
		link->busy = esim_cycle + lat - 1;
		input_buffer->write_busy = esim_cycle + lat - 1;

		/* Transfer message to next input buffer */
		assert(msg->busy < esim_cycle);
		net_buffer_extract(buffer, msg);
		net_buffer_insert(input_buffer, msg);
		msg->node = input_buffer->node;
		msg->buffer = input_buffer;
		msg->busy = esim_cycle + lat - 1;

		/* Stats */
		link->busy_cycles += lat;
		link->transferred_bytes += msg->size;
		link->transferred_msgs++;
		node->bytes_sent += msg->size;
		node->msgs_sent++;
		input_buffer->node->bytes_received += msg->size;
		input_buffer->node->msgs_received++;

		/* Schedule next event */
		esim_schedule_event(EV_NET_INPUT_BUFFER, stack, lat);
	}

	else if (event == EV_NET_INPUT_BUFFER)
	{
		struct net_routing_table_entry_t *entry;
		struct net_buffer_t *output_buffer;
		int lat;

		/* Debug */
		net_debug("msg "
			"a=\"ibuf\" "
			"net=\"%s\" "
			"msg=%lld "
			"node=\"%s\" "
			"buf=\"%s\"\n",
			net->name,
			msg->id,
			node->name,
			buffer->name);

		/* If message is not at buffer head, process later */
		assert(list_count(buffer->msg_list));
		if (list_get(buffer->msg_list, 0) != msg)
		{
			net_debug("msg "
				"a=\"stall\" "
				"net=\"%s\" "
				"msg=%lld "
				"why=\"not-head\"\n",
				net->name,
				msg->id);
			net_buffer_wait(buffer, event, stack);
			return;
		}
		
		/* If this is the destination node, finish */
		if (node == msg->dst_node)
		{
			esim_schedule_event(EV_NET_RECEIVE, stack, 0);
			return;
		}
		
		/* If source input buffer is busy, wait */
		if (buffer->read_busy >= esim_cycle)
		{
			net_debug("msg "
				"a=\"stall\" "
				"net=\"%s\" "
				"msg=%lld "
				"why=\"src-busy\"\n",
				net->name,
				msg->id);
			esim_schedule_event(event, stack, buffer->read_busy - esim_cycle + 1);
			return;
		}
		
		/* Get output buffer */
		entry = net_routing_table_lookup(routing_table, node, dst_node);
		output_buffer = entry->output_buffer;
		if (!output_buffer)
			fatal("%s: no route from %s to %s.\n%s", net->name,
				node->name, dst_node->name, net_err_no_route);
		
		/* If destination output buffer is busy, wait */
		if (output_buffer->write_busy >= esim_cycle)
		{
			net_debug("msg "
				"a=\"stall\" "
				"net=\"%s\" "
				"msg=%lld "
				"why=\"dst-busy\"\n",
				net->name,
				msg->id);
			esim_schedule_event(event, stack, output_buffer->write_busy - esim_cycle + 1);
			return;
		}

		/* If destination output buffer is full, wait */
		if (msg->size > output_buffer->size)
			fatal("%s: message does not fit in buffer.\n%s",
				net->name, net_err_large_message);
		if (output_buffer->count + msg->size > output_buffer->size)
		{
			net_debug("msg "
				"a=\"stall\" "
				"net=\"%s\" "
				"msg=%lld "
				"why=\"dst-full\"\n",
				net->name,
				msg->id);
			net_buffer_wait(output_buffer, event, stack);
			return;
		}

		/* If scheduler says that it is not our turn, try later */
		if (net_node_schedule(node, output_buffer) != buffer)
		{
			net_debug("msg "
				"a=\"stall\" "
				"net=\"%s\" "
				"msg=%lld "
				"why=\"sched\"\n",
				net->name,
				msg->id);
			esim_schedule_event(event, stack, 1);
			return;
		}

		/* Calculate latency and occupy resources */
		assert(node->kind != net_node_end);
		assert(node->bandwidth > 0);
		lat = (msg->size - 1) / node->bandwidth + 1;
		assert(lat > 0);
		buffer->read_busy = esim_cycle + lat - 1;
		output_buffer->write_busy = esim_cycle + lat - 1;

		/* Transfer message to next output buffer */
		assert(msg->busy < esim_cycle);
		net_buffer_extract(buffer, msg);
		net_buffer_insert(output_buffer, msg);
		msg->buffer = output_buffer;
		msg->busy = esim_cycle + lat - 1;

		/* Schedule next event */
		esim_schedule_event(EV_NET_OUTPUT_BUFFER, stack, lat);
	}

	else if (event == EV_NET_RECEIVE)
	{
		/* Debug */
		net_debug("msg "
			"a=\"receive\" "
			"net=\"%s\" "
			"msg=%lld "
			"node=\"%s\"\n",
			net->name,
			msg->id,
			dst_node->name);

		/* Stats */
		net->transfers++;
		net->lat_acc += esim_cycle - msg->send_cycle;
		net->msg_size_acc += msg->size;

		/* If no return event was specified, free message here */
		if (stack->ret_event == ESIM_EV_NONE)
			net_receive(net, node, msg);

		/* Finish */
		net_stack_return(stack);
	}

	else
	{
		panic("%s: unknown event", __FUNCTION__);
	}
}
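A note on the latency computation used in both transfer stages above: lat = (msg->size - 1) / bandwidth + 1 is an integer ceiling division, so, for example, a 72-byte message over a link with a bandwidth of 8 bytes per cycle keeps the link, the source buffer, and the destination buffer busy for (72 - 1) / 8 + 1 = 9 cycles (the numbers are illustrative).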
Example #23
void moesi_handler_store(int event, void *data)
{
	struct moesi_stack_t *stack = data, *newstack;
	struct ccache_t *ccache = stack->ccache;

	if (event == EV_MOESI_STORE)
	{
		cache_debug("%lld %lld 0x%x %s store\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Call find and lock */
		newstack = moesi_stack_create(stack->id, ccache, stack->addr,
			EV_MOESI_STORE_ACTION, stack);
		newstack->blocking = 0;
		newstack->read = 0;
		newstack->retry = stack->retry;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_STORE_ACTION)
	{
		int retry_lat;
		cache_debug("  %lld %lld 0x%x %s store action\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Error locking */
		if (stack->err) {
			ccache->write_retries++;
			retry_lat = RETRY_LATENCY;
			cache_debug("    lock error, retrying in %d cycles\n", retry_lat);
			stack->retry = 1;
			esim_schedule_event(EV_MOESI_STORE, stack, retry_lat);
			return;
		}

		/* Hit - status=M/E */
		if (stack->status == moesi_status_modified ||
			stack->status == moesi_status_exclusive)
		{
			esim_schedule_event(EV_MOESI_STORE_FINISH, stack, 0);
			return;
		}

		/* Miss - status=O/S/I */
		newstack = moesi_stack_create(stack->id, ccache, stack->tag,
			EV_MOESI_STORE_FINISH, stack);
		newstack->target = ccache->next;
		esim_schedule_event(EV_MOESI_WRITE_REQUEST, newstack, 0);
		return;
	}

	if (event == EV_MOESI_STORE_FINISH)
	{
		int retry_lat;
		cache_debug("%lld %lld 0x%x %s store finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Error in write request, unlock block and retry store. */
		if (stack->err) {
			ccache->write_retries++;
			retry_lat = RETRY_LATENCY;
			dir_lock_unlock(stack->dir_lock);
			cache_debug("    lock error, retrying in %d cycles\n", retry_lat);
			stack->retry = 1;
			esim_schedule_event(EV_MOESI_STORE, stack, retry_lat);
			return;
		}

		/* Update LRU, tag/status, unlock, and return. */
		if (ccache->cache) {
			cache_access_block(ccache->cache, stack->set, stack->way);
			cache_set_block(ccache->cache, stack->set, stack->way,
				stack->tag, moesi_status_modified);
		}
		dir_lock_unlock(stack->dir_lock);
		moesi_stack_return(stack);
		return;
	}

	abort();
}
Example #24
void moesi_handler_load(int event, void *data)
{
	struct moesi_stack_t *stack = data, *newstack;
	struct ccache_t *ccache = stack->ccache;

	if (event == EV_MOESI_LOAD)
	{
		cache_debug("%lld %lld 0x%x %s load\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Call find and lock */
		newstack = moesi_stack_create(stack->id, ccache, stack->addr,
			EV_MOESI_LOAD_ACTION, stack);
		newstack->blocking = 0;
		newstack->read = 1;
		newstack->retry = stack->retry;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_LOAD_ACTION)
	{
		int retry_lat;
		cache_debug("  %lld %lld 0x%x %s load action\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Error locking */
		if (stack->err) {
			ccache->read_retries++;
			retry_lat = RETRY_LATENCY;
			cache_debug("    lock error, retrying in %d cycles\n", retry_lat);
			stack->retry = 1;
			esim_schedule_event(EV_MOESI_LOAD, stack, retry_lat);
			return;
		}

		/* Hit */
		if (stack->status) {
			esim_schedule_event(EV_MOESI_LOAD_FINISH, stack, 0);
			return;
		}

		/* Miss */
		newstack = moesi_stack_create(stack->id, ccache, stack->tag,
			EV_MOESI_LOAD_MISS, stack);
		newstack->target = ccache->next;
		esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
		return;
	}

	if (event == EV_MOESI_LOAD_MISS)
	{
		int retry_lat;
		cache_debug("  %lld %lld 0x%x %s load miss\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Error on read request. Unlock block and retry load. */
		if (stack->err) {
			ccache->read_retries++;
			retry_lat = RETRY_LATENCY;
			dir_lock_unlock(stack->dir_lock);
			cache_debug("    lock error, retrying in %d cycles\n", retry_lat);
			stack->retry = 1;
			esim_schedule_event(EV_MOESI_LOAD, stack, retry_lat);
			return;
		}

		/* Set block state to excl/shared depending on return var 'shared'.
		 * Also set the tag of the block. */
		cache_set_block(ccache->cache, stack->set, stack->way, stack->tag,
			stack->shared ? moesi_status_shared : moesi_status_exclusive);

		/* Continue */
		esim_schedule_event(EV_MOESI_LOAD_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_LOAD_FINISH)
	{
		cache_debug("%lld %lld 0x%x %s load finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Update LRU, unlock, and return. */
		if (ccache->cache)
			cache_access_block(ccache->cache, stack->set, stack->way);
		dir_lock_unlock(stack->dir_lock);
		moesi_stack_return(stack);
		return;
	}

	abort();
}
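
/* Illustration (not from the source): the hit conditions used by the two
 * handlers above, written out as predicates. A load hits on any valid block
 * (a zero status denotes an invalid block in these handlers), while a store
 * needs exclusive ownership (M or E) to complete without a write request. */
static int moesi_block_readable(int status)
{
	return status != 0;
}

static int moesi_block_writable(int status)
{
	return status == moesi_status_modified ||
		status == moesi_status_exclusive;
}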
Exemplo n.º 25
0
void moesi_handler_find_and_lock(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache;

	if (event == EV_MOESI_FIND_AND_LOCK)
	{
		int hit;
		cache_debug("  %lld %lld 0x%x %s find and lock (blocking=%d)\n", CYCLE, ID,
			stack->addr, ccache->name, stack->blocking);
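		/* On completion this handler fills ret->err and, on success,
		 * ret->set, ret->way, ret->status, ret->tag and ret->dir_lock,
		 * leaving the directory entry locked for the caller (see the
		 * FINISH case below). */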

		/* Default return values */
		ret->err = 0;
		ret->set = 0;
		ret->way = 0;
		ret->status = 0;
		ret->tag = 0;

		/* Look for block. */
		hit = ccache_find_block(ccache, stack->addr, &stack->set,
			&stack->way, &stack->tag, &stack->status);
		if (hit)
			cache_debug("    %lld 0x%x %s hit: set=%d, way=%d, status=%d\n", ID,
				stack->tag, ccache->name, stack->set, stack->way, stack->status);

		/* Stats */
		ccache->accesses++;
		if (hit)
			ccache->hits++;
		if (stack->read) {
			ccache->reads++;
			stack->blocking ? ccache->blocking_reads++ : ccache->non_blocking_reads++;
			if (hit)
				ccache->read_hits++;
		} else {
			ccache->writes++;
			stack->blocking ? ccache->blocking_writes++ : ccache->non_blocking_writes++;
			if (hit)
				ccache->write_hits++;
		}
		if (!stack->retry) {
			ccache->no_retry_accesses++;
			if (hit)
				ccache->no_retry_hits++;
			if (stack->read) {
				ccache->no_retry_reads++;
				if (hit)
					ccache->no_retry_read_hits++;
			} else {
				ccache->no_retry_writes++;
				if (hit)
					ccache->no_retry_write_hits++;
			}
		}

		/* Miss */
		if (!hit) {
			
			assert(!stack->blocking);
			assert(ccache != main_memory);

			/* Find victim */
			stack->way = cache_replace_block(ccache->cache, stack->set);
			cache_get_block(ccache->cache, stack->set, stack->way, NULL, &stack->status);
			assert(stack->status || !dir_entry_group_shared_or_owned(ccache->dir,
				stack->set, stack->way));
			cache_debug("    %lld 0x%x %s miss -> lru: set=%d, way=%d, status=%d\n",
				ID, stack->tag, ccache->name, stack->set, stack->way, stack->status);
		}

		/* Lock entry */
		stack->dir_lock = ccache_get_dir_lock(ccache, stack->set, stack->way);
		if (stack->dir_lock->lock && !stack->blocking) {
			cache_debug("    %lld 0x%x %s block already locked: set=%d, way=%d\n",
				ID, stack->tag, ccache->name, stack->set, stack->way);
			ret->err = 1;
			moesi_stack_return(stack);
			return;
		}
		if (!dir_lock_lock(stack->dir_lock, EV_MOESI_FIND_AND_LOCK, stack))
			return;

		/* Entry is locked. Record the transient tag so that a subsequent
		 * lookup detects that the block is being brought in. */
		if (ccache->cache)
			cache_set_transient_tag(ccache->cache, stack->set, stack->way, stack->tag);

		/* On miss, evict if victim is a valid block. */
		if (!hit && stack->status) {
			stack->eviction = 1;
			newstack = moesi_stack_create(stack->id, ccache, 0,
				EV_MOESI_FIND_AND_LOCK_FINISH, stack);
			newstack->set = stack->set;
			newstack->way = stack->way;
			esim_schedule_event(EV_MOESI_EVICT, newstack, ccache->lat);
			return;
		}

		/* Access latency */
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK_FINISH, stack, ccache->lat);
		return;
	}

	if (event == EV_MOESI_FIND_AND_LOCK_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s find and lock finish (err=%d)\n", CYCLE, ID,
			stack->tag, ccache->name, stack->err);

		/* If evict produced err, return err */
		if (stack->err) {
			cache_get_block(ccache->cache, stack->set, stack->way, NULL, &stack->status);
			assert(stack->status);
			assert(stack->eviction);
			ret->err = 1;
			dir_lock_unlock(stack->dir_lock);
			moesi_stack_return(stack);
			return;
		}

		/* Eviction */
		if (stack->eviction) {
			ccache->evictions++;
			cache_get_block(ccache->cache, stack->set, stack->way, NULL, &stack->status);
			assert(!stack->status);
		}

		/* Return */
		ret->err = 0;
		ret->set = stack->set;
		ret->way = stack->way;
		ret->status = stack->status;
		ret->tag = stack->tag;
		ret->dir_lock = stack->dir_lock;
		moesi_stack_return(stack);
		return;
	}

	abort();
}
Exemplo n.º 26
0
void moesi_handler_invalidate(int event, void *data)
{
	struct moesi_stack_t *stack = data, *newstack;
	struct ccache_t *ccache = stack->ccache;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;
	uint32_t dir_entry_tag, z;

	if (event == EV_MOESI_INVALIDATE)
	{
		int node_count, i;
		struct ccache_t *sharer;

		/* Get block info */
		ccache_get_block(ccache, stack->set, stack->way, &stack->tag, &stack->status);
		cache_debug("  %lld %lld 0x%x %s invalidate (set=%d, way=%d, status=%d)\n", CYCLE, ID,
			stack->tag, ccache->name, stack->set, stack->way, stack->status);
		stack->pending = 1;
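		/* Fan-out/fan-in: 'pending' starts at 1 to account for the
		 * self-scheduled FINISH event at the end of this case; each write
		 * request sent to a sharer increments it, and each FINISH decrements
		 * it, so the stack only returns once all outstanding invalidations
		 * have replied. */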

		/* Send write request to all upper level sharers but ccache */
		dir = ccache_get_dir(ccache, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			dir_entry = ccache_get_dir_entry(ccache, stack->set, stack->way, z);
			node_count = ccache->hinet ? ccache->hinet->end_node_count : 0;
			for (i = 1; i < node_count; i++) {
				
				/* Skip non-sharers and 'except' */
				if (!dir_entry_is_sharer(dir, dir_entry, i))
					continue;
				sharer = net_get_node_data(ccache->hinet, i);
				if (sharer == stack->except)
					continue;

				/* Clear sharer and owner */
				dir_entry_clear_sharer(dir, dir_entry, i);
				if (dir_entry->owner == i)
					dir_entry->owner = 0;

				/* Send write request upwards if beginning of block */
				if (dir_entry_tag % sharer->bsize)
					continue;
				newstack = moesi_stack_create(stack->id, ccache, dir_entry_tag,
					EV_MOESI_INVALIDATE_FINISH, stack);
				newstack->target = sharer;
				esim_schedule_event(EV_MOESI_WRITE_REQUEST, newstack, 0);
				stack->pending++;
			}
		}
		esim_schedule_event(EV_MOESI_INVALIDATE_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_INVALIDATE_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s invalidate finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Ignore while pending */
		assert(stack->pending > 0);
		stack->pending--;
		if (stack->pending)
			return;
		moesi_stack_return(stack);
		return;
	}

	abort();
}
Exemplo n.º 27
0
void net_event_handler(int event, void *data)
{
	struct net_stack_t *stack = data;

	struct net_t *net = stack->net;
	struct net_routing_table_t *routing_table = net->routing_table;
	struct net_packet_t *pkt= stack->packet;

	struct net_node_t *src_node = pkt->msg->src_node;
	struct net_node_t *dst_node = pkt->msg->dst_node;

	struct net_node_t *node = pkt->node;
	struct net_buffer_t *buffer = pkt->buffer;

	long long cycle;

	/* Get current cycle */
	cycle = esim_domain_cycle(net_domain_index);

	if ((net_snap_period != 0) &&
		(net->last_recorded_cycle < (cycle / net_snap_period)))
		net_bandwidth_snapshot(net, cycle);
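
	/* A packet advances through the network as a chain of events:
	 * EV_NET_SEND -> EV_NET_OUTPUT_BUFFER -> EV_NET_INPUT_BUFFER -> ...
	 * (repeated across intermediate nodes) -> EV_NET_RECEIVE. */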

	if (event == EV_NET_SEND)
	{
		struct net_routing_table_entry_t *entry;
		struct net_buffer_t *output_buffer;

		if (net->magicNet)
		{
			/* Magic Net work-around */
			src_node->bytes_sent += pkt->size;
			src_node->msgs_sent++;
			dst_node->bytes_received += pkt->size;
			dst_node->msgs_received++;
			pkt->node = dst_node;
			esim_schedule_event(EV_NET_RECEIVE, stack,
				net->fixed_delay);

			/* The packet has already been delivered directly;
			 * skip the regular buffer/link path below. */
			return;
		}

		/* Get output buffer */
		entry = net_routing_table_lookup(routing_table, src_node,
				dst_node);
		output_buffer = entry->output_buffer;
		if (!output_buffer)
			fatal("%s: no route from %s to %s.\n%s", net->name,
					src_node->name, dst_node->name,
					net_err_no_route);

		if (pkt->msg->size > output_buffer->size)
			panic("%s: message does not fit in buffer.\n%s",
					__FUNCTION__, net_err_can_send);

		if (output_buffer->count + pkt->size > output_buffer->size)
			panic("%s: output buffer full.\n%s", __FUNCTION__,
					net_err_can_send);

		/* Insert in output buffer (1 cycle latency) */
		net_buffer_insert(output_buffer, pkt);
		output_buffer->write_busy = cycle;
		pkt->node = src_node;
		pkt->buffer = output_buffer;
		pkt->busy = cycle;

		/* Schedule next event */
		esim_schedule_event(EV_NET_OUTPUT_BUFFER, stack, 1);

	}

	else if (event == EV_NET_OUTPUT_BUFFER)
	{
		struct net_buffer_t *input_buffer;
		int lat;

		/* Debug */
		net_debug("msg "
				"a=\"obuf\" "
				"net=\"%s\" "
				"msg-->pkt=%lld-->%d "
				"node=\"%s\" "
				"buf=\"%s\"\n",
				net->name,
				pkt->msg->id,
				pkt->session_id,
				node->name,
				buffer->name);

		/* If message is not at buffer head, process later */
		assert(list_count(buffer->msg_list));

		if (list_get(buffer->msg_list, 0) != pkt)
		{
			net_buffer_wait(buffer, event, stack);
			net_debug("msg "
					"a=\"stall\" "
					"net=\"%s\" "
					"msg-->packet=%lld:%d "
					"why=\"not output buffer head\"\n",
					net->name,
					pkt->msg->id,
					pkt->session_id);
			return;
		}

		if (buffer->read_busy >= cycle)
		{
			esim_schedule_event(event, stack,
					buffer->read_busy - cycle + 1);
			net_debug("msg "
					"a=\"stall\" "
					"net=\"%s\" "
					"msg-->pkt=%lld:%d "
					"why=\"output buffer busy\" \n",
					net->name,
					pkt->msg->id,
					pkt->session_id);
			return;
		}

		/* If link is busy, wait */
		if (buffer->kind == net_buffer_link)
		{
			struct net_link_t *link;

			assert(buffer->link);
			link = buffer->link;
			if (link->busy >= cycle)
			{
				esim_schedule_event(event, stack,
						link->busy - cycle + 1);
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"link busy\"\n",
						net->name,
						pkt->msg->id,
						pkt->session_id);

				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:link_busy\" "
						"stg=\"LB\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);
				return;
			}

			/* If the buffer holds the message but has not been granted
			 * control of the shared link, wait */
			if (link->virtual_channel > 1)
			{
				struct net_buffer_t *temp_buffer;

				temp_buffer = net_link_arbitrator_vc(link, node);
				if (temp_buffer != buffer)
				{
					net_debug("msg "
							"a=\"stall\" "
							"net=\"%s\" "
							"msg-->pkt=%lld:%d "
							"why=\"arbitrator sched\"\n",
							net->name,
							pkt->msg->id,
							pkt->session_id);
					esim_schedule_event(event, stack, 1);

					net_trace("net.packet "
							"net=\"%s\" "
							"name=\"P-%lld:%d\" "
							"state=\"%s:%s:VC_arbitration_fail\" "
							"stg=\"VCA\"\n",
							net->name, pkt->msg->id,
							pkt->session_id,
							node->name,
							buffer->name);
					return;
				}
			}

			/* If destination input buffer is busy, wait */
			assert(buffer == link->src_buffer);
			input_buffer = link->dst_buffer;
			if (input_buffer->write_busy >= cycle)
			{
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"input buffer busy\"\n",
						net->name, pkt->msg->id,
						pkt->session_id);
				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:Dest_buffer_busy\" "
						"stg=\"DBB\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				esim_schedule_event(event, stack,
						input_buffer->write_busy - cycle + 1);
				return;
			}

			/* If destination input buffer is full, wait */
			if (pkt->size > input_buffer->size)
				fatal("%s: packet does not fit in buffer.\n%s",
						net->name, net_err_large_message);
			if (input_buffer->count + pkt->size >
			input_buffer->size)
			{
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"input buffer full\"\n",
						net->name, pkt->msg->id, pkt->session_id);
				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:Dest_buffer_full\" "
						"stg=\"DBF\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				net_buffer_wait(input_buffer, event, stack);
				return;
			}

			/* Calculate latency and occupy resources */
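			/* This is an integer ceiling division, ceil(size / bandwidth):
			 * e.g. a 9-byte packet on a 4-byte/cycle link takes 3 cycles. */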
			lat = (pkt->size - 1) / link->bandwidth + 1;
			assert(lat > 0);
			buffer->read_busy = cycle + lat - 1;
			link->busy = cycle + lat - 1;
			input_buffer->write_busy = cycle + lat - 1;

			/* Transfer message to next input buffer */
			assert(pkt->busy < cycle);
			net_buffer_extract(buffer, pkt);
			net_buffer_insert(input_buffer, pkt);
			pkt->node = input_buffer->node;
			pkt->buffer = input_buffer;
			pkt->busy = cycle + lat - 1;

			/* Stats */
			link->busy_cycles += lat;
			link->transferred_bytes += pkt->size;
			link->transferred_msgs++;

			net->topology_util_bw += pkt->size;

			node->bytes_sent += pkt->size;
			node->msgs_sent++;
			input_buffer->node->bytes_received += pkt->size;
			input_buffer->node->msgs_received++;
			net_trace("net.link_transfer net=\"%s\" link=\"%s\" "
					"transB=%lld last_size=%d busy=%lld\n",
					net->name, link->name,
					link->transferred_bytes,
					pkt->size, link->busy);
		}
		else if (buffer->kind == net_buffer_bus)
		{
			struct net_bus_t *bus, *updated_bus;
			struct net_node_t *bus_node;

			assert(!buffer->link);
			assert(buffer->bus);
			bus = buffer->bus;
			bus_node = bus->node;

			/* Before initiating the bus transfer, determine the next input
			 * buffer, since it cannot be deduced from the output buffer
			 * alone. */
			int input_buffer_detection = 0;
			struct net_routing_table_entry_t *entry;

			entry = net_routing_table_lookup(routing_table,
					pkt->node, pkt->msg->dst_node);

			for (int i = 0; i < list_count(bus_node->dst_buffer_list); i++)
			{
				input_buffer = list_get(bus_node->dst_buffer_list, i);
				if (entry->next_node == input_buffer->node)
				{
					input_buffer_detection = 1;
					break;
				}
			}
			if (input_buffer_detection == 0)
				fatal("%s: no appropriate input buffer found for the "
						"route between %s and %s\n", net->name,
						pkt->node->name, entry->next_node->name);

			/* 1. Check whether the destination buffer is busy */
			if (input_buffer->write_busy >= cycle)
			{
				esim_schedule_event(event, stack,
						input_buffer->write_busy - cycle + 1);
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"input busy\"\n",
						net->name,
						pkt->msg->id,
						pkt->session_id);

				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:Dest_buffer_busy\" "
						"stg=\"DBB\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				return;
			}

			/* 2. Check whether the destination buffer is full */
			if (pkt->size > input_buffer->size)
				fatal("%s: packet does not fit in buffer.\n%s",
						net->name, net_err_large_message);

			if (input_buffer->count + pkt->size > input_buffer->size)
			{
				net_buffer_wait(input_buffer, event, stack);
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"input full\"\n",
						net->name, pkt->msg->id,
						pkt->session_id);

				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:Dest_buffer_full\" "
						"stg=\"DBF\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				return;
			}

			/* 3. Check whether any bus lane is available; pick the one
			 * that becomes free the soonest */
			updated_bus = net_bus_arbitration(bus_node, buffer);
			if (updated_bus == NULL)
			{
				esim_schedule_event(event, stack, 1);
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"bus arbiter\"\n",
						net->name, pkt->msg->id,
						pkt->session_id);
				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:BUS_arbit_fail\" "
						"stg=\"BA\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				return;
			}

			/* 4. Assign the bus to the buffer and update the necessary
			 * data. Until this point the bus lane was not assigned to
			 * anything, so it could still be granted to other buffers.
			 * Once this buffer wins the bus lane, the appropriate fields
			 * are updated: the bus lane's in/out buffers and busy time,
			 * as well as the buffer data itself. */
			assert(updated_bus);
			buffer->bus = updated_bus;
			input_buffer->bus = updated_bus;
			bus = buffer->bus;
			assert(bus);


			/* Calculate latency and occupy resources */
			/* Wire delay is introduced when the packet is on transit */
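			/* Total latency is the fixed bus delay plus the same ceiling
			 * division used for links, ceil(size / bandwidth) cycles. */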
			lat = bus->fix_delay + ((pkt->size - 1) / bus->bandwidth + 1) ;
			assert(lat > 0);
			buffer->read_busy = cycle + lat - 1;
			bus->busy = cycle + lat - 1;
			input_buffer->write_busy = cycle + lat - 1 ;

			/* Transfer message to next input buffer */
			assert(pkt->busy < cycle);
			net_buffer_extract(buffer, pkt);
			net_buffer_insert(input_buffer, pkt);
			pkt->node = input_buffer->node;
			pkt->buffer = input_buffer;
			pkt->busy = cycle + lat - 1;

			/* Stats */
			bus->busy_cycles += lat;
			bus->transferred_bytes += pkt->size;
			bus->transferred_msgs++;

			net->topology_util_bw += pkt->size;

			node->bytes_sent += pkt->size;
			node->msgs_sent++;
			input_buffer->node->bytes_received += pkt->size;
			input_buffer->node->msgs_received++;
			net_trace("net.bus_transfer net=\"%s\" node=\"%s\" "
					"lane_index=%d transB=%lld last_size=%d busy=%lld\n",
					net->name, bus->node->name, bus->index,
					bus->transferred_bytes, pkt->size, bus->busy);
		}

		else if (buffer->kind == net_buffer_photonic)
		{
			struct net_bus_t *bus, *updated_bus;
			struct net_node_t *bus_node;

			assert(!buffer->link);
			assert(buffer->bus);
			bus = buffer->bus;
			bus_node = bus->node;

			/* Before steps 1 and 2, determine the next input buffer,
			 * since it cannot be deduced from the output buffer alone. */
			int input_buffer_detection = 0;
			struct net_routing_table_entry_t *entry;

			entry = net_routing_table_lookup(routing_table,
					pkt->node, pkt->msg->dst_node);

			for (int i = 0; i < list_count(bus_node->dst_buffer_list); i++)
			{
				input_buffer = list_get(bus_node->dst_buffer_list, i);
				if (entry->next_node == input_buffer->node)
				{
					input_buffer_detection = 1;
					break;
				}
			}
			if (input_buffer_detection == 0)
				fatal("%s: no appropriate input buffer found for the "
						"route between %s and %s\n", net->name,
						pkt->node->name, entry->next_node->name);

			/* 1. Check whether the destination buffer is busy */
			if (input_buffer->write_busy > cycle)
			{
				esim_schedule_event(event, stack,
						input_buffer->write_busy - cycle + 1);
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"input busy\"\n",
						net->name,
						pkt->msg->id,
						pkt->session_id);

				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:Dest_buffer_busy\" "
						"stg=\"DBB\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				return;
			}

			/* 2. Check whether the destination buffer is full */
			if (pkt->size > input_buffer->size)
				fatal("%s: packet does not fit in buffer.\n%s",
						net->name, net_err_large_message);

			if (input_buffer->count + pkt->size > input_buffer->size)
			{
				net_buffer_wait(input_buffer, event, stack);
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"input full\"\n",
						net->name, pkt->msg->id,
						pkt->session_id);

				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:Dest_buffer_full\" "
						"stg=\"DBF\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				return;
			}


			/* 3. Check whether any photonic lane is available; pick the
			 * one that becomes free the soonest */
			updated_bus = net_photo_link_arbitration(bus_node, buffer);
			if (updated_bus == NULL)
			{
				esim_schedule_event(event, stack, 1);
				net_debug("msg "
						"a=\"stall\" "
						"net=\"%s\" "
						"msg-->pkt=%lld:%d "
						"why=\"bus arbiter\"\n",
						net->name, pkt->msg->id,
						pkt->session_id);

				net_trace("net.packet "
						"net=\"%s\" "
						"name=\"P-%lld:%d\" "
						"state=\"%s:%s:photonic_arbitration\" "
						"stg=\"BA\"\n",
						net->name, pkt->msg->id,
						pkt->session_id,
						node->name,
						buffer->name);

				return;
			}

			/* 4. Assign the bus to the buffer and update the necessary
			 * data. Until this point the bus lane was not assigned to
			 * anything, so it could still be granted to other buffers.
			 * Once this buffer wins the bus lane, the appropriate fields
			 * are updated: the bus lane's in/out buffers and busy time,
			 * as well as the buffer data itself. */
			assert(updated_bus);
			buffer->bus = updated_bus;
			input_buffer->bus = updated_bus;
			bus = buffer->bus;
			assert(bus);

			/* Calculate latency and occupy resources */
			lat = (pkt->size - 1) / bus->bandwidth + 1;
			assert(lat > 0);
			buffer->read_busy = cycle + lat - 1;
			bus->busy = cycle + lat - 1;
			input_buffer->write_busy = cycle + lat - 1;

			/* Transfer message to next input buffer */
			assert(pkt->busy < cycle);
			net_buffer_extract(buffer, pkt);
			net_buffer_insert(input_buffer, pkt);
			pkt->node = input_buffer->node;
			pkt->buffer = input_buffer;
			pkt->busy = cycle + lat - 1;

			/* Stats */
			bus->busy_cycles += lat;
			bus->transferred_bytes += pkt->size;
			bus->transferred_msgs++;

			net->topology_util_bw += pkt->size;

			node->bytes_sent += pkt->size;
			node->msgs_sent++;
			input_buffer->node->bytes_received += pkt->size;
			input_buffer->node->msgs_received++;
			net_trace("net.photonic_transfer net=\"%s\" node=\"%s\" "
					"lane_index=%d transB=%lld last_size=%d busy=%lld\n",
					net->name, bus->node->name, bus->index,
					bus->transferred_bytes,pkt->size, bus->busy);
			net_debug("msg "
					"a=\"success photonic transmission\" "
					"net=\"%s\" "
					"msg-->pkt=%lld:%d "
					"through = \" %d\"\n",
					net->name, pkt->msg->id,
					pkt->session_id, updated_bus->index);
		}

		/* Schedule next event */
		esim_schedule_event(EV_NET_INPUT_BUFFER, stack, lat);
	}

	else if (event == EV_NET_INPUT_BUFFER)
	{
		struct net_routing_table_entry_t *entry;
		struct net_buffer_t *output_buffer;

		int lat;

		/* Debug */
		net_debug("msg "
				"a=\"ibuf\" "
				"net=\"%s\" "
				"msg-->pkt=%lld:%d "
				"node=\"%s\" "
				"buf=\"%s\"\n",
				net->name,
				pkt->msg->id,
				pkt->session_id,
				node->name,
				buffer->name);

		/* If message is not at buffer head, process later */
		assert(list_count(buffer->msg_list));
		if (list_get(buffer->msg_list, 0) != pkt)
		{
			net_debug("msg "
					"a=\"stall\" "
					"net=\"%s\" "
					"msg-->pkt=%lld:%d "
					"why=\"not-head\"\n",
					net->name, pkt->msg->id,
					pkt->session_id);
			net_buffer_wait(buffer, event, stack);
			return;
		}

		/* If this is the destination node, finish */
		if (node == pkt->msg->dst_node)
		{
			esim_schedule_event(EV_NET_RECEIVE, stack, 0);
			return;
		}

		/* If source input buffer is busy, wait */
		if (buffer->read_busy >= cycle)
		{
			net_debug("pkt "
					"a=\"stall\" "
					"net=\"%s\" "
					"msg-->pkt=%lld:%d "
					"why=\"src-busy\"\n",
					net->name,
					pkt->msg->id,
					pkt->session_id);

			esim_schedule_event(event, stack,
					buffer->read_busy - cycle + 1);
			return;
		}

		/* Get output buffer */
		entry = net_routing_table_lookup(routing_table, node,
				dst_node);
		output_buffer = entry->output_buffer;
		if (!output_buffer)
			fatal("%s: no route from %s to %s.\n%s", net->name,
					node->name, dst_node->name, net_err_no_route);

		/* If destination output buffer is busy, wait */
		if (output_buffer->write_busy >= cycle)
		{
			net_debug("pkt "
					"a=\"stall\" "
					"net=\"%s\" "
					"msg-->pkt=%lld:%d "
					"why=\"dst-busy\"\n",
					net->name,
					pkt->msg->id,
					pkt->session_id);
			net_trace("net.packet "
					"net=\"%s\" "
					"name=\"P-%lld:%d\" "
					"state=\"%s:%s:Dest_buffer_busy\" "
					"stg=\"DBB\"\n",
					net->name, pkt->msg->id,
					pkt->session_id,
					node->name,
					buffer->name);

			esim_schedule_event(event, stack,
					output_buffer->write_busy - cycle + 1);
			return;
		}

		/* If destination output buffer is full, wait */
		if (pkt->size > output_buffer->size)
			fatal("%s: packet does not fit in buffer.\n%s",
					net->name, net_err_large_message);


		if (output_buffer->count + pkt->size > output_buffer->size)
		{
			net_debug("pkt "
					"a=\"stall\" "
					"net=\"%s\" "
					"msg-->pkt=%lld:%d "
					"why=\"dst-full\"\n",
					net->name,
					pkt->msg->id,
					pkt->session_id);

			net_trace("net.packet "
					"net=\"%s\" "
					"name=\"P-%lld:%d\" "
					"state=\"%s:%s:Dest_buffer_full\" "
					"stg=\"DBF\"\n",
					net->name, pkt->msg->id,
					pkt->session_id,
					node->name,
					buffer->name);


			net_buffer_wait(output_buffer, event, stack);
			return;
		}

		/* If scheduler says that it is not our turn, try later */
		if (net_node_schedule(node, output_buffer) != buffer)
		{
			net_debug("pkt "
					"a=\"stall\" "
					"net=\"%s\" "
					"msg-->pkt=%lld:%d "
					"why=\"sched\"\n",
					net->name,
					pkt->msg->id,
					pkt->session_id);

			net_trace("net.packet "
					"net=\"%s\" "
					"name=\"P-%lld:%d\" "
					"state=\"%s:%s:switch_arbit_fail\" "
					"stg=\"SA\"\n",
					net->name, pkt->msg->id,
					pkt->session_id,
					node->name,
					buffer->name);

			esim_schedule_event(event, stack, 1);
			return;
		}

		/* Calculate latency and occupy resources */
		assert(node->kind != net_node_end);
		assert(node->bandwidth > 0);
		lat = (pkt->size - 1) / node->bandwidth + 1;
		assert(lat > 0);
		buffer->read_busy = cycle + lat - 1;
		output_buffer->write_busy = cycle + lat - 1;

		/* Transfer message to next output buffer */
		assert(pkt->busy < cycle);
		net_buffer_extract(buffer, pkt);
		net_buffer_insert(output_buffer, pkt);
		pkt->buffer = output_buffer;
		pkt->busy = cycle + lat - 1;

		/* Schedule next event */
		esim_schedule_event(EV_NET_OUTPUT_BUFFER, stack, lat);
	}

	else if (event == EV_NET_RECEIVE)
	{
		assert (pkt);
		struct net_msg_t *msg = pkt->msg;
		/* Debug */
		net_debug("pkt "
				"a=\"receive\" "
				"net=\"%s\" "
				"msg-->pkt=%lld:%d "
				"node=\"%s\"\n",
				net->name,
				pkt->msg->id,
				pkt->session_id,
				dst_node->name);

		if (net_depacketizer(net, node, pkt) == 1)
		{
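			/* By its use here, a return value of 1 presumably means the
			 * last packet of the message has arrived, so the reassembled
			 * message can now be delivered or returned to the caller. */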
			if (pkt->msg->packet_list_count > 1)
				net_trace("net.msg net=\"%s\" name=\"M-%lld\" "
						"state=\"%s:depacketize\"\n",
						net->name, msg->id, node->name);

			if (stack->ret_event == ESIM_EV_NONE)
			{
				assert (msg);
				net_receive(net, node, msg);

			}
			/* Finish */
			net_stack_return(stack);
		}
		else
			/* Freeing packet stack, not the message */
			free(stack);
	}

	else
	{
		panic("%s: unknown event", __FUNCTION__);
	}
}
Exemplo n.º 28
0
/* Event handler for EV_MEM_SYSTEM_COMMAND.
 * The event data is a string of type 'char *' that needs to be deallocated
 * after processing this event. */
void mem_system_command_handler(int event, void *data)
{
	struct list_t *token_list;

	char *command_line = data;
	char command[MAX_STRING_SIZE];

	/* Get command */
	str_token(command, sizeof command, command_line, 0, " ");
	if (!command[0])
		fatal("%s: invalid command syntax.\n\t> %s",
			__FUNCTION__, command_line);

	/* Commands that need to be processed at the end of the simulation
	 * are ignored here. These are commands prefixed with 'Check'. */
	if (!strncasecmp(command, "Check", 5))
	{
		esim_schedule_end_event(EV_MEM_SYSTEM_END_COMMAND, data);
		return;
	}

	/* Split command in tokens, skip command */
	token_list = str_token_list_create(command_line, " ");
	assert(list_count(token_list));
	str_token_list_shift(token_list);

	/* Command 'SetBlock' */
	if (!strcasecmp(command, "SetBlock"))
	{
		struct mod_t *mod;

		int set;
		int way;
		int tag;

		int set_check;
		int tag_check;

		int state;

		mod = mem_system_command_get_mod(token_list, command_line);
		mem_system_command_get_set_way(token_list, command_line, mod, &set, &way);
		tag = mem_system_command_get_hex(token_list, command_line);
		state = mem_system_command_get_state(token_list, command_line);
		mem_system_command_end(token_list, command_line);

		/* Check that module serves address */
		if (!mod_serves_address(mod, tag))
			fatal("%s: %s: module does not serve address 0x%x.\n\t> %s",
				__FUNCTION__, mod->name, tag, command_line);

		/* Check that tag goes to specified set */
		mod_find_block(mod, tag, &set_check, NULL, &tag_check, NULL);
		if (set != set_check)
			fatal("%s: %s: tag 0x%x belongs to set %d.\n\t> %s",
				__FUNCTION__, mod->name, tag, set_check, command_line);
		if (tag != tag_check)
			fatal("%s: %s: tag should be multiple of block size.\n\t> %s",
				__FUNCTION__, mod->name, command_line);

		/* Set tag */
		cache_set_block(mod->cache, set, way, tag, state);
	}

	/* Command 'SetOwner' */
	else if (!strcasecmp(command, "SetOwner"))
	{
		struct mod_t *mod;
		struct mod_t *owner;

		int set;
		int way;

		int sub_block;
		int owner_index;

		/* Get fields */
		mod = mem_system_command_get_mod(token_list, command_line);
		mem_system_command_get_set_way(token_list, command_line, mod, &set, &way);
		sub_block = mem_system_command_get_sub_block(token_list, command_line, mod, set, way);
		owner = mem_system_command_get_mod(token_list, command_line);
		mem_system_command_end(token_list, command_line);

		/* Check that owner is an immediate higher-level module */
		if (owner)
		{
			if (owner->low_net != mod->high_net || !owner->low_net)
				fatal("%s: %s is not a higher-level module of %s.\n\t> %s",
					__FUNCTION__, owner->name, mod->name, command_line);
		}

		/* Set owner */
		owner_index = owner ? owner->low_net_node->index : -1;
		dir_entry_set_owner(mod->dir, set, way, sub_block, owner_index);
	}

	/* Command 'SetSharers' */
	else if (!strcasecmp(command, "SetSharers"))
	{
		struct mod_t *mod;
		struct mod_t *sharer;

		int set;
		int way;

		int sub_block;

		/* Get first fields */
		mod = mem_system_command_get_mod(token_list, command_line);
		mem_system_command_get_set_way(token_list, command_line, mod, &set, &way);
		sub_block = mem_system_command_get_sub_block(token_list, command_line, mod, set, way);

		/* Get sharers */
		mem_system_command_expect(token_list, command_line);
		dir_entry_clear_all_sharers(mod->dir, set, way, sub_block);
		while (list_count(token_list))
		{
			/* Get sharer */
			sharer = mem_system_command_get_mod(token_list, command_line);
			if (!sharer)
				continue;

			/* Check that sharer is an immediate higher-level module */
			if (sharer->low_net != mod->high_net || !sharer->low_net)
				fatal("%s: %s is not a higher-level module of %s.\n\t> %s",
					__FUNCTION__, sharer->name, mod->name, command_line);

			/* Set sharer */
			dir_entry_set_sharer(mod->dir, set, way, sub_block, sharer->low_net_node->index);
		}
	}

	/* Command 'Access' */
	else if (!strcasecmp(command, "Access"))
	{
		struct mod_t *mod;
		enum mod_access_kind_t access_kind;
		unsigned int addr;
		long long cycle;

		/* Read fields */
		mod = mem_system_command_get_mod(token_list, command_line);
		cycle = mem_system_command_get_cycle(token_list, command_line);
		access_kind = mem_system_command_get_mod_access(token_list, command_line);
		addr = mem_system_command_get_hex(token_list, command_line);

		/* If command is scheduled for later, exit */
		if (cycle > esim_cycle)
		{
			str_token_list_free(token_list);
			esim_schedule_event(EV_MEM_SYSTEM_COMMAND, data, cycle - esim_cycle);
			return;
		}

		/* Access module */
		mod_access(mod, access_kind, addr, NULL, NULL, NULL, NULL);
	}

	/* Command not supported */
	else
		fatal("%s: %s: invalid command.\n\t> %s",
			__FUNCTION__, command, command_line);

	/* Free command */
	free(command_line);
	str_token_list_free(token_list);
}
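
/* Minimal sketch (not from the source) of how a command line could be handed
 * to the handler above through the event engine. The exact token syntax of
 * each command is defined by the mem_system_command_get_* helpers, so the
 * example string shown in the usage note is hypothetical. The handler frees
 * the string, hence the strdup() (from <string.h>). */
static void my_schedule_command(const char *line)
{
	char *command_line = strdup(line);

	esim_schedule_event(EV_MEM_SYSTEM_COMMAND, command_line, 0);
}

/* Hypothetical use:
 *   my_schedule_command("Access mod-l1-0 100 LOAD 0x1000"); */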
Exemplo n.º 29
0
void moesi_handler_write_request(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;
	uint32_t dir_entry_tag, z;


	if (event == EV_MOESI_WRITE_REQUEST)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s write request\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Default return values */
		ret->err = 0;

		/* Send request to target */
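		/* The request itself is modelled as a fixed 8-byte control message
		 * (the literal below); replies later add the block size when data
		 * is carried back (see the UPDOWN_FINISH and DOWNUP cases). */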
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? ccache->loid : 0;
		dest = ccache->next == target ? 0 : target->loid;
		net_send_ev(net, src, dest, 8, EV_MOESI_WRITE_REQUEST_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_RECEIVE)
	{
		cache_debug("  %lld %lld 0x%x %s write request receive\n", CYCLE, ID,
			stack->addr, target->name);
		
		/* Find and lock */
		newstack = moesi_stack_create(stack->id, target, stack->addr,
			EV_MOESI_WRITE_REQUEST_ACTION, stack);
		newstack->blocking = target->next == ccache;
		newstack->read = 0;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_ACTION)
	{
		cache_debug("  %lld %lld 0x%x %s write request action\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check lock error. If write request is down-up, there should
		 * have been no error. */
		if (stack->err) {
			assert(ccache->next == target);
			ret->err = 1;
			stack->response = 8;
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Invalidate the rest of upper level sharers */
		newstack = moesi_stack_create(stack->id, target, 0,
			EV_MOESI_WRITE_REQUEST_EXCLUSIVE, stack);
		newstack->except = ccache;
		newstack->set = stack->set;
		newstack->way = stack->way;
		esim_schedule_event(EV_MOESI_INVALIDATE, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_EXCLUSIVE)
	{
		cache_debug("  %lld %lld 0x%x %s write request exclusive\n", CYCLE, ID,
			stack->tag, target->name);
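		/* ccache->next == target means the request is travelling toward
		 * memory ("updown"); otherwise target sits above ccache and this is
		 * a down-up request that will invalidate the upper-level copy. */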

		if (ccache->next == target)
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_UPDOWN, stack, 0);
		else
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_DOWNUP, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_UPDOWN)
	{
		cache_debug("  %lld %lld 0x%x %s write request updown\n", CYCLE, ID,
			stack->tag, target->name);

		/* status = M/E */
		if (stack->status == moesi_status_modified ||
			stack->status == moesi_status_exclusive) {
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH, stack, 0);
			return;
		}

		/* status = O/S/I */
		newstack = moesi_stack_create(stack->id, target, stack->tag,
			EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH, stack);
		newstack->target = target->next;
		esim_schedule_event(EV_MOESI_WRITE_REQUEST, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s write request updown finish\n", CYCLE, ID,
			stack->tag, target->name);

		/* Error in write request to next cache level */
		if (stack->err) {
			ret->err = 1;
			stack->response = 8;
			dir_lock_unlock(stack->dir_lock);
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Check that addr is a multiple of ccache.bsize.
		 * Set ccache as sharer and owner. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			assert(stack->addr % ccache->bsize == 0);
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			dir_entry_set_sharer(dir, dir_entry, ccache->loid);
			dir_entry->owner = ccache->loid;
			assert(dir_entry->sharers == 1);
		}

		/* Update LRU, set status: M->M, O/E/S/I->E */
		if (target->cache) {
			cache_access_block(target->cache, stack->set, stack->way);
			if (stack->status != moesi_status_modified)
				cache_set_block(target->cache, stack->set, stack->way,
					stack->tag, moesi_status_exclusive);
		}

		/* Unlock, response is the data of the size of the requester's block. */
		dir_lock_unlock(stack->dir_lock);
		stack->response = ccache->bsize + 8;
		esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_DOWNUP)
	{
		cache_debug("  %lld %lld 0x%x %s write request downup\n", CYCLE, ID,
			stack->tag, target->name);

		/* Compute response, set status to I, unlock */
		assert(stack->status != moesi_status_invalid);
		assert(!dir_entry_group_shared_or_owned(target->dir, stack->set, stack->way));
		stack->response = stack->status == moesi_status_modified || stack->status
			== moesi_status_owned ? target->bsize + 8 : 8;
		cache_set_block(target->cache, stack->set, stack->way, 0, moesi_status_invalid);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_REPLY)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s write request reply\n", CYCLE, ID,
			stack->tag, target->name);

		assert(stack->response);
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? 0 : target->loid;
		dest = ccache->next == target ? ccache->loid : 0;
		net_send_ev(net, src, dest, stack->response,
			EV_MOESI_WRITE_REQUEST_FINISH, stack);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s write request finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		moesi_stack_return(stack);
		return;
	}

	abort();
}