/* Esempio n. 1 (label: 0) */
/* Event handler for the MOESI read-request state machine.
 *
 * 'data' is a struct moesi_stack_t carrying the request state:
 *   stack->ccache   - requesting cache ('ccache' below)
 *   stack->target   - cache the request is addressed to ('target')
 *   stack->retstack - parent stack receiving the return values
 *                     'err' and 'shared' when this request completes.
 *
 * A request is "up-down" when it travels toward lower cache levels
 * (ccache->next == target) and "down-up" when it travels toward upper
 * levels (target->next == ccache); several branches below distinguish
 * the two cases with exactly that test. */
void moesi_handler_read_request(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	uint32_t dir_entry_tag, z;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;

	if (event == EV_MOESI_READ_REQUEST)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s read request\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Default return values */
		ret->shared = 0;
		ret->err = 0;

		/* Send the request message to the target cache. An up-down
		 * request uses ccache's low network, a down-up request its
		 * high network. The message is 8 bytes -- presumably the
		 * request-header size, since no block data travels yet
		 * (TODO confirm against the network model). */
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? ccache->loid : 0;
		dest = ccache->next == target ? 0 : target->loid;
		net_send_ev(net, src, dest, 8, EV_MOESI_READ_REQUEST_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_RECEIVE)
	{
		cache_debug("  %lld %lld 0x%x %s read request receive\n", CYCLE, ID,
			stack->addr, target->name);
		
		/* Find the block in 'target' and lock its directory entry.
		 * A down-up request (target->next == ccache) locks in
		 * blocking mode; control resumes at READ_REQUEST_ACTION. */
		newstack = moesi_stack_create(stack->id, target, stack->addr,
			EV_MOESI_READ_REQUEST_ACTION, stack);
		newstack->blocking = target->next == ccache;
		newstack->read = 1;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_ACTION)
	{
		cache_debug("  %lld %lld 0x%x %s read request action\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check block locking error. If read request is down-up, there should not
		 * have been any error while locking. */
		if (stack->err) {
			assert(ccache->next == target);
			ret->err = 1;
			/* Error reply carries no data: header-only (8 bytes). */
			stack->response = 8;
			esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
			return;
		}
		/* Dispatch to the up-down or down-up continuation. */
		esim_schedule_event(ccache->next == target ? EV_MOESI_READ_REQUEST_UPDOWN :
			EV_MOESI_READ_REQUEST_DOWNUP, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN)
	{
		struct ccache_t *owner;

		cache_debug("  %lld %lld 0x%x %s read request updown\n", CYCLE, ID,
			stack->tag, target->name);
		/* 'pending' counts this event itself plus one per forwarded
		 * read request; READ_REQUEST_UPDOWN_FINISH only proceeds once
		 * the count drops to zero. */
		stack->pending = 1;
		
		if (stack->status) {
			
			/* Status = M/O/E/S (hit in target).
			 * Check: addr multiple of requester's bsize
			 * Check: no subblock requested by ccache is already owned by ccache */
			assert(stack->addr % ccache->bsize == 0);
			dir = ccache_get_dir(target, stack->tag);
			for (z = 0; z < dir->zsize; z++) {
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
					continue;
				dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
				assert(dir_entry->owner != ccache->loid);
			}

			/* Send read request to owners other than ccache for all subblocks.
			 * Only one request per owner: a subblock is skipped unless it
			 * is the first subblock of the owner's block. */
			for (z = 0; z < dir->zsize; z++) {
				dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (!dir_entry->owner) /* no owner */
					continue;
				if (dir_entry->owner == ccache->loid) /* owner is ccache */
					continue;
				owner = net_get_node_data(target->hinet, dir_entry->owner);
				if (dir_entry_tag % owner->bsize) /* not the first owner subblock */
					continue;

				/* Send read request */
				stack->pending++;
				newstack = moesi_stack_create(stack->id, target, dir_entry_tag,
					EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack);
				newstack->target = owner;
				esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
			}
			/* Consume this event's own pending unit. */
			esim_schedule_event(EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack, 0);

		} else {
			
			/* Status = I (miss in target): the directory must hold no
			 * sharer or owner; forward the read request to the next
			 * lower level. */
			assert(!dir_entry_group_shared_or_owned(target->dir,
				stack->set, stack->way));
			newstack = moesi_stack_create(stack->id, target, stack->tag,
				EV_MOESI_READ_REQUEST_UPDOWN_MISS, stack);
			newstack->target = target->next;
			esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
		}
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN_MISS)
	{
		cache_debug("  %lld %lld 0x%x %s read request updown miss\n", CYCLE, ID,
			stack->tag, target->name);
		
		/* Error in the lower-level read request: unlock and propagate. */
		if (stack->err) {
			dir_lock_unlock(stack->dir_lock);
			ret->err = 1;
			stack->response = 8;
			esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Set block state to excl/shared depending on the return value 'shared'
		 * that comes from a read request into the next cache level.
		 * Also set the tag of the block. */
		cache_set_block(target->cache, stack->set, stack->way, stack->tag,
			stack->shared ? moesi_status_shared : moesi_status_exclusive);
		esim_schedule_event(EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN_FINISH)
	{
		int shared;

		/* Ignore while pending requests remain (see counter set up in
		 * READ_REQUEST_UPDOWN). */
		assert(stack->pending > 0);
		stack->pending--;
		if (stack->pending)
			return;
		cache_debug("  %lld %lld 0x%x %s read request updown finish\n", CYCLE, ID,
			stack->tag, target->name);

		/* Set owner to 0 for all directory entries not owned by ccache. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			if (dir_entry->owner != ccache->loid)
				dir_entry->owner = 0;
		}

		/* For each subblock requested by ccache, set ccache as sharer, and
		 * check whether there is other cache sharing it. */
		shared = 0;
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			dir_entry_set_sharer(dir, dir_entry, ccache->loid);
			if (dir_entry->sharers > 1)
				shared = 1;
		}

		/* If no subblock requested by ccache is shared by other cache, set ccache
		 * as owner of all of them. Otherwise, notify requester that the block is
		 * shared by setting the 'shared' return value to true. */
		ret->shared = shared;
		if (!shared) {
			for (z = 0; z < dir->zsize; z++) {
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
					continue;
				dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
				dir_entry->owner = ccache->loid;
			}
		}

		/* Respond with data (requester's block size + 8-byte header),
		 * update LRU, unlock the directory entry. */
		stack->response = ccache->bsize + 8;
		if (target->cache)
			cache_access_block(target->cache, stack->set, stack->way);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_DOWNUP)
	{
		struct ccache_t *owner;

		cache_debug("  %lld %lld 0x%x %s read request downup\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check: status must not be invalid.
		 * By default, only one pending request.
		 * Response size depends on status: E/S reply without data
		 * (8 bytes), otherwise (M/O) with data (bsize + 8). */
		assert(stack->status != moesi_status_invalid);
		stack->pending = 1;
		stack->response = stack->status == moesi_status_exclusive ||
			stack->status == moesi_status_shared ?
			8 : target->bsize + 8;

		/* Send a read request to the owner of each subblock (one per
		 * owner: skip subblocks that are not the owner's first). */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			if (!dir_entry->owner)  /* no owner */
				continue;
			owner = net_get_node_data(target->hinet, dir_entry->owner);
			if (dir_entry_tag % owner->bsize)  /* not the first subblock */
				continue;
			stack->pending++;
			/* An owned subblock forces a reply with data. */
			stack->response = target->bsize + 8;
			newstack = moesi_stack_create(stack->id, target, dir_entry_tag,
				EV_MOESI_READ_REQUEST_DOWNUP_FINISH, stack);
			newstack->target = owner;
			esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
		}

		esim_schedule_event(EV_MOESI_READ_REQUEST_DOWNUP_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_DOWNUP_FINISH)
	{
		/* Ignore while pending requests remain. */
		assert(stack->pending > 0);
		stack->pending--;
		if (stack->pending)
			return;
		cache_debug("  %lld %lld 0x%x %s read request downup finish\n", CYCLE, ID,
			stack->tag, target->name);

		/* Set owner of subblocks to 0.
		 * NOTE(review): 'dir_entry_tag' is computed in this loop but
		 * never used -- looks like dead code; confirm before removing. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			dir_entry->owner = 0;
		}

		/* Set status to S, update LRU, unlock */
		cache_set_block(target->cache, stack->set, stack->way, stack->tag,
			moesi_status_shared);
		cache_access_block(target->cache, stack->set, stack->way);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_REPLY)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s read request reply\n", CYCLE, ID,
			stack->tag, target->name);

		/* Send the reply back over the same network the request came
		 * through, with src/dest swapped relative to EV_MOESI_READ_REQUEST. */
		assert(stack->response);
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? 0 : target->loid;
		dest = ccache->next == target ? ccache->loid : 0;
		net_send_ev(net, src, dest, stack->response,
			EV_MOESI_READ_REQUEST_FINISH, stack);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s read request finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Pop this stack and resume the parent ('retstack') event. */
		moesi_stack_return(stack);
		return;
	}

	/* Unknown event - internal inconsistency. */
	abort();
}
/* Esempio n. 2 (label: 0) */
/* Event handler for EV_MEM_SYSTEM_COMMAND.
 * The event data is a heap-allocated string ('char *') holding one
 * command line; it is freed here once the command has been processed.
 * Commands prefixed 'Check' are deferred to the end of the simulation
 * and their data is NOT freed here.
 * Supported commands: SetBlock, SetOwner, SetSharers, Access. */
void mem_system_command_handler(int event, void *data)
{
	char *line = data;
	char name[MAX_STRING_SIZE];
	struct list_t *tokens;

	/* The first space-separated token is the command name. */
	str_token(name, sizeof name, line, 0, " ");
	if (!name[0])
		fatal("%s: invalid command syntax.\n\t> %s",
			__FUNCTION__, line);

	/* 'CheckXXX' commands are verified at the end of the simulation;
	 * reschedule them as an end-of-simulation event and keep 'data'
	 * alive for that handler. */
	if (!strncasecmp(name, "Check", 5))
	{
		esim_schedule_end_event(EV_MEM_SYSTEM_END_COMMAND, data);
		return;
	}

	/* Tokenize the full line and drop the leading command name. */
	tokens = str_token_list_create(line, " ");
	assert(list_count(tokens));
	str_token_list_shift(tokens);

	if (!strcasecmp(name, "SetBlock"))
	{
		/* SetBlock <mod> <set> <way> <tag> <state>:
		 * force a cache block into a given coherence state. */
		struct mod_t *target_mod;
		int set_index;
		int way_index;
		int block_tag;
		int expected_set;
		int expected_tag;
		int block_state;

		target_mod = mem_system_command_get_mod(tokens, line);
		mem_system_command_get_set_way(tokens, line, target_mod,
			&set_index, &way_index);
		block_tag = mem_system_command_get_hex(tokens, line);
		block_state = mem_system_command_get_state(tokens, line);
		mem_system_command_end(tokens, line);

		/* The address must belong to this module's range. */
		if (!mod_serves_address(target_mod, block_tag))
			fatal("%s: %s: module does not serve address 0x%x.\n\t> %s",
				__FUNCTION__, target_mod->name, block_tag, line);

		/* The tag must map to the given set and be block-aligned. */
		mod_find_block(target_mod, block_tag, &expected_set, NULL,
			&expected_tag, NULL);
		if (set_index != expected_set)
			fatal("%s: %s: tag 0x%x belongs to set %d.\n\t> %s",
				__FUNCTION__, target_mod->name, block_tag,
				expected_set, line);
		if (block_tag != expected_tag)
			fatal("%s: %s: tag should be multiple of block size.\n\t> %s",
				__FUNCTION__, target_mod->name, line);

		/* Install tag and state. */
		cache_set_block(target_mod->cache, set_index, way_index,
			block_tag, block_state);
	}
	else if (!strcasecmp(name, "SetOwner"))
	{
		/* SetOwner <mod> <set> <way> <subblock> <owner-mod>:
		 * force the directory owner of one subblock. */
		struct mod_t *target_mod;
		struct mod_t *owner_mod;
		int set_index;
		int way_index;
		int sub_index;
		int owner_node;

		target_mod = mem_system_command_get_mod(tokens, line);
		mem_system_command_get_set_way(tokens, line, target_mod,
			&set_index, &way_index);
		sub_index = mem_system_command_get_sub_block(tokens, line,
			target_mod, set_index, way_index);
		owner_mod = mem_system_command_get_mod(tokens, line);
		mem_system_command_end(tokens, line);

		/* A non-NULL owner must sit immediately above 'target_mod'. */
		if (owner_mod)
		{
			if (owner_mod->low_net != target_mod->high_net || !owner_mod->low_net)
				fatal("%s: %s is not a higher-level module of %s.\n\t> %s",
					__FUNCTION__, owner_mod->name, target_mod->name, line);
		}

		/* NULL owner is encoded as index -1. */
		owner_node = owner_mod ? owner_mod->low_net_node->index : -1;
		dir_entry_set_owner(target_mod->dir, set_index, way_index,
			sub_index, owner_node);
	}
	else if (!strcasecmp(name, "SetSharers"))
	{
		/* SetSharers <mod> <set> <way> <subblock> <mod>...:
		 * replace the sharer list of one subblock. */
		struct mod_t *target_mod;
		struct mod_t *sharer_mod;
		int set_index;
		int way_index;
		int sub_index;

		target_mod = mem_system_command_get_mod(tokens, line);
		mem_system_command_get_set_way(tokens, line, target_mod,
			&set_index, &way_index);
		sub_index = mem_system_command_get_sub_block(tokens, line,
			target_mod, set_index, way_index);

		/* At least one sharer must follow; wipe the current list and
		 * add each listed module. */
		mem_system_command_expect(tokens, line);
		dir_entry_clear_all_sharers(target_mod->dir, set_index,
			way_index, sub_index);
		while (list_count(tokens))
		{
			sharer_mod = mem_system_command_get_mod(tokens, line);
			if (!sharer_mod)
				continue;

			/* Each sharer must sit immediately above 'target_mod'. */
			if (sharer_mod->low_net != target_mod->high_net || !sharer_mod->low_net)
				fatal("%s: %s is not a higher-level module of %s.\n\t> %s",
					__FUNCTION__, sharer_mod->name, target_mod->name, line);

			dir_entry_set_sharer(target_mod->dir, set_index, way_index,
				sub_index, sharer_mod->low_net_node->index);
		}
	}
	else if (!strcasecmp(name, "Access"))
	{
		/* Access <mod> <cycle> <kind> <addr>:
		 * launch a memory access at the given cycle. */
		struct mod_t *target_mod;
		enum mod_access_kind_t kind;
		unsigned int address;
		long long when;

		target_mod = mem_system_command_get_mod(tokens, line);
		when = mem_system_command_get_cycle(tokens, line);
		kind = mem_system_command_get_mod_access(tokens, line);
		address = mem_system_command_get_hex(tokens, line);

		/* Not due yet: requeue the raw command line for the target
		 * cycle and discard only the token list. */
		if (when > esim_cycle)
		{
			str_token_list_free(tokens);
			esim_schedule_event(EV_MEM_SYSTEM_COMMAND, data, when - esim_cycle);
			return;
		}

		mod_access(target_mod, kind, address, NULL, NULL, NULL, NULL);
	}
	else
		fatal("%s: %s: invalid command.\n\t> %s",
			__FUNCTION__, name, line);

	/* Release the token list and the command line itself. */
	str_token_list_free(tokens);
	free(line);
}
/* Esempio n. 3 (label: 0) */
/* Event handler for the MOESI write-request state machine.
 *
 * 'data' is a struct moesi_stack_t carrying the request state:
 *   stack->ccache   - requesting cache ('ccache' below)
 *   stack->target   - cache the request is addressed to ('target')
 *   stack->retstack - parent stack receiving the 'err' return value.
 *
 * A request is "up-down" when ccache->next == target and "down-up"
 * when target->next == ccache; the two paths diverge at
 * EV_MOESI_WRITE_REQUEST_EXCLUSIVE. */
void moesi_handler_write_request(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;
	uint32_t dir_entry_tag, z;


	if (event == EV_MOESI_WRITE_REQUEST)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s write request\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Default return values */
		ret->err = 0;

		/* Send the request message to the target cache. An up-down
		 * request uses ccache's low network, a down-up request its
		 * high network. The 8-byte message is presumably the request
		 * header only -- no data travels yet (TODO confirm). */
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? ccache->loid : 0;
		dest = ccache->next == target ? 0 : target->loid;
		net_send_ev(net, src, dest, 8, EV_MOESI_WRITE_REQUEST_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_RECEIVE)
	{
		cache_debug("  %lld %lld 0x%x %s write request receive\n", CYCLE, ID,
			stack->addr, target->name);
		
		/* Find the block in 'target' and lock its directory entry.
		 * A down-up request locks in blocking mode; control resumes
		 * at WRITE_REQUEST_ACTION. */
		newstack = moesi_stack_create(stack->id, target, stack->addr,
			EV_MOESI_WRITE_REQUEST_ACTION, stack);
		newstack->blocking = target->next == ccache;
		newstack->read = 0;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_ACTION)
	{
		cache_debug("  %lld %lld 0x%x %s write request action\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check lock error. If write request is down-up, there should
		 * have been no error. */
		if (stack->err) {
			assert(ccache->next == target);
			ret->err = 1;
			/* Error reply carries no data: header-only (8 bytes). */
			stack->response = 8;
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Invalidate every upper-level sharer except the requester,
		 * then continue at WRITE_REQUEST_EXCLUSIVE. */
		newstack = moesi_stack_create(stack->id, target, 0,
			EV_MOESI_WRITE_REQUEST_EXCLUSIVE, stack);
		newstack->except = ccache;
		newstack->set = stack->set;
		newstack->way = stack->way;
		esim_schedule_event(EV_MOESI_INVALIDATE, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_EXCLUSIVE)
	{
		cache_debug("  %lld %lld 0x%x %s write request exclusive\n", CYCLE, ID,
			stack->tag, target->name);

		/* Dispatch to the up-down or down-up continuation. */
		if (ccache->next == target)
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_UPDOWN, stack, 0);
		else
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_DOWNUP, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_UPDOWN)
	{
		cache_debug("  %lld %lld 0x%x %s write request updown\n", CYCLE, ID,
			stack->tag, target->name);

		/* status = M/E: target already has exclusive rights, finish
		 * without contacting the next level. */
		if (stack->status == moesi_status_modified ||
			stack->status == moesi_status_exclusive) {
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH, stack, 0);
			return;
		}

		/* status = O/S/I: forward the write request to the next lower
		 * level to gain exclusive access. */
		newstack = moesi_stack_create(stack->id, target, stack->tag,
			EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH, stack);
		newstack->target = target->next;
		esim_schedule_event(EV_MOESI_WRITE_REQUEST, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s write request updown finish\n", CYCLE, ID,
			stack->tag, target->name);

		/* Error in write request to next cache level: unlock and
		 * propagate the error with a header-only reply. */
		if (stack->err) {
			ret->err = 1;
			stack->response = 8;
			dir_lock_unlock(stack->dir_lock);
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Check that addr is a multiple of ccache.bsize.
		 * Set ccache as sharer and owner of each requested subblock;
		 * after the earlier invalidation it must be the only sharer.
		 * NOTE(review): the assert is loop-invariant and could be
		 * hoisted above the loop. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++) {
			assert(stack->addr % ccache->bsize == 0);
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->addr || dir_entry_tag >= stack->addr + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set, stack->way, z);
			dir_entry_set_sharer(dir, dir_entry, ccache->loid);
			dir_entry->owner = ccache->loid;
			assert(dir_entry->sharers == 1);
		}

		/* Update LRU, set status: M->M, O/E/S/I->E */
		if (target->cache) {
			cache_access_block(target->cache, stack->set, stack->way);
			if (stack->status != moesi_status_modified)
				cache_set_block(target->cache, stack->set, stack->way,
					stack->tag, moesi_status_exclusive);
		}

		/* Unlock, response is the data of the size of the requester's block
		 * plus the 8-byte header. */
		dir_lock_unlock(stack->dir_lock);
		stack->response = ccache->bsize + 8;
		esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_DOWNUP)
	{
		cache_debug("  %lld %lld 0x%x %s write request downup\n", CYCLE, ID,
			stack->tag, target->name);

		/* Compute response, set status to I, unlock.
		 * M/O replies carry data (bsize + 8 bytes); E/S reply with the
		 * 8-byte header only. The prior invalidation guarantees no
		 * directory entry is still shared or owned. */
		assert(stack->status != moesi_status_invalid);
		assert(!dir_entry_group_shared_or_owned(target->dir, stack->set, stack->way));
		stack->response = stack->status == moesi_status_modified || stack->status
			== moesi_status_owned ? target->bsize + 8 : 8;
		cache_set_block(target->cache, stack->set, stack->way, 0, moesi_status_invalid);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_REPLY)
	{
		struct net_t *net;
		int src, dest;
		cache_debug("  %lld %lld 0x%x %s write request reply\n", CYCLE, ID,
			stack->tag, target->name);

		/* Send the reply back over the same network the request came
		 * through, with src/dest swapped relative to EV_MOESI_WRITE_REQUEST. */
		assert(stack->response);
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? 0 : target->loid;
		dest = ccache->next == target ? ccache->loid : 0;
		net_send_ev(net, src, dest, stack->response,
			EV_MOESI_WRITE_REQUEST_FINISH, stack);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_FINISH)
	{
		cache_debug("  %lld %lld 0x%x %s write request finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Pop this stack and resume the parent ('retstack') event. */
		moesi_stack_return(stack);
		return;
	}

	/* Unknown event - internal inconsistency. */
	abort();
}