Code example #1
File: directory.c  Project: 3upperm2n/gpuSimulators
void dir_entry_set_owner(struct dir_t *dir, int x, int y, int z, int node)
{
	struct dir_entry_t *dir_entry;

	/* Set owner */
	assert(node == DIR_ENTRY_OWNER_NONE || IN_RANGE(node, 0, dir->num_nodes - 1));
	dir_entry = dir_entry_get(dir, x, y, z);
	dir_entry->owner = node;

	/* Trace */
	mem_trace("mem.set_owner dir=\"%s\" x=%d y=%d z=%d owner=%d\n",
		dir->name, x, y, z, node);
}
Code example #2
File: directory.c  Project: 3upperm2n/gpuSimulators
void dir_entry_clear_all_sharers(struct dir_t *dir, int x, int y, int z)
{
	struct dir_entry_t *dir_entry;
	int i;

	/* Clear sharers */
	dir_entry = dir_entry_get(dir, x, y, z);
	dir_entry->num_sharers = 0;
	for (i = 0; i < DIR_ENTRY_SHARERS_SIZE; i++)
		dir_entry->sharer[i] = 0;

	/* Trace */
	mem_trace("mem.clear_all_sharers dir=\"%s\" x=%d y=%d z=%d\n",
		dir->name, x, y, z);
}
Code example #3
File: directory.c  Project: 3upperm2n/gpuSimulators
void dir_entry_clear_sharer(struct dir_t *dir, int x, int y, int z, int node)
{
	struct dir_entry_t *dir_entry;

	/* Nothing if sharer is not set */
	dir_entry = dir_entry_get(dir, x, y, z);
	assert(IN_RANGE(node, 0, dir->num_nodes - 1));
	if (!(dir_entry->sharer[node / 8] & (1 << (node % 8))))
		return;

	/* Clear sharer */
	dir_entry->sharer[node / 8] &= ~(1 << (node % 8));
	assert(dir_entry->num_sharers > 0);
	dir_entry->num_sharers--;

	/* Trace */
	mem_trace("mem.clear_sharer dir=\"%s\" x=%d y=%d z=%d sharer=%d\n",
		dir->name, x, y, z, node);
}
Code example #4
File: directory.c  Project: 3upperm2n/gpuSimulators
void dir_entry_set_sharer(struct dir_t *dir, int x, int y, int z, int node)
{
	struct dir_entry_t *dir_entry;

	/* Nothing if sharer was already set */
	assert(IN_RANGE(node, 0, dir->num_nodes - 1));
	dir_entry = dir_entry_get(dir, x, y, z);
	if (dir_entry->sharer[node / 8] & (1 << (node % 8)))
		return;

	/* Set sharer */
	dir_entry->sharer[node / 8] |= 1 << (node % 8);
	dir_entry->num_sharers++;
	assert(dir_entry->num_sharers <= dir->num_nodes);

	/* Trace */
	mem_trace("mem.set_sharer dir=\"%s\" x=%d y=%d z=%d sharer=%d\n",
		dir->name, x, y, z, node);
}
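
The sharer bookkeeping in examples #3 and #4 is a plain byte-array bitmap: node / 8 selects the byte, 1 << (node % 8) selects the bit inside it, and num_sharers caches the population count. Below is a minimal self-contained sketch of the same set/clear/test logic; the sharer_bitmap_t type, the NUM_NODES constant and main() are illustrative stand-ins, not the project's definitions (the real code uses dir_entry_t and DIR_ENTRY_SHARERS_SIZE).

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define NUM_NODES 16                                /* illustrative node count */
#define SHARERS_SIZE ((NUM_NODES + 7) / 8)          /* one bit per node */

struct sharer_bitmap_t
{
	unsigned char sharer[SHARERS_SIZE];
	int num_sharers;                            /* cached count of set bits */
};

static int bitmap_is_set(struct sharer_bitmap_t *b, int node)
{
	return (b->sharer[node / 8] >> (node % 8)) & 1;
}

static void bitmap_set(struct sharer_bitmap_t *b, int node)
{
	if (bitmap_is_set(b, node))
		return;                             /* already a sharer, keep the count consistent */
	b->sharer[node / 8] |= 1 << (node % 8);
	b->num_sharers++;
}

static void bitmap_clear(struct sharer_bitmap_t *b, int node)
{
	if (!bitmap_is_set(b, node))
		return;                             /* not a sharer, nothing to do */
	b->sharer[node / 8] &= ~(1 << (node % 8));
	assert(b->num_sharers > 0);
	b->num_sharers--;
}

int main(void)
{
	struct sharer_bitmap_t b;

	memset(&b, 0, sizeof b);
	bitmap_set(&b, 3);
	bitmap_set(&b, 3);                          /* second set is a no-op */
	bitmap_set(&b, 10);
	bitmap_clear(&b, 3);
	printf("num_sharers = %d\n", b.num_sharers);  /* prints 1 */
	return 0;
}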
Code example #5
File: directory.c  Project: 3upperm2n/gpuSimulators
void dir_entry_unlock(struct dir_t *dir, int x, int y)
{
	struct dir_lock_t *dir_lock;
	struct mod_stack_t *stack;
	FILE *f;

	/* Get lock */
	assert(x < dir->xsize && y < dir->ysize);
	dir_lock = &dir->dir_lock[x * dir->ysize + y];

	/* Wake up first waiter */
	if (dir_lock->lock_queue)
	{
		/* Debug */
		f = debug_file(mem_debug_category);
		if (f)
		{
			mem_debug("    A-%lld resumed", dir_lock->lock_queue->id);
			if (dir_lock->lock_queue->dir_lock_next)
			{
				mem_debug(" - {");
				for (stack = dir_lock->lock_queue->dir_lock_next; stack;
						stack = stack->dir_lock_next)
					mem_debug(" A-%lld", stack->id);
				mem_debug(" } still waiting");
			}
			mem_debug("\n");
		}

		/* Wake up access */
		esim_schedule_event(dir_lock->lock_queue->dir_lock_event, dir_lock->lock_queue, 1);
		dir_lock->lock_queue = dir_lock->lock_queue->dir_lock_next;
	}

	/* Trace */
	mem_trace("mem.end_access_block cache=\"%s\" access=\"A-%lld\" set=%d way=%d\n",
		dir->name, dir_lock->stack_id, x, y);

	/* Unlock entry */
	dir_lock->lock = 0;
}
Code example #6
void mod_handler_local_mem_load(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *new_stack;

	struct mod_t *mod = stack->mod;


	if (event == EV_MOD_LOCAL_MEM_LOAD)
	{
		struct mod_stack_t *master_stack;

		mem_debug("  %lld %lld 0x%x %s load\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.new_access name=\"A-%lld\" type=\"load\" "
			"state=\"%s:load\" addr=0x%x\n",
			stack->id, mod->name, stack->addr);

		/* Record access */
		mod_access_start(mod, stack, mod_access_load);

		/* Coalesce access */
		master_stack = mod_can_coalesce(mod, mod_access_load, stack->addr, stack);
		if (master_stack)
		{
			mod->reads++;
			mod_coalesce(mod, master_stack, stack);
			mod_stack_wait_in_stack(stack, master_stack, EV_MOD_LOCAL_MEM_LOAD_FINISH);
			return;
		}

		esim_schedule_event(EV_MOD_LOCAL_MEM_LOAD_LOCK, stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_LOAD_LOCK)
	{
		struct mod_stack_t *older_stack;

		mem_debug("  %lld %lld 0x%x %s load lock\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:load_lock\"\n",
			stack->id, mod->name);

		/* If there is any older write, wait for it */
		older_stack = mod_in_flight_write(mod, stack);
		if (older_stack)
		{
			mem_debug("    %lld wait for write %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_LOAD_LOCK);
			return;
		}

		/* If there is any older access to the same address that this access could not
		 * be coalesced with, wait for it. */
		older_stack = mod_in_flight_address(mod, stack->addr, stack);
		if (older_stack)
		{
			mem_debug("    %lld wait for access %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_LOAD_LOCK);
			return;
		}

		/* Call find and lock to lock the port */
		new_stack = mod_stack_create(stack->id, mod, stack->addr,
			EV_MOD_LOCAL_MEM_LOAD_FINISH, stack);
		new_stack->read = 1;
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK, new_stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_LOAD_FINISH)
	{
		mem_debug("%lld %lld 0x%x %s load finish\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:load_finish\"\n",
			stack->id, mod->name);
		mem_trace("mem.end_access name=\"A-%lld\"\n", stack->id);

		/* Increment witness variable */
		if (stack->witness_ptr)
			(*stack->witness_ptr)++;

		/* Return event queue element into event queue */
		if (stack->event_queue && stack->event_queue_item)
			linked_list_add(stack->event_queue, stack->event_queue_item);

		/* Finish access */
		mod_access_finish(mod, stack);

		/* Return */
		mod_stack_return(stack);
		return;
	}

	abort();
}
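
Example #6, like #7 and #8 below, follows Multi2Sim's event-driven handler pattern: the same function is invoked once per state, branches on the event code, does that state's work, and schedules the stack for the next event. The sketch below shows only that dispatch skeleton; the event names, the stack_t struct and the toy schedule()/driver loop are illustrative stand-ins for esim_schedule_event and the real event engine, not its API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative event codes; the real engine registers event identifiers
 * dynamically and dispatches through esim_schedule_event(). */
enum { EV_LOAD, EV_LOAD_LOCK, EV_LOAD_FINISH };

struct stack_t
{
	long long id;
	unsigned int addr;
	int next_event;             /* toy stand-in for the event queue */
};

/* Toy scheduler: the real call is esim_schedule_event(event, stack, delay). */
static void schedule(struct stack_t *stack, int event)
{
	stack->next_event = event;
}

static void handler(int event, void *data)
{
	struct stack_t *stack = data;

	if (event == EV_LOAD)
	{
		printf("%lld: load 0x%x\n", stack->id, stack->addr);
		schedule(stack, EV_LOAD_LOCK);      /* advance to the next state */
		return;
	}
	if (event == EV_LOAD_LOCK)
	{
		printf("%lld: lock\n", stack->id);
		schedule(stack, EV_LOAD_FINISH);
		return;
	}
	if (event == EV_LOAD_FINISH)
	{
		printf("%lld: finish\n", stack->id);
		schedule(stack, -1);                /* no further events */
		return;
	}
	abort();                                    /* unknown event, same policy as the handlers above */
}

int main(void)
{
	struct stack_t stack = { 7, 0x1000, EV_LOAD };

	while (stack.next_event >= 0)
	{
		int event = stack.next_event;
		handler(event, &stack);
	}
	return 0;
}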
Code example #7
void mod_handler_local_mem_find_and_lock(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *ret = stack->ret_stack;

	struct mod_t *mod = stack->mod;


	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK)
	{
		mem_debug("  %lld %lld 0x%x %s find and lock\n",
			esim_cycle, stack->id, stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock\"\n",
			stack->id, mod->name);

		/* Get a port */
		mod_lock_port(mod, stack, EV_MOD_LOCAL_MEM_FIND_AND_LOCK_PORT);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_PORT)
	{
		mem_debug("  %lld %lld 0x%x %s find and lock port\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_port\"\n",
			stack->id, mod->name);

		/* Set parent stack flag expressing that port has already been locked.
		 * This flag is checked by new writes to find out if it is already too
		 * late to coalesce. */
		ret->port_locked = 1;

		/* Statistics */
		mod->accesses++;
		if (stack->read)
		{
			mod->reads++;
			mod->effective_reads++;
		}
		else
		{
			mod->writes++;
			mod->effective_writes++;

			/* Increment witness variable when port is locked */
			if (stack->witness_ptr)
			{
				(*stack->witness_ptr)++;
				stack->witness_ptr = NULL;
			}
		}

		/* Access latency */
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK_ACTION, stack, mod->latency);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_ACTION)
	{
		struct mod_port_t *port = stack->port;

		assert(port);
		mem_debug("  %lld %lld 0x%x %s find and lock action\n", esim_cycle, stack->id,
			stack->tag, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_action\"\n",
			stack->id, mod->name);

		/* Release port */
		mod_unlock_port(mod, port, stack);

		/* Continue */
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_FINISH)
	{
		mem_debug("  %lld %lld 0x%x %s find and lock finish (err=%d)\n", esim_cycle, stack->id,
			stack->tag, mod->name, stack->err);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_finish\"\n",
			stack->id, mod->name);

		mod_stack_return(stack);
		return;
	}

	abort();
}
Code example #8
void mod_handler_local_mem_store(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *new_stack;

	struct mod_t *mod = stack->mod;


	if (event == EV_MOD_LOCAL_MEM_STORE)
	{
		struct mod_stack_t *master_stack;

		mem_debug("%lld %lld 0x%x %s store\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.new_access name=\"A-%lld\" type=\"store\" "
			"state=\"%s:store\" addr=0x%x\n",
			stack->id, mod->name, stack->addr);

		/* Record access */
		mod_access_start(mod, stack, mod_access_store);

		/* Coalesce access */
		master_stack = mod_can_coalesce(mod, mod_access_store, stack->addr, stack);
		if (master_stack)
		{
			mod->writes++;
			mod_coalesce(mod, master_stack, stack);
			mod_stack_wait_in_stack(stack, master_stack, EV_MOD_LOCAL_MEM_STORE_FINISH);

			/* Increment witness variable */
			if (stack->witness_ptr)
				(*stack->witness_ptr)++;

			return;
		}

		/* Continue */
		esim_schedule_event(EV_MOD_LOCAL_MEM_STORE_LOCK, stack, 0);
		return;
	}


	if (event == EV_MOD_LOCAL_MEM_STORE_LOCK)
	{
		struct mod_stack_t *older_stack;

		mem_debug("  %lld %lld 0x%x %s store lock\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:store_lock\"\n",
			stack->id, mod->name);

		/* If there is any older access, wait for it */
		older_stack = stack->access_list_prev;
		if (older_stack)
		{
			mem_debug("    %lld wait for access %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_STORE_LOCK);
			return;
		}

		/* Call find and lock */
		new_stack = mod_stack_create(stack->id, mod, stack->addr,
			EV_MOD_LOCAL_MEM_STORE_FINISH, stack);
		new_stack->read = 0;
		new_stack->witness_ptr = stack->witness_ptr;
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK, new_stack, 0);

		/* Set witness variable to NULL so that retries from the same
		 * stack do not increment it multiple times */
		stack->witness_ptr = NULL;

		return;
	}

	if (event == EV_MOD_LOCAL_MEM_STORE_FINISH)
	{
		mem_debug("%lld %lld 0x%x %s store finish\n", esim_cycle, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:store_finish\"\n",
			stack->id, mod->name);
		mem_trace("mem.end_access name=\"A-%lld\"\n", stack->id);

		/* Return event queue element into event queue */
		if (stack->event_queue && stack->event_queue_item)
			linked_list_add(stack->event_queue, stack->event_queue_item);

		/* Finish access */
		mod_access_finish(mod, stack);

		/* Return */
		mod_stack_return(stack);
		return;
	}

	abort();
}
Code example #9
File: directory.c  Project: 3upperm2n/gpuSimulators
int dir_entry_lock(struct dir_t *dir, int x, int y, int event, struct mod_stack_t *stack)
{
	struct dir_lock_t *dir_lock;
	struct mod_stack_t *lock_queue_iter;

	/* Get lock */
	assert(x < dir->xsize && y < dir->ysize);
	dir_lock = &dir->dir_lock[x * dir->ysize + y];

	/* If the entry is already locked, enqueue a new waiter and
	 * return failure to lock. */
	if (dir_lock->lock)
	{
		/* Enqueue the stack to the end of the lock queue */
		stack->dir_lock_next = NULL;
		stack->dir_lock_event = event;
		stack->ret_stack->way = stack->way;

		if (!dir_lock->lock_queue)
		{
			/* Special case: queue is empty */
			dir_lock->lock_queue = stack;
		}
		else 
		{
			lock_queue_iter = dir_lock->lock_queue;

			/* FIXME - The code below is the queue insertion algorithm based on stack id.
			 * It causes a deadlock when, for example, A-10 keeps retrying an up-down access and
			 * always gets priority over A-20, which is waiting to finish a down-up access. */
#if 0
			while (stack->id > lock_queue_iter->id)
			{
				if (!lock_queue_iter->dir_lock_next)
					break;

				lock_queue_iter = lock_queue_iter->dir_lock_next;
			}
#endif
			/* ------------------------------------------------------------------------ */
			/* FIXME - Replaced with the code below, which just inserts at the end of the queue.
			 * But this seems to be what this function was doing before, isn't it? Why
			 * weren't we happy with that policy? */
			while (lock_queue_iter->dir_lock_next)
				lock_queue_iter = lock_queue_iter->dir_lock_next;
			/* ------------------------------------------------------------------------ */

			/* After the loop above, lock_queue_iter is the last element,
			 * so the stack always goes at the end of the queue. */
			lock_queue_iter->dir_lock_next = stack;
		}
		mem_debug("    0x%x access suspended\n", stack->tag);
		return 0;
	}

	/* Trace */
	mem_trace("mem.new_access_block cache=\"%s\" access=\"A-%lld\" set=%d way=%d\n",
		dir->name, stack->id, x, y);

	/* Lock entry */
	dir_lock->lock = 1;
	dir_lock->stack_id = stack->id;
	return 1;
}
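
Examples #5 and #9 together implement a per-entry lock with a singly linked waiter queue: dir_entry_lock appends the suspended stack at the tail, and dir_entry_unlock pops the head and reschedules it, giving FIFO ordering and avoiding the starvation described in the FIXME above. The sketch below keeps only those queue mechanics; the waiter_t/entry_lock_t types and function names are illustrative, not the simulator's.

#include <stddef.h>
#include <stdio.h>

/* Illustrative waiter node: the simulator chains mod_stack_t objects
 * through their dir_lock_next pointer, here reduced to the essentials. */
struct waiter_t
{
	long long id;
	struct waiter_t *next;
};

struct entry_lock_t
{
	int lock;                   /* 0 = free, 1 = held */
	long long owner_id;
	struct waiter_t *queue;     /* head of the FIFO waiter list */
};

/* Try to take the lock; on contention, append the waiter at the tail
 * and report failure so the caller can suspend the access. */
static int entry_lock(struct entry_lock_t *l, struct waiter_t *w)
{
	if (l->lock)
	{
		struct waiter_t *it;

		w->next = NULL;
		if (!l->queue)
		{
			l->queue = w;
		}
		else
		{
			for (it = l->queue; it->next; it = it->next)
				;
			it->next = w;       /* FIFO: always insert at the tail */
		}
		return 0;
	}
	l->lock = 1;
	l->owner_id = w->id;
	return 1;
}

/* Release the lock and return the first waiter, if any; the caller is
 * expected to reschedule that waiter's event so it retries entry_lock(). */
static struct waiter_t *entry_unlock(struct entry_lock_t *l)
{
	struct waiter_t *w = l->queue;

	l->lock = 0;
	if (w)
		l->queue = w->next;
	return w;
}

int main(void)
{
	struct entry_lock_t l = { 0, 0, NULL };
	struct waiter_t a = { 10, NULL };
	struct waiter_t b = { 20, NULL };

	entry_lock(&l, &a);                         /* A-10 takes the lock */
	entry_lock(&l, &b);                         /* A-20 is queued */
	printf("woken: A-%lld\n", entry_unlock(&l)->id);   /* prints A-20 */
	return 0;
}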