Example #1
static int X86ThreadCanFetch(X86Thread *self) {
	X86Cpu *cpu = self->cpu;
	X86Context *ctx = self->ctx;

	unsigned int phy_addr;
	unsigned int block;

	/* Context must be running */
	if (!ctx || !X86ContextGetState(ctx, X86ContextRunning))
		return 0;

	/* Fetch stalled or context evict signal activated */
	if (self->fetch_stall_until >= asTiming(cpu)->cycle || ctx->evict_signal)
		return 0;

	/* The fetch queue must not exceed its limit of stored bytes
	 * if it is to accept new macro-instructions. */
	if (self->fetchq_occ >= x86_fetch_queue_size)
		return 0;

	/* If the next fetch address belongs to a new block, cache system
	 * must be accessible to read it. */
	block = self->fetch_neip & ~(self->inst_mod->block_size - 1);

	if (block != self->fetch_block) {
		phy_addr = mmu_translate(self->ctx->address_space_index,
				self->fetch_neip);
		if (!mod_can_access(self->inst_mod, phy_addr))
			return 0;
	}

	/* We can fetch */
	return 1;
}
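
The block computation above relies on the cache block size being a power of two: masking with ~(block_size - 1) rounds the fetch address down to its block base, so comparing against the previous fetch_block detects a block change. A standalone sketch of the trick (the concrete address and block size are made up for illustration):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int block_size = 64;                    /* must be a power of two */
	unsigned int addr = 0x1043;                      /* arbitrary fetch address */

	assert((block_size & (block_size - 1)) == 0);    /* power-of-two sanity check */

	unsigned int block = addr & ~(block_size - 1);   /* round down to block base */
	printf("addr=0x%x -> block=0x%x\n", addr, block);  /* 0x1043 -> 0x1040 */
	return 0;
}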
Example #2
static int X86ThreadIssueSQ(X86Thread *self, int quantum) {
  X86Cpu *cpu = self->cpu;
  X86Core *core = self->core;

  struct x86_uop_t *store;
  struct linked_list_t *sq = self->sq;
  struct mod_client_info_t *client_info;

  /* Process SQ */
  linked_list_head(sq);
  while (!linked_list_is_end(sq) && quantum) {
    /* Get store */
    store = linked_list_get(sq);
    assert(store->uinst->opcode == x86_uinst_store);

    /* Only committed stores issue */
    if (store->in_rob) break;

    /* Check that memory system entry is ready */
    if (!mod_can_access(self->data_mod, store->phy_addr)) break;

    /* Remove store from store queue */
    X86ThreadRemoveFromSQ(self);

    /* create and fill the mod_client_info_t object */
    client_info = mod_client_info_create(self->data_mod);
    client_info->prefetcher_eip = store->eip;

    /* Issue store */
    mod_access(self->data_mod, mod_access_store, store->phy_addr, NULL,
               core->event_queue, store, client_info);

    /* The cache system will place the store at the head of the
     * event queue when it is ready. For now, mark "in_event_queue" to
     * prevent the uop from being freed. */
    store->in_event_queue = 1;
    store->issued = 1;
    store->issue_when = asTiming(cpu)->cycle;

    /* Statistics */
    core->num_issued_uinst_array[store->uinst->opcode]++;
    core->lsq_reads++;
    core->reg_file_int_reads += store->ph_int_idep_count;
    core->reg_file_fp_reads += store->ph_fp_idep_count;
    self->num_issued_uinst_array[store->uinst->opcode]++;
    self->lsq_reads++;
    self->reg_file_int_reads += store->ph_int_idep_count;
    self->reg_file_fp_reads += store->ph_fp_idep_count;
    cpu->num_issued_uinst_array[store->uinst->opcode]++;
    if (store->trace_cache) self->trace_cache->num_issued_uinst++;

    /* One more instruction, update quantum. */
    quantum--;

    /* MMU statistics */
    if (*mmu_report_file_name)
      mmu_access_page(store->phy_addr, mmu_access_write);
  }
  return quantum;
}
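
X86ThreadIssueSQ consumes at most "quantum" issue slots and returns what is left, so several queue-issue helpers can share one per-cycle budget. A hedged sketch of such a driver, reusing the helper names from the other examples; the driver itself, its call order, and the x86_cpu_issue_width starting budget are assumptions, not code from the simulator:

/* Hypothetical issue-stage driver: one quantum shared across the
 * store, load, and prefetch queues of a thread. The call order is
 * an assumption for illustration. */
static void X86ThreadIssue(X86Thread *self)
{
	int quantum = x86_cpu_issue_width;   /* assumed per-cycle issue budget */

	quantum = X86ThreadIssueSQ(self, quantum);        /* committed stores */
	quantum = X86ThreadIssueLQ(self, quantum);        /* ready loads */
	if (quantum > 0)
		quantum = X86ThreadIssuePreQ(self, quantum);  /* prefetches last */
}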
Example #3
static int issue_sq(int core, int thread, int quant)
{
	struct uop_t *store;
	struct linked_list_t *sq = THREAD.sq;

	/* Process SQ */
	linked_list_head(sq);
	while (!linked_list_is_end(sq) && quant)
	{
		/* Get store */
		store = linked_list_get(sq);
		assert(store->uinst->opcode == x86_uinst_store);

		/* Only committed stores issue */
		if (store->in_rob)
			break;

		/* Check that memory system entry is ready */
		if (!mod_can_access(THREAD.data_mod, store->phy_addr))
			break;

		/* Remove store from store queue */
		sq_remove(core, thread);

		/* Issue store */
		mod_access(THREAD.data_mod, mod_entry_cpu, mod_access_write,
			store->phy_addr, NULL, CORE.eventq, store);

		/* The cache system will place the store at the head of the
		 * event queue when it is ready. For now, mark "in_eventq" to
		 * prevent the uop from being freed. */
		store->in_eventq = 1;
		store->issued = 1;
		store->issue_when = cpu->cycle;
	
		/* Instruction issued */
		CORE.issued[store->uinst->opcode]++;
		CORE.lsq_reads++;
		CORE.rf_int_reads += store->ph_int_idep_count;
		CORE.rf_fp_reads += store->ph_fp_idep_count;
		THREAD.issued[store->uinst->opcode]++;
		THREAD.lsq_reads++;
		THREAD.rf_int_reads += store->ph_int_idep_count;
		THREAD.rf_fp_reads += store->ph_fp_idep_count;
		cpu->issued[store->uinst->opcode]++;
		quant--;
		
		/* MMU statistics */
		if (*mmu_report_file_name)
			mmu_access_page(store->phy_addr, mmu_access_write);

		/* Debug */
		esim_debug("uop action=\"update\", core=%d, seq=%llu,"
			" stg_issue=1, in_lsq=0, issued=1\n",
			store->core, (long long unsigned) store->di_seq);
	}
	return quant;
}
Example #4
static int x86_cpu_issue_sq(int core, int thread, int quant)
{
	struct x86_uop_t *store;
	struct linked_list_t *sq = X86_THREAD.sq;

	/* Process SQ */
	linked_list_head(sq);
	while (!linked_list_is_end(sq) && quant)
	{
		/* Get store */
		store = linked_list_get(sq);
		assert(store->uinst->opcode == x86_uinst_store);

		/* Only committed stores issue */
		if (store->in_rob)
			break;

		/* Check that memory system entry is ready */
		if (!mod_can_access(X86_THREAD.data_mod, store->phy_addr))
			break;

		/* Remove store from store queue */
		x86_sq_remove(core, thread);

		/* Issue store */
		mod_access(X86_THREAD.data_mod, mod_access_store,
			store->phy_addr, NULL, X86_CORE.event_queue, store);

		/* The cache system will place the store at the head of the
		 * event queue when it is ready. For now, mark "in_event_queue" to
		 * prevent the uop from being freed. */
		store->in_event_queue = 1;
		store->issued = 1;
		store->issue_when = x86_cpu->cycle;
	
		/* Instruction issued */
		X86_CORE.issued[store->uinst->opcode]++;
		X86_CORE.lsq_reads++;
		X86_CORE.reg_file_int_reads += store->ph_int_idep_count;
		X86_CORE.reg_file_fp_reads += store->ph_fp_idep_count;
		X86_THREAD.issued[store->uinst->opcode]++;
		X86_THREAD.lsq_reads++;
		X86_THREAD.reg_file_int_reads += store->ph_int_idep_count;
		X86_THREAD.reg_file_fp_reads += store->ph_fp_idep_count;
		x86_cpu->issued[store->uinst->opcode]++;
		quant--;
		
		/* MMU statistics */
		if (*mmu_report_file_name)
			mmu_access_page(store->phy_addr, mmu_access_write);
	}
	return quant;
}
Example #5
static int issue_lq(int core, int thread, int quant)
{
	struct linked_list_t *lq = THREAD.lq;
	struct uop_t *load;

	/* Debug */
	if (esim_debug_file)
		uop_lnlist_check_if_ready(lq);
	
	/* Process lq */
	linked_list_head(lq);
	while (!linked_list_is_end(lq) && quant)
	{
		/* Get element from load queue. If it is not ready, go to the next one */
		load = linked_list_get(lq);
		if (!load->ready && !rf_ready(load))
		{
			linked_list_next(lq);
			continue;
		}
		load->ready = 1;

		/* Check that memory system is accessible */
		if (!mod_can_access(THREAD.data_mod, load->phy_addr))
		{
			linked_list_next(lq);
			continue;
		}

		/* Remove from load queue */
		assert(load->uinst->opcode == x86_uinst_load);
		lq_remove(core, thread);

		/* Access memory system */
		mod_access(THREAD.data_mod, mod_entry_cpu, mod_access_read,
			load->phy_addr, NULL, CORE.eventq, load);

		/* The cache system will place the load at the head of the
		 * event queue when it is ready. For now, mark "in_eventq" to
		 * prevent the uop from being freed. */
		load->in_eventq = 1;
		load->issued = 1;
		load->issue_when = cpu->cycle;
		
		/* Instruction issued */
		CORE.issued[load->uinst->opcode]++;
		CORE.lsq_reads++;
		CORE.rf_int_reads += load->ph_int_idep_count;
		CORE.rf_fp_reads += load->ph_fp_idep_count;
		THREAD.issued[load->uinst->opcode]++;
		THREAD.lsq_reads++;
		THREAD.rf_int_reads += load->ph_int_idep_count;
		THREAD.rf_fp_reads += load->ph_fp_idep_count;
		cpu->issued[load->uinst->opcode]++;
		quant--;
		
		/* MMU statistics */
		if (*mmu_report_file_name)
			mmu_access_page(load->phy_addr, mmu_access_read);

		/* Debug */
		esim_debug("uop action=\"update\", core=%d, seq=%llu,"
			" stg_issue=1, in_lsq=0, issued=1\n",
			load->core, (long long unsigned) load->di_seq);
	}
	
	return quant;
}
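
The "!load->ready && !rf_ready(load)" test above is a latch: the cheap ready flag short-circuits the register-file lookup once it has succeeded, so rf_ready() runs at most once per uop. The pattern in isolation, with hypothetical standalone types (not the simulator's structures):

#include <stdbool.h>

/* Hypothetical stand-ins for the uop and the register-file check. */
struct uop_sketch { bool ready; int pending_ideps; };

static bool rf_ready_sketch(const struct uop_sketch *u)
{
	return u->pending_ideps == 0;   /* all input operands available */
}

/* Check the cached flag first; fall back to the register file and
 * latch the result so the lookup happens at most once per uop. */
static bool uop_is_ready(struct uop_sketch *u)
{
	if (!u->ready && !rf_ready_sketch(u))
		return false;               /* operands still pending */
	u->ready = true;
	return true;
}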
Example #6
static int X86ThreadIssuePreQ(X86Thread *self, int quantum)
{
	X86Core *core = self->core;
	X86Cpu *cpu = self->cpu;

	struct linked_list_t *preq = self->preq;
	struct x86_uop_t *prefetch;

	/* Process preq */
	linked_list_head(preq);
	while (!linked_list_is_end(preq) && quantum)
	{
		/* Get element from prefetch queue. If it is not ready, go to the next one */
		prefetch = linked_list_get(preq);
		if (!prefetch->ready && !X86ThreadIsUopReady(self, prefetch))
		{
			linked_list_next(preq);
			continue;
		}

		/*
		 * Make sure it has not been prefetched recently. This is just to avoid
		 * unnecessary memory traffic. Even though the cache will register a "hit"
		 * on redundant prefetches, it is still helpful to avoid going to the
		 * memory (cache).
		 */
		if (prefetch_history_is_redundant(core->prefetch_history,
							   self->data_mod, prefetch->phy_addr))
		{
			/* remove from queue. do not prefetch. */
			assert(prefetch->uinst->opcode == x86_uinst_prefetch);
			X86ThreadRemovePreQ(self);
			prefetch->completed = 1;
			x86_uop_free_if_not_queued(prefetch);
			continue;
		}

		prefetch->ready = 1;

		/* Check that memory system is accessible */
		if (!mod_can_access(self->data_mod, prefetch->phy_addr))
		{
			linked_list_next(preq);
			continue;
		}

		/* Remove from prefetch queue */
		assert(prefetch->uinst->opcode == x86_uinst_prefetch);
		X86ThreadRemovePreQ(self);

		/* Access memory system */
		mod_access(self->data_mod, mod_access_prefetch,
			prefetch->phy_addr, NULL, core->event_queue, prefetch, NULL);

		/* Record prefetched address */
		prefetch_history_record(core->prefetch_history, prefetch->phy_addr);

		/* The cache system will place the prefetch at the head of the
		 * event queue when it is ready. For now, mark "in_event_queue" to
		 * prevent the uop from being freed. */
		prefetch->in_event_queue = 1;
		prefetch->issued = 1;
		prefetch->issue_when = asTiming(cpu)->cycle;
		
		/* Statistics */
		core->num_issued_uinst_array[prefetch->uinst->opcode]++;
		core->lsq_reads++;
		core->reg_file_int_reads += prefetch->ph_int_idep_count;
		core->reg_file_fp_reads += prefetch->ph_fp_idep_count;
		self->num_issued_uinst_array[prefetch->uinst->opcode]++;
		self->lsq_reads++;
		self->reg_file_int_reads += prefetch->ph_int_idep_count;
		self->reg_file_fp_reads += prefetch->ph_fp_idep_count;
		cpu->num_issued_uinst_array[prefetch->uinst->opcode]++;
		if (prefetch->trace_cache)
			self->trace_cache->num_issued_uinst++;

		/* One more instruction issued, update quantum. */
		quantum--;
		
		/* MMU statistics */
		MMUAccessPage(cpu->mmu, prefetch->phy_addr, mmu_access_read);

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			prefetch->id_in_core, core->id);
	}
	
	return quantum;
}
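
prefetch_history_is_redundant() only needs a short memory of recently prefetched addresses. A minimal ring-buffer sketch of that idea; this is a hypothetical stand-in, not the simulator's prefetch_history implementation:

#define PREFETCH_HISTORY_SIZE 32

/* Hypothetical fixed-size history of recently prefetched addresses. */
struct prefetch_history_sketch
{
	unsigned int addr[PREFETCH_HISTORY_SIZE];
	int head;
};

/* Return 1 if addr was recorded recently, 0 otherwise. */
static int history_is_redundant(struct prefetch_history_sketch *h,
	unsigned int addr)
{
	for (int i = 0; i < PREFETCH_HISTORY_SIZE; i++)
		if (h->addr[i] == addr)
			return 1;
	return 0;
}

/* Record addr, overwriting the oldest entry. */
static void history_record(struct prefetch_history_sketch *h,
	unsigned int addr)
{
	h->addr[h->head] = addr;
	h->head = (h->head + 1) % PREFETCH_HISTORY_SIZE;
}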
Example #7
static int X86ThreadIssueLQ(X86Thread *self, int quant)
{
	X86Core *core = self->core;
	X86Cpu *cpu = self->cpu;

	struct linked_list_t *lq = self->lq;
	struct x86_uop_t *load;
	struct mod_client_info_t *client_info;

	/* Process lq */
	linked_list_head(lq);
	while (!linked_list_is_end(lq) && quant)
	{
		/* Get element from load queue. If it is not ready, go to the next one */
		load = linked_list_get(lq);
		if (!load->ready && !X86ThreadIsUopReady(self, load))
		{
			linked_list_next(lq);
			continue;
		}
		load->ready = 1;

		/* Check that memory system is accessible */
		if (!mod_can_access(self->data_mod, load->phy_addr))
		{
			linked_list_next(lq);
			continue;
		}

		/* Remove from load queue */
		assert(load->uinst->opcode == x86_uinst_load);
		X86ThreadRemoveFromLQ(self);

		/* create and fill the mod_client_info_t object */
		client_info = mod_client_info_create(self->data_mod);
		client_info->prefetcher_eip = load->eip;

		/* Access memory system */
		mod_access(self->data_mod, mod_access_load,
			load->phy_addr, NULL, core->event_queue, load, client_info);

		/* The cache system will place the load at the head of the
		 * event queue when it is ready. For now, mark "in_event_queue" to
		 * prevent the uop from being freed. */
		load->in_event_queue = 1;
		load->issued = 1;
		load->issue_when = asTiming(cpu)->cycle;
		
		/* Statistics */
		core->num_issued_uinst_array[load->uinst->opcode]++;
		core->lsq_reads++;
		core->reg_file_int_reads += load->ph_int_idep_count;
		core->reg_file_fp_reads += load->ph_fp_idep_count;
		self->num_issued_uinst_array[load->uinst->opcode]++;
		self->lsq_reads++;
		self->reg_file_int_reads += load->ph_int_idep_count;
		self->reg_file_fp_reads += load->ph_fp_idep_count;
		cpu->num_issued_uinst_array[load->uinst->opcode]++;
		if (load->trace_cache)
			self->trace_cache->num_issued_uinst++;

		/* One more instruction issued, update quantum. */
		quant--;
		
		/* MMU statistics */
		MMUAccessPage(cpu->mmu, load->phy_addr, mmu_access_read);

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			load->id_in_core, core->id);
	}
	
	return quant;
}
Example #8
/*
 * ACC call #2 - accArith
 *
 * accArith - Arithmetic calculation for the Whetstone benchmark
 *
 * @return
 *	The function always returns 0 if running properly;
 *	returns -1 if an illegal input value is detected
 */
static int x86_acc_func_accArith (struct x86_ctx_t *ctx)
{
	int core = 0; 
	int thread = 0;

	struct x86_regs_t *regs = ctx->regs;
	struct mem_t *mem = ctx->mem;


	unsigned int args_ptr;
	double func_args[4];
	/* Read arguments */
	args_ptr = regs->ecx;
	printf ("args_ptr = %u(0x%x)\n\n",args_ptr,args_ptr);
	func_args[0] = 1.0;
	func_args[1] = -1.0;
	func_args[2] = -1.0;
	func_args[3] = -1.0;

	/* Get function info */
	//mem_read(mem, args_ptr, sizeof(double), func_args);

/*
	mem_read(mem, args_ptr+4, 8, func_args[1] );
	mem_read(mem, args_ptr+8, 8, func_args[2] );
	mem_read(mem, args_ptr+12, 8, func_args[3] );
	mem_read(mem, args_ptr+16, 8, func_args[4] );
*/
	//func_args[0] = &args_ptr;
	//func_args[1] = &args_ptr+8;
	//func_args[2] = &args_ptr+16;
	//func_args[3] = &args_ptr+24;
	//func_args[4] = &args_ptr+60;
	printf("*******************************\n");
	printf("In Emulation\n");
	//printf("\t\tfunc_args = %u (0x%x)\n", func_args, func_args);
	printf ("Cycle when getting into this call is %lld\n\n", x86_cpu->cycle); 

	/***********************************************/


	struct linked_list_t *sq = X86_THREAD.sq;
	struct linked_list_t *lq = X86_THREAD.lq;
	struct x86_uop_t *store;
	struct x86_uop_t *load;
	int quant = x86_cpu_issue_width;
	

	linked_list_head(sq);
	while (!linked_list_is_end(sq)&& quant )
	{
		store = linked_list_get(sq);
		printf ("physical addr @ store: %d\n",store->phy_addr);
		//assert(store->uinst->opcode == x86_uinst_store);
		if (!store->ready && !x86_reg_file_ready(store))
		{
			linked_list_next(sq);
			continue;
		}


		store->ready = 1;
		//printf ("physical add: %d\n",load->phy_addr);
		if (!mod_can_access(X86_THREAD.data_mod, store->phy_addr))
		{
			//printf("Debug Point 5\n");
			linked_list_next(sq);
			continue;
		}
		
		int i = 9000;
		while (i--)
		{
			//printf("Debug Point 6\n");
			mod_access(X86_THREAD.data_mod, mod_access_store,
				store->phy_addr, NULL, X86_CORE.event_queue, store);
		}
		quant--;
		
		// MMU statistics
		if (*mmu_report_file_name)
			mmu_access_page(store->phy_addr, mmu_access_write);

	}

	quant = x86_cpu_issue_width;
	
	linked_list_head(lq);
	while (!linked_list_is_end(lq)&& quant )
	{
		load = linked_list_get(lq);
		printf ("physical add @ load: %d\n",load->phy_addr);
		//assert(store->uinst->opcode == x86_uinst_store);
		load->ready = 1;
		if (!load->ready && !x86_reg_file_ready(load))
		{
			printf("load debug point 1\n");
			linked_list_next(sq);
			continue;
		}


		load->ready = 1;
		//printf ("physical add: %d\n",load->phy_addr);
		if (!mod_can_access(X86_THREAD.data_mod, store->phy_addr))
		{
			printf("load debug point 2\n");
			linked_list_next(lq);
			continue;
		}
		
		int j = 9000;
		while (j--)
		{
			//printf("load debug point 3\n");
			mod_access(X86_THREAD.data_mod, mod_access_load,
				load->phy_addr, NULL, X86_CORE.event_queue, load);
		}
		quant--;

		//printf("load debug point 4: quant = %d\n", quant);

		// MMU statistics
		if (*mmu_report_file_name)
			mmu_access_page(load->phy_addr, mmu_access_read);

		// Trace
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			load->id_in_core, load->core);
	}



	/***********************************************/
	
/*
	printf("\t\tfunc_args[0] = %u (0x%x)\n", func_args[0], func_args[0]);
	printf("\t\tfunc_args[1] = %u (0x%x)\n", func_args[1], func_args[1]);
	printf("\t\tfunc_args[2] = %u (0x%x)\n", func_args[2], func_args[2]);
	printf("\t\tfunc_args[3] = %u (0x%x)\n", func_args[3], func_args[3]);
	printf("\t\tfunc_args[4] = %u (0x%x)\n", func_args[4], func_args[4]);

	printf("get here 1\n");
*/
/*
	printf("Value:\n\t\tfunc_args[0] = %f (0x%x)\n", *func_args[0], *func_args[0]);
	printf("\t\tfunc_args[1] = %f (0x%x)\n", *func_args[1], *func_args[1]);
	printf("\t\tfunc_args[2] = %f (0x%x)\n", *func_args[2], *func_args[2]);
	printf("\t\tfunc_args[3] = %f (0x%x)\n", *func_args[3], *func_args[3]);
	printf("\t\tfunc_args[4] = %f (0x%x)\n", *func_args[4], *func_args[4]);
*/
/*
	double *A0 = func_args[0];
	double *A1 = func_args[1];
	double *A2 = func_args[2];
	double *A3 = func_args[3];
	double *A4 = func_args[4];
*/
	//double N = *A0;

	//printf("get here\n");
/*
	printf("\t\tN  = %f (0x%x)\n", *A0,*A0);
	printf("\t\tA1 = %f (0x%x)\n", *A1,*A1);
	printf("\t\tA2 = %f (0x%x)\n", *A2,*A2);
	printf("\t\tA3 = %f (0x%x)\n", *A3,*A3);
	printf("\t\tA4 = %f (0x%x)\n", *A4,*A4);
*/

	double T = 0.499975;

	printf ("func_args1 = %f, func_args2 = %f, func_args3 = %f, func_args4 = %f\n",func_args[0],func_args[1],func_args[2],func_args[3]);

/*
	func_args[0] = (func_args[0] + func_args[1] + func_args[2] - func_args[3])*T;
	func_args[1] = (func_args[0] + func_args[1] - func_args[2] + func_args[3])*T;
	func_args[2] = (func_args[0] - func_args[1] + func_args[2] - func_args[3])*T;
	func_args[3] = (-func_args[0] + func_args[1] + func_args[2] + func_args[3])*T;
*/
	return 0;
}
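
The commented-out mem_read calls above hint at the intended argument passing: the guest leaves a pointer to an argument block in ecx, and the host reads it out of simulated memory. A hedged sketch of that pattern, assuming four packed doubles at args_ptr; the layout and the helper name are assumptions the original never resolves:

/* Hypothetical argument unmarshalling for an ACC call: read four
 * consecutive doubles from the guest address held in ecx. The packed
 * 4 * sizeof(double) layout is an assumption for illustration. */
static void x86_acc_read_args(struct x86_ctx_t *ctx, double func_args[4])
{
	unsigned int args_ptr = ctx->regs->ecx;
	mem_read(ctx->mem, args_ptr, 4 * sizeof(double), func_args);
}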
Example #9
static int x86_acc_func_accDTW (struct x86_ctx_t *ctx)
{
	struct x86_regs_t *regs = ctx->regs;
	struct mem_t *mem = ctx->mem;
	int core = 0;
	int thread = 0;
	
	unsigned int args_ptr;
	//int x;
	struct arglist func_args;
	/* Read arguments */
	args_ptr = regs->ecx;


	/* Get function info */
	mem_read(mem, args_ptr, sizeof(struct arglist), &func_args);
	printf("\t\t**sample1 = %p (%p)\n", func_args.sample1, &(func_args.sample1[0][0]));
	printf("\t\tlength1      = %u (%p)\n", func_args.length1, &func_args.length1);
	printf("\t\t**sample2 = %p (%p)\n", func_args.sample2, &(func_args.sample2[0][0]));
	printf("\t\tlength2      = %u (%p)\n", func_args.length2, &func_args.length2);
	printf("\t\ti               = %u (%p)\n", func_args.i, &func_args.i);
	printf("\t\tj               = %u (%p)\n", func_args.j, &func_args.j);
	printf("\t\t*table       = %p (%p)\n", func_args.table, &(func_args.table[0]));

	/***********************************************/

#define L2ONLY
#define WITHACC


#ifdef L2ONLY
	printf ("Cache Behavior Simulation\n");
	char * mod_name = "mod-l2-0";
	X86_THREAD.data_mod = mem_system_get_mod (mod_name);
#endif

#ifdef WITHACC
	struct linked_list_t *sq = X86_THREAD.sq;
	struct linked_list_t *lq = X86_THREAD.lq;
	struct x86_uop_t *store;
	struct x86_uop_t *load;
	int quant = x86_cpu_issue_width;
	unsigned int count1, count2;

	for (count1 = 0; count1 < 124; count1++)
	{
		for (count2 = 0; count2 < 124; count2++)
		{
			linked_list_head(sq);
			while (!linked_list_is_end(sq)&& quant )
			{
				store = linked_list_get(sq);
				printf("\n\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n");
				printf("physical addr @ store: %d\n",store->phy_addr);
				assert(store->uinst->opcode == x86_uinst_store);
				if (!store->ready && !x86_reg_file_ready(store))
				{
					linked_list_next(sq);
					continue;
				}


				store->ready = 1;
				printf ("data module kind: %d\n",X86_THREAD.data_mod->kind);
				printf ("data module level: %d\n",X86_THREAD.data_mod->level);
				printf ("data module name: %s\n",X86_THREAD.data_mod->name);
				printf ("data module cache name: %s\n",X86_THREAD.data_mod->cache->name);
				if (!mod_can_access(X86_THREAD.data_mod, store->phy_addr))
				{
					linked_list_next(sq);
					continue;
				}
		
				int i = 3;
				while (i--)
				{
					printf("Store Debug Point 6\n");
					mod_access(X86_THREAD.data_mod, mod_access_store,
						store->phy_addr, NULL, X86_CORE.event_queue, store);
				}
				quant--;
		
				// MMU statistics
				if (*mmu_report_file_name)
					mmu_access_page(store->phy_addr, mmu_access_write);

			}

			quant = x86_cpu_issue_width;

			//printf("Load Simulation ... \n");	
			linked_list_head(lq);
			while (!linked_list_is_end(lq)&& quant )
			{
				load = linked_list_get(lq);
				printf ("physical add @ load: %d\n",load->phy_addr);
				assert(store->uinst->opcode == x86_uinst_store);
				load->ready = 1;
				if (!load->ready && !x86_reg_file_ready(load))
				{
					printf("load debug point 1\n");
					linked_list_next(sq);
					continue;
				}


				load->ready = 1;
				//printf ("physical add: %d\n",load->phy_addr);
				if (!mod_can_access(X86_THREAD.data_mod, store->phy_addr))
				{
					printf("load debug point 2\n");
					linked_list_next(lq);
					continue;
				}
		
				int j = 1;
				while (j--)
				{
					printf("load debug point 3\n");
					mod_access(X86_THREAD.data_mod, mod_access_load,
						load->phy_addr, NULL, X86_CORE.event_queue, load);
				}
				quant--;

				//printf("load debug point 4: quant = %d\n", quant);

				// MMU statistics
				if (*mmu_report_file_name)
					mmu_access_page(load->phy_addr, mmu_access_read);

				// Trace
				x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
					load->id_in_core, load->core);
			}
		}
	}
#endif

#ifdef L2ONLY
	mod_name = "mod-dl1-0";
	X86_THREAD.data_mod = mem_system_get_mod(mod_name);
#endif

	/***********************************************/
	int ret = DTWdistance(func_args.sample1, func_args.length1, func_args.sample2, 
		func_args.length2, func_args.i, func_args.j, func_args.table);
	return ret;

}
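
The L2ONLY blocks above retarget the thread's data module by name so the injected accesses go straight to the L2, then rebind the L1 afterwards. The same idea as an explicit save/restore; the wrapper is hypothetical, and only the module name "mod-l2-0" comes from the example itself:

/* Hypothetical save/restore wrapper around the L2-retargeting trick.
 * Relies on the X86_THREAD macro picking up the core/thread locals. */
static void with_l2_data_mod(int core, int thread)
{
	struct mod_t *saved_mod = X86_THREAD.data_mod;

	X86_THREAD.data_mod = mem_system_get_mod("mod-l2-0");  /* issue against L2 */
	/* ... inject mod_access() calls here ... */
	X86_THREAD.data_mod = saved_mod;                       /* restore the L1 */
}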
Example #10
static int x86_cpu_issue_lq(int core, int thread, int quant)
{
	struct linked_list_t *lq = X86_THREAD.lq;
	struct x86_uop_t *load;

	/* Process lq */
	linked_list_head(lq);
	while (!linked_list_is_end(lq) && quant)
	{
		/* Get element from load queue. If it is not ready, go to the next one */
		load = linked_list_get(lq);
		if (!load->ready && !x86_reg_file_ready(load))
		{
			linked_list_next(lq);
			continue;
		}
		load->ready = 1;

		/* Check that memory system is accessible */
		if (!mod_can_access(X86_THREAD.data_mod, load->phy_addr))
		{
			linked_list_next(lq);
			continue;
		}

		/* Remove from load queue */
		assert(load->uinst->opcode == x86_uinst_load);
		x86_lq_remove(core, thread);

		/* Access memory system */
		mod_access(X86_THREAD.data_mod, mod_access_load,
			load->phy_addr, NULL, X86_CORE.event_queue, load);

		/* The cache system will place the load at the head of the
		 * event queue when it is ready. For now, mark "in_event_queue" to
		 * prevent the uop from being freed. */
		load->in_event_queue = 1;
		load->issued = 1;
		load->issue_when = x86_cpu->cycle;
		
		/* Instruction issued */
		X86_CORE.issued[load->uinst->opcode]++;
		X86_CORE.lsq_reads++;
		X86_CORE.reg_file_int_reads += load->ph_int_idep_count;
		X86_CORE.reg_file_fp_reads += load->ph_fp_idep_count;
		X86_THREAD.issued[load->uinst->opcode]++;
		X86_THREAD.lsq_reads++;
		X86_THREAD.reg_file_int_reads += load->ph_int_idep_count;
		X86_THREAD.reg_file_fp_reads += load->ph_fp_idep_count;
		x86_cpu->issued[load->uinst->opcode]++;
		quant--;
		
		/* MMU statistics */
		if (*mmu_report_file_name)
			mmu_access_page(load->phy_addr, mmu_access_read);

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			load->id_in_core, load->core);
	}
	
	return quant;
}