Example #1
File: cpu.c Project: xianggong/multi2sim42
void X86CpuAddToTraceList(X86Cpu *self, struct x86_uop_t *uop) {
  assert(x86_tracing());
  assert(!uop->in_uop_trace_list);

  uop->in_uop_trace_list = 1;
  linked_list_add(self->uop_trace_list, uop);
}
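
For context, uops added to this list are consumed and released elsewhere in the simulator. Below is a minimal sketch of such a drain pass, modeled on the function above; the function name, the linked_list traversal calls, and the reuse of x86_uop_free_if_not_queued are assumptions for illustration, not code taken from the project.

void X86CpuDrainTraceListSketch(X86Cpu *self) {
  struct linked_list_t *list = self->uop_trace_list;
  struct x86_uop_t *uop;

  /* Drain from the head until the list is empty (assumed API) */
  while (linked_list_count(list)) {
    linked_list_head(list);
    uop = linked_list_get(list);
    linked_list_remove(list);

    /* Clear the flag that X86CpuAddToTraceList asserts on */
    uop->in_uop_trace_list = 0;

    /* Release the uop once no other queue references it */
    x86_uop_free_if_not_queued(uop);
  }
}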
Example #2
File: uop-queue.c Project: ajithcj/miaow
void x86_uop_queue_recover(int core, int thread)
{
	struct list_t *uop_queue = X86_THREAD.uop_queue;
	struct x86_uop_t *uop;

	while (list_count(uop_queue))
	{
		uop = list_get(uop_queue, list_count(uop_queue) - 1);
		assert(uop->thread == thread);
		/* Wrong-path uops sit at the tail, so stop at the first
		 * non-speculative uop */
		if (!uop->specmode)
			break;
		list_remove_at(uop_queue, list_count(uop_queue) - 1);
		uop->in_uop_queue = 0;

		/* Trace */
		if (x86_tracing())
		{
			x86_trace("x86.inst id=%lld core=%d stg=\"sq\"\n",
				uop->id_in_core, uop->core);
			x86_cpu_uop_trace_list_add(uop);
		}

		/* Free */
		x86_uop_free_if_not_queued(uop);
	}
}
Example #3
File: fetch-queue.c Project: ajithcj/miaow
void X86ThreadRecoverFetchQueue(X86Thread *self)
{
    X86Core *core = self->core;
    X86Cpu *cpu = self->cpu;

    struct list_t *fetchq = self->fetch_queue;
    struct x86_uop_t *uop;

    while (list_count(fetchq))
    {
        uop = list_get(fetchq, list_count(fetchq) - 1);
        assert(uop->thread == self);
        /* Wrong-path uops sit at the tail, so stop at the first
         * non-speculative uop */
        if (!uop->specmode)
            break;
        uop = X86ThreadRemoveFromFetchQueue(self, list_count(fetchq) - 1);

        /* Trace */
        if (x86_tracing())
        {
            x86_trace("x86.inst id=%lld core=%d stg=\"sq\"\n",
                      uop->id_in_core, core->id);
            X86CpuAddToTraceList(cpu, uop);
        }

        /* Free */
        x86_uop_free_if_not_queued(uop);
    }
}
Example #4
void x86_cpu_recover(int core, int thread)
{
	struct x86_uop_t *uop;

	/* Remove instructions of this thread in fetch_queue, uop_queue, iq, sq, lq and event_queue. */
	x86_fetch_queue_recover(core, thread);
	x86_uop_queue_recover(core, thread);
	x86_iq_recover(core, thread);
	x86_lsq_recover(core, thread);
	x86_event_queue_recover(core, thread);

	/* Remove instructions from ROB, restoring the state of the
	 * physical register file. */
	for (;;)
	{
		/* Get instruction */
		uop = x86_rob_tail(core, thread);
		if (!uop)
			break;

		/* If we already removed all speculative instructions,
		 * the work is finished */
		assert(uop->core == core);
		assert(uop->thread == thread);
		if (!uop->specmode)
			break;
		
		/* Statistics */
		if (uop->fetch_trace_cache)
			X86_THREAD.trace_cache->squashed++;
		X86_THREAD.squashed++;
		X86_CORE.squashed++;
		x86_cpu->squashed++;
		
		/* Undo rename map: a not-yet-completed uop first performs its
		 * register write so the undo below starts from a consistent
		 * register file state */
		if (!uop->completed)
			x86_reg_file_write(uop);
		x86_reg_file_undo(uop);

		/* Trace */
		if (x86_tracing())
		{
			x86_trace("x86.inst id=%lld core=%d stg=\"sq\"\n",
				uop->id_in_core, uop->core);
			x86_cpu_uop_trace_list_add(uop);
		}

		/* Remove entry in ROB */
		x86_rob_remove_tail(core, thread);
	}

	/* If we actually fetched wrong instructions, recover kernel */
	if (x86_ctx_get_status(X86_THREAD.ctx, x86_ctx_spec_mode))
		x86_ctx_recover(X86_THREAD.ctx);
	
	/* Stall fetch and set eip to fetch. */
	X86_THREAD.fetch_stall_until = MAX(X86_THREAD.fetch_stall_until, x86_cpu->cycle + x86_cpu_recover_penalty - 1);
	X86_THREAD.fetch_neip = X86_THREAD.ctx->regs->eip;
}
Example #5
void x86_cpu_uop_trace_list_add(struct x86_uop_t *uop)
{
	assert(x86_tracing());
	assert(!uop->in_uop_trace_list);

	uop->in_uop_trace_list = 1;
	linked_list_add(x86_cpu->uop_trace_list, uop);
}
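
Given the format string shared by the recovery functions above, each squashed uop appears in the trace stream as a line of the following shape (the id and core values here are illustrative):

x86.inst id=1042 core=0 stg="sq"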
Example #6
/* Run the emulation of one x86 macro-instruction and create its uops.
 * If any of the uops is a control uop, that uop (the last one, if there are
 * several) is the return value of the function. Otherwise, the first decoded
 * uop is returned. */
static struct x86_uop_t *X86ThreadFetchInst(X86Thread *self,
		int fetch_trace_cache) {
	X86Cpu *cpu = self->cpu;
	X86Core *core = self->core;
	X86Context *ctx = self->ctx;

	struct x86_uop_t *uop;
	struct x86_uop_t *ret_uop;

	struct x86_uinst_t *uinst;
	int uinst_count;
	int uinst_index;

	/* Functional simulation */
	self->fetch_eip = self->fetch_neip;
	X86ContextSetEip(ctx, self->fetch_eip);
	X86ContextExecute(ctx);
	self->fetch_neip = self->fetch_eip + ctx->inst.size;

	/* If no micro-instruction was generated by this instruction, create a
	 * 'nop' micro-instruction. This makes sure that there is always a micro-
	 * instruction representing the regular control flow of macro-instructions
	 * of the program. It is important for the traces stored in the trace
	 * cache. */
	if (!x86_uinst_list->count)
		x86_uinst_new(ctx, x86_uinst_nop, 0, 0, 0, 0, 0, 0, 0);

	/* Micro-instructions created by the x86 instructions can be found now
	 * in 'x86_uinst_list'. */
	uinst_count = list_count(x86_uinst_list);
	uinst_index = 0;
	ret_uop = NULL;
	while (list_count(x86_uinst_list)) {
		/* Get uinst from head of list */
		uinst = list_remove_at(x86_uinst_list, 0);

		/* Create uop */
		uop = x86_uop_create();
		uop->uinst = uinst;
		assert(uinst->opcode >= 0 && uinst->opcode < x86_uinst_opcode_count);
		uop->flags = x86_uinst_info[uinst->opcode].flags;
		uop->id = cpu->uop_id_counter++;
		uop->id_in_core = core->uop_id_counter++;

		uop->ctx = ctx;
		uop->thread = self;

		uop->mop_count = uinst_count;
		uop->mop_size = ctx->inst.size;
		uop->mop_id = uop->id - uinst_index;
		uop->mop_index = uinst_index;

		uop->eip = self->fetch_eip;
		uop->in_fetch_queue = 1;
		uop->trace_cache = fetch_trace_cache;
		uop->specmode = X86ContextGetState(ctx, X86ContextSpecMode);
		uop->fetch_address = self->fetch_address;
		uop->fetch_access = self->fetch_access;
		uop->neip = ctx->regs->eip;
		uop->pred_neip = self->fetch_neip;
		uop->target_neip = ctx->target_eip;

		/* Process uop dependences and classify them in integer, floating-point,
		 * flags, etc. */
		x86_uop_count_deps(uop);

		/* Calculate physical address of a memory access */
		if (uop->flags & X86_UINST_MEM) {
			if (uinst->address == ctx->mem_mod_low && ctx->mem_mod_low != 0) {
				/* Align 'mem_low' down to the start of the memory block
				 * holding the data */
				ctx->mem_low = uop->data - (uop->data & (self->data_mod->block_size - 1));
				uop->addr = uinst->address;
			} else if (uinst->address == ctx->mem_mod_high && ctx->mem_mod_high != 0) {
				/* Extend 'mem_high' up to the end of the memory block
				 * holding the data */
				ctx->mem_high = uop->data | (self->data_mod->block_size - 1);
				uop->addr = uinst->address;
			} else if (!FPGARegCheck(ctx, uop, uinst->address)) {
				if (self->standalone) {
					uop->phy_addr = uinst->address;
					uop->addr = uinst->address;
					mem_read_copy(ctx->mem, uop->addr, 4, &(uop->data));
				} else {
					uop->phy_addr = mmu_translate(
							self->ctx->address_space_index, uinst->address);
					uop->addr = uinst->address;
					mem_read_copy(ctx->mem, uop->addr, 4, &(uop->data));
				}
			}
		}

		/* Trace */
		if (x86_tracing()) {
			char str[MAX_STRING_SIZE];
			char inst_name[MAX_STRING_SIZE];
			char uinst_name[MAX_STRING_SIZE];

			char *str_ptr;

			int str_size;

			str_ptr = str;
			str_size = sizeof str;

			/* Command */
			str_printf(&str_ptr, &str_size, "x86.new_inst id=%lld core=%d",
					uop->id_in_core, core->id);

			/* Speculative mode */
			if (uop->specmode)
				str_printf(&str_ptr, &str_size, " spec=\"t\"");

			/* Macro-instruction name */
			if (!uinst_index) {
				x86_inst_dump_buf(&ctx->inst, inst_name, sizeof inst_name);
				str_printf(&str_ptr, &str_size, " asm=\"%s\"", inst_name);
			}

			/* Rest */
			x86_uinst_dump_buf(uinst, uinst_name, sizeof uinst_name);
			str_printf(&str_ptr, &str_size, " uasm=\"%s\" stg=\"fe\"",
					uinst_name);

			/* Dump */
			x86_trace("%s\n", str);
		}

		/* Select as returned uop */
		if (!ret_uop || (uop->flags & X86_UINST_CTRL))
			ret_uop = uop;

		/* Insert into fetch queue */
		list_add(self->fetch_queue, uop);
		if (fetch_trace_cache)
			self->trace_cache_queue_occ++;

		/* Statistics */
		cpu->num_fetched_uinst++;
		self->num_fetched_uinst++;
		if (fetch_trace_cache)
			self->trace_cache->num_fetched_uinst++;

		/* Next uinst */
		uinst_index++;

	}

	/* Increase fetch queue occupancy if instruction does not come from
	 * trace cache, and return. */
	if (ret_uop && !fetch_trace_cache)
		self->fetchq_occ += ret_uop->mop_size;
	return ret_uop;
}
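
The return-value convention documented above matters to the caller: a single check on the returned uop detects control flow in the whole macro-instruction. The fragment below is a hypothetical caller sketch; only X86ThreadFetchInst and the x86_uop_t fields come from the code above, while the function name and surrounding logic are assumptions.

static int FetchOneMacroInstSketch(X86Thread *self) {
	struct x86_uop_t *uop;

	/* Emulate one macro-instruction; its uops are appended to
	 * self->fetch_queue as a side effect */
	uop = X86ThreadFetchInst(self, 0 /* not fetching from trace cache */);
	if (!uop)
		return 0;

	/* A control uop is returned in preference to the first decoded uop,
	 * so no scan of the fetch queue is needed here */
	if (uop->flags & X86_UINST_CTRL)
		return 1;	/* e.g., stop fetching past a predicted-taken branch */
	return 0;
}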
Example #7
void X86ThreadRecover(X86Thread *self)
{
	X86Cpu *cpu = self->cpu;
	X86Core *core = self->core;
	struct x86_uop_t *uop;

	/* Remove instructions of this thread in fetch queue, uop queue,
	 * instruction queue, store queue, load queue, and event queue. */
	X86ThreadRecoverFetchQueue(self);
	X86ThreadRecoverUopQueue(self);
	X86ThreadRecoverIQ(self);
	X86ThreadRecoverLSQ(self);
	X86ThreadRecoverEventQueue(self);

	/* Remove instructions from ROB, restoring the state of the
	 * physical register file. */
	for (;;)
	{
		/* Get instruction */
		uop = X86ThreadGetROBTail(self);
		if (!uop)
			break;

		/* If we already removed all speculative instructions,
		 * the work is finished */
		assert(uop->thread == self);
		if (!uop->specmode)
			break;
		
		/* Statistics */
		if (uop->trace_cache)
			self->trace_cache->num_squashed_uinst++;
		self->num_squashed_uinst++;
		core->num_squashed_uinst++;
		cpu->num_squashed_uinst++;
		
		/* Undo rename map: a not-yet-completed uop first performs its
		 * register write so the undo below starts from a consistent
		 * register file state */
		if (!uop->completed)
			X86ThreadWriteUop(self, uop);
		X86ThreadUndoUop(self, uop);

		/* Trace */
		if (x86_tracing())
		{
			x86_trace("x86.inst id=%lld core=%d stg=\"sq\"\n",
				uop->id_in_core, core->id);
			x86_cpu_uop_trace_list_add(uop);
		}

		/* Remove entry in ROB */
		X86ThreadRemoveROBTail(self);
	}

	/* Check state of fetch stage and mapped context, if still any */
	if (self->ctx)
	{
		/* If we actually fetched wrong instructions, recover emulator */
		if (X86ContextGetState(self->ctx, X86ContextSpecMode))
			X86ContextRecover(self->ctx);
	
		/* Stall fetch and set eip to fetch. */
		self->fetch_stall_until = MAX(self->fetch_stall_until,
				asTiming(x86_cpu)->cycle + x86_cpu_recover_penalty - 1);
		self->fetch_neip = self->ctx->regs->eip;
	}
}