Example #1
static int X86ThreadDispatch(X86Thread *self, int quantum) {
  X86Core *core = self->core;
  X86Cpu *cpu = self->cpu;

  struct x86_uop_t *uop;
  enum x86_dispatch_stall_t stall;

  while (quantum) {
    /* Check if we can dispatch */
    stall = X86ThreadCanDispatch(self);
    if (stall != x86_dispatch_stall_used) {
      core->dispatch_stall[stall] += quantum;
      break;
    }

    /* Get entry from uop queue */
    uop = list_remove_at(self->uop_queue, 0);
    assert(x86_uop_exists(uop));
    uop->in_uop_queue = 0;

    /* Rename */
    X86ThreadRenameUop(self, uop);

    /* Insert in ROB */
    X86CoreEnqueueInROB(core, uop);
    core->rob_writes++;
    self->rob_writes++;

    /* Non-memory instructions into the IQ */
    if (!(uop->flags & X86_UINST_MEM)) {
      X86ThreadInsertInIQ(self, uop);
      core->iq_writes++;
      self->iq_writes++;
    }

    /* Memory instructions into the LSQ */
    if (uop->flags & X86_UINST_MEM) {
      X86ThreadInsertInLSQ(self, uop);
      core->lsq_writes++;
      self->lsq_writes++;
    }

    /* Statistics */
    core->dispatch_stall[uop->specmode ? x86_dispatch_stall_spec
                                       : x86_dispatch_stall_used]++;
    self->num_dispatched_uinst_array[uop->uinst->opcode]++;
    core->num_dispatched_uinst_array[uop->uinst->opcode]++;
    cpu->num_dispatched_uinst_array[uop->uinst->opcode]++;
    if (uop->trace_cache) self->trace_cache->num_dispatched_uinst++;

    /* Another instruction dispatched, update quantum. */
    quantum--;

    /* Trace */
    x86_trace("x86.inst id=%lld core=%d stg=\"di\"\n", uop->id_in_core,
              core->id);
  }

  return quantum;
}
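
The stall accounting at the top of this loop is worth spelling out: when dispatch blocks, the whole remaining quantum is charged to the stall reason, so the per-reason counters always sum to the total number of dispatch slots offered. Below is a minimal, self-contained sketch of that pattern; the enum values, counters, and the fake can_dispatch() are hypothetical stand-ins, not the Multi2Sim definitions.

#include <stdio.h>

/* Hypothetical stand-ins for the simulator's stall reasons and counters. */
enum stall_t { STALL_USED, STALL_ROB_FULL, STALL_COUNT };

static long long stall_cycles[STALL_COUNT];

/* Fake dispatch check: pretend the ROB fills after three dispatches. */
static enum stall_t can_dispatch(int dispatched) {
  return dispatched < 3 ? STALL_USED : STALL_ROB_FULL;
}

int main(void) {
  int quantum = 4;  /* dispatch slots offered this cycle */
  int dispatched = 0;

  while (quantum) {
    enum stall_t stall = can_dispatch(dispatched);
    if (stall != STALL_USED) {
      /* Charge ALL remaining slots to the stall reason, exactly as the
       * loop above does with 'quantum'. */
      stall_cycles[stall] += quantum;
      break;
    }
    stall_cycles[STALL_USED]++;  /* one slot productively used */
    dispatched++;
    quantum--;
  }

  /* Prints used=3 rob_full=1: the counters sum to the 4 offered slots. */
  printf("used=%lld rob_full=%lld\n",
         stall_cycles[STALL_USED], stall_cycles[STALL_ROB_FULL]);
  return 0;
}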
Example #2
static void x86_cpu_decode_thread(int core, int thread)
{
    struct list_t *fetchq = X86_THREAD.fetch_queue;
    struct list_t *uopq = X86_THREAD.uop_queue;
    struct x86_uop_t *uop;
    int i;

    for (i = 0; i < x86_cpu_decode_width; i++)
    {
        /* Stop if the fetch queue is empty or the uop queue is full */
        if (!list_count(fetchq))
            break;
        if (list_count(uopq) >= x86_uop_queue_size)
            break;
        uop = list_get(fetchq, 0);
        assert(x86_uop_exists(uop));

        /* If instructions come from the trace cache, i.e., are located in
         * the trace cache queue, copy all of them
         * into the uop queue in one single decode slot. */
        if (uop->trace_cache)
        {
            do {
                x86_fetch_queue_remove(core, thread, 0);
                list_add(uopq, uop);
                uop->in_uop_queue = 1;
                uop = list_get(fetchq, 0);
            } while (uop && uop->trace_cache);
            break;
        }

        /* Decode one macro-instruction coming from a block in the instruction
         * cache. If the cache access finished, extract it from the fetch queue. */
        assert(!uop->mop_index);
        if (!mod_in_flight_access(X86_THREAD.inst_mod, uop->fetch_access, uop->fetch_address))
        {
            do {
                /* Move from fetch queue to uop queue */
                x86_fetch_queue_remove(core, thread, 0);
                list_add(uopq, uop);
                uop->in_uop_queue = 1;

                /* Trace */
                x86_trace("x86.inst id=%lld core=%d stg=\"dec\"\n",
                          uop->id_in_core, uop->core);

                /* Next */
                uop = list_get(fetchq, 0);

            } while (uop && uop->mop_index);
        }
    }
}
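
The distinguishing detail in this decoder is group transfer: all uops of one macro-instruction (the leader has mop_index == 0, its continuations do not), or a whole run of trace-cache uops, move from the fetch queue to the uop queue in a single decode slot. Here is a minimal sketch of that queue-to-queue group move, with plain arrays standing in for the simulator's list_t and all names hypothetical:

#include <stdio.h>

/* Hypothetical micro-op: mop_index is 0 for the first uop of a
 * macro-instruction and nonzero for its continuation uops. */
struct uop {
  int id;
  int mop_index;
};

#define QSIZE 16
static struct uop fetchq[QSIZE];
static int fetchq_count;
static struct uop uopq[QSIZE];
static int uopq_count;

/* Pop the front of the fetch queue (a shifting array, for brevity). */
static struct uop fetchq_remove(void) {
  struct uop front = fetchq[0];
  for (int i = 1; i < fetchq_count; i++)
    fetchq[i - 1] = fetchq[i];
  fetchq_count--;
  return front;
}

/* One decode slot: transfer the leading uop plus every continuation uop
 * of the same macro-instruction, mirroring the do/while above. */
static void decode_one_slot(void) {
  if (!fetchq_count || uopq_count >= QSIZE)
    return;
  do {
    uopq[uopq_count++] = fetchq_remove();
  } while (fetchq_count && fetchq[0].mop_index);
}

int main(void) {
  /* Three macro-instructions: {0}, {1,2,3} and {4}. */
  struct uop in[] = {{0, 0}, {1, 0}, {2, 1}, {3, 2}, {4, 0}};
  for (int i = 0; i < 5; i++)
    fetchq[fetchq_count++] = in[i];

  decode_one_slot();  /* slot 1 moves the single-uop mop {0} */
  decode_one_slot();  /* slot 2 moves the whole three-uop mop {1,2,3} */
  for (int i = 0; i < uopq_count; i++)
    printf("uopq[%d] = uop %d\n", i, uopq[i].id);
  return 0;
}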
Example #3
/* Remove the uop at the current position of the linked list representing
 * the load queue of the specified thread. */
void X86ThreadRemoveFromLQ(X86Thread *self) {
  X86Core *core = self->core;

  struct linked_list_t *lq = self->lq;
  struct x86_uop_t *uop;

  uop = linked_list_get(lq);
  assert(x86_uop_exists(uop));
  linked_list_remove(lq);
  uop->in_lq = 0;

  assert(core->lsq_count && self->lsq_count);
  core->lsq_count--;
  self->lsq_count--;
}
Example #4
struct x86_uop_t *X86CoreExtractFromEventQueue(X86Core *self)
{
	struct linked_list_t *event_queue = self->event_queue;
	struct x86_uop_t *uop;

	if (!linked_list_count(event_queue))
		return NULL;

	linked_list_head(event_queue);
	uop = linked_list_get(event_queue);
	assert(x86_uop_exists(uop));
	assert(uop->in_event_queue);
	linked_list_remove(event_queue);
	uop->in_event_queue = 0;
	return uop;
}
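
Taken together with Example #6 below, which inserts uops with when = cycle + lat, this extractor suggests the event queue is kept sorted by completion cycle, so the head is always the next uop to finish. A self-contained sketch of such a latency-ordered queue under that assumption (plain singly linked list, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical event-queue node; 'when' is the completion cycle. */
struct ev {
  long long when;
  int id;
  struct ev *next;
};

static struct ev *event_queue;  /* kept sorted ascending by 'when' */

/* Insert in order so that the head is always the next uop to complete. */
static void event_queue_insert(long long when, int id) {
  struct ev *n = malloc(sizeof *n);
  struct ev **p = &event_queue;
  n->when = when;
  n->id = id;
  while (*p && (*p)->when <= when)
    p = &(*p)->next;
  n->next = *p;
  *p = n;
}

/* Extract the head if it has completed by 'cycle', mirroring the
 * head/get/remove sequence in Example #4; NULL otherwise. */
static struct ev *event_queue_extract(long long cycle) {
  struct ev *head = event_queue;
  if (!head || head->when > cycle)
    return NULL;
  event_queue = head->next;
  return head;
}

int main(void) {
  event_queue_insert(10 + 4, 1);  /* issued at cycle 10, latency 4 */
  event_queue_insert(10 + 1, 2);  /* issued at cycle 10, latency 1 */

  for (long long cycle = 10; cycle <= 14; cycle++) {
    struct ev *e;
    while ((e = event_queue_extract(cycle))) {
      printf("cycle %lld: uop %d completes\n", cycle, e->id);
      free(e);
    }
  }
  return 0;
}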
Example #5
/* Remove the uop at the current position of the prefetch queue */
void X86ThreadRemovePreQ(X86Thread *self) {
  X86Core *core = self->core;

  struct linked_list_t *preq = self->preq;
  struct x86_uop_t *uop;

  uop = linked_list_get(preq);
  assert(x86_uop_exists(uop));
  assert(uop->in_preq);
  linked_list_remove(preq);
  uop->in_preq = 0;

  /* Prefetch-queue entries share the LSQ occupancy counters. */
  assert(core->lsq_count && self->lsq_count);
  core->lsq_count--;
  self->lsq_count--;
}
Example #6
static int X86ThreadIssueIQ(X86Thread *self, int quant)
{
	X86Cpu *cpu = self->cpu;
	X86Core *core = self->core;

	struct linked_list_t *iq = self->iq;
	struct x86_uop_t *uop;
	int lat;

	/* Find instruction to issue */
	linked_list_head(iq);
	while (!linked_list_is_end(iq) && quant)
	{
		/* Get element from IQ */
		uop = linked_list_get(iq);
		assert(x86_uop_exists(uop));
		assert(!(uop->flags & X86_UINST_MEM));
		if (!uop->ready && !X86ThreadIsUopReady(self, uop))
		{
			linked_list_next(iq);
			continue;
		}
		uop->ready = 1;  /* avoid next call to 'X86ThreadIsUopReady' */
		
		/* Run the instruction in its corresponding functional unit.
		 * If the instruction does not require a functional unit, 'X86CoreReserveFunctionalUnit'
		 * returns 1 cycle latency. If there is no functional unit available,
		 * 'X86CoreReserveFunctionalUnit' returns 0. */
		lat = X86CoreReserveFunctionalUnit(core, uop);
		if (!lat)
		{
			linked_list_next(iq);
			continue;
		}
		
		/* Instruction was issued to the corresponding fu.
		 * Remove it from IQ */
		X86ThreadRemoveFromIQ(self);
		
		/* Schedule inst in Event Queue */
		assert(!uop->in_event_queue);
		assert(lat > 0);
		uop->issued = 1;
		uop->issue_when = asTiming(cpu)->cycle;
		uop->when = asTiming(cpu)->cycle + lat;
		X86CoreInsertInEventQueue(core, uop);
		
		/* Statistics */
		core->num_issued_uinst_array[uop->uinst->opcode]++;
		core->iq_reads++;
		core->reg_file_int_reads += uop->ph_int_idep_count;
		core->reg_file_fp_reads += uop->ph_fp_idep_count;
		self->num_issued_uinst_array[uop->uinst->opcode]++;
		self->iq_reads++;
		self->reg_file_int_reads += uop->ph_int_idep_count;
		self->reg_file_fp_reads += uop->ph_fp_idep_count;
		cpu->num_issued_uinst_array[uop->uinst->opcode]++;
		if (uop->trace_cache)
			self->trace_cache->num_issued_uinst++;

		/* One more instruction issued, update quantum. */
		quant--;

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			uop->id_in_core, core->id);
	}
	
	return quant;
}
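
This issue loop depends on the contract stated in its comment: the reservation call returns the operation latency on success and 0 when every unit of the required class is busy. Below is a minimal sketch of a reservation function honoring that contract, tracking a busy-until cycle per unit instance. The unit classes, counts, and latencies are invented for illustration, and the sketch collapses issue and operation latency into one value, which a real model may distinguish.

#include <stdio.h>

/* Hypothetical functional-unit classes with invented counts/latencies. */
enum fu_class { FU_INT_ALU, FU_FP_ADD, FU_CLASS_COUNT };

static const int fu_latency[FU_CLASS_COUNT] = {1, 3};
static const int fu_count[FU_CLASS_COUNT] = {2, 1};

/* Cycle until which each unit instance stays busy. */
static long long fu_busy_until[FU_CLASS_COUNT][4];

/* Return the operation latency if a unit was reserved, 0 if every unit
 * of the class is busy -- the contract the issue loop above expects. */
static int fu_reserve(enum fu_class c, long long cycle) {
  for (int i = 0; i < fu_count[c]; i++) {
    if (fu_busy_until[c][i] <= cycle) {
      fu_busy_until[c][i] = cycle + fu_latency[c];
      return fu_latency[c];
    }
  }
  return 0;  /* structural hazard: the caller skips this uop */
}

int main(void) {
  long long cycle = 100;
  printf("fp-add #1: lat=%d\n", fu_reserve(FU_FP_ADD, cycle));  /* 3 */
  printf("fp-add #2: lat=%d\n", fu_reserve(FU_FP_ADD, cycle));  /* 0 */
  printf("fp-add, 3 cycles later: lat=%d\n",
         fu_reserve(FU_FP_ADD, cycle + 3));                     /* 3 */
  return 0;
}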
Example #7
static int x86_cpu_issue_iq(int core, int thread, int quant)
{
	struct linked_list_t *iq = X86_THREAD.iq;
	struct x86_uop_t *uop;
	int lat;

	/* Find instruction to issue */
	linked_list_head(iq);
	while (!linked_list_is_end(iq) && quant)
	{
		/* Get element from IQ */
		uop = linked_list_get(iq);
		assert(x86_uop_exists(uop));
		assert(!(uop->flags & X86_UINST_MEM));
		if (!uop->ready && !x86_reg_file_ready(uop))
		{
			linked_list_next(iq);
			continue;
		}
		uop->ready = 1;  /* avoid next call to 'x86_reg_file_ready' */
		
		/* Run the instruction in its corresponding functional unit.
		 * If the instruction does not require a functional unit, 'x86_fu_reserve'
		 * returns 1 cycle latency. If there is no functional unit available,
		 * 'x86_fu_reserve' returns 0. */
		lat = x86_fu_reserve(uop);
		if (!lat)
		{
			linked_list_next(iq);
			continue;
		}
		
		/* Instruction was issued to the corresponding fu.
		 * Remove it from IQ */
		x86_iq_remove(core, thread);
		
		/* Schedule inst in Event Queue */
		assert(!uop->in_event_queue);
		assert(lat > 0);
		uop->issued = 1;
		uop->issue_when = x86_cpu->cycle;
		uop->when = x86_cpu->cycle + lat;
		x86_event_queue_insert(X86_CORE.event_queue, uop);
		
		/* Instruction issued */
		X86_CORE.issued[uop->uinst->opcode]++;
		X86_CORE.iq_reads++;
		X86_CORE.reg_file_int_reads += uop->ph_int_idep_count;
		X86_CORE.reg_file_fp_reads += uop->ph_fp_idep_count;
		X86_THREAD.issued[uop->uinst->opcode]++;
		X86_THREAD.iq_reads++;
		X86_THREAD.reg_file_int_reads += uop->ph_int_idep_count;
		X86_THREAD.reg_file_fp_reads += uop->ph_fp_idep_count;
		x86_cpu->issued[uop->uinst->opcode]++;
		quant--;

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			uop->id_in_core, uop->core);
	}
	
	return quant;
}
Example #8
static int x86_cpu_dispatch_thread(int core, int thread, int quant)
{
	struct x86_uop_t *uop;
	enum x86_dispatch_stall_t stall;

	while (quant)
	{
		/* Check if we can dispatch */
		stall = x86_cpu_can_dispatch_thread(core, thread);
		if (stall != x86_dispatch_stall_used)
		{
			X86_CORE.dispatch_stall[stall] += quant;
			break;
		}
	
		/* Get entry from uop queue */
		uop = list_remove_at(X86_THREAD.uop_queue, 0);
		assert(x86_uop_exists(uop));
		uop->in_uop_queue = 0;
		
		/* Rename */
		x86_reg_file_rename(uop);
		
		/* Insert in ROB */
		x86_rob_enqueue(uop);
		X86_CORE.rob_writes++;
		X86_THREAD.rob_writes++;
		
		/* Non-memory instructions into the IQ */
		if (!(uop->flags & X86_UINST_MEM))
		{
			x86_iq_insert(uop);
			X86_CORE.iq_writes++;
			X86_THREAD.iq_writes++;
		}
		
		/* Memory instructions into the LSQ */
		if (uop->flags & X86_UINST_MEM)
		{
			x86_lsq_insert(uop);
			X86_CORE.lsq_writes++;
			X86_THREAD.lsq_writes++;
		}
		
		/* Statistics */
		X86_CORE.dispatch_stall[uop->specmode ? x86_dispatch_stall_spec : x86_dispatch_stall_used]++;
		X86_THREAD.num_dispatched_uinst_array[uop->uinst->opcode]++;
		X86_CORE.num_dispatched_uinst_array[uop->uinst->opcode]++;
		x86_cpu->num_dispatched_uinst_array[uop->uinst->opcode]++;
		if (uop->trace_cache)
			X86_THREAD.trace_cache->num_dispatched_uinst++;
		
		/* Another instruction dispatched, update quantum. */
		quant--;

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"di\"\n",
			uop->id_in_core, uop->core);

	}

	return quant;
}