/* Remove all speculative uops from the load and store queues of the
 * given thread. Non-speculative entries are left in place. */
void X86ThreadRecoverLSQ(X86Thread *self) {
  struct linked_list_t *queue;
  struct x86_uop_t *uop;

  /* Sweep the load queue. Note: the loop relies on the remove call
   * leaving the list positioned on the successor element, so no explicit
   * advance happens after a removal. */
  queue = self->lq;
  linked_list_head(queue);
  while (!linked_list_is_end(queue)) {
    uop = linked_list_get(queue);
    if (!uop->specmode) {
      linked_list_next(queue);
      continue;
    }
    X86ThreadRemoveFromLQ(self);
    x86_uop_free_if_not_queued(uop);
  }

  /* Same sweep over the store queue. */
  queue = self->sq;
  linked_list_head(queue);
  while (!linked_list_is_end(queue)) {
    uop = linked_list_get(queue);
    if (!uop->specmode) {
      linked_list_next(queue);
      continue;
    }
    X86ThreadRemoveFromSQ(self);
    x86_uop_free_if_not_queued(uop);
  }
}
/* Example #2 */
/* Squash speculative uops from the tail of the given thread's uop queue.
 * Stops at the first non-speculative uop found from the back. */
void x86_uop_queue_recover(int core, int thread)
{
	struct list_t *uop_queue = X86_THREAD.uop_queue;
	struct x86_uop_t *uop;
	int tail;

	for (;;)
	{
		/* Inspect the youngest (last) uop in the queue. */
		tail = list_count(uop_queue) - 1;
		if (tail < 0)
			break;
		uop = list_get(uop_queue, tail);
		assert(uop->thread == thread);
		if (!uop->specmode)
			break;

		/* Detach it from the queue. */
		list_remove_at(uop_queue, tail);
		uop->in_uop_queue = 0;

		/* Trace */
		if (x86_tracing())
		{
			x86_trace("x86.inst id=%lld core=%d stg=\"sq\"\n",
				uop->id_in_core, uop->core);
			x86_cpu_uop_trace_list_add(uop);
		}

		/* Free */
		x86_uop_free_if_not_queued(uop);
	}
}
/* Example #3 */
/* Squash speculative uops from the tail of the thread's fetch queue,
 * stopping at the first non-speculative uop. */
void X86ThreadRecoverFetchQueue(X86Thread *self)
{
    X86Core *core = self->core;
    X86Cpu *cpu = self->cpu;
    struct list_t *fetchq = self->fetch_queue;
    struct x86_uop_t *uop;
    int tail;

    for (;;)
    {
        /* Look at the youngest (last) uop in the queue. */
        tail = list_count(fetchq) - 1;
        if (tail < 0)
            break;
        uop = list_get(fetchq, tail);
        assert(uop->thread == self);
        if (!uop->specmode)
            break;

        /* Detach it from the fetch queue. */
        uop = X86ThreadRemoveFromFetchQueue(self, tail);

        /* Trace */
        if (x86_tracing())
        {
            x86_trace("x86.inst id=%lld core=%d stg=\"sq\"\n",
                      uop->id_in_core, core->id);
            X86CpuAddToTraceList(cpu, uop);
        }

        /* Free */
        x86_uop_free_if_not_queued(uop);
    }
}
/* Drain every pending uop from the core's event queue, then release the
 * queue itself. */
void X86CoreFreeEventQueue(X86Core *self)
{
	struct x86_uop_t *uop;

	while (linked_list_count(self->event_queue) > 0)
	{
		uop = X86CoreExtractFromEventQueue(self);
		x86_uop_free_if_not_queued(uop);
	}
	linked_list_free(self->event_queue);
}
/* Example #5 */
/* Empty the thread's fetch queue, freeing every uop that is not still
 * held by another queue, and destroy the list itself. */
void X86ThreadFreeFetchQueue(X86Thread *self)
{
    struct list_t *fetchq = self->fetch_queue;
    struct x86_uop_t *uop;

    /* Pop entries from the front until nothing is left. */
    while (list_count(fetchq) > 0)
    {
        uop = list_remove_at(fetchq, 0);
        uop->in_fetch_queue = 0;
        x86_uop_free_if_not_queued(uop);
    }
    list_free(fetchq);
}
/* Empty and destroy the thread's load, store, and prefetch queues. Each
 * uop's membership flag is cleared before the uop is freed (the free call
 * is a no-op if the uop is still held by another queue). */
void X86ThreadFreeLSQ(X86Thread *self) {
  struct linked_list_t *queue;
  struct x86_uop_t *uop;

  /* Load queue */
  queue = self->lq;
  linked_list_head(queue);
  while (linked_list_count(queue)) {
    uop = linked_list_get(queue);
    uop->in_lq = 0;
    linked_list_remove(queue);
    x86_uop_free_if_not_queued(uop);
  }
  linked_list_free(queue);

  /* Store queue */
  queue = self->sq;
  linked_list_head(queue);
  while (linked_list_count(queue)) {
    uop = linked_list_get(queue);
    uop->in_sq = 0;
    linked_list_remove(queue);
    x86_uop_free_if_not_queued(uop);
  }
  linked_list_free(queue);

  /* Prefetch queue */
  queue = self->preq;
  linked_list_head(queue);
  while (linked_list_count(queue)) {
    uop = linked_list_get(queue);
    uop->in_preq = 0;
    linked_list_remove(queue);
    x86_uop_free_if_not_queued(uop);
  }
  linked_list_free(queue);
}
/* Example #7 */
/* Final cleanup of the uop queues: for every hardware thread in every
 * core, drain the thread's uop queue and destroy the list. */
void x86_uop_queue_done()
{
	int core, thread;
	struct list_t *uop_queue;
	struct x86_uop_t *uop;

	X86_CORE_FOR_EACH X86_THREAD_FOR_EACH
	{
		uop_queue = X86_THREAD.uop_queue;

		/* Pop entries from the front until the queue is empty. */
		while (list_count(uop_queue) > 0)
		{
			uop = list_remove_at(uop_queue, 0);
			uop->in_uop_queue = 0;
			x86_uop_free_if_not_queued(uop);
		}
		list_free(uop_queue);
	}
}
/* Remove from the core's shared event queue all speculative uops that
 * belong to this thread; entries owned by other threads are kept. */
void X86ThreadRecoverEventQueue(X86Thread *self)
{
	X86Core *core = self->core;
	struct linked_list_t *event_queue = core->event_queue;
	struct x86_uop_t *uop;

	linked_list_head(event_queue);
	while (!linked_list_is_end(event_queue))
	{
		uop = linked_list_get(event_queue);

		/* Keep entries from other threads and non-speculative ones. */
		if (uop->thread != self || !uop->specmode)
		{
			linked_list_next(event_queue);
			continue;
		}

		/* Unlink and free; removal advances the list position. */
		linked_list_remove(event_queue);
		uop->in_event_queue = 0;
		x86_uop_free_if_not_queued(uop);
	}
}
/* Example #9 */
/* Flush the global uop trace list: emit an end-of-instruction trace
 * record for every entry, then free the uop if no queue still holds it. */
void x86_cpu_uop_trace_list_empty(void)
{
	struct linked_list_t *uop_trace_list = x86_cpu->uop_trace_list;
	struct x86_uop_t *uop;

	while (uop_trace_list->count > 0)
	{
		/* Detach the head element. */
		linked_list_head(uop_trace_list);
		uop = linked_list_get(uop_trace_list);
		linked_list_remove(uop_trace_list);
		assert(uop->in_uop_trace_list);

		/* Trace */
		x86_trace("x86.end_inst id=%lld core=%d\n",
			uop->id_in_core, uop->core);

		/* Clear membership flag and free. */
		uop->in_uop_trace_list = 0;
		x86_uop_free_if_not_queued(uop);
	}
}
/* Example #10 */
/* Flush the CPU's uop trace list: emit an end-of-instruction trace record
 * for every entry, then free the uop if no queue still holds it. */
void X86CpuEmptyTraceList(X86Cpu *self) {
  struct linked_list_t *uop_trace_list = self->uop_trace_list;
  struct x86_uop_t *uop;
  X86Thread *thread;
  X86Core *core;

  while (uop_trace_list->count > 0) {
    /* Detach the head element and resolve its owning core. */
    linked_list_head(uop_trace_list);
    uop = linked_list_get(uop_trace_list);
    thread = uop->thread;
    core = thread->core;
    linked_list_remove(uop_trace_list);
    assert(uop->in_uop_trace_list);

    /* Trace */
    x86_trace("x86.end_inst id=%lld core=%d\n", uop->id_in_core, core->id);

    /* Clear membership flag and free. */
    uop->in_uop_trace_list = 0;
    x86_uop_free_if_not_queued(uop);
  }
}
/* Example #11 */
/* Issue software-prefetch uops from the thread's prefetch queue (preq),
 * consuming at most 'quantum' issue slots. Returns the remaining quantum.
 * Entries that are not ready or whose memory module cannot be accessed
 * yet are skipped and retried on a later invocation; redundant prefetches
 * are dropped without touching memory. */
static int X86ThreadIssuePreQ(X86Thread *self, int quantum)
{
	X86Core *core = self->core;
	X86Cpu *cpu = self->cpu;

	struct linked_list_t *preq = self->preq;
	struct x86_uop_t *prefetch;

	/* Process preq */
	linked_list_head(preq);
	while (!linked_list_is_end(preq) && quantum)
	{
		/* Get element from prefetch queue. If it is not ready, go to the next one */
		prefetch = linked_list_get(preq);
		if (!prefetch->ready && !X86ThreadIsUopReady(self, prefetch))
		{
			linked_list_next(preq);
			continue;
		}

		/* 
		 * Make sure its not been prefetched recently. This is just to avoid unnecessary
		 * memory traffic. Even though the cache will realise a "hit" on redundant 
		 * prefetches, its still helpful to avoid going to the memory (cache). 
		 */
		if (prefetch_history_is_redundant(core->prefetch_history,
							   self->data_mod, prefetch->phy_addr))
		{
			/* remove from queue. do not prefetch. */
			assert(prefetch->uinst->opcode == x86_uinst_prefetch);
			X86ThreadRemovePreQ(self);
			/* Marked completed so the drop does not stall retirement;
			 * the remove call advances the list position, hence no
			 * linked_list_next before 'continue'. */
			prefetch->completed = 1;
			x86_uop_free_if_not_queued(prefetch);
			continue;
		}

		/* Cache readiness so X86ThreadIsUopReady is not re-evaluated on
		 * later passes over this entry. */
		prefetch->ready = 1;

		/* Check that memory system is accessible */
		if (!mod_can_access(self->data_mod, prefetch->phy_addr))
		{
			linked_list_next(preq);
			continue;
		}

		/* Remove from prefetch queue */
		assert(prefetch->uinst->opcode == x86_uinst_prefetch);
		X86ThreadRemovePreQ(self);

		/* Access memory system */
		mod_access(self->data_mod, mod_access_prefetch,
			prefetch->phy_addr, NULL, core->event_queue, prefetch, NULL);

		/* Record prefetched address */
		prefetch_history_record(core->prefetch_history, prefetch->phy_addr);

		/* The cache system will place the prefetch at the head of the
		 * event queue when it is ready. For now, mark "in_event_queue" to
		 * prevent the uop from being freed. */
		prefetch->in_event_queue = 1;
		prefetch->issued = 1;
		prefetch->issue_when = asTiming(cpu)->cycle;
		
		/* Statistics: per-core, per-thread, and CPU-wide counters are all
		 * updated for the issued prefetch. */
		core->num_issued_uinst_array[prefetch->uinst->opcode]++;
		core->lsq_reads++;
		core->reg_file_int_reads += prefetch->ph_int_idep_count;
		core->reg_file_fp_reads += prefetch->ph_fp_idep_count;
		self->num_issued_uinst_array[prefetch->uinst->opcode]++;
		self->lsq_reads++;
		self->reg_file_int_reads += prefetch->ph_int_idep_count;
		self->reg_file_fp_reads += prefetch->ph_fp_idep_count;
		cpu->num_issued_uinst_array[prefetch->uinst->opcode]++;
		if (prefetch->trace_cache)
			self->trace_cache->num_issued_uinst++;

		/* One more instruction issued, update quantum. */
		quantum--;
		
		/* MMU statistics */
		MMUAccessPage(cpu->mmu, prefetch->phy_addr, mmu_access_read);

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			prefetch->id_in_core, core->id);
	}
	
	return quantum;
}