static int X86ThreadIssueSQ(X86Thread *self, int quantum)
{
	X86Cpu *cpu = self->cpu;
	X86Core *core = self->core;

	struct x86_uop_t *store;
	struct linked_list_t *sq = self->sq;
	struct mod_client_info_t *client_info;

	/* Process SQ */
	linked_list_head(sq);
	while (!linked_list_is_end(sq) && quantum)
	{
		/* Get store */
		store = linked_list_get(sq);
		assert(store->uinst->opcode == x86_uinst_store);

		/* Only committed stores issue */
		if (store->in_rob)
			break;

		/* Check that memory system entry is ready */
		if (!mod_can_access(self->data_mod, store->phy_addr))
			break;

		/* Remove store from store queue */
		X86ThreadRemoveFromSQ(self);

		/* Create and fill the mod_client_info_t object */
		client_info = mod_client_info_create(self->data_mod);
		client_info->prefetcher_eip = store->eip;

		/* Issue store */
		mod_access(self->data_mod, mod_access_store,
			store->phy_addr, NULL, core->event_queue, store,
			client_info);

		/* The cache system will place the store at the head of the
		 * event queue when it is ready. For now, mark "in_event_queue"
		 * to prevent the uop from being freed. */
		store->in_event_queue = 1;
		store->issued = 1;
		store->issue_when = asTiming(cpu)->cycle;

		/* Statistics */
		core->num_issued_uinst_array[store->uinst->opcode]++;
		core->lsq_reads++;
		core->reg_file_int_reads += store->ph_int_idep_count;
		core->reg_file_fp_reads += store->ph_fp_idep_count;
		self->num_issued_uinst_array[store->uinst->opcode]++;
		self->lsq_reads++;
		self->reg_file_int_reads += store->ph_int_idep_count;
		self->reg_file_fp_reads += store->ph_fp_idep_count;
		cpu->num_issued_uinst_array[store->uinst->opcode]++;
		if (store->trace_cache)
			self->trace_cache->num_issued_uinst++;

		/* One more instruction issued, update quantum. */
		quantum--;

		/* MMU statistics. Use the object-style MMUAccessPage call, as
		 * in X86ThreadIssueLQ below; the legacy mmu_access_page()
		 * interface does not belong to this version of the API. */
		MMUAccessPage(cpu->mmu, store->phy_addr, mmu_access_write);
	}
	return quantum;
}
static int X86ThreadIssueLQ(X86Thread *self, int quant)
{
	X86Core *core = self->core;
	X86Cpu *cpu = self->cpu;

	struct linked_list_t *lq = self->lq;
	struct x86_uop_t *load;
	struct mod_client_info_t *client_info;

	/* Process LQ */
	linked_list_head(lq);
	while (!linked_list_is_end(lq) && quant)
	{
		/* Get element from load queue. If it is not ready, go to
		 * the next one. */
		load = linked_list_get(lq);
		if (!load->ready && !X86ThreadIsUopReady(self, load))
		{
			linked_list_next(lq);
			continue;
		}
		load->ready = 1;

		/* Check that memory system is accessible */
		if (!mod_can_access(self->data_mod, load->phy_addr))
		{
			linked_list_next(lq);
			continue;
		}

		/* Remove from load queue */
		assert(load->uinst->opcode == x86_uinst_load);
		X86ThreadRemoveFromLQ(self);

		/* Create and fill the mod_client_info_t object */
		client_info = mod_client_info_create(self->data_mod);
		client_info->prefetcher_eip = load->eip;

		/* Access memory system */
		mod_access(self->data_mod, mod_access_load,
			load->phy_addr, NULL, core->event_queue, load,
			client_info);

		/* The cache system will place the load at the head of the
		 * event queue when it is ready. For now, mark "in_event_queue"
		 * to prevent the uop from being freed. */
		load->in_event_queue = 1;
		load->issued = 1;
		load->issue_when = asTiming(cpu)->cycle;

		/* Statistics */
		core->num_issued_uinst_array[load->uinst->opcode]++;
		core->lsq_reads++;
		core->reg_file_int_reads += load->ph_int_idep_count;
		core->reg_file_fp_reads += load->ph_fp_idep_count;
		self->num_issued_uinst_array[load->uinst->opcode]++;
		self->lsq_reads++;
		self->reg_file_int_reads += load->ph_int_idep_count;
		self->reg_file_fp_reads += load->ph_fp_idep_count;
		cpu->num_issued_uinst_array[load->uinst->opcode]++;
		if (load->trace_cache)
			self->trace_cache->num_issued_uinst++;

		/* One more instruction issued, update quantum. */
		quant--;

		/* MMU statistics */
		MMUAccessPage(cpu->mmu, load->phy_addr, mmu_access_read);

		/* Trace */
		x86_trace("x86.inst id=%lld core=%d stg=\"i\"\n",
			load->id_in_core, core->id);
	}
	return quant;
}
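
/* A minimal sketch of how the two functions above could be driven from the
 * issue stage. The driver name X86ThreadIssueLSQ and the load-before-store
 * order are assumptions, not taken from the code above. The key point is
 * that both functions consume the same per-thread issue quantum, so a
 * driver can simply chain them, passing the remaining quantum along: once
 * the quantum reaches zero, each subsequent call's while-loop condition
 * fails immediately and the call returns without issuing anything. */
static int X86ThreadIssueLSQ(X86Thread *self, int quant)
{
	/* Issue ready loads first, then committed stores; each call
	 * decrements the shared quantum and returns what is left over. */
	quant = X86ThreadIssueLQ(self, quant);
	quant = X86ThreadIssueSQ(self, quant);
	return quant;
}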