/* In-order instruction commit. Individual uops cannot commit until it is
   guaranteed that the entire Mop's worth of uops will commit. */
void core_commit_atom_t::step(void)
{
  struct core_knobs_t * knobs = core->knobs;
  /* Per-cycle commit-loop bookkeeping; consumed by the main commit loop,
     which lies beyond this excerpt. */
  int commit_count = 0;
  enum commit_stall_t stall_reason = CSTALL_NONE;
  int branches_committed = 0;

  /* This is just a deadlock watchdog. If something got messed up in the
     pipeline and no forward progress is being made, this code will eventually
     detect it and flush the pipeline in an attempt to un-wedge the processor.
     If the processor then deadlocks again without having first made any more
     forward progress, we give up and kill the simulator. */
  if((core->sim_cycle - core->exec->last_completed) > deadlock_threshold)
  {
    /* The committed-instruction count is unchanged since the last watchdog
       firing: the earlier flush did not help, so this is a definite deadlock
       and we terminate the simulation. */
    if(core->exec->last_completed_count == core->stat.eio_commit_insn)
    {
      char buf[256];
      fflush(stdout);
      snprintf(buf,sizeof(buf),"At cycle %lld, core[%d] has not completed a uop in %d cycles... definite deadlock",(long long)core->sim_cycle,core->current_thread->id,deadlock_threshold);
#ifdef ZTRACE
      ztrace_print("DEADLOCK DETECTED: TERMINATING SIMULATION");
#endif
      zesto_fatal(buf,(void)0);
    }
    else
    {
      /* First firing (or progress was made since the last flush): attempt
         recovery by flushing the whole pipeline instead of aborting. */
      struct Mop_t * Mop = ROB[ROB_head]->Mop;
#ifdef ZDEBUG
      /* NOTE(review): this format string has two %d conversions but three
         arguments, and prints sim_cycle with %d even though the snprintf
         above casts it to (long long) for %lld. Debug-build only, but the
         output is likely garbled -- worth fixing. */
      fprintf(stdout,"\n[%d][Core%d]COMMIT deadlock ",core->sim_cycle,core->current_thread->id,core->sim_cycle);
      md_print_insn(Mop,stdout);
      fprintf(stdout," MOP=%d last_Mop_seq= %d \n",Mop->oracle.seq,core->exec->last_Mop_seq);
#endif
      fflush(stdout);
      warn("At cycle %lld, core[%d] has not completed a uop in %d cycles... possible deadlock, flushing pipeline",(long long)core->sim_cycle,core->current_thread->id,deadlock_threshold);
#ifdef ZTRACE
      ztrace_print("DEADLOCK DETECTED: FLUSHING PIPELINE");
#endif
      /* flush the entire pipeline, correct path or not... every stage is
         recovered, back to front, after the oracle discards all in-flight
         state. */
      core->oracle->complete_flush();
      /*core->commit->*/recover();  /* this stage's own recover(); the commented-out qualifier documents whose method it is */
      core->exec->recover();
      core->alloc->recover();
      core->decode->recover();
      core->fetch->recover(core->current_thread->regs.regs_NPC);
      ZESTO_STAT(stat_add_sample(core->stat.commit_stall, (int)CSTALL_EMPTY);)
      ZESTO_STAT(core->stat.commit_deadlock_flushes++;)
      core->exec->last_completed = core->sim_cycle; /* so we don't do this again next cycle */
      /* Snapshot the commit count at flush time; if it is still unchanged the
         next time the watchdog fires, the branch above declares a definite
         deadlock. */
      core->exec->last_completed_count = core->stat.eio_commit_insn;
    }
/* In-order instruction commit. Individual uops cannot commit until it is
   guaranteed that the entire Mop's worth of uops will commit. */
void core_commit_IO_DPM_t::IO_step(void)
{
  struct core_knobs_t * knobs = core->knobs;
  int commit_count = 0;          /* uops committed this cycle, capped by commit width */
  /* NOTE(review): unlike the other commit models' step() methods in this
     file, stall_reason is not declared locally here -- presumably a class
     member; verify against the class declaration. */
  stall_reason = CSTALL_NONE;
  int branches_committed = 0;

  /* This is just a deadlock watchdog. If something got messed up in the
     pipeline and no forward progress is being made, this code will eventually
     detect it; a global watchdog then checks whether any core is still making
     progress and decides what to do if none is. */
  if(core->current_thread->active && ((core->sim_cycle - core->exec->last_completed) > deadlock_threshold))
  {
    deadlocked = true;  /* just flag it and bail; the global watchdog reacts */
#ifdef ZTRACE
    ztrace_print(core->id, "Possible deadlock detected.");
#endif
    return;
  }

  /* deallocate at most one store from the (senior) STQ per cycle */
  core->exec->STQ_deallocate_senior();

  /* MAIN COMMIT LOOP */
  for(commit_count=0;commit_count<knobs->commit.width;commit_count++)
  {
    if(ROB_num <= 0) /* nothing to commit */
    {
      stall_reason = CSTALL_EMPTY;
      break;
    }

    struct Mop_t * Mop = ROB[ROB_head]->Mop;

    /* For branches, don't commit until the corresponding jeclear (if any) has
       been processed by the front-end. */
    if(Mop->commit.jeclear_in_flight)
    {
      stall_reason = CSTALL_JECLEAR_INFLIGHT;
      break;
    }

    /* Enforce the per-cycle branch-commit limit, if one is configured. */
    if(Mop->decode.is_ctrl && knobs->commit.branch_limit && (branches_committed >= knobs->commit.branch_limit))
    {
      stall_reason = CSTALL_MAX_BRANCHES;
      break;
    }

    /* A wrong-path Mop must never reach the head of the ROB. */
    if(Mop->oracle.spec_mode)
      // zesto_fatal("oldest instruction in processor is on wrong-path",(void)0);
      zesto_assert(false, (void)0);

    /* Are all uops in the Mop completed? */
    if(Mop->commit.complete_index != -1) /* still some outstanding insts */
    {
      struct uop_t * uop = &Mop->uop[Mop->commit.complete_index];
      /* Walk forward through the Mop's flow while uops are done executing.
         STA/STD uops are allowed through the condition regardless of
         when_completed (stores get handled at commit time, below). */
      while(uop->timing.when_completed <= core->sim_cycle || uop->decode.is_sta || uop->decode.is_std)
      {
        /* stores get added to the STQ at commit */
        if(uop->decode.is_sta)
        {
          if(!core->exec->exec_fused_ST(uop))
          {
            /* STQ full: record the stall; the check after this while-loop
               breaks out of the main commit loop as well. */
            stall_reason = CSTALL_STQ;
            break;
          }
        }
        zesto_assert(uop->timing.when_completed <= core->sim_cycle, (void)0);
        /* Advance by 3 when the uop carries an immediate (the immediate
           occupies flow slots), otherwise by 1. */
        Mop->commit.complete_index += uop->decode.has_imm ? 3 : 1;
        if(Mop->commit.complete_index >= Mop->decode.flow_length)
        {
          Mop->commit.complete_index = -1; /* Mark this Mop as all done */
#ifdef ZTRACE
          ztrace_print(Mop,"c|complete|all uops completed execution");
#endif
          /* Train the branch predictor now that the Mop's outcome is final,
             then return the predictor's per-Mop state to its cache. */
          if(Mop->fetch.bpred_update)
          {
            core->fetch->bpred->update(Mop->fetch.bpred_update, Mop->decode.opflags,
                Mop->fetch.PC, Mop->fetch.ftPC, Mop->decode.targetPC, Mop->oracle.NextPC, Mop->oracle.taken_branch);
            core->fetch->bpred->return_state_cache(Mop->fetch.bpred_update);
            Mop->fetch.bpred_update = NULL;
          }
          break;
        }
        uop = &Mop->uop[Mop->commit.complete_index];
      }
    }

    /* Propagate a stall raised inside the completion walk (e.g. STQ full)
       out of the main commit loop. */
    if(stall_reason != CSTALL_NONE)
      break;

    if(Mop->commit.complete_index == -1) /* commit the uops if the Mop is done */
    {
      struct uop_t * uop = ROB[ROB_head];
      zesto_assert(uop->timing.when_completed <= core->sim_cycle,(void)0);
      zesto_assert(uop->alloc.ROB_index == ROB_head,(void)0);
      zesto_assert(uop == &Mop->uop[Mop->commit.commit_index],(void)0);

      /* Record when commit of this Mop began (first uop only). */
      if(uop->decode.BOM && (uop->Mop->timing.when_commit_started == TICK_T_MAX))
        uop->Mop->timing.when_commit_started = core->sim_cycle;

      //SK - load deallocation moved to end of payload pipe
      if(uop->decode.is_sta)
        core->exec->STQ_deallocate_sta();

      if(uop->decode.is_std) /* we alloc on STA, dealloc on STD */
      {
        if(!core->exec->STQ_deallocate_std(uop))
        {
          stall_reason = CSTALL_STQ;
          break;
        }
      }

      /* any remaining transactions in-flight (only for loads) should now be
         ignored - such load requests may exist, for example as a result of a
         load that completes early due to a hit in the STQ while the cache
         request is still making its way through the memory hierarchy. */
      if(uop->decode.is_load)
        uop->exec.action_id = core->new_action_id();

#ifdef ZTRACE
      ztrace_print(uop,"c|commit|uop committed");
#endif

      if(uop->decode.EOM)
        uop->Mop->timing.when_commit_finished = core->sim_cycle;

      /* remove uop from ROB */
      if((!uop->decode.in_fusion) || (uop->decode.fusion_next == NULL)) /* fusion dealloc's on fusion-tail */
      {
        ROB[ROB_head] = NULL;
        ROB_num --;
        ROB_eff_num --;
        ROB_head = modinc(ROB_head,knobs->commit.ROB_size); //(ROB_head+1) % knobs->commit.ROB_size;
        if(uop->decode.in_fusion)
        {
          ZESTO_STAT(core->stat.commit_fusions++;)
        }
      }
      else /* fusion body doesn't count toward commit width */
      {
/* In-order instruction commit. Individual uops cannot commit until it is
   guaranteed that the entire Mop's worth of uops will commit. */
void core_commit_STM_t::step(void)
{
  struct core_knobs_t * knobs = core->knobs;
  int commit_count = 0;          /* uops committed this cycle, capped by commit width */
  enum commit_stall_t stall_reason = CSTALL_NONE;

  /* This is just a deadlock watchdog. If something got messed up in the
     pipeline and no forward progress is being made, this code will eventually
     detect it; a global watchdog then checks whether any core is still making
     progress and decides what to do if none is. */
  if(core->active && ((core->sim_cycle - core->exec->last_completed) > deadlock_threshold))
  {
    deadlocked = true;  /* just flag it and bail; the global watchdog reacts */
#ifdef ZTRACE
    ztrace_print(core->id, "Possible deadlock detected.");
#endif
    return;
  }

  /* MAIN COMMIT LOOP */
  for(commit_count=0;commit_count<knobs->commit.width;commit_count++)
  {
    if(ROB_num <= 0) /* nothing to commit */
    {
      /* Only count this as an "empty" stall if we committed nothing at all
         this cycle. */
      stall_reason = commit_count?CSTALL_NONE:CSTALL_EMPTY;
      break;
    }

    struct Mop_t * Mop = ROB[ROB_head]->Mop;

    /* A wrong-path Mop must never reach the head of the ROB. */
    if(Mop->oracle.spec_mode)
      fatal("oldest instruction in processor is on wrong-path");

    /* Are all uops in the Mop completed? */
    if(Mop->commit.complete_index != -1) /* still some outstanding insts */
    {
      /* Walk forward through the Mop's flow as long as uops have finished
         executing by this cycle. */
      while(Mop->uop[Mop->commit.complete_index].timing.when_completed <= core->sim_cycle)
      {
        struct uop_t * uop = &Mop->uop[Mop->commit.complete_index];
        /* Advance by 3 when the uop carries an immediate (the immediate
           occupies flow slots), otherwise by 1. */
        Mop->commit.complete_index += uop->decode.has_imm ? 3 : 1;
        if(Mop->commit.complete_index >= (int) Mop->decode.flow_length)
        {
          Mop->commit.complete_index = -1; /* Mark this Mop as all done */
          /* Train the branch predictor now that the Mop's outcome is final,
             then return the predictor's per-Mop state to its cache. */
          if(Mop->fetch.bpred_update)
          {
            core->fetch->bpred->update(Mop->fetch.bpred_update, Mop->decode.opflags,
                Mop->fetch.PC, Mop->fetch.ftPC, Mop->decode.targetPC, Mop->oracle.NextPC, Mop->oracle.taken_branch);
            core->fetch->bpred->return_state_cache(Mop->fetch.bpred_update);
            Mop->fetch.bpred_update = NULL;
          }
          break;
        }
      }
    }

    if(Mop->commit.complete_index == -1) /* commit the uops if the Mop is done */
    {
      struct uop_t * uop = ROB[ROB_head];
      zesto_assert(uop->timing.when_completed <= core->sim_cycle,(void)0);
      zesto_assert(uop->alloc.ROB_index == ROB_head,(void)0);
      zesto_assert(uop == &Mop->uop[Mop->commit.commit_index],(void)0);

      /* Record when commit of this Mop began (first uop only). */
      if(uop->decode.BOM && (uop->Mop->timing.when_commit_started == TICK_T_MAX))
        uop->Mop->timing.when_commit_started = core->sim_cycle;

      if(uop->decode.is_load)
        core->exec->LDQ_deallocate(uop);
      else if(uop->decode.is_sta)
        core->exec->STQ_deallocate_sta();
      else if(uop->decode.is_std) /* we alloc on STA, dealloc on STD */
      {
        /* STQ couldn't retire the store this cycle; stop committing. */
        if(!core->exec->STQ_deallocate_std(uop))
          break;
      }

      /* any remaining transactions in-flight (only for loads) should now be
         ignored - such load requests may exist, for example as a result of a
         load that completes early due to a hit in the STQ while the cache
         request is still making its way through the memory hierarchy. */
      if(uop->decode.is_load)
        uop->exec.action_id = core->new_action_id();

      if(uop->decode.EOM)
        uop->Mop->timing.when_commit_finished = core->sim_cycle;

      /* remove uop from ROB */
      ROB[ROB_head] = NULL;
      ROB_num --;
      ROB_head = modinc(ROB_head,knobs->commit.ROB_size); //(ROB_head+1) % knobs->commit.ROB_size;
      uop->alloc.ROB_index = -1;

      /* this cleans up idep/odep ptrs, register mappings, and commit stores
         to the real (non-spec) memory system */
      core->oracle->commit_uop(uop);

      /* mark uop as committed in Mop */
      Mop->commit.commit_index += uop->decode.has_imm ? 3 : 1;
      if(Mop->commit.commit_index >= (int) Mop->decode.flow_length)
      {
        Mop->commit.commit_index = -1; /* The entire Mop has been committed */

        /* Update stats -- only count an architectural instruction if the
           Mop's last uop is an end-of-macro marker. */
        if(Mop->uop[Mop->decode.last_uop_index].decode.EOM)
        {
          ZESTO_STAT(core->stat.commit_insn++;)
        }
        ZESTO_STAT(core->stat.commit_uops += Mop->stat.num_uops;)
        ZESTO_STAT(core->stat.commit_refs += Mop->stat.num_refs;)