void FE_stage()
{
  /* Fetch stage: fetch one op per cycle from the trace, classify it for the
     instruction-mix statistics, and hand it to the ID stage via FE_latch */
	
	
	if(FE_latch->stage_stall==false)	// fetch only while the pipeline is not stalled
	{
		Op* new_op = get_free_op();	//get a placeholder op out of the pool
		if(get_op(new_op))		//copy op from trace into the placeholder op
		{
			/* classify the op for the instruction-mix counters; the
			   ranges below follow the simulator's opcode enumeration */
			if((new_op->opcode>=OP_FMEM)&&(new_op->opcode<=OP_FCMO)){	// floating-point ops
				float_count++;
			}
			if((new_op->opcode>=OP_IADD)&&(new_op->opcode<=OP_MM)){		// integer ALU ops
				integer_count++;
			}
			if((new_op->opcode>=OP_CF)&&(new_op->opcode<=OP_LDA)){		// control flow counts as integer
				if(new_op->opcode==OP_CF) branch_count++;
				integer_count++;
			}
			if((new_op->opcode>=OP_LDA)&&(new_op->opcode<=OP_LD)){		// loads
				load_count++;
			}
			if((new_op->opcode==OP_MM)||(new_op->opcode==OP_IMUL)){		// multiplies
				multiple_count++;
			}
			if(new_op->opcode==OP_ST){					// stores
				store_count++;
			}
			if((new_op->opcode == OP_CF) && new_op->cf_type==CF_CBR && use_bpred==true)
			{
				/* conditional branch: consult the predictor, then train
				   it with the actual outcome from the trace */
				uint64_t pc = new_op->instruction_addr;
				bool predicted_dir = bpred_access(branchpred, pc);
				bpred_update(branchpred, pc, predicted_dir, new_op->actually_taken);
				if(new_op->actually_taken != predicted_dir)
				{
					new_op->mispredicted_branch=true;
				}
				bpred_mispred_count=branchpred->mispred;
				bpred_okpred_count=branchpred->okpred;
			}

			FE_latch->op=new_op;
			FE_latch->op_valid=true;
		}
		else
			free_op(new_op);
	}
	
  //   next_pc = pc + op->inst_size;  // you need this code for building a branch predictor 

}
Example #2
void FE_stage()
{

	if(stop_fetching)
		return;

	if(have_to_send_EOS)
	{
		if(sendEOS())
		{
			stop_fetching = true;
			have_to_send_EOS = false;
		}
		return;
	}

	#if 0
	if(FE_latch->op_valid || FE_latch->pipeline_stall_enabled)
	{
		/* The latch still holds a valid op that the next stage is using,
		or the ID stage has enabled pipeline stalling because of a branch
		instruction. Do not fetch. */
		return;
	}
	/* This condition is rewritten per-thread for multithreading; see the
	statements below. By De Morgan's law, ~(a OR b) ===> ~a AND ~b */
	#endif

	static UINT64 fetch_arbiter = 0;
	int stream_id = -1;
	Op *op = NULL;	/* defensive init; only dereferenced when op_exists is true */
	bool op_exists = false, stalled[HW_MAX_THREAD];

	for(int i = 0 ; i < HW_MAX_THREAD ; i++)
		stalled[i] = true;

	/* Round-robin across threads: pick the next thread whose latch slot
	   is free and not stalled, and try to fetch an op from its stream */
	for(int i = 0 ; i < thread_count ; i++)
	{
		stream_id = fetch_arbiter++ % thread_count;
		if(!FE_latch->op_valid_thread[stream_id] && !FE_latch->pipeline_stall_enabled_thread[stream_id])
		{
			stalled[stream_id] = false;
			op = get_free_op();
			op_exists = get_op(op, stream_id);
			if(op_exists)
				break;
			else
				free_op(op);
		}
	}
	
	if(!op_exists)
	{
		/* No op fetched - this could be due to one of the following:
		   1. all threads were stalled
		   2. some threads were stalled and the others have run out of instructions
		   3. no instructions left to fetch on any thread
		*/

		// checking case 1
		bool all_stalled = true;
		for(int i = 0 ; i < thread_count ; i++)
		{
			if(!stalled[i])
				all_stalled = false;
		}
		if(all_stalled)
			return;

		// checking case 2 & 3
		bool eois = true;	// end of instruction streams
		for(int i = 0 ; i < thread_count ; i++)
		{
			if(!end_of_stream[i])
				eois = false;
		}
		if(!eois)
			return;
		else
		{
			/* Initiate simulator shutdown: send the end-of-stream marker
			   if there is queue space; otherwise retry next cycle. */
			if(sendEOS())
				stop_fetching = true;
			else
				have_to_send_EOS = true;
			return;
		}
	}

	/* If the op is a branch other than a conditional branch, assume it is
	predicted correctly when the branch predictor is in use. */
	//  if(use_bpred && (op->cf_type >= CF_BR) && (op->cf_type < NUM_CF_TYPES) && (op->cf_type != CF_CBR))
	//	  bpred_okpred_count++;
	/* The two lines above are commented out because that is not how this solution is implemented. */

	/* If we are using the branch predictor and the op is a conditional branch,
	get a prediction, then update the GHR and PHT with the actual outcome */
	if(use_bpred && (op->cf_type == CF_CBR))
	{
		int prediction = bpred_access(branchpred, op->instruction_addr, op->thread_id);
		if(prediction == op->actually_taken)
		{
			bpred_okpred_count++;
			bpred_okpred_count_thread[op->thread_id]++;
		}
		else
		{
			bpred_mispred_count++;
			bpred_mispred_count_thread[op->thread_id]++;
			/* stall the pipeline if we mispredict */
			FE_latch->pipeline_stall_enabled_thread[op->thread_id] = true;
			FE_latch->stall_enforcer_thread[op->thread_id] = op->inst_id;
		}
		bpred_update(branchpred, op->instruction_addr, prediction, op->actually_taken, op->thread_id);
	}

	/* hwsim: hand the fetched op to the ID stage */
	#if 0
	/* Deprecated  after adding MT support */
	FE_latch->op = op;				/* pass the op to ID stage */
	FE_latch->op_valid = true;			/* Mark it as valid */
	#endif

	FE_latch->op_queue[op->thread_id] = op;
	FE_latch->op_valid_thread[op->thread_id] = true;

}