Code Example #1
File: qgstappsrc.cpp Project: Esclapion/qt-mobility
void QGstAppSrc::streamDestroyed()
{
    if (sender() == m_stream) {
        m_stream = 0;
        sendEOS();
    }
}
Code Example #2
File: qgstappsrc.cpp Project: Esclapion/qt-mobility
void QGstAppSrc::pushDataToAppSrc()
{
    if (!isStreamValid() || !m_setup)
        return;

    if (m_dataRequested && !m_enoughData) {
        qint64 size;
        if (m_dataRequestSize == (unsigned int)-1)
            size = qMin(m_stream->bytesAvailable(), queueSize());
        else
            size = qMin(m_stream->bytesAvailable(), (qint64)m_dataRequestSize);
        void *data = g_malloc(size);
        GstBuffer* buffer = gst_app_buffer_new(data, size, g_free, data);
        buffer->offset = m_stream->pos();
        qint64 bytesRead = m_stream->read((char*)GST_BUFFER_DATA(buffer), size);
        buffer->offset_end = buffer->offset + bytesRead - 1;

        if (bytesRead > 0) {
            m_dataRequested = false;
            m_enoughData = false;
            GstFlowReturn ret = gst_app_src_push_buffer (GST_APP_SRC (element()), buffer);
            if (ret == GST_FLOW_ERROR) {
                qWarning()<<"appsrc: push buffer error";
            } else if (ret == GST_FLOW_WRONG_STATE) {
                qWarning()<<"appsrc: push buffer wrong state";
            } else if (ret == GST_FLOW_RESEND) {
                qWarning()<<"appsrc: push buffer resend";
            }
        }
    } else if (m_stream->atEnd()) {
        sendEOS();
    }
}
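
Both Qt Mobility snippets above hand end-of-stream signalling off to a sendEOS() helper that is not shown on this page. For context, here is a minimal sketch of what such a helper typically looks like with the GStreamer appsrc API; the m_setup guard and the element() accessor are assumptions inferred from the calls in Code Example #2, so the actual qt-mobility implementation may differ.

#include <gst/app/gstappsrc.h>

// Sketch only: element() is assumed to return the GstElement* wrapping the
// appsrc, as suggested by the gst_app_src_push_buffer() call above.
void QGstAppSrc::sendEOS()
{
    if (!m_setup)
        return;

    // Queue an EOS event on the appsrc; no further buffers are accepted
    // after this call and downstream elements will receive EOS.
    gst_app_src_end_of_stream(GST_APP_SRC(element()));
}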
Code Example #3
File: sim.cpp Project: rvbelapure/comparch
void FE_stage()
{

	if(stop_fetching)
		return;

	if(have_to_send_EOS)
	{
		if(sendEOS())
		{
			stop_fetching = true;
			have_to_send_EOS = false;
		}
		return;
	}

	#if 0
	if(FE_latch->op_valid || FE_latch->pipeline_stall_enabled)
	{
		/* Data inside the latch is valid and the next stage is still using it,
		or the ID stage has enabled pipeline stalling because of a branch instruction.
		Do not fetch. */
		return;
	}
	/* This condition was rewritten for multithreading; see the statements below.
	~(a OR b) ===> ~a AND ~b */
	#endif

	static UINT64 fetch_arbiter = 0;
	int stream_id = -1;
	Op *op;
	bool op_exists = false, stalled[HW_MAX_THREAD];

	for(int i = 0 ; i < HW_MAX_THREAD ; i++)
		stalled[i] = true;

	/* Find next available empty queue slot to fill */
	for(int i = 0 ; i < thread_count ; i++)
	{
		stream_id = fetch_arbiter++ % thread_count;
		if(!FE_latch->op_valid_thread[stream_id] && !FE_latch->pipeline_stall_enabled_thread[stream_id])
		{
			stalled[stream_id] = false;
			op = get_free_op();
			op_exists = get_op(op, stream_id);
			if(op_exists)
				break;
			else
				free_op(op);
		}
	}
	
	if(!op_exists)
	{
		/* No op fetched - this could be due to one of the following:
		   1. all threads were stalled
		   2. some threads were stalled and others have run out of instructions
		   3. no instructions available to fetch
		*/

		// checking case 1
		bool all_stalled = true;
		for(int i = 0 ; i < thread_count ; i++)
		{
			if(!stalled[i])
				all_stalled = false;
		}
		if(all_stalled)
			return;

		// checking case 2 & 3
		bool eois = true;	// end of instruction streams
		for(int i = 0 ; i < thread_count ; i++)
		{
			if(!end_of_stream[i])
				eois = false;
		}
		if(!eois)
			return;
		else
		{
			/* Must take action to initiate simulator shutdown. */
			// First check whether there is space in the queue;
			// if not, try to send the EOS again in the next cycle.
			if(sendEOS())
				stop_fetching = true;
			else
				have_to_send_EOS = true;
			return;
		}
	}

	/* If the op is a branch other than a conditional branch, assume that it is
	predicted correctly when the branch predictor is used. */
	//  if(use_bpred && (op->cf_type >= CF_BR) && (op->cf_type < NUM_CF_TYPES) && (op->cf_type != CF_CBR))
	//	  bpred_okpred_count++;
	/* The two lines above are commented out because that is not how the solution is implemented. */

	/* If we are using the branch predictor and the op is a conditional branch,
	get a prediction and update the GHR and PHT. */
	if(use_bpred && (op->cf_type == CF_CBR))
	{
		int prediction = bpred_access(branchpred, op->instruction_addr, op->thread_id);
		if(prediction == op->actually_taken)
		{
			bpred_okpred_count++;
			bpred_okpred_count_thread[op->thread_id]++;
		}
		else
		{
			bpred_mispred_count++;
			bpred_mispred_count_thread[op->thread_id]++;
			/* stall the pipeline if we mispredict */
			FE_latch->pipeline_stall_enabled_thread[op->thread_id] = true;
			FE_latch->stall_enforcer_thread[op->thread_id] = op->inst_id;
		}
		bpred_update(branchpred, op->instruction_addr, prediction, op->actually_taken, op->thread_id);
	}

	/* hwsim : get the instruction and pass to ID phase */
	#if 0
	/* Deprecated after adding MT support */
	FE_latch->op = op;				/* pass the op to ID stage */
	FE_latch->op_valid = true;			/* Mark it as valid */
	#endif

	FE_latch->op_queue[op->thread_id] = op;
	FE_latch->op_valid_thread[op->thread_id] = true;

}
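
In the simulator example, sendEOS() is expected to return a bool so that FE_stage() can retry in the next cycle when the downstream latch has no room (see the have_to_send_EOS handling at the top of the function). The real sim.cpp implementation is not shown on this page; the following is a hypothetical sketch of that contract only. The FE_latch fields, thread_count, and the get_free_op() helper are taken from the snippet above, while the is_eos sentinel flag is an assumption introduced for illustration.

/* Hypothetical sketch, not the project's actual code: mark a dummy op as an
   end-of-stream sentinel and hand it to the ID stage only when a latch slot
   is free; otherwise report failure so FE_stage() retries next cycle. */
bool sendEOS(void)
{
	/* If any latch slot is still occupied, the ID stage has not drained yet. */
	for(int i = 0 ; i < thread_count ; i++)
	{
		if(FE_latch->op_valid_thread[i])
			return false;	/* no space: try again next cycle */
	}

	Op *eos_op = get_free_op();
	eos_op->is_eos = true;			/* assumed sentinel flag, not shown in the snippet */
	FE_latch->op_queue[0] = eos_op;		/* hand the sentinel to the ID stage */
	FE_latch->op_valid_thread[0] = true;
	return true;
}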