Example #1
HRESULT CVideoTransformFilter::Receive(IMediaSample *pSample)
{
    // If the next filter downstream is the video renderer, then it may
    // be able to operate in DirectDraw mode which saves copying the data
    // and gives higher performance.  In that case the buffer which we
    // get from GetDeliveryBuffer will be a DirectDraw buffer, and
    // drawing into this buffer draws directly onto the display surface.
    // This means that any waiting for the correct time to draw occurs
    // during GetDeliveryBuffer, and that once the buffer is given to us
    // the video renderer will count it in its statistics as a frame drawn.
    // This means that any decision to drop the frame must be taken before
    // calling GetDeliveryBuffer.

    ASSERT(CritCheckIn(&m_csReceive));
    AM_MEDIA_TYPE *pmtOut, *pmt;
#ifdef _DEBUG
    FOURCCMap fccOut;
#endif
    HRESULT hr;
    ASSERT(pSample);
    IMediaSample * pOutSample;

    // If there's no output pin to deliver to, there's no point sending us data
    ASSERT(m_pOutput != NULL);

    // The source filter may dynamically ask us to start transforming from a
    // different media type than the one we're using now.  If we don't switch,
    // we'll draw garbage.  (Typically this is a palette change in the movie,
    // but it could be something more sinister, like the compression type
    // changing, or even the video size changing.)

#define rcS1 ((VIDEOINFOHEADER *)(pmt->pbFormat))->rcSource
#define rcT1 ((VIDEOINFOHEADER *)(pmt->pbFormat))->rcTarget

    pSample->GetMediaType(&pmt);
    if (pmt != NULL && pmt->pbFormat != NULL) {

	// spew some debug output
	ASSERT(!IsEqualGUID(pmt->majortype, GUID_NULL));
#ifdef _DEBUG
        fccOut.SetFOURCC(&pmt->subtype);
	LONG lCompression = HEADER(pmt->pbFormat)->biCompression;
	LONG lBitCount = HEADER(pmt->pbFormat)->biBitCount;
	LONG lStride = (HEADER(pmt->pbFormat)->biWidth * lBitCount + 7) / 8;
	lStride = (lStride + 3) & ~3;
        DbgLog((LOG_TRACE,3,TEXT("*Changing input type on the fly to")));
        DbgLog((LOG_TRACE,3,TEXT("FourCC: %lx Compression: %lx BitCount: %ld"),
		fccOut.GetFOURCC(), lCompression, lBitCount));
        DbgLog((LOG_TRACE,3,TEXT("biHeight: %ld rcDst: (%ld, %ld, %ld, %ld)"),
		HEADER(pmt->pbFormat)->biHeight,
		rcT1.left, rcT1.top, rcT1.right, rcT1.bottom));
        DbgLog((LOG_TRACE,3,TEXT("rcSrc: (%ld, %ld, %ld, %ld) Stride: %ld"),
		rcS1.left, rcS1.top, rcS1.right, rcS1.bottom,
		lStride));
#endif

	// now switch to using the new format.  I am assuming that the
	// derived filter will do the right thing when its media type is
	// switched and streaming is restarted.

	StopStreaming();
	m_pInput->CurrentMediaType() = *pmt;
	DeleteMediaType(pmt);
	// if this fails, playback will stop, so signal an error
	hr = StartStreaming();
	if (FAILED(hr)) {
	    return AbortPlayback(hr);
	}
    }

    // Now that we have noticed any format changes on the input sample, it's
    // OK to discard it.

    if (ShouldSkipFrame(pSample)) {
        MSR_NOTE(m_idSkip);
        m_bSampleSkipped = TRUE;
        return NOERROR;
    }

    // Set up the output sample
    hr = InitializeOutputSample(pSample, &pOutSample);

    if (FAILED(hr)) {
        return hr;
    }

    m_bSampleSkipped = FALSE;

    // The renderer may ask us on the fly to start transforming to a
    // different format.  If we don't obey, we'll draw garbage.

#define rcS ((VIDEOINFOHEADER *)(pmtOut->pbFormat))->rcSource
#define rcT ((VIDEOINFOHEADER *)(pmtOut->pbFormat))->rcTarget

    pOutSample->GetMediaType(&pmtOut);
    if (pmtOut != NULL && pmtOut->pbFormat != NULL) {

	// spew some debug output
	ASSERT(!IsEqualGUID(pmtOut->majortype, GUID_NULL));
#ifdef _DEBUG
        fccOut.SetFOURCC(&pmtOut->subtype);
	LONG lCompression = HEADER(pmtOut->pbFormat)->biCompression;
	LONG lBitCount = HEADER(pmtOut->pbFormat)->biBitCount;
	LONG lStride = (HEADER(pmtOut->pbFormat)->biWidth * lBitCount + 7) / 8;
	lStride = (lStride + 3) & ~3;
        DbgLog((LOG_TRACE,3,TEXT("*Changing output type on the fly to")));
        DbgLog((LOG_TRACE,3,TEXT("FourCC: %lx Compression: %lx BitCount: %ld"),
		fccOut.GetFOURCC(), lCompression, lBitCount));
        DbgLog((LOG_TRACE,3,TEXT("biHeight: %ld rcDst: (%ld, %ld, %ld, %ld)"),
		HEADER(pmtOut->pbFormat)->biHeight,
		rcT.left, rcT.top, rcT.right, rcT.bottom));
        DbgLog((LOG_TRACE,3,TEXT("rcSrc: (%ld, %ld, %ld, %ld) Stride: %ld"),
		rcS.left, rcS.top, rcS.right, rcS.bottom,
		lStride));
#endif

	// now switch to using the new format.  I am assuming that the
	// derived filter will do the right thing when its media type is
	// switched and streaming is restarted.

	StopStreaming();
	m_pOutput->CurrentMediaType() = *pmtOut;
	DeleteMediaType(pmtOut);
	hr = StartStreaming();

	if (SUCCEEDED(hr)) {
            // A new format means a new empty buffer, so wait for a keyframe
            // before passing anything on to the renderer.
            // !!! a keyframe may never come, so give up after 30 frames
            DbgLog((LOG_TRACE,3,TEXT("Output format change means we must wait for a keyframe")));
	    m_nWaitForKey = 30;

	// if this fails, playback will stop, so signal an error
	} else {

            //  Must release the sample before calling AbortPlayback
            //  because we might be holding the win16 lock or
            //  ddraw lock
            pOutSample->Release();
	    AbortPlayback(hr);
            return hr;
	}
    }

    // After a discontinuity, we need to wait for the next key frame
    if (pSample->IsDiscontinuity() == S_OK) {
        DbgLog((LOG_TRACE,3,TEXT("Non-key discontinuity - wait for keyframe")));
	m_nWaitForKey = 30;
    }

    // Start timing the transform (and log it if PERF is defined)

    if (SUCCEEDED(hr)) {
        m_tDecodeStart = timeGetTime();
        MSR_START(m_idTransform);

        // have the derived class transform the data
        hr = Transform(pSample, pOutSample);

        // Stop the clock (and log it if PERF is defined)
        MSR_STOP(m_idTransform);
        m_tDecodeStart = timeGetTime()-m_tDecodeStart;
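        // Keep a rough running average of the decode time: convert the
        // elapsed milliseconds to reference-time units (* 10000 = 100ns
        // ticks) and blend the new value 1:15 with the previous average.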
        m_itrAvgDecode = m_tDecodeStart*(10000/16) + 15*(m_itrAvgDecode/16);

        // Maybe we're still waiting for a keyframe?  Count this frame down,
        // and let a sync point end the wait (the keyframe itself then gets
        // delivered, since m_nWaitForKey is back to zero).
        if (m_nWaitForKey)
            m_nWaitForKey--;
        if (m_nWaitForKey && pSample->IsSyncPoint() == S_OK)
            m_nWaitForKey = 0;

        // if we're still waiting, don't pass this frame on to the renderer
        if (m_nWaitForKey && hr == NOERROR) {
            DbgLog((LOG_TRACE,3,TEXT("still waiting for a keyframe")));
            hr = S_FALSE;
        }
    }

    if (FAILED(hr)) {
        DbgLog((LOG_TRACE,1,TEXT("Error from video transform")));
    } else {
        // the Transform() function can return S_FALSE to indicate that the
        // sample should not be delivered; we only deliver the sample if it's
        // really S_OK (same as NOERROR, of course.)
        // Try not to return S_FALSE when the output is a DirectDraw buffer
        // (it's wasteful) - make the drop decision earlier, before getting the buffer.

        if (hr == NOERROR) {
    	    hr = m_pOutput->Deliver(pOutSample);
        } else {
            // S_FALSE returned from Transform is a PRIVATE agreement
            // We should return NOERROR from Receive() in this case because returning S_FALSE
            // from Receive() means that this is the end of the stream and no more data should
            // be sent.
            if (S_FALSE == hr) {

                //  We must Release() the sample before doing anything
                //  like calling the filter graph because having the
                //  sample means we may have the DirectDraw lock
                //  (== win16 lock on some versions)
                pOutSample->Release();
                m_bSampleSkipped = TRUE;
                if (!m_bQualityChanged) {
                    m_bQualityChanged = TRUE;
                    NotifyEvent(EC_QUALITY_CHANGE,0,0);
                }
                return NOERROR;
            }
        }
    }

    // release the output buffer. If the connected pin still needs it,
    // it will have addrefed it itself.
    pOutSample->Release();
    ASSERT(CritCheckIn(&m_csReceive));

    return hr;
}
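
Example #1's Receive() reaches derived code only through Transform(); everything else (format changes, frame dropping, keyframe waiting) is handled by the base class. As a rough sketch of how a filter might plug into this path, assuming the standard DirectShow base classes from <streams.h>, a hypothetical CMyVideoDecoder could look like this (the class name and its trivial stubs are illustrative only, not part of the base classes):

#include <streams.h>   // DirectShow base classes

// Hypothetical derived filter (sketch only): CVideoTransformFilter::Receive()
// above calls back into the derived class solely through Transform().
class CMyVideoDecoder : public CVideoTransformFilter
{
public:
    CMyVideoDecoder(LPUNKNOWN pUnk, HRESULT *phr)
        : CVideoTransformFilter(NAME("My Video Decoder"), pUnk, GUID_NULL)
        { UNREFERENCED_PARAMETER(phr); }

    // Decode/copy the input sample into the (possibly DirectDraw) output buffer.
    HRESULT Transform(IMediaSample *pIn, IMediaSample *pOut)
    {
        BYTE *pSrc = NULL, *pDst = NULL;
        HRESULT hr = pIn->GetPointer(&pSrc);
        if (FAILED(hr)) return hr;
        hr = pOut->GetPointer(&pDst);
        if (FAILED(hr)) return hr;

        long cb = pIn->GetActualDataLength();
        if (cb > pOut->GetSize()) return E_UNEXPECTED;
        CopyMemory(pDst, pSrc, cb);            // stand-in for real decoding
        pOut->SetActualDataLength(cb);

        // Returning S_FALSE here would make Receive() drop the frame quietly
        // and fire EC_QUALITY_CHANGE once.
        return S_OK;
    }

    // The remaining pure virtuals must also be implemented; these trivial
    // placeholders just keep the sketch self-contained.
    HRESULT CheckInputType(const CMediaType *mtIn) { return S_OK; }
    HRESULT CheckTransform(const CMediaType *mtIn, const CMediaType *mtOut) { return S_OK; }
    HRESULT GetMediaType(int iPosition, CMediaType *pMediaType) { return VFW_S_NO_MORE_ITEMS; }
    HRESULT DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProp) { return E_NOTIMPL; }
};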
Example #2
HRESULT CBufferFilter::Receive(IMediaSample* pSample)
{
	/*  Check for other streams and pass them on */
	AM_SAMPLE2_PROPERTIES* const pProps = m_pInput->SampleProps();
	if(pProps->dwStreamId != AM_STREAM_MEDIA)
		return m_pOutput->Deliver(pSample);

	HRESULT hr;
	ASSERT(pSample);
	IMediaSample* pOutSample;

	ASSERT(m_pOutput != NULL);

	// Set up the output sample
	hr = InitializeOutputSample(pSample, &pOutSample);

	if(FAILED(hr))
		return hr;

	// Start timing the transform (if PERF is defined)
	MSR_START(m_idTransform);

	// have the derived class transform the data

	hr = Transform(pSample, pOutSample);

	// Stop the clock and log it (if PERF is defined)
	MSR_STOP(m_idTransform);

	if(FAILED(hr)) {
		DbgLog((LOG_TRACE,1,TEXT("Error from transform")));
	}
	else {
		// the Transform() function can return S_FALSE to indicate that the
		// sample should not be delivered; we only deliver the sample if it's
		// really S_OK (same as NOERROR, of course.)
		if(hr == NOERROR) {
			hr = m_pOutput->Deliver(pOutSample);
			m_bSampleSkipped = FALSE;   // the last sample was delivered, not dropped
		}
		else {
			// S_FALSE returned from Transform is a PRIVATE agreement
			// We should return NOERROR from Receive() in this case because returning S_FALSE
			// from Receive() means that this is the end of the stream and no more data should
			// be sent.
			if(S_FALSE == hr) {

				//  Release the sample before calling notify to avoid
				//  deadlocks if the sample holds a lock on the system
				//  such as DirectDraw buffers do
				pOutSample->Release();
				m_bSampleSkipped = TRUE;
				if(!m_bQualityChanged) {
					NotifyEvent(EC_QUALITY_CHANGE,0,0);
					m_bQualityChanged = TRUE;
				}
				return NOERROR;
			}
		}
	}

	// release the output buffer. If the connected pin still needs it,
	// it will have addrefed it itself.
	pOutSample->Release();

	return hr;
}
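
When any of these Receive() implementations drops a sample via the S_FALSE path, it raises EC_QUALITY_CHANGE at most once. On the application side that notification can be picked up through the graph's IMediaEvent interface; the sketch below is a minimal illustration (WatchForQualityChange is a hypothetical helper, and a real application would usually use IMediaEventEx::SetNotifyWindow rather than polling):

#include <dshow.h>

// Minimal sketch: poll the filter graph's event queue and notice the
// EC_QUALITY_CHANGE notification raised by the Receive() paths above.
void WatchForQualityChange(IMediaEvent *pEvent)
{
    long evCode;
    LONG_PTR param1, param2;

    // Poll with a short timeout; a message-loop based design is preferable
    // in a real application.
    while (pEvent->GetEvent(&evCode, &param1, &param2, 100) == S_OK) {
        if (evCode == EC_QUALITY_CHANGE) {
            // A filter in the graph has signalled a quality change
            // (here: it started dropping samples to keep up).
        }
        pEvent->FreeEventParams(evCode, param1, param2);
    }
}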
Example #3
HRESULT
CTransInPlaceFilter::Receive(IMediaSample *pSample)
{
    /*  Check for other streams and pass them on */
    AM_SAMPLE2_PROPERTIES * const pProps = m_pInput->SampleProps();
    if (pProps->dwStreamId != AM_STREAM_MEDIA) {
        return m_pOutput->Deliver(pSample);
    }
    HRESULT hr;

    // Start timing the TransInPlace (if PERF is defined)
    MSR_START(m_idTransInPlace);

    if (UsingDifferentAllocators()) {

        // We have to copy the data.

        pSample = Copy(pSample);

        if (pSample==NULL) {
            MSR_STOP(m_idTransInPlace);
            return E_UNEXPECTED;
        }
    }

    // have the derived class transform the data
    hr = Transform(pSample);

    // Stop the clock and log it (if PERF is defined)
    MSR_STOP(m_idTransInPlace);

    if (FAILED(hr)) {
        DbgLog((LOG_TRACE, 1, TEXT("Error from TransInPlace")));
        if (UsingDifferentAllocators()) {
            pSample->Release();
        }
        return hr;
    }

    // the Transform() function can return S_FALSE to indicate that the
    // sample should not be delivered; we only deliver the sample if it's
    // really S_OK (same as NOERROR, of course.)
    if (hr == NOERROR) {
        hr = m_pOutput->Deliver(pSample);
    } else {
        //  But it would be an error to return this private workaround
        //  to the caller ...
        if (S_FALSE == hr) {
            // S_FALSE returned from Transform is a PRIVATE agreement
            // We should return NOERROR from Receive() in this case because
            // returning S_FALSE from Receive() means that this is the end
            // of the stream and no more data should be sent.
            m_bSampleSkipped = TRUE;
            if (!m_bQualityChanged) {
                NotifyEvent(EC_QUALITY_CHANGE,0,0);
                m_bQualityChanged = TRUE;
            }
            hr = NOERROR;
        }
    }

    // release the output buffer. If the connected pin still needs it,
    // it will have addrefed it itself.
    if (UsingDifferentAllocators()) {
        pSample->Release();
    }

    return hr;

} // Receive
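
Unlike the copy transforms above, CTransInPlaceFilter only asks the derived class for CheckInputType() and the single-sample Transform(); the base class performs the Copy() itself when the two pins use different allocators. A minimal hypothetical sketch (CMyInPlaceFilter and its byte-inverting body are illustrative only):

#include <streams.h>   // DirectShow base classes

// Hypothetical in-place filter (sketch only): the Receive() path above needs
// nothing more from the derived class than these two methods.
class CMyInPlaceFilter : public CTransInPlaceFilter
{
public:
    CMyInPlaceFilter(LPUNKNOWN pUnk, HRESULT *phr)
        : CTransInPlaceFilter(NAME("My In-Place Filter"), pUnk, GUID_NULL, phr) {}

    HRESULT CheckInputType(const CMediaType *mtIn)
    {
        // Accept anything for the sketch; a real filter checks majortype,
        // subtype and format block here.
        return S_OK;
    }

    // Modify the buffer in place; the base class has already copied the
    // sample if the input and output allocators differ.
    HRESULT Transform(IMediaSample *pSample)
    {
        BYTE *pData = NULL;
        HRESULT hr = pSample->GetPointer(&pData);
        if (FAILED(hr)) return hr;

        long cb = pSample->GetActualDataLength();
        for (long i = 0; i < cb; i++) {
            pData[i] = (BYTE)(255 - pData[i]);   // toy in-place edit
        }
        // Return S_FALSE instead to have Receive() drop the sample quietly.
        return S_OK;
    }
};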