bool DirectShowTimedSample::schedule(
        IReferenceClock *clock, REFERENCE_TIME startTime, HANDLE handle)
{
    REFERENCE_TIME sampleStartTime;
    REFERENCE_TIME sampleEndTime;
    if (m_sample->GetTime(&sampleStartTime, &sampleEndTime) == S_OK) {
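        // AdviseTime fires the event when the reference clock reaches startTime + sampleStartTime.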
        if (clock->AdviseTime(
                startTime, sampleStartTime, reinterpret_cast<HEVENT>(handle), &m_cookie) == S_OK) {
            return true;
        }
    }
    return false;
}

IMediaSample *DirectShowSampleScheduler::takeSample(bool *eos)
{
    QMutexLocker locker(&m_mutex);

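    // Pop the head sample once its scheduled time has arrived; the returned sample carries a reference for the caller.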
    if (m_head && m_head->isReady(m_clock)) {
        IMediaSample *sample = m_head->sample();
        sample->AddRef();

        *eos = m_head->isLast();

        m_head = m_head->remove();

        if (!m_head)
            m_tail = 0;

        m_semaphore.release(1);

        return sample;
    } else {
        return 0;
    }
}
Example #3
// Issue the I/O if not overlapped, and block until the I/O completes.
// Returns the error code of the file I/O.
//
HRESULT
CAsyncRequest::Complete()
{
    m_pStream->Lock();

    m_hr = m_pStream->SetPointer(m_llPos);
    if(S_OK == m_hr)
    {
        DWORD dwActual;

        m_hr = m_pStream->Read(m_pBuffer, m_lLength, m_bAligned, &dwActual);
        if(m_hr == OLE_S_FIRST)
        {
            if(m_pContext)
            {
                IMediaSample *pSample = reinterpret_cast<IMediaSample *>(m_pContext);
                pSample->SetDiscontinuity(TRUE);
                m_hr = S_OK;
            }
        }

        if(FAILED(m_hr))
        {
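            // the read failed; fall through and return m_hr unchanged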
        }
        else if(dwActual != (DWORD)m_lLength)
        {
            // tell caller size changed - probably because of EOF
            m_lLength = (LONG) dwActual;
            m_hr = S_FALSE;
        }
        else
        {
            m_hr = S_OK;
        }
    }

    m_pStream->Unlock();
    return m_hr;
}
Example #4
//
// collect the next ready sample
STDMETHODIMP
CAsyncOutputPin::WaitForNext(
	DWORD dwTimeout,
	IMediaSample** ppSample,  // completed sample
	DWORD_PTR * pdwUser)        // user context
{
	CheckPointer(ppSample, E_POINTER);

	LONG cbActual;
	IMediaSample* pSample = 0;

	HRESULT hr = m_pIo->WaitForNext(dwTimeout,
									(LPVOID*) & pSample,
									pdwUser,
									&cbActual);

	if (SUCCEEDED(hr))
		pSample->SetActualDataLength(cbActual);

	*ppSample = pSample;
	return hr;
}
Example #5
//  Remove and Release() all queued and Batched samples
void COutputQueue::FreeSamples()
{
    CAutoLock lck(this);
    if (IsQueued()) {
        while (TRUE) {
            IMediaSample *pSample = m_List->RemoveHead();
	    // inform derived class we took something off the queue
	    if (m_hEventPop) {
                //DbgLog((LOG_TRACE,3,TEXT("Queue: Delivered  SET EVENT")));
	        SetEvent(m_hEventPop);
	    }

            if (pSample == NULL) {
                break;
            }
            if (!IsSpecialSample(pSample)) {
                pSample->Release();
            } else {
                if (pSample == NEW_SEGMENT) {
                    //  Free NEW_SEGMENT packet
                    NewSegmentPacket *ppacket =
                        (NewSegmentPacket *) m_List->RemoveHead();
		    // inform derived class we took something off the queue
		    if (m_hEventPop) {
                        //DbgLog((LOG_TRACE,3,TEXT("Queue: Delivered  SET EVENT")));
		        SetEvent(m_hEventPop);
		    }

                    ASSERT(ppacket != NULL);
                    delete ppacket;
                }
            }
        }
    }
    for (int i = 0; i < m_nBatched; i++) {
        m_ppSamples[i]->Release();
    }
    m_nBatched = 0;
}
Example #6
File: pullpin.cpp Project: BitMax/openitg
HRESULT
CPullPin::CollectAndDeliver(
    REFERENCE_TIME tStart,
    REFERENCE_TIME tStop) {
    IMediaSample* pSample = NULL;   // better be sure pSample is set
    DWORD_PTR dwUnused;
    HRESULT hr = m_pReader->WaitForNext(INFINITE,
        &pSample,
        &dwUnused);
    if(FAILED(hr)) {
        if(pSample) {
            pSample->Release();
        }
    }
    else {
        hr = DeliverSample(pSample, tStart, tStop);
    }
    if(FAILED(hr)) {
        CleanupCancelled();
        OnError(hr);
    }
    return hr;

}
Example #7
HRESULT OutputPin::Push(void *buf, long size)
{
    HRESULT hr;
    IMediaSample *pSample;
    VIDEOINFOHEADER *vi;
    AM_MEDIA_TYPE *pmt;
    BYTE *dst_buf;

    /**
     * Hold the critical section here as the pin might get disconnected
     * during the Deliver() method call.
     */
    m_pLock->Lock();

    hr = GetDeliveryBuffer(&pSample, NULL, NULL, 0);
    if (FAILED(hr))
        goto on_error;

    pSample->GetMediaType(&pmt);
    if (pmt) {
        mediaType.Set(*pmt);
        bufSize = pmt->lSampleSize;
    }

    pSample->GetPointer(&dst_buf);
    vi = (VIDEOINFOHEADER *)mediaType.pbFormat;
    if (vi->rcSource.right == vi->bmiHeader.biWidth) {
        assert(pSample->GetSize() >= size);
        memcpy(dst_buf, buf, size);
    } else {
        unsigned i, bpp;
        unsigned dststride, srcstride;
        BYTE *src_buf = (BYTE *)buf;

        bpp = size / abs(vi->bmiHeader.biHeight) / vi->rcSource.right;
        dststride = vi->bmiHeader.biWidth * bpp;
        srcstride = vi->rcSource.right * bpp;
        for (i = abs(vi->bmiHeader.biHeight); i > 0; i--) {
            memcpy(dst_buf, src_buf, srcstride);
            dst_buf += dststride;
            src_buf += srcstride;
        }
    }
    pSample->SetActualDataLength(size);

    hr = Deliver(pSample);

    pSample->Release();

on_error:
    m_pLock->Unlock();
    return hr;
}
Example #8
HRESULT COutputPin::DeliverSample(GstBuffer *pBuffer)
{
    HRESULT hr = S_OK;
    IMediaSample *pSample = NULL;
    REFERENCE_TIME start = -1;
    REFERENCE_TIME stop = -1;

    hr = m_pAlloc->SetGstBuffer(pBuffer);
    if (FAILED(hr))
        return hr;

    hr = GetDeliveryBuffer(&pSample, NULL, NULL, 0);
    if (FAILED(hr))
        return hr;

    // Set media time
    pSample->SetMediaTime(NULL, NULL);

    // Set time
    if (GST_BUFFER_TIMESTAMP_IS_VALID(pBuffer))
    {
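        // GStreamer timestamps are in nanoseconds; DirectShow REFERENCE_TIME uses 100 ns units.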
        start = GST_BUFFER_TIMESTAMP(pBuffer) / 100;

        if (GST_BUFFER_DURATION_IS_VALID(pBuffer))
        {
            stop = (GST_BUFFER_TIMESTAMP(pBuffer) + GST_BUFFER_DURATION(pBuffer)) / 100;
        }
        else
        {
            stop = start + 1;
        }

        if (stop <= start) // Sometimes it may happen
            stop = start + 1;

        pSample->SetTime(&start, &stop);
    }
    else
    {
        pSample->SetTime(NULL, NULL);
    }

    if (GST_BUFFER_IS_DISCONT(pBuffer))
        pSample->SetDiscontinuity(TRUE);

    hr = Deliver(pSample);
    pSample->Release();
    if (FAILED(hr))
        return hr;

    return S_OK;
}
Example #9
STDMETHODIMP CDshowFakeOutputPin::PushBuffer(byte *buffer, __int64 start, __int64 stop, unsigned int size, bool discount)
{
  IMediaSample *pSample = NULL;
  
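  // Incoming times are nanoseconds; convert to 100 ns units (-1 means the sample is untimed).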
  if (start != -1) {
    start /= 100;
    stop /= 100;
  }

  HRESULT hres = GetDeliveryBuffer(&pSample, NULL, NULL, 0);
  if (hres == S_OK && pSample)
  {
    BYTE *sample_buffer;
    pSample->GetPointer(&sample_buffer);
    if(sample_buffer)
    {
      memcpy (sample_buffer, buffer, size);
      pSample->SetActualDataLength(size);
    }
    if (discount)
      pSample->SetDiscontinuity(TRUE);
    else
      pSample->SetDiscontinuity(FALSE);
    
    pSample->SetSyncPoint(TRUE);
    pSample->SetPreroll(FALSE);
  
    if (start != -1)
      pSample->SetTime(&start, &stop);

    hres = Deliver(pSample);
    pSample->Release();
  }

  return S_OK;
}
Example #10
//----------------------------------------------------------------------------
//! @brief		Get the next sample
//! @param		pSample : pointer to the pointer that receives the sample
//! @return		error code
//----------------------------------------------------------------------------
HRESULT CWMOutput::GetNextSample( IMediaSample **pSample )
{
	HRESULT hr;
	if( m_StreamNum == 0 || pSample == NULL )
		return S_FALSE;	// this stream does not exist

	INSSBuffer	*pWMSample = NULL;
	QWORD	cnsSampleTime;
	QWORD	cnsDuration;
	DWORD	dwFlags;

	if( FAILED(hr = WMReader()->GetNextSample( m_StreamNum, &pWMSample, &cnsSampleTime, &cnsDuration, &dwFlags, NULL, NULL )) )
	{
		if( hr == NS_E_NO_MORE_SAMPLES ) return S_FALSE;
		return hr;
	}

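	// WM reader sample times are in 100 ns units, the same scale as REFERENCE_TIME.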
	REFERENCE_TIME	startTime = (REFERENCE_TIME)cnsSampleTime;
	REFERENCE_TIME	endTime = (REFERENCE_TIME)(cnsSampleTime + cnsDuration);
	IMediaSample *pOutSample = reinterpret_cast<CWMBuffer*>(pWMSample)->GetSample();
	pOutSample->AddRef();
	pWMSample->Release();
	pOutSample->SetMediaTime(&startTime, &endTime);
#if 0
	if( startTime < Reader()->m_StartTime )
		pOutSample->SetPreroll(TRUE);
	else
		pOutSample->SetPreroll(FALSE);
#endif
	startTime -= Reader()->m_StartTime;
	endTime -= Reader()->m_StartTime;
	pOutSample->SetTime(&startTime, &endTime);
	pOutSample->SetSyncPoint(dwFlags & WM_SF_CLEANPOINT);
	*pSample = pOutSample;

	return hr;
}
Example #11
HRESULT CAudioPin::DoBufferProcessingLoop(void)
{
  if (!m_bConnected) 
  {
    m_bThreadRunning = false;
    return S_OK;
  }
    
  Command com;
  OnThreadStartPlay();
  m_bThreadRunning = true;
  SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_NORMAL);

  do 
  {
    while (!CheckRequest(&com)) 
    {
      IMediaSample *pSample;
      HRESULT hr = GetDeliveryBuffer(&pSample,NULL,NULL,0);
      if (FAILED(hr)) 
      {
        Sleep(1);
        continue;	// go round again. Perhaps the error will go away
        // or the allocator is decommited & we will be asked to
        // exit soon.
      }

      // Virtual function user will override.
      hr = FillBuffer(pSample);

      if (hr == S_OK) 
      {
        // Some decoders seem to crash when we provide empty samples 
        if ((pSample->GetActualDataLength() > 0) && !m_pTsReaderFilter->IsStopping() && m_bConnected)
        {
          hr = Deliver(pSample); 
          m_sampleCount++ ;
        }
        else
        {
          m_bDiscontinuity = true;
        }
		
        pSample->Release();

        // downstream filter returns S_FALSE if it wants us to
        // stop or an error if it's reporting an error.
        if(hr != S_OK)
        {
          DbgLog((LOG_TRACE, 2, TEXT("Deliver() returned %08x; stopping"), hr));
          m_bThreadRunning = false;
          return S_OK;
        }
      } 
      else if (hr == S_FALSE) 
      {
        // derived class wants us to stop pushing data
        pSample->Release();
        DeliverEndOfStream();
        m_bThreadRunning = false;
        return S_OK;
      } 
      else 
      {
        // derived class encountered an error
        pSample->Release();
        DbgLog((LOG_ERROR, 1, TEXT("Error %08lX from FillBuffer!!!"), hr));
        DeliverEndOfStream();
        m_pFilter->NotifyEvent(EC_ERRORABORT, hr, 0);
        m_bThreadRunning = false;
        return hr;
      }
     // all paths release the sample
    }
    // For all commands sent to us there must be a Reply call!
	  if (com == CMD_RUN || com == CMD_PAUSE) 
    {
      Reply(NOERROR);
	  } 
    else if (com != CMD_STOP) 
    {
      Reply((DWORD) E_UNEXPECTED);
      DbgLog((LOG_ERROR, 1, TEXT("Unexpected command!!!")));
	  }
  } while (com != CMD_STOP);

  m_bThreadRunning = false;  
  return S_FALSE;
}
Example #12
//
// DoBufferProcessingLoop
//
// Grabs a buffer and calls the users processing function.
// Overridable, so that different delivery styles can be catered for.
HRESULT CDynamicSourceStream::DoBufferProcessingLoop(void)
{
    Command com;
    bool fOutputFormatChanged = false;

    OnThreadStartPlay();

    do
    {
        while(!CheckRequest(&com))
        {


            // CAutoUsingOutputPin::CAutoUsingOutputPin() only changes the value of hr
            // if an error occurs.
            HRESULT hr = S_OK;

            CAutoUsingOutputPin auopUsingOutputPin(this, &hr);
            if(FAILED(hr))
            {
                FatalError(hr);
                return hr;
            }

            if(m_fReconnectOutputPin)
            {
                hr = DynamicReconnect(NULL);

                m_fReconnectOutputPin = false;

                if(FAILED(hr))
                {
                    FatalError(hr);
                    return hr;
                }

                fOutputFormatChanged = true;
            }

            IMediaSample *pSample;

            hr = GetDeliveryBuffer(&pSample,NULL,NULL,0);
            if(FAILED(hr))
            {
                Sleep(1);
                continue;   // go round again. Perhaps the error will go away
                // or the allocator is decommited & we will be asked to
                // exit soon.
            }

            if(fOutputFormatChanged)
            {
                pSample->SetDiscontinuity(TRUE);
                fOutputFormatChanged = false;
            }

            // Virtual function user will override.
            hr = FillBuffer(pSample);

            if(hr == S_OK)
            {
                hr = Deliver(pSample);
                pSample->Release();

                // downstream filter returns S_FALSE if it wants us to
                // stop or an error if it's reporting an error.
                if(hr != S_OK)
                {
                    DbgLog((LOG_TRACE, 2, TEXT("Deliver() returned %08x; stopping"), hr));
                    return S_OK;
                }

            }
            else if(hr == S_FALSE)
            {
                // derived class wants us to stop pushing data
                pSample->Release();
                DeliverEndOfStream();
                return S_OK;
            }
            else
            {
                // derived class encountered an error
                pSample->Release();
                DbgLog((LOG_ERROR, 1, TEXT("Error %08lX from FillBuffer!!!"), hr));

                FatalError(hr);
                return hr;
            }
            // all paths release the sample
        }

        // For all commands sent to us there must be a Reply call!
        if(com == CMD_RUN || com == CMD_PAUSE)
        {
            Reply(NOERROR);
        }
        else if(com != CMD_STOP)
        {
            Reply((DWORD) E_UNEXPECTED);
            DbgLog((LOG_ERROR, 1, TEXT("Unexpected command!!!")));
        }



    } while(com != CMD_STOP);

    return S_FALSE;
}
Example #13
STDMETHODIMP CLAVSubtitleConsumer::ProcessFrame(LAVFrame *pFrame)
{
  CheckPointer(m_pProvider, E_FAIL);
  HRESULT hr = S_OK;
  LPDIRECT3DSURFACE9 pSurface = nullptr;

  // Wait for the requested frame
  m_evFrame.Wait();

  if (m_SubtitleFrame != nullptr) {
    int count = 0;
    if (FAILED(m_SubtitleFrame->GetBitmapCount(&count))) {
      count = 0;
    }

    if (count == 0) {
      SafeRelease(&m_SubtitleFrame);
      return S_FALSE;
    }

    BYTE *data[4] = {0};
    ptrdiff_t stride[4] = {0};
    LAVPixelFormat format = pFrame->format;
    int bpp = pFrame->bpp;

    if (pFrame->format == LAVPixFmt_DXVA2) {
      // Copy the surface, if required
      if (!(pFrame->flags & LAV_FRAME_FLAG_BUFFER_MODIFY)) {
        IMediaSample *pOrigSample = (IMediaSample *)pFrame->data[0];
        LPDIRECT3DSURFACE9 pOrigSurface = (LPDIRECT3DSURFACE9)pFrame->data[3];

        hr = m_pLAVVideo->GetD3DBuffer(pFrame);
        if (FAILED(hr)) {
          DbgLog((LOG_TRACE, 10, L"CLAVSubtitleConsumer::ProcessFrame: getting a new D3D buffer failed"));
        } else {
          IMediaSample *pNewSample = (IMediaSample *)pFrame->data[0];
          pSurface = (LPDIRECT3DSURFACE9)pFrame->data[3];
          IDirect3DDevice9 *pDevice = nullptr;
          if (SUCCEEDED(hr = pSurface->GetDevice(&pDevice))) {
            hr = pDevice->StretchRect(pOrigSurface, nullptr, pSurface, nullptr, D3DTEXF_NONE);
            if (SUCCEEDED(hr)) {
              pFrame->flags |= LAV_FRAME_FLAG_BUFFER_MODIFY|LAV_FRAME_FLAG_DXVA_NOADDREF;
              pOrigSurface = nullptr;

              // Release the surface, we only want to hold a ref on the media buffer
              pSurface->Release();
            }
            SafeRelease(&pDevice);
          }
          if (FAILED(hr)) {
            DbgLog((LOG_TRACE, 10, L"CLAVSubtitleConsumer::ProcessFrame: processing d3d buffer failed, restoring previous buffer"));
            pNewSample->Release();
            pSurface->Release();
            pFrame->data[0] = (BYTE *)pOrigSample;
            pFrame->data[3] = (BYTE *)pOrigSurface;
          }
        }
      }
      pSurface = (LPDIRECT3DSURFACE9)pFrame->data[3];

      D3DSURFACE_DESC surfaceDesc;
      pSurface->GetDesc(&surfaceDesc);

      D3DLOCKED_RECT LockedRect;
      hr = pSurface->LockRect(&LockedRect, nullptr, 0);
      if (FAILED(hr)) {
        DbgLog((LOG_TRACE, 10, L"pSurface->LockRect failed (hr: %X)", hr));
        SafeRelease(&m_SubtitleFrame);
        return E_FAIL;
      }

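      // NV12 layout: the luma plane is followed by the interleaved chroma plane at Height * Pitch.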
      data[0] = (BYTE *)LockedRect.pBits;
      data[1] = data[0] + (surfaceDesc.Height * LockedRect.Pitch);
      stride[0] = LockedRect.Pitch;
      stride[1] = LockedRect.Pitch;

      format = LAVPixFmt_NV12;
      bpp = 8;
    } else {
      if (!(pFrame->flags & LAV_FRAME_FLAG_BUFFER_MODIFY)) {
        CopyLAVFrameInPlace(pFrame);
      }
      memcpy(&data, &pFrame->data, sizeof(pFrame->data));
      memcpy(&stride, &pFrame->stride, sizeof(pFrame->stride));
    }

    RECT videoRect;
    ::SetRect(&videoRect, 0, 0, pFrame->width, pFrame->height);

    RECT subRect;
    m_SubtitleFrame->GetOutputRect(&subRect);

    ULONGLONG id;
    POINT position;
    SIZE size;
    const uint8_t *rgbData;
    int pitch;
    for (int i = 0; i < count; i++) {
      if (FAILED(m_SubtitleFrame->GetBitmap(i, &id, &position, &size, (LPCVOID *)&rgbData, &pitch))) {
        DbgLog((LOG_TRACE, 10, L"GetBitmap() failed on index %d", i));
        break;
      }
      ProcessSubtitleBitmap(format, bpp, videoRect, data, stride, subRect, position, size, rgbData, pitch);
    }

    if (pSurface)
      pSurface->UnlockRect();

    SafeRelease(&m_SubtitleFrame);
    return S_OK;
  }

  return S_FALSE;
}
Example #14
File: vtrans.cpp Project: hiplayer/mpc_hc
HRESULT CVideoTransformFilter::Receive(IMediaSample *pSample)
{
    // If the next filter downstream is the video renderer, then it may
    // be able to operate in DirectDraw mode which saves copying the data
    // and gives higher performance.  In that case the buffer which we
    // get from GetDeliveryBuffer will be a DirectDraw buffer, and
    // drawing into this buffer draws directly onto the display surface.
    // This means that any waiting for the correct time to draw occurs
    // during GetDeliveryBuffer, and that once the buffer is given to us
    // the video renderer will count it in its statistics as a frame drawn.
    // This means that any decision to drop the frame must be taken before
    // calling GetDeliveryBuffer.

    ASSERT(CritCheckIn(&m_csReceive));
    AM_MEDIA_TYPE *pmtOut, *pmt;
#ifdef _DEBUG
    FOURCCMap fccOut;
#endif
    HRESULT hr;
    ASSERT(pSample);
    IMediaSample * pOutSample;

    // If no output pin to deliver to then no point sending us data
    ASSERT (m_pOutput != NULL) ;

    // The source filter may dynamically ask us to start transforming from a
    // different media type than the one we're using now.  If we don't, we'll
    // draw garbage. (typically, this is a palette change in the movie,
    // but could be something more sinister like the compression type changing,
    // or even the video size changing)

#define rcS1 ((VIDEOINFOHEADER *)(pmt->pbFormat))->rcSource
#define rcT1 ((VIDEOINFOHEADER *)(pmt->pbFormat))->rcTarget

    pSample->GetMediaType(&pmt);
    if (pmt != NULL && pmt->pbFormat != NULL) {

	// spew some debug output
	ASSERT(!IsEqualGUID(pmt->majortype, GUID_NULL));
#ifdef _DEBUG
        fccOut.SetFOURCC(&pmt->subtype);
	LONG lCompression = HEADER(pmt->pbFormat)->biCompression;
	LONG lBitCount = HEADER(pmt->pbFormat)->biBitCount;
	LONG lStride = (HEADER(pmt->pbFormat)->biWidth * lBitCount + 7) / 8;
	lStride = (lStride + 3) & ~3;
        DbgLog((LOG_TRACE,3,TEXT("*Changing input type on the fly to")));
        DbgLog((LOG_TRACE,3,TEXT("FourCC: %lx Compression: %lx BitCount: %ld"),
		fccOut.GetFOURCC(), lCompression, lBitCount));
        DbgLog((LOG_TRACE,3,TEXT("biHeight: %ld rcDst: (%ld, %ld, %ld, %ld)"),
		HEADER(pmt->pbFormat)->biHeight,
		rcT1.left, rcT1.top, rcT1.right, rcT1.bottom));
        DbgLog((LOG_TRACE,3,TEXT("rcSrc: (%ld, %ld, %ld, %ld) Stride: %ld"),
		rcS1.left, rcS1.top, rcS1.right, rcS1.bottom,
		lStride));
#endif

	// now switch to using the new format.  I am assuming that the
	// derived filter will do the right thing when its media type is
	// switched and streaming is restarted.

	StopStreaming();
	m_pInput->CurrentMediaType() = *pmt;
	DeleteMediaType(pmt);
	// if this fails, playback will stop, so signal an error
	hr = StartStreaming();
	if (FAILED(hr)) {
	    return AbortPlayback(hr);
	}
    }

    // Now that we have noticed any format changes on the input sample, it's
    // OK to discard it.

    if (ShouldSkipFrame(pSample)) {
        MSR_NOTE(m_idSkip);
        m_bSampleSkipped = TRUE;
        return NOERROR;
    }

    // Set up the output sample
    hr = InitializeOutputSample(pSample, &pOutSample);

    if (FAILED(hr)) {
        return hr;
    }

    m_bSampleSkipped = FALSE;

    // The renderer may ask us to on-the-fly to start transforming to a
    // different format.  If we don't obey it, we'll draw garbage

#define rcS ((VIDEOINFOHEADER *)(pmtOut->pbFormat))->rcSource
#define rcT ((VIDEOINFOHEADER *)(pmtOut->pbFormat))->rcTarget

    pOutSample->GetMediaType(&pmtOut);
    if (pmtOut != NULL && pmtOut->pbFormat != NULL) {

	// spew some debug output
	ASSERT(!IsEqualGUID(pmtOut->majortype, GUID_NULL));
#ifdef _DEBUG
        fccOut.SetFOURCC(&pmtOut->subtype);
	LONG lCompression = HEADER(pmtOut->pbFormat)->biCompression;
	LONG lBitCount = HEADER(pmtOut->pbFormat)->biBitCount;
	LONG lStride = (HEADER(pmtOut->pbFormat)->biWidth * lBitCount + 7) / 8;
	lStride = (lStride + 3) & ~3;
        DbgLog((LOG_TRACE,3,TEXT("*Changing output type on the fly to")));
        DbgLog((LOG_TRACE,3,TEXT("FourCC: %lx Compression: %lx BitCount: %ld"),
		fccOut.GetFOURCC(), lCompression, lBitCount));
        DbgLog((LOG_TRACE,3,TEXT("biHeight: %ld rcDst: (%ld, %ld, %ld, %ld)"),
		HEADER(pmtOut->pbFormat)->biHeight,
		rcT.left, rcT.top, rcT.right, rcT.bottom));
        DbgLog((LOG_TRACE,3,TEXT("rcSrc: (%ld, %ld, %ld, %ld) Stride: %ld"),
		rcS.left, rcS.top, rcS.right, rcS.bottom,
		lStride));
#endif

	// now switch to using the new format.  I am assuming that the
	// derived filter will do the right thing when its media type is
	// switched and streaming is restarted.

	StopStreaming();
	m_pOutput->CurrentMediaType() = *pmtOut;
	DeleteMediaType(pmtOut);
	hr = StartStreaming();

	if (SUCCEEDED(hr)) {
 	    // a new format, means a new empty buffer, so wait for a keyframe
	    // before passing anything on to the renderer.
	    // !!! a keyframe may never come, so give up after 30 frames
            DbgLog((LOG_TRACE,3,TEXT("Output format change means we must wait for a keyframe")));
	    m_nWaitForKey = 30;

	// if this fails, playback will stop, so signal an error
	} else {

            //  Must release the sample before calling AbortPlayback
            //  because we might be holding the win16 lock or
            //  ddraw lock
            pOutSample->Release();
	    AbortPlayback(hr);
            return hr;
	}
    }

    // After a discontinuity, we need to wait for the next key frame
    if (pSample->IsDiscontinuity() == S_OK) {
        DbgLog((LOG_TRACE,3,TEXT("Non-key discontinuity - wait for keyframe")));
	m_nWaitForKey = 30;
    }

    // Start timing the transform (and log it if PERF is defined)

    if (SUCCEEDED(hr)) {
        m_tDecodeStart = timeGetTime();
        MSR_START(m_idTransform);

        // have the derived class transform the data
        hr = Transform(pSample, pOutSample);

        // Stop the clock (and log it if PERF is defined)
        MSR_STOP(m_idTransform);
        m_tDecodeStart = timeGetTime()-m_tDecodeStart;
        m_itrAvgDecode = m_tDecodeStart*(10000/16) + 15*(m_itrAvgDecode/16);

        // Maybe we're waiting for a keyframe still?
        if (m_nWaitForKey)
            m_nWaitForKey--;
        if (m_nWaitForKey && pSample->IsSyncPoint() == S_OK)
	    m_nWaitForKey = FALSE;

        // if so, then we don't want to pass this on to the renderer
        if (m_nWaitForKey && hr == NOERROR) {
            DbgLog((LOG_TRACE,3,TEXT("still waiting for a keyframe")));
	    hr = S_FALSE;
	}
    }

    if (FAILED(hr)) {
        DbgLog((LOG_TRACE,1,TEXT("Error from video transform")));
    } else {
        // the Transform() function can return S_FALSE to indicate that the
        // sample should not be delivered; we only deliver the sample if it's
        // really S_OK (same as NOERROR, of course.)
        // Try not to return S_FALSE to a direct draw buffer (it's wasteful)
        // Try to take the decision earlier - before you get it.

        if (hr == NOERROR) {
    	    hr = m_pOutput->Deliver(pOutSample);
        } else {
            // S_FALSE returned from Transform is a PRIVATE agreement
            // We should return NOERROR from Receive() in this case because returning S_FALSE
            // from Receive() means that this is the end of the stream and no more data should
            // be sent.
            if (S_FALSE == hr) {

                //  We must Release() the sample before doing anything
                //  like calling the filter graph because having the
                //  sample means we may have the DirectDraw lock
                //  (== win16 lock on some versions)
                pOutSample->Release();
                m_bSampleSkipped = TRUE;
                if (!m_bQualityChanged) {
                    m_bQualityChanged = TRUE;
                    NotifyEvent(EC_QUALITY_CHANGE,0,0);
                }
                return NOERROR;
            }
        }
    }

    // release the output buffer. If the connected pin still needs it,
    // it will have addrefed it itself.
    pOutSample->Release();
    ASSERT(CritCheckIn(&m_csReceive));

    return hr;
}
Example #15
DWORD WINAPI ReceiveThread(PVOID param)
{
	HRESULT hr;
	ReceiveParam *receiveParam = (ReceiveParam*)param;
	HANDLE PushSemaphore = receiveParam->PushSemaphore;
	HANDLE PushDataMutex = receiveParam->PushDataMutex;
	std::map<REFERENCE_TIME,IMediaSample*>& SampleList = *receiveParam->SampleList;
	NetReceiveFilter* filter = receiveParam->filter;
	SOCKET socket ;
	delete receiveParam;

	LONG packSize;
	//	CMediaSample *tmpSample = (CMediaSample*) malloc(sizeof(CMediaSample));
	REFERENCE_TIME startTime = 0,endTime = 0; //play immediately
	REFERENCE_TIME mediaStartTime = 0,mediaEndTime = 0;
	AM_SAMPLE2_PROPERTIES sample2Properties;

	NetReceiveOutputPin* outputPin = reinterpret_cast<NetReceiveOutputPin*>(filter->GetPin(0));
	assert(outputPin != NULL);

	filter->waitForNewSocket();

	while(true)
	{

		IMediaSample *sample = filter->GetFreeSample();
		if (sample == NULL)
		{
			ErrorPrint("Get free sample error");
			return 1;
		}

		PBYTE dataPointer = NULL;
		hr = sample->GetPointer(&dataPointer);
		if (FAILED(hr))
		{
			ErrorPrint("Get data pointer error",hr);
			sample->Release();
			return 1;
		}

		CAutoLock lock(filter->getSocketLock());
		socket = filter->getSocket();

		if (!receiveData(socket, (char*)&sample2Properties, sizeof(sample2Properties)))
		{
			ErrorPrint("Get pack Properties error");
			sample->Release();
			filter->waitForNewSocket();
			continue;
		}
		packSize = sample2Properties.lActual;

		if (packSize > 100 * 1024)
		{
			std::cout<<"Exceed 100K:"<<packSize/1024<<std::endl;
		}

		AM_MEDIA_TYPE mediaType;
		filter->GetPin(0)->ConnectionMediaType(&mediaType);

		if (filter->getPlayMode() == 1)
		{
// 			static const unsigned long  offset = 10000000; //increase the delay so we buffer a little more
// 			sample2Properties.tStart +=offset;
// 			sample2Properties.tStop += offset;

			sample2Properties.cbData = sizeof(sample2Properties) - 9;
			sample2Properties.pbBuffer= dataPointer;

			IMediaSample2 *mediaSample2;
			hr = sample->QueryInterface(IID_IMediaSample2, (void**)&mediaSample2);
			if (FAILED(hr))
			{
				ErrorPrint("Get media sample2 interface error",hr);
				sample->Release();
				return 1;
			}
			ComReleaser mediaSample2Releaser(mediaSample2);

			hr = mediaSample2->SetProperties(sample2Properties.cbData, (BYTE*)&sample2Properties);
			if (FAILED(hr))
			{
				ErrorPrint("Set sample properties error");
			}
			sample->SetTime(&sample2Properties.tStart, &sample2Properties.tStop);

			sample->GetTime(&startTime,&endTime);
		}
		else
		{
			startTime = 0;
			endTime = 0;
		}

		ASSERT(packSize <= sample->GetSize());
		sample->SetActualDataLength(packSize);
		sample->SetTime(&startTime, &endTime);

		if(!receiveData(socket, (char*)dataPointer, packSize))
		{
			ErrorPrint("Receive pack errors");
			sample->Release();
			filter->waitForNewSocket();
			continue;
		}

		//notify the PUSH thread to transfer the data
		WaitForSingleObject(PushDataMutex, INFINITE);
		SampleList.insert(std::make_pair(startTime, sample));
		if(filter->getPlayMode() == 0) //play-ASAP mode: notify the push thread as soon as there is a single sample
		{
			if (SampleList.size() == 1)
			{
				ReleaseSemaphore(PushSemaphore, 1, NULL);
			}
		}
		else if (filter->getPlayMode() == 1)//timestamp mode: buffer as many samples as possible, but not too many
		{
			if (SampleList.size() >= 24 * 10)
			{
				ReleaseSemaphore(PushSemaphore, 1, NULL);
			}
		}
		ReleaseMutex(PushDataMutex);

		outputPin->newTransSample(sample2Properties, dataPointer); //notify that the sample should be forwarded
	}

	return 0;
}
Example #16
HRESULT CMPIptvSourceStream::DoBufferProcessingLoop(void) 
{
  Command com;

  OnThreadStartPlay();

  WSADATA wsaData;
  WSAStartup(MAKEWORD(2, 2), &wsaData);

#ifdef logging
  LogDebug("Starting grabber thread");
#endif
  sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  if (localip) {
    addr.sin_addr.s_addr = inet_addr(localip);
  } else {
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
  }
  addr.sin_port = htons((u_short)port);

  ip_mreq imr; 
  imr.imr_multiaddr.s_addr = inet_addr(ip);
  if (localip) {
    imr.imr_interface.s_addr = inet_addr(localip);
  } else {
    imr.imr_interface.s_addr = INADDR_ANY;
  }
  unsigned long nonblocking = 1;

  if((m_socket = socket(AF_INET, SOCK_DGRAM, 0)) >= 0)
  {
    /*		u_long argp = 1;
    ioctlsocket(m_socket, FIONBIO, &argp);
    */
    DWORD dw = TRUE;
    int dwLen = sizeof(dw);
    if(setsockopt(m_socket, SOL_SOCKET, SO_REUSEADDR, (const char*)&dw, sizeof(dw)) < 0)
    {
      closesocket(m_socket);
      m_socket = -1;
    }

    if(setsockopt(m_socket, SOL_SOCKET, SO_BROADCAST, (const char*)&dw, sizeof(dw)) < 0)
    {
      closesocket(m_socket);
      m_socket = -1;
    }

    getsockopt(m_socket, SOL_SOCKET, SO_RCVBUF, (char *)&dw, &dwLen);
#ifdef logging
    LogDebug("Socket receive buffer is: %d (%d)", dw, dwLen);

    LogDebug("Trying to set receive buffer to %d", IPTV_SOCKET_BUFFER_SIZE);
#endif
    dw = IPTV_SOCKET_BUFFER_SIZE;
    if(setsockopt(m_socket, SOL_SOCKET, SO_RCVBUF, (const char*)&dw, sizeof(dw)) < 0)
    {
      closesocket(m_socket);
      m_socket = -1;
    }

    dwLen = sizeof(dw);
    getsockopt(m_socket, SOL_SOCKET, SO_RCVBUF, (char *)&dw, &dwLen);
#ifdef logging
    LogDebug("New socket receive buffer is: %d (%d)", dw, dwLen);
#endif
    if (ioctlsocket(m_socket, FIONBIO, &nonblocking) != 0) {
      closesocket(m_socket);
      m_socket = -1;
    }

    if(bind(m_socket, (struct sockaddr*)&addr, sizeof(addr)) < 0)
    {
      closesocket(m_socket);
      m_socket = -1;
    }

    if(IN_MULTICAST(htonl(imr.imr_multiaddr.s_addr)))
    {
      int ret = setsockopt(m_socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const char*)&imr, sizeof(imr));
      if(ret < 0) ret = ::WSAGetLastError();
      ret = ret;
    }
  }

  SetThreadPriority(m_hThread, THREAD_PRIORITY_TIME_CRITICAL);

  int fromlen = sizeof(addr);

  m_buffsize = 0;
  timeval tv; //Will be used for select() below
  tv.tv_sec = 0;
  tv.tv_usec = 100000; //100 msec
  do 
  {
    BOOL requestAvail;
    while ((requestAvail = CheckRequest(&com)) == FALSE) 
    {
      DWORD startRecvTime;
      startRecvTime = GetTickCount();
#ifdef FILL_DIRECTLY_INTO_BUFFER
      IMediaSample *pSample;
      char *pData;
      long cbData;

      HRESULT hr = GetDeliveryBuffer(&pSample,NULL,NULL,0);
      if (FAILED(hr))
        continue;	
      CheckPointer(pSample, E_POINTER);
    // Access the sample's data buffer
      pSample->GetPointer((BYTE **)&pData);
      cbData = pSample->GetSize();
#endif
      do 
      {
        //Try to read the complete remaining buffer size
        //But stop reading after 100ms have passed (slow streams like internet radio)
#ifdef FILL_DIRECTLY_INTO_BUFFER
        int len = recvfrom(m_socket, &pData[m_buffsize], cbData - m_buffsize, 0, (SOCKADDR*)&addr, &fromlen);
#else
        int len = recvfrom(m_socket, &m_buffer[m_buffsize], IPTV_BUFFER_SIZE - m_buffsize, 0, (SOCKADDR*)&addr, &fromlen);
#endif
        if(len <= 0)
        {
          //Wait until there's something in the receive buffer
          fd_set myFDsocket;
          myFDsocket.fd_count = 1;
          myFDsocket.fd_array[0] = m_socket;
          int selectRet = select(0, &myFDsocket, NULL, NULL, &tv);
#ifdef logging
          LogDebug("select return code: %d", selectRet);
#endif
          continue; //On error or nothing read just repeat the loop
        }
#ifdef logging
        LogDebug("Read %d bytes at pos %d of %d", len, m_buffsize, IPTV_BUFFER_SIZE); 
#endif
        m_buffsize += len;
#ifdef FILL_DIRECTLY_INTO_BUFFER
      } while ((requestAvail = CheckRequest(&com)) == FALSE && m_buffsize < (cbData           * 3 / 4) && abs((signed long)(GetTickCount() - startRecvTime)) < 100);
#else
      } while ((requestAvail = CheckRequest(&com)) == FALSE && m_buffsize < (IPTV_BUFFER_SIZE * 3 / 4) && abs((signed long)(GetTickCount() - startRecvTime)) < 100);
#endif
      if (requestAvail) break;
#ifndef FILL_DIRECTLY_INTO_BUFFER
      if (m_buffsize == 0) continue; //100ms passed but no buffer received
      IMediaSample *pSample;
      HRESULT hr = GetDeliveryBuffer(&pSample,NULL,NULL,0);
      if (FAILED(hr))
      {
        continue;	
        // go round again. Perhaps the error will go away
        // or the allocator is decommited & we will be asked to
        // exit soon.
      }
#endif
      // fill buffer
      hr = FillBuffer(pSample);

      if (hr == S_OK) 
      {
        hr = Deliver(pSample);
        pSample->Release();

        // downstream filter returns S_FALSE if it wants us to
        // stop or an error if it's reporting an error.
        if(hr != S_OK)
        {
#ifdef logging
          LogDebug("Deliver() returned %08x; stopping", hr);
#endif
          if(m_socket >= 0) {closesocket(m_socket); m_socket = -1;}
          WSACleanup();
          return S_OK;
        }

      } else if (hr == S_FALSE) {
        // derived class wants us to stop pushing data
        pSample->Release();
        DeliverEndOfStream();
        if(m_socket >= 0) {closesocket(m_socket); m_socket = -1;}
        WSACleanup();
        return S_OK;
      } else {
        // derived class encountered an error
        pSample->Release();
#ifdef logging
        LogDebug("Error %08lX from FillBuffer!!!", hr);
#endif
        DeliverEndOfStream();
        m_pFilter->NotifyEvent(EC_ERRORABORT, hr, 0);
        if(m_socket >= 0) {closesocket(m_socket); m_socket = -1;}
        WSACleanup();
        return hr;
      }

      // all paths release the sample
    }

    // For all commands sent to us there must be a Reply call!

    if (com == CMD_RUN || com == CMD_PAUSE) {
      Reply(NOERROR);
    } else if (com != CMD_STOP) {
      Reply((DWORD) E_UNEXPECTED);
#ifdef logging
      LogDebug("Unexpected command %d!!!", com);
#endif
    }
  } while (com != CMD_STOP);

  // normal stop: close the socket and clean up Winsock, as the other exit paths do
  if(m_socket >= 0) {closesocket(m_socket); m_socket = -1;}
  WSACleanup();
  return S_FALSE;
}
Example #17
HRESULT 
MediaChunk::Write(Atom* patm)
{
    // record chunk start position
    LONGLONG posChunk = patm->Position() + patm->Length();

	if (m_bOldIndexFormat)
	{
		long cBytes = 0;

		// ensure that we don't break in the middle of a sample (Maxim Kartavenkov)
		const int MAX_PCM_SIZE = 22050;
		int max_bytes = MAX_PCM_SIZE - (MAX_PCM_SIZE % m_pTrack->Handler()->BlockAlign());

		list<IMediaSample*>::iterator it = m_Samples.begin();
		long cAvail = 0;
		BYTE* pBuffer = NULL;

		for (;;)
		{
			if (!cAvail)
			{
				if (it == m_Samples.end())
				{
					break;
				}
				IMediaSample* pSample = *it++;
				pSample->GetPointer(&pBuffer);
				cAvail = pSample->GetActualDataLength();
				REFERENCE_TIME tStart, tStop;
				if (SUCCEEDED(pSample->GetTime(&tStart, &tStop)))
				{
					m_pTrack->SetOldIndexStart(tStart);
				}
			}
			long cThis = max_bytes - cBytes;
			if (cThis > cAvail)
			{
				cThis = cAvail;
			}
			
			int cActual = 0;
			m_pTrack->Handler()->WriteData(patm, pBuffer, cThis, &cActual);
			cBytes += cActual;
			cAvail -= cActual;
			pBuffer += cActual;

			if (cBytes >= max_bytes)
			{
				m_pTrack->OldIndex(posChunk, cBytes);
				posChunk = patm->Position() + patm->Length();				
				cBytes = 0;
			}
		}
		if (cBytes)
		{
			m_pTrack->OldIndex(posChunk, cBytes);
		}
		return S_OK;
	}

    // Remember that large H264 samples may be broken 
    // across several buffers, with Sync flag at start and
    // time on last buffer.
    bool bSync = false;
    long cBytes = 0;
	long nSamples = 0;

    // loop once through the samples writing the data
    list<IMediaSample*>::iterator it;
    for (it = m_Samples.begin(); it != m_Samples.end(); it++)
    {
        IMediaSample* pSample = *it;

        // record positive sync flag, but for
        // multiple-buffer samples, only one sync flag will be present
        // so don't overwrite with later negatives.
        if (pSample->IsSyncPoint() == S_OK)
        {
            bSync = true;
        }

		// write payload, including any transformation (eg BSF to length-prepended)
        BYTE* pBuffer;
        pSample->GetPointer(&pBuffer);
		int cActual = 0;
		m_pTrack->Handler()->WriteData(patm, pBuffer, pSample->GetActualDataLength(), &cActual);
		cBytes += cActual;
        REFERENCE_TIME tStart, tEnd;
        HRESULT hr = pSample->GetTime(&tStart, &tEnd);
        if (SUCCEEDED(hr))
        {
			// this is the last buffer in the sample
			m_pTrack->IndexSample(bSync, tStart, tEnd, cBytes);
            // reset for new sample
            bSync = false;
			cBytes = 0;
			nSamples++;
        }
    }

    // add chunk position to index
	m_pTrack->IndexChunk(posChunk, nSamples);

    return S_OK;
}
Example #18
static MSWindows::DWORD WINAPI RunningThreadProc(
  LPVOID lpParameter   // thread data
)
{
//	ErrorCode hr;

	CMNGDecoder::OutputPin* p = (CMNGDecoder::OutputPin*)lpParameter;

	IMediaSample* sample;
	p->m_pAllocator->GetBuffer(0, &sample);
	if (sample)
	{
		LSampleData sampledata;
		sample->LockBits(&sampledata);

		ZLIBDecoder* decoder = new ZLIBDecoder(p->GetFilter());

	//	decoder->start();
	//	decoder->start_block();

		unsigned int pixelBytes;
		if (p->GetFilter()->m_ihdr.ColorType == 0)	// grayscale
		{
			pixelBytes = 1;
		}
		else if (p->GetFilter()->m_ihdr.ColorType == 2)	// rgb
		{
			pixelBytes = 3;
		}
		else if (p->GetFilter()->m_ihdr.ColorType == 3)	// indexed
		{
			pixelBytes = 1;
		}
		else if (p->GetFilter()->m_ihdr.ColorType == 4)	// grayscale with alpha
		{
			pixelBytes = 2;
		}
		else if (p->GetFilter()->m_ihdr.ColorType == 6)	// rgb with alpha
		{
			pixelBytes = 4;
		}
		else
			ASSERT(0);

		unsigned int scanlineBytes = pixelBytes * p->GetFilter()->m_ihdr.Width;

		// We use two buffers for two scanlines, and we reserve a pixel to left of the start of the scanline
		uint8* scanlineBuffer[2];
		scanlineBuffer[0] = new uint8[scanlineBytes+pixelBytes];
		scanlineBuffer[1] = new uint8[scanlineBytes+pixelBytes];
		// clear the 'previous' scanline to zero
		std::memset(scanlineBuffer[1], 0, scanlineBytes+pixelBytes);
		std::memset(scanlineBuffer[0], 0, pixelBytes);	// clear the pixel before the start of the scanline to zero

		uint8* scanlinePtr[2];
		scanlinePtr[0] = scanlineBuffer[0]+pixelBytes;
		scanlinePtr[1] = scanlineBuffer[1]+pixelBytes;

		unsigned int width = p->GetFilter()->m_ihdr.Width;
		unsigned int height = p->GetFilter()->m_ihdr.Height;
		unsigned int curscan = 0;

		for (unsigned int row = 0; row < height; row++)
		{
			uint8* dest = sampledata.idata + sampledata.rowbytes*row;

			// filter byte
			uint8 filterType;
			decoder->Read(&filterType, 1);

			decoder->Read(scanlinePtr[curscan], scanlineBytes);

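			// Undo the PNG-style scanline filter (None/Sub/Up/Average/Paeth) in place.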
			switch (filterType)
			{
			case 0:	// None
				// Do nothing
				break;

			case 1:	// Sub
				{
					uint8* curx = scanlinePtr[curscan];
					uint8* recona = curx - pixelBytes;
					unsigned int x;
					switch (pixelBytes)
					{
					case 4:
						for (x = 0; x < width; x++)
						{
							*curx++ += *recona++;
							*curx++ += *recona++;
							*curx++ += *recona++;
							*curx++ += *recona++;
						}
						break;

					case 3:
						for (x = 0; x < width; x++)
						{
							*curx++ += *recona++;
							*curx++ += *recona++;
							*curx++ += *recona++;
						}
						break;

					case 2:
						for (x = 0; x < width; x++)
						{
							*curx++ += *recona++;
							*curx++ += *recona++;
						}
						break;

					case 1:
						for (x = 0; x < width; x++)
						{
							*curx++ += *recona++;
						}
						break;
					}
				}
				break;

			case 2:	// Up
				{
					uint8* curx = scanlinePtr[curscan];
					uint8* reconb = scanlinePtr[!curscan];
					unsigned int x;
					switch (pixelBytes)
					{
					case 4:
						for (x = 0; x < width; x++)
						{
							*curx++ += *reconb++;
							*curx++ += *reconb++;
							*curx++ += *reconb++;
							*curx++ += *reconb++;
						}
						break;

					case 3:
						for (x = 0; x < width; x++)
						{
							*curx++ += *reconb++;
							*curx++ += *reconb++;
							*curx++ += *reconb++;
						}
						break;

					case 2:
						for (x = 0; x < width; x++)
						{
							*curx++ += *reconb++;
							*curx++ += *reconb++;
						}
						break;

					case 1:
						for (x = 0; x < width; x++)
						{
							*curx++ += *reconb++;
						}
						break;
					}
				}
				break;

			case 3:	// Average
				{
					uint8* curx = scanlinePtr[curscan];
					uint8* recona = curx - pixelBytes;
					uint8* reconb = scanlinePtr[!curscan];
					unsigned int x;
					switch (pixelBytes)
					{
					case 4:
						for (x = 0; x < width; x++)
						{
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
						}
						break;

					case 3:
						for (x = 0; x < width; x++)
						{
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
						}
						break;

					case 2:
						for (x = 0; x < width; x++)
						{
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
						}
						break;

					case 1:
						for (x = 0; x < width; x++)
						{
							*curx++ += ((int)*recona++ + (int)*reconb++) / 2;
						}
						break;
					}
				}
				break;

			case 4:	// Paeth
				{
					uint8* curx = scanlinePtr[curscan];
					uint8* recona = curx - pixelBytes;
					uint8* reconb = scanlinePtr[!curscan];
					uint8* reconc = reconb - pixelBytes;
					for (unsigned int x = 0; x < width*pixelBytes; x++)
					{
						*curx++ += Imaging::PaethPredictor(*recona++, *reconb++, *reconc++);
					}
				}
				break;

			default:
				ASSERT(0);
			}

			uint8* src = scanlinePtr[curscan];

			if (p->GetFilter()->m_ihdr.ColorType == 2)	// rgb
			{
				for (unsigned int x = 0; x < width; x++)
				{
					dest[0] = src[2];
					dest[1] = src[1];
					dest[2] = src[0];

					dest += 3;
					src += 3;
				}
			}
			else if (p->GetFilter()->m_ihdr.ColorType == 3)	// indexed color
			{
				for (unsigned int x = 0; x < width; x++)
				{
					dest[0] = p->GetFilter()->m_paletteEntry[*src].b;
					dest[1] = p->GetFilter()->m_paletteEntry[*src].g;
					dest[2] = p->GetFilter()->m_paletteEntry[*src].r;

					dest += 3;
					src += 1;
				}
			}
			else if (p->GetFilter()->m_ihdr.ColorType == 6)	// rgb+alpha
			{
				for (unsigned int x = 0; x < width; x++)
				{
					// premultiply
					dest[0] = src[2] * (int)src[3] / 255;
					dest[1] = src[1] * (int)src[3] / 255;
					dest[2] = src[0] * (int)src[3] / 255;
					dest[3] = src[3];
					dest += 4;
					src += 4;
				}
			}

			curscan = !curscan;	// swap between the two scanline buffers
		}

		delete [] scanlineBuffer[0];
		delete [] scanlineBuffer[1];

		sample->UnlockBits();

		p->m_pInputPin->Receive(sample);
	}

	p->ConnectedTo()->EndOfStream();

	return 0;
}
Example #19
HRESULT CBufferFilter::Receive(IMediaSample* pSample)
{
	/*  Check for other streams and pass them on */
	AM_SAMPLE2_PROPERTIES* const pProps = m_pInput->SampleProps();
	if(pProps->dwStreamId != AM_STREAM_MEDIA)
		return m_pOutput->Deliver(pSample);

	HRESULT hr;
	ASSERT(pSample);
	IMediaSample* pOutSample;

	ASSERT(m_pOutput != NULL);

	// Set up the output sample
	hr = InitializeOutputSample(pSample, &pOutSample);

	if(FAILED(hr))
		return hr;

	// Start timing the transform (if PERF is defined)
	MSR_START(m_idTransform);

	// have the derived class transform the data

	hr = Transform(pSample, pOutSample);

	// Stop the clock and log it (if PERF is defined)
	MSR_STOP(m_idTransform);

	if(FAILED(hr)) {
		DbgLog((LOG_TRACE,1,TEXT("Error from transform")));
	}
	else {
		// the Transform() function can return S_FALSE to indicate that the
		// sample should not be delivered; we only deliver the sample if it's
		// really S_OK (same as NOERROR, of course.)
		if(hr == NOERROR) {
			hr = m_pOutput->Deliver(pOutSample);
			m_bSampleSkipped = FALSE;   // last thing no longer dropped
		}
		else {
			// S_FALSE returned from Transform is a PRIVATE agreement
			// We should return NOERROR from Receive() in this case because returning S_FALSE
			// from Receive() means that this is the end of the stream and no more data should
			// be sent.
			if(S_FALSE == hr) {

				//  Release the sample before calling notify to avoid
				//  deadlocks if the sample holds a lock on the system
				//  such as DirectDraw buffers do
				pOutSample->Release();
				m_bSampleSkipped = TRUE;
				if(!m_bQualityChanged) {
					NotifyEvent(EC_QUALITY_CHANGE,0,0);
					m_bQualityChanged = TRUE;
				}
				return NOERROR;
			}
		}
	}

	// release the output buffer. If the connected pin still needs it,
	// it will have addrefed it itself.
	pOutSample->Release();

	return hr;
}
Example #20
HRESULT CWavPackDSSplitterInputPin::DeliverOneFrame(WavPack_parser* wpp)
{
    IMediaSample *pSample;
    BYTE *Buffer = NULL;
    HRESULT hr;
    unsigned long FrameLenBytes = 0, FrameLenSamples = 0, FrameIndex = 0;

    // Get a new media sample
    hr = m_pParentFilter->m_pOutputPin->GetDeliveryBuffer(&pSample, NULL, NULL, 0); 
    if (FAILED(hr))
    {
        DebugLog("CWavPackDSSplitterInputPin::DoProcessingLoop GetDeliveryBuffer failed 0x%08X",hr);
        return hr;
    }
    
    hr = pSample->GetPointer(&Buffer);
    if (FAILED(hr))
    {
        DebugLog("CWavPackDSSplitterInputPin::DoProcessingLoop GetPointer failed 0x%08X",hr);
        pSample->Release();
        return hr;
    }
    
    FrameLenBytes = wavpack_parser_read_frame(wpp, Buffer,
        &FrameIndex, &FrameLenSamples);
    if(!FrameLenBytes)
    {
        // Something bad happened, let's end here
        pSample->Release();
        m_pParentFilter->m_pOutputPin->DeliverEndOfStream();
        // TODO : check if we need to stop the thread
        DebugLog("CWavPackDSSplitterInputPin::DoProcessingLoop wavpack_parser_read_frame error");
        return hr;
    }
    pSample->SetActualDataLength(FrameLenBytes);
    
    if(wpp->is_correction == TRUE)
    {    
        IMediaSample2 *pSample2;
        if (SUCCEEDED(pSample->QueryInterface(IID_IMediaSample2, (void **)&pSample2)))
        {
            AM_SAMPLE2_PROPERTIES ams2p;
            ZeroMemory(&ams2p, sizeof(AM_SAMPLE2_PROPERTIES));
            hr = pSample2->GetProperties(sizeof(AM_SAMPLE2_PROPERTIES), (PBYTE)&ams2p);
            if(SUCCEEDED(hr))
            {            
                ams2p.dwStreamId = AM_STREAM_BLOCK_ADDITIONNAL;
                pSample2->SetProperties(sizeof(AM_SAMPLE2_PROPERTIES), (PBYTE)&ams2p);
            }
            pSample2->Release();
            pSample2 = NULL;
        }
    }
    
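    // Convert the frame's sample-index range into 100 ns reference time (10,000,000 units per second).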
    REFERENCE_TIME rtStart, rtStop;
    rtStart = FrameIndex;
    rtStop = rtStart + FrameLenSamples;
    rtStart = (rtStart * 10000000) / wpp->sample_rate;
    rtStop = (rtStop * 10000000) / wpp->sample_rate;
    
    rtStart -= m_pParentFilter->m_rtStart;
    rtStop  -= m_pParentFilter->m_rtStart;
    
    pSample->SetTime(&rtStart, &rtStop);
    pSample->SetPreroll(FALSE);
    pSample->SetDiscontinuity(m_bDiscontinuity);
    if(m_bDiscontinuity)
    {
        m_bDiscontinuity = FALSE;
    }
    pSample->SetSyncPoint(TRUE);
    
    // Deliver the sample
    hr = m_pParentFilter->m_pOutputPin->Deliver(pSample);
    pSample->Release();
    pSample = NULL;
    if (FAILED(hr))
    {
        DebugLog("CWavPackDSSplitterInputPin::DoProcessingLoop Deliver failed 0x%08X",hr);
        return hr;
    }

    return S_OK;
}
Example #21
// the loop executed while running
HRESULT FCapturePin::DoBufferProcessingLoop(void) 
{
	Command com;

	OnThreadStartPlay();
	int32 LastFrame = -1;

	do 
	{
		while (!CheckRequest(&com)) 
		{
			// Wait for the next frame from the game thread 
			if ( !GCaptureSyncEvent->Wait(1000) )
			{
				FPlatformProcess::Sleep( 0.01f );
				continue;	// Reevaluate request
			}

			IMediaSample *pSample;
			int32 FrameNumber = FAVIWriter::GetInstance()->GetFrameNumber();
			if (FrameNumber > LastFrame)
			{
				UE_LOG(LogMovieCapture, Log, TEXT(" FrameNumber > LastFrame = %d > %d"), FrameNumber, LastFrame);
				HRESULT hr = GetDeliveryBuffer(&pSample,NULL,NULL,0);
				if (FAILED(hr)) 
				{
					if (pSample)
					{
						pSample->Release();
					}
				}
				else
				{
					LastFrame = FrameNumber;
					hr = FillBuffer(pSample);

					if (hr == S_OK) 
					{
						hr = Deliver(pSample);
						pSample->Release();
						// downstream filter returns S_FALSE if it wants us to
						// stop or an error if it's reporting an error.
						if(hr != S_OK)
						{
							UE_LOG(LogMovieCapture, Log, TEXT("Deliver() returned %08x; stopping"), hr);
							return S_OK;
						}
					}
				}
			}
			// Allow the game thread read more data
			GCaptureSyncEvent->Trigger();
		}

		// For all commands sent to us there must be a Reply call!
		if (com == CMD_RUN || com == CMD_PAUSE) 
		{
			Reply(NOERROR);
		} 
		else if (com != CMD_STOP) 
		{
			Reply((uint32) E_UNEXPECTED);
		}
	} while (com != CMD_STOP);

	return S_FALSE;
}
Example #22
File: pullpin.cpp Project: hiplayer/mpc_hc
void
CPullPin::Process(void)
{
    // is there anything to do?
    if (m_tStop <= m_tStart) {
	EndOfStream();
	return;
    }

    BOOL bDiscontinuity = TRUE;

    // if there is more than one sample at the allocator,
    // then try to queue 2 at once in order to overlap.
    // -- get buffer count and required alignment
    ALLOCATOR_PROPERTIES Actual;
    HRESULT hr = m_pAlloc->GetProperties(&Actual);

    // align the start position downwards
    REFERENCE_TIME tStart = AlignDown(m_tStart / UNITS, Actual.cbAlign) * UNITS;
    REFERENCE_TIME tCurrent = tStart;

    REFERENCE_TIME tStop = m_tStop;
    if (tStop > m_tDuration) {
	tStop = m_tDuration;
    }

    // align the stop position - may be past stop, but that
    // doesn't matter
    REFERENCE_TIME tAlignStop = AlignUp(tStop / UNITS, Actual.cbAlign) * UNITS;


    DWORD dwRequest;

    if (!m_bSync) {

	//  Break out of the loop either if we get to the end or we're asked
	//  to do something else
	while (tCurrent < tAlignStop) {

	    // Break out without calling EndOfStream if we're asked to
	    // do something different
	    if (CheckRequest(&dwRequest)) {
		return;
	    }

	    // queue a first sample
	    if (Actual.cBuffers > 1) {

		hr = QueueSample(tCurrent, tAlignStop, TRUE);
		bDiscontinuity = FALSE;

		if (FAILED(hr)) {
		    return;
		}
	    }



	    // loop queueing second and waiting for first..
	    while (tCurrent < tAlignStop) {

		hr = QueueSample(tCurrent, tAlignStop, bDiscontinuity);
		bDiscontinuity = FALSE;

		if (FAILED(hr)) {
		    return;
		}

		hr = CollectAndDeliver(tStart, tStop);
		if (S_OK != hr) {

		    // stop if error, or if downstream filter said
		    // to stop.
		    return;
		}
	    }

	    if (Actual.cBuffers > 1) {
		hr = CollectAndDeliver(tStart, tStop);
		if (FAILED(hr)) {
		    return;
		}
	    }
	}
    } else {

	// sync version of above loop
	while (tCurrent < tAlignStop) {

	    // Break out without calling EndOfStream if we're asked to
	    // do something different
	    if (CheckRequest(&dwRequest)) {
		return;
	    }

	    IMediaSample* pSample;

	    hr = m_pAlloc->GetBuffer(&pSample, NULL, NULL, 0);
	    if (FAILED(hr)) {
		OnError(hr);
		return;
	    }

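	    // In pull mode, sample "times" are byte offsets scaled by UNITS.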
	    LONGLONG tStopThis = tCurrent + (pSample->GetSize() * UNITS);
	    if (tStopThis > tAlignStop) {
		tStopThis = tAlignStop;
	    }
	    pSample->SetTime(&tCurrent, &tStopThis);
	    tCurrent = tStopThis;

	    if (bDiscontinuity) {
		pSample->SetDiscontinuity(TRUE);
		bDiscontinuity = FALSE;
	    }

	    hr = m_pReader->SyncReadAligned(pSample);

	    if (FAILED(hr)) {
		pSample->Release();
		OnError(hr);
		return;
	    }

	    hr = DeliverSample(pSample, tStart, tStop);
	    if (hr != S_OK) {
		if (FAILED(hr)) {
		    OnError(hr);
		}
		return;
	    }
	}
    }

    EndOfStream();
}
Example #23
DWORD WINAPI UdpReceiveThread(LPVOID param)
{
	HRESULT hr;
	ReceiveParam *receiveParam = (ReceiveParam*)param;
	HANDLE PushSemaphore = receiveParam->PushSemaphore;
	HANDLE PushDataMutex = receiveParam->PushDataMutex;
	std::map<REFERENCE_TIME,IMediaSample*>& SampleList = *receiveParam->SampleList;
	NetReceiveFilter* filter = receiveParam->filter;
	delete receiveParam;

	NetReceiveOutputPin* outputPin = reinterpret_cast<NetReceiveOutputPin*>(filter->GetPin(0));
	assert(outputPin != NULL);

	AM_MEDIA_TYPE mediaType;
	while (true)
	{
		outputPin->ConnectionMediaType(&mediaType);
		if (mediaType.majortype == GUID_NULL)
		{
			Sleep(300);
		}
		else
			break;
	}

	SOCKET udpSocket;
	udpSocket = ::socket(AF_INET, SOCK_DGRAM, 0);
	if (udpSocket == INVALID_SOCKET)
	{
		ErrorPrint("Create udp socket error");
		return 1;
	}

	sockaddr_in bindAddress;
	bindAddress.sin_family = AF_INET;
	bindAddress.sin_addr.s_addr = htonl(INADDR_ANY);
	if(mediaType.majortype == MEDIATYPE_Video)
	{
		bindAddress.sin_port = htons(VideoBroadcastPort);
	}
	else
	{
		bindAddress.sin_port = htons(AudioBroadcastPort);
	}

	int option = 1;
	int ret = setsockopt(udpSocket, SOL_SOCKET, SO_REUSEADDR, (char*)&option, sizeof(option));
	if (ret == SOCKET_ERROR)
	{
		ErrorPrint("Set socket reuse address error");
		return 1;
	}

	int recvSystemBufferSize = 1024 * 1024 * 10;

	ret = setsockopt(udpSocket, SOL_SOCKET, SO_RCVBUF, (char*)&recvSystemBufferSize, sizeof(recvSystemBufferSize));
	if (ret == SOCKET_ERROR)
	{
		ErrorPrint("Set socket receive system buffer size error");
	}

	ret = ::bind(udpSocket, (sockaddr*)&bindAddress, sizeof(bindAddress));
	if(ret == SOCKET_ERROR)
	{
		ErrorPrint("Bind udp receive socket error");
		return 1;
	}

	sockaddr_in fromAddress;
	fromAddress.sin_family = AF_INET;
	int addressLen = sizeof(fromAddress);

	std::map<long long, IMediaSample*> idToSampleMap;

	const int packetMaxSize = 10 * 1024;
	MediaPacketHeader* mediaPacketHeader = (MediaPacketHeader*)new char[sizeof(MediaPacketHeader) + packetMaxSize];
	boost::scoped_array<char> bufferContainer((char*)mediaPacketHeader);
	char* dataStart = (char*)mediaPacketHeader;
	char* dataBuffer = (char*)mediaPacketHeader + sizeof(MediaPacketHeader);
	while (true)
	{
		int recvedSize = recvfrom(udpSocket, dataStart, sizeof(MediaPacketHeader) + packetMaxSize, 0, (sockaddr*)&fromAddress, &addressLen);
		if (recvedSize == SOCKET_ERROR)
		{
			ErrorPrint("Receive from udp error");
			return 1;
		}

		if (g_IsBroadcasting) //this is a packet we broadcast ourselves, so discard it
		{
			continue;
		}

		if (mediaPacketHeader->type == 0) // this is a sample header
		{
#ifdef UDP_PRINT
			std::cout<<"Receive media packet header:"<<mediaPacketHeader->id<<std::endl;
#endif
			std::map<long long, IMediaSample*>::iterator it = idToSampleMap.begin();
			while (it != idToSampleMap.end()) //handle samples that lost packets
			{
				std::map<long long, IMediaSample*>::iterator tmp = it++;
				if (tmp->first < mediaPacketHeader->id) //this sample definitely lost packets: its id is older than the new header's and it never arrived completely, so discard it
				{
					std::cout<<"Lose packet:"<<mediaPacketHeader->id<<std::endl;
					tmp->second->Release(); //the sample must be released
					idToSampleMap.erase(tmp);
				}
				else //all samples that had to be discarded have been handled
					break;
			}

			IMediaSample *sample = filter->GetFreeSample(); // claim a fresh free sample for this sample header
			if (sample == NULL)
			{
				ErrorPrint("Get free sample error");
				return 1;
			}

			AM_SAMPLE2_PROPERTIES* sample2Properties = (AM_SAMPLE2_PROPERTIES*)dataBuffer;

			// transfer only the plain-data fields up to and including dwStreamId;
			// the trailing pMediaType/pbBuffer pointers are meaningless across the wire
			sample2Properties->cbData = FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, pMediaType);

			IMediaSample2 *mediaSample2;
			hr = sample->QueryInterface(IID_IMediaSample2, (void**)&mediaSample2);
			if (FAILED(hr))
			{
				ErrorPrint("Get media sample2 interface error",hr);
				sample->Release();
				return 1;
			}
			ComReleaser mediaSample2Releaser(mediaSample2);

			hr = mediaSample2->SetProperties(sample2Properties->cbData, (BYTE*)sample2Properties); // apply the received sample properties
			if (FAILED(hr))
			{
				ErrorPrint("Set sample properties error", hr);
			}
			sample->SetTime(&(sample2Properties->tStart), &(sample2Properties->tStop));
			sample->SetActualDataLength(sample2Properties->lActual);

			idToSampleMap.insert(std::make_pair(mediaPacketHeader->id, sample)); // park it in the map until all of its data has arrived
		}
		else if (mediaPacketHeader->type == 1) // sample payload data
		{
#ifdef UDP_PRINT
			std::cout<<"Receive sample data:"<<mediaPacketHeader->id<<std::endl;
#endif
			std::map<long long, IMediaSample*>::iterator it = idToSampleMap.find(mediaPacketHeader->id);
			if (it != idToSampleMap.end()) // an unknown id means the sample header was lost or already expired; drop this packet
			{
				IMediaSample* sample = it->second;
				PBYTE dataPointer = NULL;
				hr = sample->GetPointer(&dataPointer);
				if (FAILED(hr))
				{
					ErrorPrint("Get data pointer error",hr);
					idToSampleMap.erase(it);
					sample->Release();
					continue;
				}
				memcpy(dataPointer + mediaPacketHeader->offset, dataBuffer, mediaPacketHeader->size);
				if ( (mediaPacketHeader->offset + mediaPacketHeader->size) == sample->GetActualDataLength()) // sample complete (interior fragments could still be missing, but that case is ignored for now)
				{
					idToSampleMap.erase(it);
					REFERENCE_TIME startTime,endTime;
					sample->GetTime(&startTime,&endTime);
					// hand the finished sample over to the push thread
					WaitForSingleObject(PushDataMutex, INFINITE);
					SampleList.insert(std::make_pair(startTime,sample));
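					// wake the push thread only once a sizeable backlog exists
					// (24 * 10 presumably amounts to ~10 seconds of 24fps video)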
					if (SampleList.size() >= 24 * 10)
					{
						ReleaseSemaphore(PushSemaphore, 1, NULL);
					}
					ReleaseMutex(PushDataMutex);
				}
			}
			else
				std::cout<<"Lose packet header:"<<mediaPacketHeader->id<<std::endl;
		}

	}
}
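// Note: the receive loop above depends on an application-defined header that
// prefixes every UDP datagram. Its real definition is not part of this
// snippet; a minimal sketch consistent with the fields used above (exact
// field types and packing are assumptions) would be:
struct MediaPacketHeader
{
	int       type;   // 0 = sample header (AM_SAMPLE2_PROPERTIES payload), 1 = sample data
	long long id;     // per-sample sequence number, increasing monotonically
	int       offset; // byte offset of this fragment within the sample buffer
	int       size;   // number of payload bytes following the header
};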
예제 #24
0
File: source.cpp  Project: 9crk/EasyClient
//
// DoBufferProcessingLoop
//
// Grabs a buffer and calls the user's processing function.
// Overridable, so that different delivery styles can be catered for.
HRESULT CSourceStream::DoBufferProcessingLoop(void) {

    Command com;

    OnThreadStartPlay();

    do {
	while (!CheckRequest(&com)) {

	    IMediaSample *pSample;

	    HRESULT hr = GetDeliveryBuffer(&pSample,NULL,NULL,0);
	    if (FAILED(hr)) {
                Sleep(1);
		continue;	// go round again. Perhaps the error will go away
			    // or the allocator is decommitted & we will be asked to
			    // exit soon.
	    }

	    // Virtual function user will override.
	    hr = FillBuffer(pSample);

	    if (hr == S_OK) {
		hr = Deliver(pSample);
                pSample->Release();

                // downstream filter returns S_FALSE if it wants us to
                // stop or an error if it's reporting an error.
                if(hr != S_OK)
                {
                  DbgLog((LOG_TRACE, 2, TEXT("Deliver() returned %08x; stopping"), hr));
                  return S_OK;
                }

	    } else if (hr == S_FALSE) {
                // derived class wants us to stop pushing data
		pSample->Release();
		DeliverEndOfStream();
		return S_OK;
	    } else {
                // derived class encountered an error
                pSample->Release();
		DbgLog((LOG_ERROR, 1, TEXT("Error %08lX from FillBuffer!!!"), hr));
                DeliverEndOfStream();
                m_pFilter->NotifyEvent(EC_ERRORABORT, hr, 0);
                return hr;
	    }

            // all paths release the sample
	}

        // For all commands sent to us there must be a Reply call!

	if (com == CMD_RUN || com == CMD_PAUSE) {
	    Reply(NOERROR);
	} else if (com != CMD_STOP) {
	    Reply((DWORD) E_UNEXPECTED);
	    DbgLog((LOG_ERROR, 1, TEXT("Unexpected command!!!")));
	}
    } while (com != CMD_STOP);

    return S_FALSE;
}
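// The loop above only needs the derived pin to implement FillBuffer(). A
// minimal sketch of such an override (CMyStream, its m_rtSampleTime member
// and the 33ms frame duration are illustrative assumptions, not part of the
// code above):
HRESULT CMyStream::FillBuffer(IMediaSample *pSample)
{
    BYTE *pData;
    HRESULT hr = pSample->GetPointer(&pData);
    if (FAILED(hr))
        return hr;                       // a failure here aborts the stream (EC_ERRORABORT above)

    ::FillMemory(pData, pSample->GetSize(), 0x80);    // dummy payload
    pSample->SetActualDataLength(pSample->GetSize());

    REFERENCE_TIME rtStart = m_rtSampleTime;          // assumed member, 100ns units
    REFERENCE_TIME rtStop  = rtStart + 330000;        // one 33ms frame (assumed rate)
    m_rtSampleTime = rtStop;
    pSample->SetTime(&rtStart, &rtStop);
    pSample->SetSyncPoint(TRUE);

    return S_OK;    // returning S_FALSE instead would deliver EOS and stop cleanly
}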
예제 #25
0
HRESULT CLAVAudio::DeliverBitstream(AVCodecID codec, const BYTE *buffer, DWORD dwSize, DWORD dwFrameSize, REFERENCE_TIME rtStartInput, REFERENCE_TIME rtStopInput)
{
  HRESULT hr = S_OK;

  CMediaType mt = CreateBitstreamMediaType(codec, m_bsParser.m_dwSampleRate);
  WAVEFORMATEX* wfe = (WAVEFORMATEX*)mt.Format();

  if(FAILED(hr = ReconnectOutput(dwSize, mt))) {
    return hr;
  }

  IMediaSample *pOut;
  BYTE *pDataOut = NULL;
  if(FAILED(GetDeliveryBuffer(&pOut, &pDataOut))) {
    return E_FAIL;
  }

  REFERENCE_TIME rtStart = m_rtStart, rtStop = AV_NOPTS_VALUE;
  // TrueHD timings
  // Since the SPDIF muxer takes 24 frames and puts them into one IEC61937 frame, we use the cached timestamp from before.
  if (codec == AV_CODEC_ID_TRUEHD) {
    // long-term cache is valid
    if (m_rtBitstreamCache != AV_NOPTS_VALUE)
      rtStart = m_rtBitstreamCache;
    // Duration - stop time of the current frame is valid
    if (rtStopInput != AV_NOPTS_VALUE)
      rtStop = rtStopInput;
    else // no actual time of the current frame, use typical TrueHD frame size, 24 * 0.83333ms
      rtStop = rtStart + (REFERENCE_TIME)(200000 / m_dRate);
    m_rtStart = rtStop;
  } else {
    double dDuration = DBL_SECOND_MULT * (double)m_bsParser.m_dwSamples / m_bsParser.m_dwSampleRate / m_dRate;
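    // dDuration is in 100ns units but fractional; the fractional part cannot
    // be carried by a REFERENCE_TIME, so it is accumulated in m_dStartOffset
    // and paid back as one whole tick once more than half a tick has built up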
    m_dStartOffset += fmod(dDuration, 1.0);

    // Add rounded duration to rtStop
    rtStop = rtStart + (REFERENCE_TIME)(dDuration + 0.5);
    // and unrounded to m_rtStart..
    m_rtStart += (REFERENCE_TIME)dDuration;
    // and accumulate error..
    if (m_dStartOffset > 0.5) {
      m_rtStart++;
      m_dStartOffset -= 1.0;
    }
  }

  REFERENCE_TIME rtJitter = rtStart - m_rtBitstreamCache;
  m_faJitter.Sample(rtJitter);

  REFERENCE_TIME rtJitterMin = m_faJitter.AbsMinimum();
  if (m_settings.AutoAVSync && abs(rtJitterMin) > m_JitterLimit && m_bHasVideo) {
    DbgLog((LOG_TRACE, 10, L"::Deliver(): corrected A/V sync by %I64d", rtJitterMin));
    m_rtStart -= rtJitterMin;
    m_faJitter.OffsetValues(-rtJitterMin);
    m_bDiscontinuity = TRUE;
  }

#ifdef DEBUG
  DbgLog((LOG_CUSTOM5, 20, L"Bitstream Delivery, rtStart(calc): %I64d, rtStart(input): %I64d, duration: %I64d, diff: %I64d", rtStart, m_rtBitstreamCache, rtStop-rtStart, rtJitter));

  if (m_faJitter.CurrentSample() == 0) {
    DbgLog((LOG_TRACE, 20, L"Jitter Stats: min: %I64d - max: %I64d - avg: %I64d", rtJitterMin, m_faJitter.AbsMaximum(), m_faJitter.Average()));
  }
#endif
  m_rtBitstreamCache = AV_NOPTS_VALUE;

  if(m_settings.AudioDelayEnabled) {
    REFERENCE_TIME rtDelay = (REFERENCE_TIME)((m_settings.AudioDelay * 10000i64) / m_dRate);
    rtStart += rtDelay;
    rtStop += rtDelay;
  }

  pOut->SetTime(&rtStart, &rtStop);
  pOut->SetMediaTime(NULL, NULL);

  pOut->SetPreroll(FALSE);
  pOut->SetDiscontinuity(m_bDiscontinuity);
  m_bDiscontinuity = FALSE;
  pOut->SetSyncPoint(TRUE);

  pOut->SetActualDataLength(dwSize);

  memcpy(pDataOut, buffer, dwSize);

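  // ReconnectOutput() returned S_OK (rather than S_FALSE) only when the media
  // type actually changed, so propagate the new type downstream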
  if(hr == S_OK) {
    hr = m_pOutput->GetConnected()->QueryAccept(&mt);
    if (hr == S_FALSE && m_nCodecId == AV_CODEC_ID_DTS && m_bDTSHD) {
      DbgLog((LOG_TRACE, 1, L"DTS-HD Media Type failed with %0#.8x, trying fallback to DTS core", hr));
      m_bForceDTSCore = TRUE;
      UpdateBitstreamContext();
      goto done;
    }
    DbgLog((LOG_TRACE, 1, L"Sending new Media Type (QueryAccept: %0#.8x)", hr));
    m_pOutput->SetMediaType(&mt);
    pOut->SetMediaType(&mt);
  }

  hr = m_pOutput->Deliver(pOut);
  if (FAILED(hr)) {
    DbgLog((LOG_ERROR, 10, L"::DeliverBitstream failed with code: %0#.8x", hr));
  }

done:
  SafeRelease(&pOut);
  return hr;
}
예제 #26
0
// Set up our output sample
HRESULT
CTransformFilter::InitializeOutputSample(IMediaSample *pSample, IMediaSample **ppOutSample) {
    IMediaSample *pOutSample;

    // default - times are the same

    AM_SAMPLE2_PROPERTIES * const pProps = m_pInput->SampleProps();
    DWORD dwFlags = m_bSampleSkipped ? AM_GBF_PREVFRAMESKIPPED : 0;

    // This will prevent the image renderer from switching us to DirectDraw
    // when we can't do it without skipping frames because we're not on a
    // keyframe.  If it really has to switch us, it still will, but then we
    // will have to wait for the next keyframe
    if(!(pProps->dwSampleFlags & AM_SAMPLE_SPLICEPOINT)) {
        dwFlags |= AM_GBF_NOTASYNCPOINT;
    }

    ASSERT(m_pOutput->m_pAllocator != NULL);
    HRESULT hr = m_pOutput->m_pAllocator->GetBuffer(&pOutSample
        , pProps->dwSampleFlags & AM_SAMPLE_TIMEVALID ?
        &pProps->tStart : NULL
        , pProps->dwSampleFlags & AM_SAMPLE_STOPVALID ?
        &pProps->tStop : NULL
        , dwFlags);
    *ppOutSample = pOutSample;
    if(FAILED(hr)) {
        return hr;
    }

    ASSERT(pOutSample);
    IMediaSample2 *pOutSample2;
    if(SUCCEEDED(pOutSample->QueryInterface(IID_IMediaSample2,
        (void **)&pOutSample2))) {
        /*  Modify it */
        AM_SAMPLE2_PROPERTIES OutProps;
        EXECUTE_ASSERT(SUCCEEDED(pOutSample2->GetProperties(FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, tStart), (PBYTE)&OutProps)));
        OutProps.dwTypeSpecificFlags = pProps->dwTypeSpecificFlags;
        OutProps.dwSampleFlags =
            (OutProps.dwSampleFlags & AM_SAMPLE_TYPECHANGED) |
            (pProps->dwSampleFlags & ~AM_SAMPLE_TYPECHANGED);

        OutProps.tStart = pProps->tStart;
        OutProps.tStop  = pProps->tStop;
        OutProps.cbData = FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, dwStreamId);

        hr = pOutSample2->SetProperties(FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, dwStreamId),
            (PBYTE)&OutProps);
        if(pProps->dwSampleFlags & AM_SAMPLE_DATADISCONTINUITY) {
            m_bSampleSkipped = FALSE;
        }
        pOutSample2->Release();
    }
    else {
        if(pProps->dwSampleFlags & AM_SAMPLE_TIMEVALID) {
            pOutSample->SetTime(&pProps->tStart,
                &pProps->tStop);
        }
        if(pProps->dwSampleFlags & AM_SAMPLE_SPLICEPOINT) {
            pOutSample->SetSyncPoint(TRUE);
        }
        if(pProps->dwSampleFlags & AM_SAMPLE_DATADISCONTINUITY) {
            pOutSample->SetDiscontinuity(TRUE);
            m_bSampleSkipped = FALSE;
        }
        // Copy the media times

        LONGLONG MediaStart, MediaEnd;
        if(pSample->GetMediaTime(&MediaStart,&MediaEnd) == NOERROR) {
            pOutSample->SetMediaTime(&MediaStart,&MediaEnd);
        }
    }
    return S_OK;
}
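// For context, CTransformFilter::Receive() in the DirectShow base classes
// pairs the call above with the derived class's Transform(), roughly:
//
//     IMediaSample *pOutSample;
//     HRESULT hr = InitializeOutputSample(pSample, &pOutSample);
//     if (FAILED(hr))
//         return hr;
//     hr = Transform(pSample, pOutSample);    // derived-class work
//     if (hr == S_OK)
//         hr = m_pOutput->Deliver(pOutSample);
//     pOutSample->Release();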
예제 #27
0
HRESULT CMpeg2DecoderDXVA2::DecodeFrame(IMediaSample **ppSample)
{
	if (ppSample) {
		*ppSample = nullptr;
	}

	if (!m_pDec || !m_pVideoDecoder) {
		return E_UNEXPECTED;
	}

	if (m_pDec->picture->flags & PIC_FLAG_SKIP) {
		return GetDisplaySample(ppSample);
	}

	m_DecodeSampleIndex = GetFBufIndex(m_pDec->fbuf[0]);

	if (!m_SliceCount || m_DecodeSampleIndex < 0) {
		return S_FALSE;
	}

	if (m_fWaitForDecodeKeyFrame) {
		if ((m_pDec->picture->flags & PIC_MASK_CODING_TYPE) != PIC_FLAG_CODING_TYPE_I) {
			return S_FALSE;
		}
		m_fWaitForDecodeKeyFrame = false;
	}

	HRESULT hr;

	hr = m_pDeviceManager->TestDevice(m_pFilter->m_hDXVADevice);
	if (FAILED(hr)) {
		if (hr == DXVA2_E_NEW_VIDEO_DEVICE) {
			DBG_TRACE(TEXT("Device lost"));
			m_fDeviceLost = true;
		}
		return hr;
	}

	switch (m_pDec->picture->flags & PIC_MASK_CODING_TYPE) {
	case PIC_FLAG_CODING_TYPE_I:
		m_PrevRefSurfaceIndex = -1;
		m_ForwardRefSurfaceIndex = -1;
		//DBG_TRACE(TEXT("I [%d]"), m_CurSurfaceIndex);
		break;
	case PIC_FLAG_CODING_TYPE_P:
		m_PrevRefSurfaceIndex = GetFBufSampleID(m_pDec->fbuf[1]);
		m_ForwardRefSurfaceIndex = -1;
		//DBG_TRACE(TEXT("P [%d]->%d"), m_CurSurfaceIndex, m_PrevRefSurfaceIndex);
		break;
	case PIC_FLAG_CODING_TYPE_B:
		m_PrevRefSurfaceIndex = GetFBufSampleID(m_pDec->fbuf[1]);
		m_ForwardRefSurfaceIndex = GetFBufSampleID(m_pDec->fbuf[2]);
		//DBG_TRACE(TEXT("B %d->[%d]->%d"), m_PrevRefSurfaceIndex, m_CurSurfaceIndex, m_ForwardRefSurfaceIndex);
		if (m_ForwardRefSurfaceIndex < 0)
			return S_FALSE;
		break;
	}

	CDXVA2MediaSample *pSample = m_Samples[m_DecodeSampleIndex].pSample;

	if (!pSample) {
		IMediaSample *pMediaSample;
		IDXVA2MediaSample *pDXVA2Sample;

		for (;;) {
			hr = m_pFilter->GetDeliveryBuffer(&pMediaSample);
			if (FAILED(hr)) {
				return hr;
			}
			hr = pMediaSample->QueryInterface(IID_PPV_ARGS(&pDXVA2Sample));
			pMediaSample->Release();
			if (FAILED(hr)) {
				return hr;
			}
			pSample = static_cast<CDXVA2MediaSample*>(pDXVA2Sample);
			if (pSample->GetSurfaceID() == m_RefSamples[0].SurfaceID) {
				m_RefSamples[0].pSample = pSample;
			} else if (pSample->GetSurfaceID() == m_RefSamples[1].SurfaceID) {
				m_RefSamples[1].pSample = pSample;
			} else {
				break;
			}
		}
		m_Samples[m_DecodeSampleIndex].pSample = pSample;
		m_Samples[m_DecodeSampleIndex].SurfaceID = pSample->GetSurfaceID();
	}

	m_CurSurfaceIndex = pSample->GetSurfaceID();

#ifdef _DEBUG
	if ((m_pDec->picture->flags & PIC_MASK_CODING_TYPE) == PIC_FLAG_CODING_TYPE_P) {
		_ASSERT(m_PrevRefSurfaceIndex>=0 && m_CurSurfaceIndex != m_PrevRefSurfaceIndex);
	} else if ((m_pDec->picture->flags & PIC_MASK_CODING_TYPE) == PIC_FLAG_CODING_TYPE_B) {
		_ASSERT(m_PrevRefSurfaceIndex>=0
			&& m_CurSurfaceIndex != m_PrevRefSurfaceIndex
			&& m_ForwardRefSurfaceIndex>=0
			&& m_CurSurfaceIndex != m_ForwardRefSurfaceIndex);
	}
#endif

	IDirect3DSurface9 *pSurface;
	IMFGetService *pMFGetService;
	hr = pSample->QueryInterface(IID_PPV_ARGS(&pMFGetService));
	if (SUCCEEDED(hr)) {
		hr = pMFGetService->GetService(MR_BUFFER_SERVICE, IID_PPV_ARGS(&pSurface));
		pMFGetService->Release();
	}
	if (FAILED(hr)) {
		return hr;
	}

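	// BeginFrame() can transiently return E_PENDING while the decoder is
	// busy; retry for up to ~100ms (50 x 2ms) before giving up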
	int Retry = 0;
	for (;;) {
		hr = m_pVideoDecoder->BeginFrame(pSurface, nullptr);
		if (hr != E_PENDING || Retry >= 50)
			break;
		::Sleep(2);
		Retry++;
	}
	if (SUCCEEDED(hr)) {
		hr = CommitBuffers();
		if (SUCCEEDED(hr)) {
			DXVA2_DecodeExecuteParams ExecParams;
			DXVA2_DecodeBufferDesc BufferDesc[4];
			const UINT NumMBsInBuffer =
				(m_PictureParams.wPicWidthInMBminus1 + 1) * (m_PictureParams.wPicHeightInMBminus1 + 1);

			::ZeroMemory(BufferDesc, sizeof(BufferDesc));
			BufferDesc[0].CompressedBufferType = DXVA2_PictureParametersBufferType;
			BufferDesc[0].DataSize = sizeof(DXVA_PictureParameters);
			BufferDesc[1].CompressedBufferType = DXVA2_InverseQuantizationMatrixBufferType;
			BufferDesc[1].DataSize = sizeof(DXVA_QmatrixData);
			BufferDesc[2].CompressedBufferType = DXVA2_BitStreamDateBufferType;
			BufferDesc[2].DataSize = (UINT)m_SliceDataSize;
			BufferDesc[2].NumMBsInBuffer = NumMBsInBuffer;
			BufferDesc[3].CompressedBufferType = DXVA2_SliceControlBufferType;
			BufferDesc[3].DataSize = m_SliceCount * sizeof(DXVA_SliceInfo);
			BufferDesc[3].NumMBsInBuffer = NumMBsInBuffer;

			ExecParams.NumCompBuffers = 4;
			ExecParams.pCompressedBuffers = BufferDesc;
			ExecParams.pExtensionData = nullptr;

			hr = m_pVideoDecoder->Execute(&ExecParams);
			if (SUCCEEDED(hr)) {
				hr = GetDisplaySample(ppSample);
			}
		}

		m_pVideoDecoder->EndFrame(nullptr);
	}

	if ((m_pDec->picture->flags & PIC_MASK_CODING_TYPE) != PIC_FLAG_CODING_TYPE_B
			&& ppSample) {
		SafeRelease(m_RefSamples[1].pSample);
		m_RefSamples[1] = m_RefSamples[0];
		m_RefSamples[0].pSample = nullptr;
		m_RefSamples[0].SurfaceID = m_CurSurfaceIndex;
	}

	pSurface->Release();

	return hr;
}
예제 #28
0
DWORD WINAPI PushDataThread(PVOID param)
{
	PushParam* pushParam = (PushParam*)param;
	HANDLE PushSemaphore = pushParam->PushSemaphore;
	HANDLE PushDataMutex = pushParam->PushDataMutex;
	NetReceiveFilter* filter = pushParam->filter;
	std::map<REFERENCE_TIME, IMediaSample*>& SampleList = *pushParam->SampleList;

	delete pushParam;

	REFERENCE_TIME startTime,endTime;
	CRefTime streamTime(LONG(0)),lastStreamTime(LONG(0));

	bool first = true;
	AM_MEDIA_TYPE mediaType;
	IMediaSample* sample = NULL; // assigned in the loop below before use

	while (SampleList.size() == 0) // wait until enough data has arrived
	{
		WaitForSingleObject(PushSemaphore,INFINITE);
	}

	CBasePin* pin = filter->GetPin(0);
	pin->ConnectionMediaType(&mediaType);
	IFilterGraph* filterGraph = filter->GetFilterGraph();
	ComReleaser filterGraphReleaser(filterGraph);

	HRESULT hr;
	IMediaControl* mediaControl;
	hr = filterGraph->QueryInterface(IID_IMediaControl, (void**)&mediaControl);
	if(FAILED(hr))
	{
		ErrorPrint("Get media control error", hr);
		return 1;
	}
	ComReleaser mediaControlReleaser(mediaControl);

	while (true)
	{
		WaitForSingleObject(PushDataMutex, INFINITE);
		if (filter->getPlayMode() == 0) // play as fast as possible: ignore timestamps and push one sample at a time
		{
			if (SampleList.size() == 0)
			{
				ReleaseMutex(PushDataMutex);
				while (SampleList.size() == 0)
				{
					WaitForSingleObject(PushSemaphore,INFINITE);
				}
				WaitForSingleObject(PushDataMutex, INFINITE);
			}
			sample = SampleList.begin()->second;
		}
		else if (filter->getPlayMode() == 1) // honor timestamps
		{
			NetReceiveFilter::State state = filter->getState();
			if (SampleList.size() == 0)
			{
				g_ReferenceTimeFilter->pauseTime(); // pause the shared clock while we starve
				ReleaseMutex(PushDataMutex);
				while (SampleList.size() == 0) // wait until data arrives again
				{
					WaitForSingleObject(PushSemaphore,INFINITE);
				}
				WaitForSingleObject(PushDataMutex, INFINITE);
				g_ReferenceTimeFilter->startTime(); // restart the shared clock

			}

			if (state == NetReceiveFilter::Stopped)
			{
				ReleaseMutex(PushDataMutex);
				Sleep(50);
				continue;
			}

			if(g_ReferenceTimeFilter->isStop())
			{
				ReleaseMutex(PushDataMutex);
				Sleep(50);
				continue;
			}

			sample = SampleList.begin()->second;
			sample->GetTime(&startTime,&endTime);
			filter->StreamTime(streamTime); // fetch the current stream time
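			// restamp against the shared reference clock; the sample's own
			// timestamps read above are overwritten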
			g_ReferenceTimeFilter->GetTime(&startTime);
			g_ReferenceTimeFilter->GetTime(&endTime);

			if(state != NetReceiveFilter::Paused) // no correction while paused
			{
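				// 10,000,000 * 100ns = 1 second: if the sample starts more than
				// a second ahead of the stream time, back off and retry later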
				if(startTime - 10000000 > streamTime )
				{
					ReleaseMutex(PushDataMutex);
					Sleep(50);
					continue;
				}
				sample->SetTime(&startTime, &endTime);
			}
			

		}
		SampleList.erase(SampleList.begin());
		ReleaseMutex(PushDataMutex);
		if(!filter->PushData(sample))
		{
			ErrorPrint("Push data error");
			sample->Release();
			continue;
		}
		sample->Release();
	}
	return 0;
}
예제 #29
0
int CDecDXVA2::get_dxva2_buffer(struct AVCodecContext *c, AVFrame *pic, int flags)
{
  CDecDXVA2 *pDec = (CDecDXVA2 *)c->opaque;
  IMediaSample *pSample = NULL;

  HRESULT hr = S_OK;

  if (pic->format != AV_PIX_FMT_DXVA2_VLD || (c->codec_id == AV_CODEC_ID_H264 && !H264_CHECK_PROFILE(c->profile))) {
    DbgLog((LOG_ERROR, 10, L"DXVA2 buffer request, but not dxva2 pixfmt or unsupported profile"));
    pDec->m_bFailHWDecode = TRUE;
    return -1;
  }

  if (!pDec->m_pDecoder || FFALIGN(c->coded_width, 16) != pDec->m_dwSurfaceWidth || FFALIGN(c->coded_height, 16) != pDec->m_dwSurfaceHeight) {
    DbgLog((LOG_TRACE, 10, L"No DXVA2 Decoder or image dimensions changed -> Re-Allocating resources"));
    if (!pDec->m_pDecoder && pDec->m_bNative && !pDec->m_pDXVA2Allocator) {
      ASSERT(0);
      hr = E_FAIL;
    } else if (pDec->m_bNative) {
      avcodec_flush_buffers(c);

      pDec->m_dwSurfaceWidth = FFALIGN(c->coded_width, 16);
      pDec->m_dwSurfaceHeight = FFALIGN(c->coded_height, 16);

      // Re-Commit the allocator (creates surfaces and new decoder)
      hr = pDec->m_pDXVA2Allocator->Decommit();
      if (pDec->m_pDXVA2Allocator->DecommitInProgress()) {
        DbgLog((LOG_TRACE, 10, L"WARNING! DXVA2 Allocator is still busy, trying to flush downstream"));
        pDec->m_pCallback->ReleaseAllDXVAResources();
        pDec->m_pCallback->GetOutputPin()->GetConnected()->BeginFlush();
        pDec->m_pCallback->GetOutputPin()->GetConnected()->EndFlush();
        if (pDec->m_pDXVA2Allocator->DecommitInProgress()) {
          DbgLog((LOG_TRACE, 10, L"WARNING! Flush had no effect, decommit of the allocator still not complete"));
        } else {
          DbgLog((LOG_TRACE, 10, L"Flush was successfull, decommit completed!"));
        }
      }
      hr = pDec->m_pDXVA2Allocator->Commit();
    } else if (!pDec->m_bNative) {
      hr = pDec->CreateDXVA2Decoder();
    }
    if (FAILED(hr)) {
      pDec->m_bFailHWDecode = TRUE;
      return -1;
    }
  }

  if (FAILED(pDec->m_pD3DDevMngr->TestDevice(pDec->m_hDevice))) {
    DbgLog((LOG_ERROR, 10, L"Device Lost"));
  }

  int i;
  if (pDec->m_bNative) {
    if (!pDec->m_pDXVA2Allocator)
      return -1;

    hr = pDec->m_pDXVA2Allocator->GetBuffer(&pSample, NULL, NULL, 0);
    if (FAILED(hr)) {
      DbgLog((LOG_ERROR, 10, L"DXVA2Allocator returned error, hr: 0x%x", hr));
      return -1;
    }

    ILAVDXVA2Sample *pLavDXVA2 = NULL;
    hr = pSample->QueryInterface(&pLavDXVA2);
    if (FAILED(hr)) {
      DbgLog((LOG_ERROR, 10, L"Sample is no LAV DXVA2 sample?????"));
      SafeRelease(&pSample);
      return -1;
    }
    i = pLavDXVA2->GetDXSurfaceId();
    SafeRelease(&pLavDXVA2);
  } else {
    int old, old_unused;
    for (i = 0, old = 0, old_unused = -1; i < pDec->m_NumSurfaces; i++) {
      d3d_surface_t *surface = &pDec->m_pSurfaces[i];
      if (!surface->used && (old_unused == -1 || surface->age < pDec->m_pSurfaces[old_unused].age))
        old_unused = i;
      if (surface->age < pDec->m_pSurfaces[old].age)
        old = i;
    }
    if (old_unused == -1) {
      DbgLog((LOG_TRACE, 10, L"No free surface, using oldest"));
      i = old;
    } else {
      i = old_unused;
    }
  }

  LPDIRECT3DSURFACE9 pSurface = pDec->m_pSurfaces[i].d3d;
  if (!pSurface) {
    DbgLog((LOG_ERROR, 10, L"There is a sample, but no D3D Surace? WTF?"));
    SafeRelease(&pSample);
    return -1;
  }

  pDec->m_pSurfaces[i].age  = pDec->m_CurrentSurfaceAge++;
  pDec->m_pSurfaces[i].used = true;

  memset(pic->data, 0, sizeof(pic->data));
  memset(pic->linesize, 0, sizeof(pic->linesize));
  memset(pic->buf, 0, sizeof(pic->buf));

  pic->data[0] = pic->data[3] = (uint8_t *)pSurface;
  pic->data[4] = (uint8_t *)pSample;

  SurfaceWrapper *surfaceWrapper = new SurfaceWrapper();
  surfaceWrapper->pDec = pDec;
  surfaceWrapper->surface = pSurface;
  surfaceWrapper->sample = pSample;
  pic->buf[0] = av_buffer_create(NULL, 0, free_dxva2_buffer, surfaceWrapper, 0);

  return 0;
}
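// The free_dxva2_buffer callback registered above has to undo this setup once
// FFmpeg drops its last reference. A sketch consistent with the bookkeeping
// above (the exact member access is an assumption about the surrounding class):
//
//     static void free_dxva2_buffer(void *opaque, uint8_t *data)
//     {
//       SurfaceWrapper *sw = (SurfaceWrapper *)opaque;
//       for (int i = 0; i < sw->pDec->m_NumSurfaces; i++) {
//         if (sw->pDec->m_pSurfaces[i].d3d == sw->surface)
//           sw->pDec->m_pSurfaces[i].used = false;    // free the copy-back slot
//       }
//       SafeRelease(&sw->sample);                     // native-mode allocator sample
//       delete sw;
//     }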
예제 #30
0
//
// Copy
//
// return a pointer to an identical copy of pSample
IMediaSample * CTransInPlaceFilter::Copy(IMediaSample *pSource)
{
    IMediaSample * pDest;

    HRESULT hr;
    REFERENCE_TIME tStart, tStop;
    const BOOL bTime = S_OK == pSource->GetTime( &tStart, &tStop);

    // this may block for an indeterminate amount of time
    hr = OutputPin()->PeekAllocator()->GetBuffer(
              &pDest
              , bTime ? &tStart : NULL
              , bTime ? &tStop : NULL
              , m_bSampleSkipped ? AM_GBF_PREVFRAMESKIPPED : 0
              );

    if (FAILED(hr)) {
        return NULL;
    }

    ASSERT(pDest);
    IMediaSample2 *pSample2;
    if (SUCCEEDED(pDest->QueryInterface(IID_IMediaSample2, (void **)&pSample2))) {
        HRESULT hr = pSample2->SetProperties(
            FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, pbBuffer),
            (PBYTE)m_pInput->SampleProps());
        pSample2->Release();
        if (FAILED(hr)) {
            pDest->Release();
            return NULL;
        }
    } else {
        if (bTime) {
            pDest->SetTime(&tStart, &tStop);
        }

        if (S_OK == pSource->IsSyncPoint()) {
            pDest->SetSyncPoint(TRUE);
        }
        if (S_OK == pSource->IsDiscontinuity() || m_bSampleSkipped) {
            pDest->SetDiscontinuity(TRUE);
        }
        if (S_OK == pSource->IsPreroll()) {
            pDest->SetPreroll(TRUE);
        }

        // Copy the media type
        AM_MEDIA_TYPE *pMediaType;
        if (S_OK == pSource->GetMediaType(&pMediaType)) {
            pDest->SetMediaType(pMediaType);
            DeleteMediaType( pMediaType );
        }

    }

    m_bSampleSkipped = FALSE;

    // Copy the sample media times
    REFERENCE_TIME TimeStart, TimeEnd;
    if (pSource->GetMediaTime(&TimeStart,&TimeEnd) == NOERROR) {
        pDest->SetMediaTime(&TimeStart,&TimeEnd);
    }

    // Copy the actual data length and the actual data.
    {
        const long lDataLength = pSource->GetActualDataLength();
        pDest->SetActualDataLength(lDataLength);

        // Copy the sample data
        {
            BYTE *pSourceBuffer, *pDestBuffer;
            long lSourceSize  = pSource->GetSize();
            long lDestSize = pDest->GetSize();

            ASSERT(lDestSize >= lSourceSize && lDestSize >= lDataLength);

            pSource->GetPointer(&pSourceBuffer);
            pDest->GetPointer(&pDestBuffer);
            ASSERT(lDestSize == 0 || pSourceBuffer != NULL && pDestBuffer != NULL);

            CopyMemory( (PVOID) pDestBuffer, (PVOID) pSourceBuffer, lDataLength );
        }
    }

    return pDest;

} // Copy
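// CTransInPlaceFilter::Receive() only calls Copy() when the input and output
// pins ended up on different allocators; per the base classes it does roughly:
//
//     if (UsingDifferentAllocators()) {
//         pSample = Copy(pSample);        // duplicate into an output buffer
//         if (pSample == NULL)
//             return E_UNEXPECTED;
//     }
//     hr = Transform(pSample);            // modify the (copied) sample in place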