Example #1
LRESULT Tacm::streamOpen(LPACMDRVSTREAMINSTANCE padsi)
{
    LPWAVEFORMATEX pwfxSrc = padsi->pwfxSrc;
    if (pwfxSrc->wFormatTag != WAVE_FORMAT_AVIS) {
        return ACMERR_NOTPOSSIBLE;
    }
    LPWAVEFORMATEX pwfxDst = padsi->pwfxDst;
    if (!avisIsValidFormat(pwfxSrc) || !pcmIsValidFormat(pwfxDst)) {
        return ACMERR_NOTPOSSIBLE;
    }
    padsi->fdwDriver = 0L;
    padsi->dwDriver = (DWORD_PTR)this;
    if (ACM_STREAMOPENF_QUERY & padsi->fdwOpen) {
        return MMSYSERR_NOERROR;    //only querying
    }
    if (!avisynth) {
        CMediaType mt;
        mt.formattype = FORMAT_WaveFormatEx;
        mt.SetFormat((BYTE*)pwfxSrc, sizeof(*pwfxSrc) + pwfxSrc->cbSize);
        TsampleFormat fmt;
        avisynth = new TavisynthAudio(mt, fmt, NULL, "ffdshow_acm_avisynth_script");
    }
    if (avisynth->ok) {
        bytesPerSample = avisynth->vi->BytesPerAudioSample();
        fps_denominator = avisynth->vi->fps_denominator;
        fps_numerator = avisynth->vi->fps_numerator;
        return MMSYSERR_NOERROR;
    } else {
        OutputDebugString(_l("ffacm error"));//ffvfw->dbgError(err.msg);
        return ACMERR_NOTPOSSIBLE;
    }
}
Example #2
HRESULT CCaptureAudio::CreateCaptureSampleGrabber()
{
	HRESULT hr=NOERROR;
	if(m_pSampleGrabberFilter==NULL)
	{
		hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, 
			(LPVOID *)&m_pSampleGrabberFilter);
		if(FAILED(hr))
		{
			SAFE_RELEASE(m_pSampleGrabberFilter);
			//ERR_DEBUG("CreateCaptureGraphBuilder  QueryInterface m_pSampleGrabberFilter Failed");
			return hr;
		}
	}
	if(m_pSampleGrabber==NULL)
	{
		hr = m_pSampleGrabberFilter->QueryInterface(IID_ISampleGrabber, (void**)&m_pSampleGrabber);
		if(FAILED(hr))
		{
			SAFE_RELEASE(m_pSampleGrabberFilter);
			SAFE_RELEASE(m_pSampleGrabber);
			//ERR_DEBUG("CreateCaptureGraphBuilder  QueryInterface m_pSampleGrabber Failed");
			return hr;
		}

		CMediaType audioType;
		audioType.SetType(&MEDIATYPE_Audio);
		hr = m_pSampleGrabber->SetMediaType( &audioType );
		hr = m_pGraphBuilder->AddFilter(m_pSampleGrabberFilter, L"Grabber");
	}
	return hr;
}
Example #3
CMediaType CLAVFAudioHelper::initAudioType(AVCodecID codecId, unsigned int &codecTag, std::string container)
{
    CMediaType mediaType;
    mediaType.InitMediaType();
    mediaType.majortype = MEDIATYPE_Audio;
    mediaType.subtype = FOURCCMap(codecTag);
    mediaType.formattype = FORMAT_WaveFormatEx; //default value
    mediaType.SetSampleSize(256000);

    // Check against values from the map above
    for(unsigned i = 0; i < countof(audio_map); ++i) {
        if (audio_map[i].codec == codecId) {
            if (audio_map[i].subtype)
                mediaType.subtype = *audio_map[i].subtype;
            if (audio_map[i].codecTag)
                codecTag = audio_map[i].codecTag;
            if (audio_map[i].format)
                mediaType.formattype = *audio_map[i].format;
            break;
        }
    }

    // special cases
    switch(codecId)
    {
    case AV_CODEC_ID_PCM_F64LE:
        // Qt PCM
        if (codecTag == MKTAG('f', 'l', '6', '4')) mediaType.subtype = MEDIASUBTYPE_PCM_FL64_le;
        break;
    }
    return mediaType;
}
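Note: the audio_map table consulted above (and the video_map used by initVideoType in Example #6) is not part of this excerpt. A plausible sketch of an entry, with every name and value below an assumption rather than something taken from the LAV sources:

// Hypothetical lookup-table entry; the real definition lives elsewhere
// in the LAV Filters sources and may differ.
struct media_map_entry {
    AVCodecID    codec;     // ffmpeg codec id to match
    const GUID  *subtype;   // optional subtype override (NULL = keep FOURCC)
    unsigned int codecTag;  // optional codec tag override (0 = keep)
    const GUID  *format;    // optional format-type override (NULL = keep)
};

// Illustrative entry only:
static const media_map_entry audio_map[] = {
    { AV_CODEC_ID_AAC, &MEDIASUBTYPE_AAC, 0x00FF, NULL },
};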
Example #4
HRESULT CConvert::AddAudioGroup()
{
	HRESULT hr;
	hr = m_pTimeline->CreateEmptyNode(&m_pAudioGroupObj, TIMELINE_MAJOR_TYPE_GROUP );
    if(FAILED( hr )) 
    {
        return hr;
    }

	CComQIPtr<IAMTimelineGroup, &IID_IAMTimelineGroup> pAudioGroup(m_pAudioGroupObj);
    CMediaType AudioGroupType;

    // all we set is the major type. The group will automatically use other defaults
    AudioGroupType.SetType( &MEDIATYPE_Audio);
    hr = pAudioGroup->SetMediaType( &AudioGroupType );
    if(FAILED( hr )) 
    {
        return hr;
    }
	
	hr = m_pTimeline->AddGroup(m_pAudioGroupObj);
    if(FAILED( hr )) 
    {
        return hr;
    }
	
	return hr;
}
Example #5
HRESULT CStreamParser::ParsePlanarPCM(Packet *pPacket)
{
  CMediaType mt = m_pPin->GetActiveMediaType();

  WORD nChannels = 0, nBPS = 0, nBlockAlign = 0;
  audioFormatTypeHandler(mt.Format(), mt.FormatType(), nullptr, &nChannels, &nBPS, &nBlockAlign, nullptr);

  // Mono needs no special handling
  if (nChannels == 1)
    return Queue(pPacket);

  Packet *out = new Packet();
  out->CopyProperties(pPacket);
  out->SetDataSize(pPacket->GetDataSize());

  int nBytesPerChannel = nBPS / 8;
  int nAudioBlocks = pPacket->GetDataSize() / nChannels; // bytes per channel plane
  BYTE *out_data = out->GetData();
  const BYTE *in_data = pPacket->GetData();

  for (int i = 0; i < nAudioBlocks; i += nBytesPerChannel) {
    // interleave the channels into audio blocks
    for (int c = 0; c < nChannels; c++) {
      memcpy(out_data + (c * nBytesPerChannel), in_data + (nAudioBlocks * c), nBytesPerChannel);
    }
    // Skip to the next output block
    out_data += nChannels * nBytesPerChannel;

    // skip to the next input sample
    in_data += nBytesPerChannel;
  }

  return Queue(out);
}
Example #6
CMediaType CLAVFVideoHelper::initVideoType(AVCodecID codecId, unsigned int &codecTag, std::string container)
{
  CMediaType mediaType;
  mediaType.InitMediaType();
  mediaType.majortype = MEDIATYPE_Video;
  mediaType.subtype = FOURCCMap(codecTag);
  mediaType.formattype = FORMAT_VideoInfo; //default value

    // Check against values from the map above
  for(unsigned i = 0; i < countof(video_map); ++i) {
    if (video_map[i].codec == codecId) {
      if (video_map[i].subtype)
        mediaType.subtype = *video_map[i].subtype;
      if (video_map[i].codecTag)
        codecTag = video_map[i].codecTag;
      if (video_map[i].format)
         mediaType.formattype = *video_map[i].format;
      break;
    }
  }

  switch(codecId)
  {
  // All these codecs should use VideoInfo2
  case AV_CODEC_ID_ASV1:
  case AV_CODEC_ID_ASV2:
  case AV_CODEC_ID_FLV1:
  case AV_CODEC_ID_HUFFYUV:
  case AV_CODEC_ID_WMV3:
    mediaType.formattype = FORMAT_VideoInfo2;
    break;
  case AV_CODEC_ID_MPEG4:
    if (container == "mp4") {
      mediaType.formattype = FORMAT_MPEG2Video;
    } else if (container == "mpegts") {
      mediaType.formattype = FORMAT_VideoInfo2;
      mediaType.subtype = MEDIASUBTYPE_MP4V;
    } else {
      mediaType.formattype = FORMAT_VideoInfo2;
    }
    break;
  case AV_CODEC_ID_VC1:
    if (codecTag != MKTAG('W','M','V','A'))
      codecTag = MKTAG('W','V','C','1');
    mediaType.formattype = FORMAT_VideoInfo2;
    mediaType.subtype = FOURCCMap(codecTag);
    break;
  case AV_CODEC_ID_DVVIDEO:
    if (codecTag == 0)
      mediaType.subtype = MEDIASUBTYPE_DVCP;
    break;
  }

  return mediaType;
}
Example #7
HRESULT StaticSourceVideoPin::ReconnectWithChangesSync(void)
{
	HRESULT hr = S_OK;
	CMediaType mediaType;

	CHECK_HR(hr = mediaType.Set(this->m_mt));
	CHECK_HR(hr = ApplyParametersToMT(&mediaType));

	CHECK_HR(hr = this->m_pFilter->ReconnectPinSync(this, &mediaType));

done:
	return hr;
}
Example #8
bool spk2mt(Speakers spk, CMediaType &mt, int i)
{
  if (spk.format == FORMAT_SPDIF)
  {
    // SPDIF media types
    if (i < 0 || i >= 2)
      return false;

    std::auto_ptr<WAVEFORMATEX> wfe(spk2wfe(spk, 0));
    if (!wfe.get())
      return false;

    mt.SetType(&MEDIATYPE_Audio);
    mt.SetSubtype(i == 0? &MEDIASUBTYPE_DOLBY_AC3_SPDIF: &MEDIASUBTYPE_PCM);
    mt.SetFormatType(&FORMAT_WaveFormatEx);
    mt.SetFormat((BYTE*)wfe.get(), sizeof(WAVEFORMATEX) + wfe->cbSize);
    return true;
  }
  else if (FORMAT_MASK(spk.format) & FORMAT_CLASS_PCM)
  {
    // PCM media types
    std::auto_ptr<WAVEFORMATEX> wfe(spk2wfe(spk, i));
    if (!wfe.get())
      return false;

    mt.SetType(&MEDIATYPE_Audio);
    mt.SetSubtype(&MEDIASUBTYPE_PCM);
    mt.SetFormatType(&FORMAT_WaveFormatEx);
    mt.SetFormat((BYTE*)wfe.get(), sizeof(WAVEFORMATEX) + wfe->cbSize);
    return true;
  }
  else
    return false;
}
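spk2wfe, which allocates the WAVEFORMATEX consumed above, is not shown. A minimal sketch of its PCM case, assuming Speakers exposes a channel count and sample rate under these names (both assumptions) and fixing 16-bit samples for brevity:

// Hedged sketch only: the accessor names on Speakers are assumptions.
WAVEFORMATEX *spk2wfe_sketch(const Speakers &spk)
{
  WAVEFORMATEX *wfe = new WAVEFORMATEX();
  memset(wfe, 0, sizeof(*wfe));
  wfe->wFormatTag      = WAVE_FORMAT_PCM;
  wfe->nChannels       = (WORD)spk.nch();   // assumed accessor
  wfe->nSamplesPerSec  = spk.sample_rate;   // assumed field
  wfe->wBitsPerSample  = 16;                // fixed for the sketch
  wfe->nBlockAlign     = wfe->nChannels * wfe->wBitsPerSample / 8;
  wfe->nAvgBytesPerSec = wfe->nSamplesPerSec * wfe->nBlockAlign;
  wfe->cbSize          = 0;                 // no trailing extra data
  return wfe; // heap-allocated: matches the auto_ptr ownership above
}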
Example #9
HRESULT COggSplitter::CreateOutputPins()
{
    CAutoLock	lock(&m_csFilter);
	int			i;
	COggSplitOutputPin	*pPin;

	// Create a pin for the active streams ...
	for (i=0; i<m_iStreams; i++)
		if (m_paStream[i]->m_bEnabled)
		{
			CMediaType	*pmt = &(m_paStream[i]->m_mt);

			if (*(pmt->Type()) != MEDIATYPE_NULL)
			{
				
				HRESULT				hr = NOERROR;
				wchar_t				PinNameL[64];
				char				PinName[64];
				
				if (*(pmt->Type()) == MEDIATYPE_Video)
					wsprintfA(PinName, "Video %d", m_paStream[i]->m_iStreamID);
				else if (*(pmt->Type()) == MEDIATYPE_Audio)
					wsprintfA(PinName, "Audio %d", m_paStream[i]->m_iStreamID);
				else if (*(pmt->Type()) == MEDIATYPE_Text)
					wsprintfA(PinName, "Subtitle %d", m_paStream[i]->m_iStreamID);
				else
					wsprintfA(PinName, "Stream %d", m_paStream[i]->m_iStreamID);

				wsprintfW(PinNameL, L"%S", PinName);
				pPin = new COggSplitOutputPin(PinName, this, &m_csFilter, &hr, PinNameL);
				if (FAILED(hr)) {
					delete pPin;
					return E_OUTOFMEMORY;
				}
				{
					// Add the pin the array ...
					COggSplitOutputPin **paOutput = new COggSplitOutputPin *[m_iOutputs+1];
					if (!paOutput) return E_OUTOFMEMORY;
					// Copy the array if there was one before
					if (m_paOutput)
					{
						CopyMemory((void*)paOutput, (void*)m_paOutput, m_iOutputs * sizeof(m_paOutput[0]));
						delete [] m_paOutput;
					}
					m_paOutput = paOutput;
					m_paOutput[m_iOutputs] = pPin;
					m_iOutputs++;

					m_paStream[i]->m_pPin = pPin;
					pPin->m_pStream = m_paStream[i];
				}
			}
		}

	return NOERROR;
}
Example #10
bool CDSMSplitterFile::Read(__int64 len, BYTE& id, CMediaType& mt)
{
	id = (BYTE)BitRead(8);
	ByteRead((BYTE*)&mt.majortype, sizeof(mt.majortype));
	ByteRead((BYTE*)&mt.subtype, sizeof(mt.subtype));
	mt.bFixedSizeSamples = (BOOL)BitRead(1);
	mt.bTemporalCompression = (BOOL)BitRead(1);
	mt.lSampleSize = (ULONG)BitRead(30);
	ByteRead((BYTE*)&mt.formattype, sizeof(mt.formattype));
	len -= 5 + sizeof(GUID)*3;
	ASSERT(len >= 0);
	if(len > 0) {
		mt.AllocFormatBuffer((LONG)len);
		ByteRead(mt.Format(), mt.FormatLength());
	} else {
		mt.ResetFormatBuffer();
	}
	return true;
}
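The record decoded above is: one stream-id byte, the major/sub type GUIDs, a packed 32-bit field (1 bit fixed-size flag, 1 bit temporal compression, 30 bits sample size), the format-type GUID, then the raw format block, which is how len -= 5 + sizeof(GUID)*3 falls out. A sketch of a matching serializer, where BitWrite and ByteWrite are hypothetical mirrors of the reader's helpers:

// Hypothetical writer mirroring CDSMSplitterFile::Read(); BitWrite and
// ByteWrite are assumed counterparts of BitRead/ByteRead.
void WriteMediaTypeRecord(BYTE id, const CMediaType& mt)
{
	BitWrite(id, 8);
	ByteWrite((const BYTE*)&mt.majortype, sizeof(mt.majortype));
	ByteWrite((const BYTE*)&mt.subtype, sizeof(mt.subtype));
	BitWrite(mt.bFixedSizeSamples ? 1 : 0, 1);
	BitWrite(mt.bTemporalCompression ? 1 : 0, 1);
	BitWrite(mt.lSampleSize, 30);
	ByteWrite((const BYTE*)&mt.formattype, sizeof(mt.formattype));
	ByteWrite(mt.Format(), mt.FormatLength());
}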
Example #11
bool CWAVFile::SetMediaType(CMediaType& mt)
{
	if (!m_fmtdata || !m_fmtsize) {
		return false;
	}

	mt.majortype	= MEDIATYPE_Audio;
	mt.formattype	= FORMAT_WaveFormatEx;
	mt.subtype		= m_subtype;
	mt.SetSampleSize(m_blocksize);

	memcpy(mt.AllocFormatBuffer(m_fmtsize), m_fmtdata, m_fmtsize);

	return true;
}
Example #12
//
// Classify the streams and set the groupID
//
void COggSplitter::SetGroupID()
{
	int i = 0;

	while (i<m_iStreams)
	{
		CMediaType	*pmt = &(m_paStream[i]->m_mt);
		int			iMaxGroupID = -1;
		m_paStream[i]->m_iGroupID = -1;

		for (int j=0; j<i; j++)
		{
			CMediaType	*pmt_cmp = &(m_paStream[j]->m_mt);

			if (m_paStream[j]->m_iGroupID > iMaxGroupID)
				iMaxGroupID = m_paStream[j]->m_iGroupID;

			if (*(pmt->Type())    == *(pmt_cmp->Type()) &&
				*(pmt->Subtype()) == *(pmt_cmp->Subtype()))
			{
				m_paStream[i]->m_iGroupID = m_paStream[j]->m_iGroupID;
				m_paStream[i]->Enable(m_bEnableAll);
			}
		}
		
		if (m_paStream[i]->m_iGroupID == -1)
		{ // This is the first stream of this type
			if (*(m_paStream[i]->m_mt.Type()) == MEDIATYPE_Text && !m_bEnableAll)
			{
				// We must create a dummy stream ...
				COggStream* pStream;
				InsertStream(i,&pStream, 65535, true);
				m_paStream[i]->m_iGroupID = iMaxGroupID + 1;
				m_paStream[i]->Enable(true);
				i++;
				m_paStream[i]->m_iGroupID = iMaxGroupID + 1;
				m_paStream[i]->Enable(false);
			}
			else
			{
				m_paStream[i]->m_iGroupID = iMaxGroupID + 1;
				m_paStream[i]->Enable(TRUE);
			}
		}

		i++;
	}
}
Example #13
STDMETHODIMP GetFormat(CPushPin *pPin, int iIndex, AM_MEDIA_TYPE **ppmt)
{
	CheckPointer(ppmt, E_POINTER);

	HRESULT hr;
	CMediaType mt;

	hr = pPin->GetMediaType(iIndex, &mt);
	if(FAILED(hr)) return hr;

	*ppmt = static_cast<AM_MEDIA_TYPE*>(CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)));
	if(*ppmt == NULL) return E_OUTOFMEMORY;
	**ppmt = mt;
	(*ppmt)->pbFormat = static_cast<BYTE*>(CoTaskMemAlloc((*ppmt)->cbFormat));
	if((*ppmt)->pbFormat == NULL)
	{
		CoTaskMemFree(*ppmt);
		*ppmt = NULL;
		return E_OUTOFMEMORY;
	}
	memcpy((*ppmt)->pbFormat, mt.Format(), (*ppmt)->cbFormat);

	return S_OK;
}
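The allocation pair above reimplements what the DirectShow base classes already ship as CreateMediaType (declared in mtype.h), which allocates the AM_MEDIA_TYPE and duplicates its format block in one call. The same function could reduce to:

// Equivalent body using the base-class helper; behavior matches the
// manual CoTaskMemAlloc/memcpy version above.
STDMETHODIMP GetFormat(CPushPin *pPin, int iIndex, AM_MEDIA_TYPE **ppmt)
{
	CheckPointer(ppmt, E_POINTER);

	CMediaType mt;
	HRESULT hr = pPin->GetMediaType(iIndex, &mt);
	if(FAILED(hr)) return hr;

	*ppmt = CreateMediaType(&mt);
	return *ppmt ? S_OK : E_OUTOFMEMORY;
}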
Example #14
HRESULT CWaveOutRenderer::CompleteConnect(IPin* pin)
{
    if (!pin)
        return E_POINTER;

    HRESULT r = CBaseRenderer::CompleteConnect(pin);
    if (FAILED(r))
        return r;

    if (!m_outPut)
        m_outPut.reset(new CWaveOutput(0));

    CMediaType mt;
    r = pin->ConnectionMediaType(&mt);
    if (FAILED(r))
        return r;

    WAVEFORMATEX* format = reinterpret_cast<WAVEFORMATEX*>(mt.Format());

    return m_outPut->Init(format) ? S_OK : E_FAIL;
}
Example #15
void CBaseDemuxer::CreatePGSForcedSubtitleStream()
{
  stream s;
  s.pid = FORCED_SUBTITLE_PID;
  s.streamInfo = new CStreamInfo();
  s.language = "und";
  // Create the media type
  CMediaType mtype;
  mtype.majortype = MEDIATYPE_Subtitle;
  mtype.subtype = MEDIASUBTYPE_HDMVSUB;
  mtype.formattype = FORMAT_SubtitleInfo;
  SUBTITLEINFO *subInfo = (SUBTITLEINFO *)mtype.AllocFormatBuffer(sizeof(SUBTITLEINFO));
  memset(subInfo, 0, mtype.FormatLength());
  wcscpy_s(subInfo->TrackName, FORCED_SUB_STRING);
  subInfo->dwOffset = sizeof(SUBTITLEINFO);
  s.streamInfo->mtypes.push_back(mtype);
  // Append it to the list
  m_streams[subpic].push_back(s);
}
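SUBTITLEINFO is not a Windows SDK type; it comes from the splitter's own headers (moreuuids.h in the MPC-HC/LAV tree). The layout the code above relies on, where dwOffset points past the struct to any codec init data, is commonly declared as follows; verify against your own headers:

// Commonly seen declaration; field sizes are an assumption here.
struct SUBTITLEINFO {
  DWORD dwOffset;        // offset to extra data; sizeof(SUBTITLEINFO) above
  CHAR  IsoLang[4];      // three-letter ISO 639-2 code, zero-terminated
  WCHAR TrackName[256];  // display name; receives FORCED_SUB_STRING above
};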
Example #16
XnVideoStream::Mode XnVideoStream::MediaTypeToMode(const CMediaType& mediaType)
{
	Mode result = {0};

	if (*mediaType.Type() != MEDIATYPE_Video)   // we only output video
	{                                                  
		xnLogError(XN_MASK_FILTER, "bad type");
		return result;
	}

	// Check for the subtypes we support
	const GUID *SubType = mediaType.Subtype();

	if (SubType && *SubType != GUID_NULL) 
	{
		if (*SubType == MEDIASUBTYPE_RGB24)
		{
			result.Format = XN_PIXEL_FORMAT_RGB24;
		}
		else if (*SubType == MEDIASUBTYPE_MJPG)
		{
			result.Format = XN_PIXEL_FORMAT_MJPEG;
		}
		else
		{
			xnLogVerbose(XN_MASK_FILTER, "bad subtype");
			return result;
		}
	}

	// Get the format area of the media type
	VIDEOINFO *pvi = (VIDEOINFO*)mediaType.Format();
	if (pvi == NULL)
	{
		return result;
	}

	result.OutputMode.nFPS = pvi->AvgTimePerFrame > 0 ? (XnUInt32)(10000000ULL / pvi->AvgTimePerFrame) : 0;
	result.OutputMode.nXRes = pvi->bmiHeader.biWidth;
	result.OutputMode.nYRes = pvi->bmiHeader.biHeight;
	return result;
}
Example #17
/// method which implements IAMStreamSelect.Info
/// returns an array of all audio streams available
STDMETHODIMP CBDReaderFilter::Info(long lIndex, AM_MEDIA_TYPE**ppmt, DWORD* pdwFlags, LCID* plcid, DWORD* pdwGroup, WCHAR** ppszName, IUnknown** ppObject, IUnknown** ppUnk)
{
  if (pdwFlags)
  {
    int audioIndex = 0;
    m_demultiplexer.GetAudioStream(audioIndex);

    //if (m_demultiplexer.GetAudioStream()==(int)lIndex)
    if (audioIndex == (int)lIndex)
      *pdwFlags = AMSTREAMSELECTINFO_EXCLUSIVE;
    else
      *pdwFlags = 0;
  }
  if (plcid) *plcid = 0;
  if (pdwGroup) *pdwGroup = m_demultiplexer.GetAudioStreamType((int)lIndex); //*pdwGroup = 1;
  if (ppObject) *ppObject = NULL;
  if (ppUnk) *ppUnk = NULL;
  if (ppszName)
  {
    char szName[40];
    m_demultiplexer.GetAudioStreamInfo((int)lIndex, szName);
    *ppszName = (WCHAR *)CoTaskMemAlloc(sizeof(szName) * sizeof(WCHAR));
    MultiByteToWideChar(CP_ACP, 0, szName, -1, *ppszName, sizeof(szName));
  }
  if (ppmt)
  {
    CMediaType mediaType;
    m_demultiplexer.GetAudioStreamPMT(mediaType);
    AM_MEDIA_TYPE* mType = (AM_MEDIA_TYPE*)(&mediaType);
    *ppmt = (AM_MEDIA_TYPE*)CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE));
    if (*ppmt)
    {
      memcpy(*ppmt, mType, sizeof(AM_MEDIA_TYPE));
      (*ppmt)->pbFormat = (BYTE*)CoTaskMemAlloc(mediaType.FormatLength());
      memcpy((*ppmt)->pbFormat, mType->pbFormat, mediaType.FormatLength());
    }
    else
      return S_FALSE;
  }
  return S_OK;
}
Example #18
HRESULT OutputPin::Push(void *buf, long size)
{
    HRESULT hr;
    IMediaSample *pSample;
    VIDEOINFOHEADER *vi;
    AM_MEDIA_TYPE *pmt;
    BYTE *dst_buf;

    /**
     * Hold the critical section here as the pin might get disconnected
     * during the Deliver() method call.
     */
    m_pLock->Lock();

    hr = GetDeliveryBuffer(&pSample, NULL, NULL, 0);
    if (FAILED(hr))
        goto on_error;

    pSample->GetMediaType(&pmt);
    if (pmt) {
        mediaType.Set(*pmt);
        bufSize = pmt->lSampleSize;
        DeleteMediaType(pmt);
    }

    pSample->GetPointer(&dst_buf);
    vi = (VIDEOINFOHEADER *)mediaType.pbFormat;
    if (vi->rcSource.right == vi->bmiHeader.biWidth) {
        assert(pSample->GetSize() >= size);
        memcpy(dst_buf, buf, size);
    } else {
        unsigned i, bpp;
        unsigned dststride, srcstride;
        BYTE *src_buf = (BYTE *)buf;

        bpp = size / abs(vi->bmiHeader.biHeight) / vi->rcSource.right;
        dststride = vi->bmiHeader.biWidth * bpp;
        srcstride = vi->rcSource.right * bpp;
        for (i = abs(vi->bmiHeader.biHeight); i > 0; i--) {
            memcpy(dst_buf, src_buf, srcstride);
            dst_buf += dststride;
            src_buf += srcstride;
        }
    }
    pSample->SetActualDataLength(size);

    hr = Deliver(pSample);

    pSample->Release();

on_error:
    m_pLock->Unlock();
    return hr;
}
Example #19
HRESULT CAudioDecFilter::CheckTransform(const CMediaType* mtIn, const CMediaType* mtOut)
{
	CheckPointer(mtIn, E_POINTER);
	CheckPointer(mtOut, E_POINTER);

	if (*mtOut->Type() == MEDIATYPE_Audio) {
		if (*mtOut->Subtype() == MEDIASUBTYPE_PCM) {

			// A debug assertion fires for GUID_NULL, so set a dummy type to avoid it
			CMediaType MediaType;
			MediaType.InitMediaType();
			MediaType.SetType(&MEDIATYPE_Stream);
			MediaType.SetSubtype(&MEDIASUBTYPE_None);

			m_pInput->SetMediaType(&MediaType);

			return S_OK;
		}
	}

	return VFW_E_TYPE_NOT_ACCEPTED;
}
Example #20
Packet* CClip::GenerateFakeAudio(REFERENCE_TIME rtStart)
{
  if (rtStart + FAKE_AUDIO_DURATION - 1 > playlistFirstPacketTime + clipDuration) 
    superceeded |= SUPERCEEDED_AUDIO_RETURN;
  
  if (superceeded&SUPERCEEDED_AUDIO_RETURN) 
    return NULL;
  
  if (!FakeAudioAvailable()) 
    return NULL;

  Packet* packet = new Packet();
  packet->nClipNumber = nClip;
    
  packet->SetCount(AC3_FRAME_LENGTH);
  packet->SetData(ac3_sample, AC3_FRAME_LENGTH);
  packet->rtStart = rtStart;
  packet->rtStop = packet->rtStart + 1;

  if (firstAudio)
  {
    CMediaType pmt;
    pmt.InitMediaType();
    pmt.SetType(&MEDIATYPE_Audio);
    pmt.SetSubtype(&MEDIASUBTYPE_DOLBY_AC3);
    pmt.SetSampleSize(1);
    pmt.SetTemporalCompression(FALSE);
    pmt.SetVariableSize();
    pmt.SetFormatType(&FORMAT_WaveFormatEx);
    pmt.SetFormat(AC3AudioFormat, sizeof(AC3AudioFormat));
    WAVEFORMATEXTENSIBLE* wfe = (WAVEFORMATEXTENSIBLE*)pmt.pbFormat;
    wfe->Format.nChannels = 6;
    wfe->Format.nSamplesPerSec = 48000;
    wfe->Format.wFormatTag = WAVE_FORMAT_DOLBY_AC3;

    packet->pmt = CreateMediaType(&pmt);
  }
  
  audioPlaybackPosition += FAKE_AUDIO_DURATION;
  lastAudioPosition += FAKE_AUDIO_DURATION;

  return packet;
}
Example #21
HRESULT CLAVAudio::UpdateBitstreamContext()
{
  if (!m_pInput || !m_pInput->IsConnected())
    return E_UNEXPECTED;

  BOOL bBitstream = IsBitstreaming(m_nCodecId);
  if ((bBitstream && !m_avBSContext) || (!bBitstream && m_avBSContext)) {
    CMediaType mt = m_pInput->CurrentMediaType();

    const void *format = mt.Format();
    GUID format_type = mt.formattype;
    DWORD formatlen = mt.cbFormat;

    // Override the format type
    if (mt.subtype == MEDIASUBTYPE_FFMPEG_AUDIO && format_type == FORMAT_WaveFormatExFFMPEG) {
      WAVEFORMATEXFFMPEG *wfexff = (WAVEFORMATEXFFMPEG *)mt.Format();
      format = &wfexff->wfex;
      format_type = FORMAT_WaveFormatEx;
      formatlen -= sizeof(WAVEFORMATEXFFMPEG) - sizeof(WAVEFORMATEX);
    }

    ffmpeg_init(m_nCodecId, format, format_type, formatlen);
    m_bQueueResync = TRUE;
  }

  // Configure DTS-HD setting
  if(m_avBSContext) {
    if (m_settings.bBitstream[Bitstream_DTSHD] && m_settings.DTSHDFraming && !m_bForceDTSCore) {
      m_bDTSHD = TRUE;
      av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", LAV_BITSTREAM_DTS_HD_RATE, 0);
    } else {
      m_bDTSHD = FALSE; // Force auto-detection
      av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", 0, 0);
    }
  }

  return S_OK;
}
Example #22
void CDeMultiplexer::GetAudioStreamPMT(CMediaType& pmt)
{
  // Fake audio in use
  if (m_AudioStreamType == NO_STREAM)
  {
    pmt.InitMediaType();
    pmt.SetType(&MEDIATYPE_Audio);
    pmt.SetSubtype(&MEDIASUBTYPE_DOLBY_AC3);
    pmt.SetSampleSize(1);
    pmt.SetTemporalCompression(FALSE);
    pmt.SetVariableSize();
    pmt.SetFormatType(&FORMAT_WaveFormatEx);
    pmt.SetFormat(AC3AudioFormat, sizeof(AC3AudioFormat));
  }
  else
    pmt = m_audioParser->pmt;
}
Example #23
HRESULT CAMRSplitter::ConfigureMediaType(CAMROutputPin *pin)
{
	CMediaType mt;
	mt.majortype	= MEDIATYPE_Audio;
	mt.subtype		= MEDIASUBTYPE_AMR;
	mt.formattype	= FORMAT_WaveFormatEx;
	mt.lSampleSize	= 1*1024;				// should be way enough

	ASSERT(file);

	// let us fill the waveformatex structure
	WAVEFORMATEX *wfx		= (WAVEFORMATEX*)mt.AllocFormatBuffer(sizeof(WAVEFORMATEX));
	memset(wfx, 0, sizeof(*wfx));
	wfx->wBitsPerSample		= 0;
	wfx->nChannels			= 1;
	wfx->nSamplesPerSec		= 8000;
	wfx->nBlockAlign		= 1;
	wfx->nAvgBytesPerSec	= 0;
	wfx->wFormatTag			= 0;

	// the one and only type
	pin->mt_types.Add(mt);
	return NOERROR;
}
Example #24
void CTSParserFilter::OutpinConnecting( IPin * pReceivePin, CBasePin* pFilterPin, const AM_MEDIA_TYPE *pmt )
{
	REMUXER*  pRemuxer;
	HRESULT hr = m_pInputPin->GetParser( &pRemuxer );
	if ( hr != NOERROR  )
		return ;

	if ( pFilterPin == m_pOutputPin )
	{
		CMediaType mt;
		mt.SetTemporalCompression(FALSE);
		mt.SetType(&MEDIATYPE_Stream);
		if ( IS_PS_TYPE( m_wOutputFormat ) )
			mt.SetSubtype(&MEDIASUBTYPE_MPEG2_PROGRAM ); 
		else
			mt.SetSubtype(&MEDIASUBTYPE_MPEG2_TRANSPORT);
		m_pOutputPin->DefaultMediaType( &mt );
	}
}
Example #25
HRESULT CSubtitlePin::CheckMediaType(const CMediaType* pmt)
{
  CheckPointer(pmt, E_POINTER);

  CDeMultiplexer& demux=m_pTsReaderFilter->GetDemultiplexer();
  
  if (!m_pTsReaderFilter->CheckCallback())
  {
    //LogDebug("subPin: Not running in MP - CheckMediaType() fail");
    return E_FAIL;
  }

  if (!demux.PatParsed())
  {
    return E_FAIL;
  }

  CMediaType pmti;
  CMediaType* ppmti = &pmti;
  
  ppmti->InitMediaType();
  ppmti->SetType      (& MEDIATYPE_Stream);
  ppmti->SetSubtype   (& MEDIASUBTYPE_MPEG2_TRANSPORT);
  ppmti->SetSampleSize(1);
  ppmti->SetTemporalCompression(FALSE);
  ppmti->SetVariableSize();    

  if(*pmt == *ppmti)
  {
    //LogDebug("subPin:CheckMediaType() ok");  
    return S_OK;
  }

  //LogDebug("subPin:CheckMediaType() fail");  
  return E_FAIL;
}
Example #26
HRESULT CQTDec::CompleteConnect(PIN_DIRECTION dir, IPin* pReceivePin)
{
	if(dir == PINDIR_INPUT)
	{
		m_mts.RemoveAll();

		VIDEOINFOHEADER* vihin = (VIDEOINFOHEADER*)m_pInput->CurrentMediaType().pbFormat;
		BITMAPINFOHEADER& bihin = vihin->bmiHeader;

		CMediaType mt;
		mt.majortype = MEDIATYPE_Video;
		mt.subtype = MEDIASUBTYPE_None;
		mt.formattype = FORMAT_VideoInfo;
		mt.bFixedSizeSamples = TRUE;
		mt.bTemporalCompression = FALSE;
		mt.lSampleSize = 0;
		mt.pUnk = NULL;

		VIDEOINFOHEADER vih;
		memset(&vih, 0, sizeof(vih));
		vih.AvgTimePerFrame = vihin->AvgTimePerFrame;
		vih.rcSource = vihin->rcSource;
		vih.rcTarget = vihin->rcTarget;
		vih.dwBitRate = vihin->dwBitRate;
		vih.dwBitErrorRate = vihin->dwBitErrorRate;

		BITMAPINFOHEADER& bih = vih.bmiHeader;
		bih.biSize = sizeof(bih);
		bih.biWidth = bihin.biWidth;
		bih.biHeight = abs(bihin.biHeight);
		bih.biPlanes = 1;
		bih.biXPelsPerMeter = bih.biYPelsPerMeter = 0;
		bih.biClrUsed = bih.biClrImportant = 0;

//		if(fRGB32) // always can decompress to (?)
		{
			VIDEOINFOHEADER* vihout = (VIDEOINFOHEADER*)mt.AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
			memcpy(vihout, &vih, sizeof(vih));

			BITMAPINFOHEADER& bihout = vihout->bmiHeader;
			bihout.biBitCount = 32;
			bihout.biSizeImage = bihout.biWidth*abs(bihout.biHeight)*bihout.biBitCount>>3;

			mt.subtype = MEDIASUBTYPE_RGB32;

			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biCompression = BI_BITFIELDS;
			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biHeight = -bih.biHeight;
			CorrectMediaType(&mt);
			m_mts.Add(mt);

			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biCompression = BI_RGB;
			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biHeight = bih.biHeight;
			CorrectMediaType(&mt);
			m_mts.Add(mt);
		}

//		if(fRGB16) // always can decompress to (?)
		{
			VIDEOINFOHEADER* vihout = (VIDEOINFOHEADER*)mt.AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
			memcpy(vihout, &vih, sizeof(vih));

			BITMAPINFOHEADER& bihout = vihout->bmiHeader;
			bihout.biBitCount = 16;
			bihout.biSizeImage = bihout.biWidth*abs(bihout.biHeight)*bihout.biBitCount>>3;

			mt.subtype = MEDIASUBTYPE_RGB565;

			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biCompression = BI_BITFIELDS;
			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biHeight = -bih.biHeight;
			CorrectMediaType(&mt);
			m_mts.Add(mt);

			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biCompression = BI_RGB;
			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biHeight = bih.biHeight;
			CorrectMediaType(&mt);
			m_mts.Add(mt);

			mt.subtype = MEDIASUBTYPE_RGB555;

			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biCompression = BI_BITFIELDS;
			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biHeight = -bih.biHeight;
			CorrectMediaType(&mt);
			m_mts.Add(mt);

			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biCompression = BI_RGB;
			((VIDEOINFOHEADER*)mt.pbFormat)->bmiHeader.biHeight = bih.biHeight;
			CorrectMediaType(&mt);
			m_mts.Add(mt);
		}
	}

	// delegate remaining directions to the base class
	return __super::CompleteConnect(dir, pReceivePin);
}
Example #27
HRESULT CLAVAudio::DeliverBitstream(AVCodecID codec, const BYTE *buffer, DWORD dwSize, DWORD dwFrameSize, REFERENCE_TIME rtStartInput, REFERENCE_TIME rtStopInput)
{
  HRESULT hr = S_OK;

  CMediaType mt = CreateBitstreamMediaType(codec, m_bsParser.m_dwSampleRate);
  WAVEFORMATEX* wfe = (WAVEFORMATEX*)mt.Format();

  if(FAILED(hr = ReconnectOutput(dwSize, mt))) {
    return hr;
  }

  IMediaSample *pOut;
  BYTE *pDataOut = NULL;
  if(FAILED(GetDeliveryBuffer(&pOut, &pDataOut))) {
    return E_FAIL;
  }

  REFERENCE_TIME rtStart = m_rtStart, rtStop = AV_NOPTS_VALUE;
  // TrueHD timings
  // Since the SPDIF muxer takes 24 frames and puts them into one IEC61937 frame, we use the cached timestamp from before.
  if (codec == AV_CODEC_ID_TRUEHD) {
    // long-term cache is valid
    if (m_rtBitstreamCache != AV_NOPTS_VALUE)
      rtStart = m_rtBitstreamCache;
    // Duration - stop time of the current frame is valid
    if (rtStopInput != AV_NOPTS_VALUE)
      rtStop = rtStopInput;
    else // no actual time of the current frame, use typical TrueHD frame size, 24 * 0.83333ms
      rtStop = rtStart + (REFERENCE_TIME)(200000 / m_dRate);
    m_rtStart = rtStop;
  } else {
    double dDuration = DBL_SECOND_MULT * (double)m_bsParser.m_dwSamples / m_bsParser.m_dwSampleRate / m_dRate;
    m_dStartOffset += fmod(dDuration, 1.0);

    // Add rounded duration to rtStop
    rtStop = rtStart + (REFERENCE_TIME)(dDuration + 0.5);
    // and unrounded to m_rtStart..
    m_rtStart += (REFERENCE_TIME)dDuration;
    // and accumulate error..
    if (m_dStartOffset > 0.5) {
      m_rtStart++;
      m_dStartOffset -= 1.0;
    }
  }

  REFERENCE_TIME rtJitter = rtStart - m_rtBitstreamCache;
  m_faJitter.Sample(rtJitter);

  REFERENCE_TIME rtJitterMin = m_faJitter.AbsMinimum();
  if (m_settings.AutoAVSync && abs(rtJitterMin) > m_JitterLimit && m_bHasVideo) {
    DbgLog((LOG_TRACE, 10, L"::Deliver(): corrected A/V sync by %I64d", rtJitterMin));
    m_rtStart -= rtJitterMin;
    m_faJitter.OffsetValues(-rtJitterMin);
    m_bDiscontinuity = TRUE;
  }

#ifdef DEBUG
  DbgLog((LOG_CUSTOM5, 20, L"Bitstream Delivery, rtStart(calc): %I64d, rtStart(input): %I64d, duration: %I64d, diff: %I64d", rtStart, m_rtBitstreamCache, rtStop-rtStart, rtJitter));

  if (m_faJitter.CurrentSample() == 0) {
    DbgLog((LOG_TRACE, 20, L"Jitter Stats: min: %I64d - max: %I64d - avg: %I64d", rtJitterMin, m_faJitter.AbsMaximum(), m_faJitter.Average()));
  }
#endif
  m_rtBitstreamCache = AV_NOPTS_VALUE;

  if(m_settings.AudioDelayEnabled) {
    REFERENCE_TIME rtDelay = (REFERENCE_TIME)((m_settings.AudioDelay * 10000i64) / m_dRate);
    rtStart += rtDelay;
    rtStop += rtDelay;
  }

  pOut->SetTime(&rtStart, &rtStop);
  pOut->SetMediaTime(NULL, NULL);

  pOut->SetPreroll(FALSE);
  pOut->SetDiscontinuity(m_bDiscontinuity);
  m_bDiscontinuity = FALSE;
  pOut->SetSyncPoint(TRUE);

  pOut->SetActualDataLength(dwSize);

  memcpy(pDataOut, buffer, dwSize);

  if(hr == S_OK) {
    hr = m_pOutput->GetConnected()->QueryAccept(&mt);
    if (hr == S_FALSE && m_nCodecId == AV_CODEC_ID_DTS && m_bDTSHD) {
      DbgLog((LOG_TRACE, 1, L"DTS-HD Media Type failed with %0#.8x, trying fallback to DTS core", hr));
      m_bForceDTSCore = TRUE;
      UpdateBitstreamContext();
      goto done;
    }
    DbgLog((LOG_TRACE, 1, L"Sending new Media Type (QueryAccept: %0#.8x)", hr));
    m_pOutput->SetMediaType(&mt);
    pOut->SetMediaType(&mt);
  }

  hr = m_pOutput->Deliver(pOut);
  if (FAILED(hr)) {
    DbgLog((LOG_ERROR, 10, L"::DeliverBitstream failed with code: %0#.8x", hr));
  }

done:
  SafeRelease(&pOut);
  return hr;
}
Example #28
CMediaType CLAVAudio::CreateBitstreamMediaType(AVCodecID codec, DWORD dwSampleRate, BOOL bDTSHDOverride)
{
   CMediaType mt;

   mt.majortype  = MEDIATYPE_Audio;
   mt.subtype    = MEDIASUBTYPE_PCM;
   mt.formattype = FORMAT_WaveFormatEx;

   WAVEFORMATEXTENSIBLE wfex;
   memset(&wfex, 0, sizeof(wfex));

   WAVEFORMATEX* wfe = &wfex.Format;

   wfe->nChannels = 2;
   wfe->wBitsPerSample = 16;

   GUID subtype = GUID_NULL;

   switch(codec) {
   case AV_CODEC_ID_AC3:
     wfe->wFormatTag     = WAVE_FORMAT_DOLBY_AC3_SPDIF;
     wfe->nSamplesPerSec = min(dwSampleRate, 48000);
     break;
   case AV_CODEC_ID_EAC3:
     wfe->nSamplesPerSec = 192000;
     wfe->nChannels      = 2;
     subtype = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS;
     break;
   case AV_CODEC_ID_TRUEHD:
     wfe->nSamplesPerSec = 192000;
     wfe->nChannels      = 8;
     subtype = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP;
     break;
   case AV_CODEC_ID_DTS:
     if (m_settings.bBitstream[Bitstream_DTSHD] && m_bDTSHD && !bDTSHDOverride) {
       wfe->nSamplesPerSec = 192000;
       wfe->nChannels      = 8;
       subtype = KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD;
     } else {
       wfe->wFormatTag     = WAVE_FORMAT_DOLBY_AC3_SPDIF; // huh? but it works.
       wfe->nSamplesPerSec = min(dwSampleRate, 48000);
     }
     break;
   default:
     ASSERT(0);
     break;
   }

   wfe->nBlockAlign = wfe->nChannels * wfe->wBitsPerSample / 8;
   wfe->nAvgBytesPerSec = wfe->nSamplesPerSec * wfe->nBlockAlign;

   if (subtype != GUID_NULL) {
      wfex.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
      wfex.Format.cbSize = sizeof(wfex) - sizeof(wfex.Format);
      wfex.dwChannelMask = get_channel_mask(wfe->nChannels);
      wfex.Samples.wValidBitsPerSample = wfex.Format.wBitsPerSample;
      wfex.SubFormat = subtype;
   }

   mt.SetSampleSize(1);
   mt.SetFormat((BYTE*)&wfex, sizeof(wfex.Format) + wfex.Format.cbSize);

   return mt;
}
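get_channel_mask is not included in this excerpt. For these bitstreaming formats it only has to supply a sensible dwChannelMask for 2 or 8 channels; a minimal stand-in using the standard ksmedia.h speaker layouts:

// Minimal stand-in for get_channel_mask(); the real LAV helper may
// handle more layouts. Masks are standard KSAUDIO_SPEAKER_* values.
static DWORD get_channel_mask(int num_channels)
{
  switch (num_channels) {
  case 1:  return KSAUDIO_SPEAKER_MONO;
  case 2:  return KSAUDIO_SPEAKER_STEREO;
  case 6:  return KSAUDIO_SPEAKER_5POINT1;
  case 8:  return KSAUDIO_SPEAKER_7POINT1_SURROUND;
  default: return 0; // unknown: leave it to the renderer
  }
}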
Example #29
static int v4w_open_videodevice(V4wState *s, int format, MSVideoSize *vsize)
{
	// Initialize COM
	CoInitialize(NULL);

	// get a Graph
	HRESULT hr=s->m_pGraph.CoCreateInstance(CLSID_FilterGraph);
	if(FAILED(hr))
	{
		return -1;
	}

	// get a CaptureGraphBuilder2
#if !defined(_WIN32_WCE)
	hr=s->m_pBuilder.CoCreateInstance(CLSID_CaptureGraphBuilder2);
#else
	hr=s->m_pBuilder.CoCreateInstance(CLSID_CaptureGraphBuilder);
#endif
	if(FAILED(hr))
	{
		return -2;
	}

	// connect capture graph builder with the graph
	s->m_pBuilder->SetFiltergraph(s->m_pGraph);

	// get mediacontrol so we can start and stop the filter graph
	hr=s->m_pGraph.QueryInterface(&(s->m_pControl));
	if(FAILED(hr))
	{
		return -3;
	}

	// get DXFilter
	s->m_pDXFilter = new CDXFilter(NULL, &hr, FALSE);
	if(s->m_pDXFilter==NULL)
	{
		return -4;
	}
	s->m_pDXFilter->AddRef();
	if(FAILED(hr))
	{
		return -4;
	}

	CMediaType mt;
	mt.SetType(&MEDIATYPE_Video);

	if (format==MS_YUV420P)
	{
		GUID m = (GUID)FOURCCMap(MAKEFOURCC('I','4','2','0'));
		mt.SetSubtype(&m);
		mt.SetSubtype(&MEDIASUBTYPE_YV12);	// overrides the I420 subtype set just above
	}
	else //if (format==MS_RGB24)
	{
		mt.SetSubtype(&MEDIASUBTYPE_RGB24);
	}

	//mt.SetSubtype(&MEDIASUBTYPE_IYUV);
	//mt.SetSubtype(&MEDIASUBTYPE_YUYV);
	//mt.SetSubtype(&MEDIASUBTYPE_RGB24);
	//mt.SetSampleSize();
	mt.formattype = FORMAT_VideoInfo;
	mt.SetTemporalCompression(FALSE);

	VIDEOINFO *pvi = (VIDEOINFO *)mt.AllocFormatBuffer(sizeof(VIDEOINFO));
	if (NULL == pvi)
		return E_OUTOFMEMORY;
	ZeroMemory(pvi, sizeof(VIDEOINFO));
	if (format==MS_YUV420P)
	{
		pvi->bmiHeader.biCompression = MAKEFOURCC('I','4','2','0');
		pvi->bmiHeader.biCompression = MAKEFOURCC('Y','V','1','2');	// overrides I420; YV12 is what is used
		pvi->bmiHeader.biBitCount = 12;
	}
	else
	{
		pvi->bmiHeader.biCompression = BI_RGB;
		pvi->bmiHeader.biBitCount = 24;
	}
	pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
	pvi->bmiHeader.biWidth = vsize->width;
	pvi->bmiHeader.biHeight = vsize->height;
	pvi->bmiHeader.biPlanes = 1;
	pvi->bmiHeader.biSizeImage = GetBitmapSize(&pvi->bmiHeader);
	pvi->bmiHeader.biClrImportant = 0;
	mt.SetSampleSize(pvi->bmiHeader.biSizeImage);
	mt.SetFormat((BYTE*)pvi, sizeof(VIDEOINFO));

	hr = s->m_pDXFilter->SetAcceptedMediaType(&mt);
	if(FAILED(hr))
	{
		return -5;
	}

	hr = s->m_pDXFilter->SetCallback(Callback); 
	if(FAILED(hr))
	{
		return -6;
	}

	hr = s->m_pDXFilter->QueryInterface(IID_IBaseFilter,
	 (LPVOID *)&s->m_pIDXFilter);
	if(FAILED(hr))
	{
		return -7;
	}

	hr = s->m_pGraph->AddFilter(s->m_pIDXFilter, L"DXFilter Filter");
	if(FAILED(hr))
	{
		return -8;
	}

#ifdef WM6
	ICreateDevEnum *pCreateDevEnum = NULL;
	IEnumMoniker *pEnumMoniker = NULL;
	IMoniker *pMoniker = NULL;

	ULONG nFetched = 0;

	hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, 
		IID_ICreateDevEnum, (PVOID *)&pCreateDevEnum);
	if(FAILED(hr))
	{
		return -9;
	}

	hr = pCreateDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
		&pEnumMoniker, 0);
	if (FAILED(hr) || pEnumMoniker == NULL) {
		//printf("no device\n");
		return -10;
	}

	pEnumMoniker->Reset();

	hr = pEnumMoniker->Next(1, &pMoniker, &nFetched);
	if(FAILED(hr) || pMoniker==NULL)
	{
		return -11;
	}

	hr = pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&s->m_pDeviceFilter );
	if(FAILED(hr))
	{
		return -12;
	}

	s->m_pGraph->AddFilter(s->m_pDeviceFilter, L"Device Filter");

	pMoniker->Release();
	pEnumMoniker->Release();
	pCreateDevEnum->Release();
#else
	WCHAR wzDeviceName[ MAX_PATH + 1 ];
	CComVariant   varCamName;
	CPropertyBag PropBag;
    CComPtr<IPersistPropertyBag>    pPropertyBag;
	GetFirstCameraDriver(wzDeviceName);

	hr = s->m_pDeviceFilter.CoCreateInstance( CLSID_VideoCapture ); 
	if (FAILED(hr))
	{
		return -8;
	}

	s->m_pDeviceFilter.QueryInterface( &pPropertyBag );
	varCamName = wzDeviceName;
	if( varCamName.vt != VT_BSTR ) {
	  return E_OUTOFMEMORY;
	}
	PropBag.Write( L"VCapName", &varCamName );   
	pPropertyBag->Load( &PropBag, NULL );
	pPropertyBag.Release();

	hr = s->m_pGraph->AddFilter( s->m_pDeviceFilter, L"Video capture source" );
#endif

	if (FAILED(hr))
	{
		return -8;
	}

	// get null renderer
	s->m_pNullRenderer = NULL;
#if 0
	hr=s->m_pNullRenderer.CoCreateInstance(CLSID_NullRenderer);
	if(FAILED(hr))
	{
		return -13;
	}
#endif
	if (s->m_pNullRenderer!=NULL)
	{
		s->m_pGraph->AddFilter(s->m_pNullRenderer, L"Null Renderer");
	}

	hr = s->m_pBuilder->RenderStream(&PIN_CATEGORY_PREVIEW,
		&MEDIATYPE_Video, s->m_pDeviceFilter, s->m_pIDXFilter, s->m_pNullRenderer);
	if (FAILED(hr))
	{
		//hr = s->m_pBuilder->RenderStream(&PIN_CATEGORY_CAPTURE,
		//	&MEDIATYPE_Video, s->m_pDeviceFilter, s->m_pIDXFilter, s->m_pNullRenderer);
		if (FAILED(hr))
		{
			return -14;
		}
	}
	
	//m_pDXFilter->SetBufferSamples(TRUE);


	// Create the filter mapper and enumerate matching filters.
	IFilterMapper *pMapper = NULL;
	//IEnumMoniker *pEnum = NULL;
	IEnumRegFilters *pEnum = NULL;

	hr = CoCreateInstance(CLSID_FilterMapper,
		NULL, CLSCTX_INPROC, IID_IFilterMapper,
		(void **) &pMapper);

	if (FAILED(hr))
	{
		// Error handling omitted for clarity.
	}

	GUID arrayInTypes[2];
	arrayInTypes[0] = MEDIATYPE_Video;
	arrayInTypes[1] = MEDIASUBTYPE_dvsd;

	hr = pMapper->EnumMatchingFilters(
		&pEnum,
		MERIT_HW_COMPRESSOR, // Minimum merit.
		FALSE,               // At least one input pin?
		MEDIATYPE_NULL,
		MEDIASUBTYPE_NULL,
		FALSE,               // Must be a renderer?
		FALSE,               // At least one output pin?
		MEDIATYPE_NULL,
		MEDIASUBTYPE_NULL);

	// Enumerate the matching registered filters.
	//IMoniker *pMoniker;
	REGFILTER *pMoniker;
	ULONG cFetched;
	while (pEnum->Next(1, &pMoniker, &cFetched) == S_OK)
	{
		IPropertyBag *pPropBag = NULL;
#if 0
		hr = pMoniker->BindToStorage(0, 0, IID_IPropertyBag,
			(void **)&pPropBag);

		if (SUCCEEDED(hr))
		{
			// To retrieve the friendly name of the filter, do the following:
			VARIANT varName;
			VariantInit(&varName);
			hr = pPropBag->Read(L"FriendlyName", &varName, 0);
			if (SUCCEEDED(hr))
			{
				// Display the name in your UI somehow.
			}
			VariantClear(&varName);

			// To create an instance of the filter, do the following:
			IBaseFilter *pFilter;
			hr = pMoniker->BindToObject(NULL, NULL, IID_IBaseFilter, (void**)&pFilter);
			// Now add the filter to the graph. Remember to release pFilter later.

			// Clean up.
			pPropBag->Release();
		}
		pMoniker->Release();
#endif
	}

	// Clean up.
	pMapper->Release();
	pEnum->Release();




	s_callback = s;
	hr = s->m_pControl->Run();
	if(FAILED(hr))
	{
		return -15;
	}

	s->rotregvalue=1;
	s->pix_fmt = format;
	s->vsize.height = vsize->height;
	s->vsize.width = vsize->width;
	return 0;
}
Example #30
HRESULT CAudioPin::FillBuffer(IMediaSample *pSample)
{
  try
  {
    CDeMultiplexer& demux=m_pTsReaderFilter->GetDemultiplexer();
    CBuffer* buffer=NULL;
    bool earlyStall = false;
    
    //get file-duration and set m_rtDuration
    GetDuration(NULL);
    
    do
    {
      //Check if we need to wait for a while
      DWORD timeNow = GET_TIME_NOW();
      while (timeNow < (m_LastFillBuffTime + m_FillBuffSleepTime))
      {      
        Sleep(1);
        timeNow = GET_TIME_NOW();
      }
      m_LastFillBuffTime = timeNow;

      //did we reach the end of the file
      if (demux.EndOfFile())
      {
        int ACnt, VCnt;
        demux.GetBufferCounts(&ACnt, &VCnt);
        if (ACnt <= 0 && VCnt <= 0) //have we used all the data ?
        {
          LogDebug("audPin:set eof");
          m_FillBuffSleepTime = 5;
          CreateEmptySample(pSample);
          m_bInFillBuffer = false;
          return S_FALSE; //S_FALSE will notify the graph that end of file has been reached
        }
      }

      //if the filter is currently seeking to a new position
      //or this pin is currently seeking to a new position then
      //we dont try to read any packets, but simply return...
      if (m_pTsReaderFilter->IsSeeking() || m_pTsReaderFilter->IsStopping() || demux.m_bFlushRunning || !m_pTsReaderFilter->m_bStreamCompensated)
      {
        m_FillBuffSleepTime = 5;
        CreateEmptySample(pSample);
        m_bInFillBuffer = false;
        if (demux.m_bFlushRunning || !m_pTsReaderFilter->m_bStreamCompensated)
        {
          //Force discon on next good sample
          m_sampleCount = 0;
          m_bDiscontinuity=true;
        }
        if (!m_pTsReaderFilter->m_bStreamCompensated && (m_nNextAFT != 0))
        {
          ClearAverageFtime();
        }
        return NOERROR;
      }
      else
      {
        m_FillBuffSleepTime = 1;
        m_bInFillBuffer = true;
      }     
                  
      // Get next audio buffer from demultiplexer
      buffer=demux.GetAudio(earlyStall, m_rtStart);

      if (buffer==NULL)
      {
        m_FillBuffSleepTime = 5;
      }
      else
      {
        m_bPresentSample = true ;
        
        if (buffer->GetForcePMT())
        {
          m_bAddPMT = true;
        }
        if (buffer->GetDiscontinuity())
        {
          m_bDiscontinuity = true;
        }
        
        CRefTime RefTime,cRefTime ;
        bool HasTimestamp ;
        double fTime = 0.0;
        double clock = 0.0;
        double stallPoint = AUDIO_STALL_POINT;
        //check if it has a timestamp
        if ((HasTimestamp=buffer->MediaTime(RefTime)))
        {
          cRefTime = RefTime;
          cRefTime -= m_rtStart;
          //adjust the timestamp with the compensation
          cRefTime -= m_pTsReaderFilter->GetCompensation();
          
          //Check if total compensation offset is more than +/-10ms
          if (abs(m_pTsReaderFilter->GetTotalDeltaComp()) > 100000)
          {
            if (!m_bDisableSlowPlayDiscontinuity)
            {
              //Force downstream filters to resync by setting discontinuity flag
              pSample->SetDiscontinuity(TRUE);
            }
            m_pTsReaderFilter->ClearTotalDeltaComp();
          }

          REFERENCE_TIME RefClock = 0;
          m_pTsReaderFilter->GetMediaPosition(&RefClock) ;
          clock = (double)(RefClock-m_rtStart.m_time)/10000000.0 ;
          fTime = ((double)cRefTime.m_time/10000000.0) - clock ;
          
          //Calculate a mean 'fTime' value using 'raw' fTime data
          CalcAverageFtime(fTime);
          if (timeNow < (m_pTsReaderFilter->m_lastPauseRun + (30*1000)))
          {
            //do this for 30s after start of play, a flush or pause
            m_fAFTMeanRef = m_fAFTMean;
          }
          
          //Add compensation time for external downstream audio delay
          //to stop samples becoming 'late' (note: this does NOT change the actual sample timestamps)
          fTime -= m_fAFTMeanRef;  //remove the 'mean' offset
          fTime += ((AUDIO_STALL_POINT/2.0) + 0.2); //re-centre the timing                 

          //Discard late samples at start of play,
          //and samples outside a sensible timing window during play 
          //(helps with signal corruption recovery)
          cRefTime -= m_pTsReaderFilter->m_ClockOnStart.m_time;

          if (fTime < -2.0)
          {                          
            if ((m_dRateSeeking == 1.0) && (m_pTsReaderFilter->State() == State_Running) && (clock > 8.0) && !demux.m_bFlushDelegated)
            { 
              //Very late - request internal flush and re-sync to stream
              demux.DelegatedFlush(false, false);
              LogDebug("audPin : Audio to render very late, flushing") ;
            }
          }
          
          if ((cRefTime.m_time >= PRESENT_DELAY) && 
              (fTime > ((cRefTime.m_time >= FS_TIM_LIM) ? -0.3 : -0.5)) && (fTime < 2.5))
          {
            if ((fTime > stallPoint) && (m_sampleCount > 2))
            {
              //Too early - stall to avoid over-filling of audio decode/renderer buffers,
              //but don't enable at start of play to make sure graph starts properly
              m_FillBuffSleepTime = 10;
              buffer = NULL;
              earlyStall = true;
              continue;
            }           
          }
          else //Don't drop samples normally - it upsets the rate matching in the audio renderer
          {
            // Sample is too late.
            m_bPresentSample = false ;
          }
          cRefTime += m_pTsReaderFilter->m_ClockOnStart.m_time;         
        }

        if (m_bPresentSample && (m_dRateSeeking == 1.0) && (buffer->Length() > 0))
        {
          //do we need to set the discontinuity flag?
          if (m_bDiscontinuity)
          {
            //if so, set it
            pSample->SetDiscontinuity(TRUE);
            
            LogDebug("audPin: Set discontinuity L:%d B:%d fTime:%03.3f SampCnt:%d", m_bDiscontinuity, buffer->GetDiscontinuity(), (float)fTime, m_sampleCount);
            m_bDiscontinuity=FALSE;
          }

          if (m_bAddPMT && !m_pTsReaderFilter->m_bDisableAddPMT && !m_bPinNoAddPMT)
          {
            //Add MediaType info to sample
            CMediaType mt; 
            int audioIndex = 0;
            demux.GetAudioStream(audioIndex);
            demux.GetAudioStreamType(audioIndex, mt, m_iPosition);
            pSample->SetMediaType(&mt); 
            SetMediaType(&mt);               
            WAVEFORMATEX* wfe = (WAVEFORMATEX*)mt.Format();         
            LogDebug("audPin: Add pmt, fTime:%03.3f SampCnt:%d, Ch:%d, Sr:%d", (float)fTime, m_sampleCount, wfe->nChannels, wfe->nSamplesPerSec);
            m_bAddPMT = false; //Only add once
          }   

          if (HasTimestamp)
          {
            //now we have the final timestamp, set timestamp in sample
            REFERENCE_TIME refTime=(REFERENCE_TIME)cRefTime;
            refTime = (REFERENCE_TIME)((double)refTime/m_dRateSeeking);
            refTime += m_pTsReaderFilter->m_regAudioDelay; //add offset (to produce delay relative to video)

            pSample->SetSyncPoint(TRUE);
            pSample->SetTime(&refTime,&refTime);
            
            if (m_pTsReaderFilter->m_ShowBufferAudio || fTime < 0.02 || (m_sampleCount < 3))
            {
              int cntA, cntV;
              CRefTime firstAudio, lastAudio;
              CRefTime firstVideo, lastVideo, zeroVideo;
              cntA = demux.GetAudioBufferPts(firstAudio, lastAudio); 
              cntV = demux.GetVideoBufferPts(firstVideo, lastVideo, zeroVideo);
              
              LogDebug("Aud/Ref : %03.3f, Compensated = %03.3f ( %0.3f A/V buffers=%02d/%02d), Clk : %f, SampCnt %d, Sleep %d ms, stallPt %03.3f", (float)RefTime.Millisecs()/1000.0f, (float)cRefTime.Millisecs()/1000.0f, fTime,cntA,cntV, clock, m_sampleCount, m_FillBuffSleepTime, (float)stallPoint);
            }
            if (m_pTsReaderFilter->m_ShowBufferAudio) m_pTsReaderFilter->m_ShowBufferAudio--;
            // CalcAverageFtime(fTime);
              
            if (((float)cRefTime.Millisecs()/1000.0f) > AUDIO_READY_POINT)
            {
              m_pTsReaderFilter->m_audioReady = true;
            }
          }
          else
          {
            //buffer has no timestamp
            pSample->SetTime(NULL,NULL);
            pSample->SetSyncPoint(FALSE);
          }

          //copy buffer in sample
          BYTE* pSampleBuffer;
          pSample->SetActualDataLength(buffer->Length());
          pSample->GetPointer(&pSampleBuffer);
          memcpy(pSampleBuffer,buffer->Data(),buffer->Length());
          //delete the buffer and return
          delete buffer;
          demux.EraseAudioBuff();
        }
        else
        { // Buffer was not displayed because it was out of date, search for next.
          delete buffer;
          demux.EraseAudioBuff();
          buffer=NULL ;
          m_FillBuffSleepTime = (m_dRateSeeking == 1.0) ? 1 : 2;
          m_bDiscontinuity = TRUE; //Next good sample will be discontinuous
        }
      }      
      earlyStall = false;
    } while (buffer==NULL);

    m_bInFillBuffer = false;
    return NOERROR;
  }

  // Should we return something other than NOERROR when hitting an exception?
  catch(int e)
  {
    LogDebug("audPin:fillbuffer exception %d", e);
  }
  catch(...)
  {
    LogDebug("audPin:fillbuffer exception ...");
  }
  m_FillBuffSleepTime = 5;
  CreateEmptySample(pSample);
  m_bDiscontinuity = TRUE; //Next good sample will be discontinuous  
  m_bInFillBuffer = false; 
  return NOERROR;
}