Example #1
HRESULT CStreamParser::ParsePlanarPCM(Packet *pPacket)
{
  CMediaType mt = m_pPin->GetActiveMediaType();

  WORD nChannels = 0, nBPS = 0, nBlockAlign = 0;
  audioFormatTypeHandler(mt.Format(), mt.FormatType(), nullptr, &nChannels, &nBPS, &nBlockAlign, nullptr);

  // Mono needs no special handling
  if (nChannels == 1)
    return Queue(pPacket);

  Packet *out = new Packet();
  out->CopyProperties(pPacket);
  out->SetDataSize(pPacket->GetDataSize());

  int nBytesPerChannel = nBPS / 8;
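  // Note: despite its name, nAudioBlocks below is the size of one channel
  // plane in bytes; planar input stores all samples of channel 0, then all
  // samples of channel 1, and so on.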
  int nAudioBlocks = pPacket->GetDataSize() / nChannels;
  BYTE *out_data = out->GetData();
  const BYTE *in_data = pPacket->GetData();

  for (int i = 0; i < nAudioBlocks; i += nBytesPerChannel) {
    // interleave the channels into audio blocks
    for (int c = 0; c < nChannels; c++) {
      memcpy(out_data + (c * nBytesPerChannel), in_data + (nAudioBlocks * c), nBytesPerChannel);
    }
    // Skip to the next output block
    out_data += nChannels * nBytesPerChannel;

    // skip to the next input sample
    in_data += nBytesPerChannel;
  }

  // The planar input packet has been fully copied and would otherwise leak
  delete pPacket;
  return Queue(out);
}
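To make the indexing concrete: for 16-bit stereo, the planar input L0 L1 L2 R0 R1 R2 becomes the interleaved output L0 R0 L1 R1 L2 R2. The same loop as a minimal standalone sketch (a hypothetical helper, not part of the filter):

static void InterleavePlanar(BYTE *out, const BYTE *in, int planeSize, int channels, int bytesPerSample)
{
  // walk one channel plane sample by sample
  for (int i = 0; i < planeSize; i += bytesPerSample) {
    // gather the matching sample from every plane into one interleaved block
    for (int c = 0; c < channels; c++)
      memcpy(out + c * bytesPerSample, in + c * planeSize, bytesPerSample);
    out += channels * bytesPerSample; // next output block
    in += bytesPerSample;             // next sample within plane 0
  }
}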
Example #2
HRESULT CLAVAudio::UpdateBitstreamContext()
{
  if (!m_pInput || !m_pInput->IsConnected())
    return E_UNEXPECTED;

  BOOL bBitstream = IsBitstreaming(m_nCodecId);
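  // Re-initialize the codec whenever the requested bitstreaming state and the
  // presence of the bitstreaming context disagree (i.e. bitstreaming was toggled)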
  if ((bBitstream && !m_avBSContext) || (!bBitstream && m_avBSContext)) {
    CMediaType mt = m_pInput->CurrentMediaType();

    const void *format = mt.Format();
    GUID format_type = mt.formattype;
    DWORD formatlen = mt.cbFormat;

    // Override the format type
    if (mt.subtype == MEDIASUBTYPE_FFMPEG_AUDIO && format_type == FORMAT_WaveFormatExFFMPEG) {
      WAVEFORMATEXFFMPEG *wfexff = (WAVEFORMATEXFFMPEG *)mt.Format();
      format = &wfexff->wfex;
      format_type = FORMAT_WaveFormatEx;
      formatlen -= sizeof(WAVEFORMATEXFFMPEG) - sizeof(WAVEFORMATEX);
    }

    ffmpeg_init(m_nCodecId, format, format_type, formatlen);
    m_bQueueResync = TRUE;
  }

  // Configure DTS-HD setting
  if(m_avBSContext) {
    if (m_settings.bBitstream[Bitstream_DTSHD] && m_settings.DTSHDFraming && !m_bForceDTSCore) {
      m_bDTSHD = TRUE;
      av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", LAV_BITSTREAM_DTS_HD_RATE, 0);
    } else {
      m_bDTSHD = FALSE; // Force auto-detection
      av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", 0, 0);
    }
  }

  return S_OK;
}
Example #3
bool CDSMSplitterFile::Read(__int64 len, BYTE& id, CMediaType& mt)
{
	id = (BYTE)BitRead(8);
	ByteRead((BYTE*)&mt.majortype, sizeof(mt.majortype));
	ByteRead((BYTE*)&mt.subtype, sizeof(mt.subtype));
	mt.bFixedSizeSamples = (BOOL)BitRead(1);
	mt.bTemporalCompression = (BOOL)BitRead(1);
	mt.lSampleSize = (ULONG)BitRead(30);
	ByteRead((BYTE*)&mt.formattype, sizeof(mt.formattype));
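	// consumed so far: 1 id byte + 4 bytes of packed bits (1+1+30) + three 16-byte GUIDs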
	len -= 5 + sizeof(GUID)*3;
	ASSERT(len >= 0);
	if(len > 0) {mt.AllocFormatBuffer((LONG)len); ByteRead(mt.Format(), mt.FormatLength());}
	else mt.ResetFormatBuffer();	
	return true;
}
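For orientation, a hypothetical write-side counterpart (BitWrite/ByteWrite are assumed mirror-image helpers, not shown in this excerpt) would emit the exact field order and widths read above:

bool WriteMediaType(BYTE id, const CMediaType& mt)
{
	BitWrite(id, 8);
	ByteWrite((const BYTE*)&mt.majortype, sizeof(mt.majortype));
	ByteWrite((const BYTE*)&mt.subtype, sizeof(mt.subtype));
	BitWrite(mt.bFixedSizeSamples ? 1 : 0, 1);
	BitWrite(mt.bTemporalCompression ? 1 : 0, 1);
	BitWrite(mt.lSampleSize, 30);
	ByteWrite((const BYTE*)&mt.formattype, sizeof(mt.formattype));
	if(mt.FormatLength() > 0) ByteWrite(mt.Format(), mt.FormatLength());
	return true;
}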
Example #4
STDMETHODIMP GetFormat(CPushPin *pPin, int iIndex, AM_MEDIA_TYPE **ppmt)
{
	CheckPointer(ppmt, E_POINTER);

	HRESULT hr;
	CMediaType mt;

	hr = pPin->GetMediaType(iIndex, &mt);
	if(FAILED(hr)) return hr;

	*ppmt = static_cast<AM_MEDIA_TYPE*>(CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)));
	if(*ppmt == NULL) return E_OUTOFMEMORY;
	**ppmt = mt; // shallow copy, then deep-copy the format block
	(*ppmt)->pbFormat = static_cast<BYTE*>(CoTaskMemAlloc((*ppmt)->cbFormat));
	if((*ppmt)->pbFormat == NULL) { CoTaskMemFree(*ppmt); *ppmt = NULL; return E_OUTOFMEMORY; }
	memcpy((*ppmt)->pbFormat, mt.Format(), (*ppmt)->cbFormat);

	return S_OK;
}
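For reference, the DirectShow base classes ship CreateMediaType() (mtype.h), which performs the same allocate-and-deep-copy in one call; the body above could be reduced to a sketch like:

	*ppmt = CreateMediaType(&mt);
	return *ppmt ? S_OK : E_OUTOFMEMORY;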
Example #5
HRESULT CWaveOutRenderer::CompleteConnect(IPin* pin)
{
    if (!pin)
        return E_POINTER;

    HRESULT r = CBaseRenderer::CompleteConnect(pin);
    if (FAILED(r))
        return r;

    if (!m_outPut)
        m_outPut.reset(new CWaveOutput(0));

    CMediaType mt;
    HRESULT hrType = pin->ConnectionMediaType(&mt);
    // reject connections without a usable WAVEFORMATEX format block
    if (FAILED(hrType) || mt.formattype != FORMAT_WaveFormatEx || !mt.Format())
        return VFW_E_TYPE_NOT_ACCEPTED;

    WAVEFORMATEX* format = reinterpret_cast<WAVEFORMATEX*>(mt.Format());
    return m_outPut->Init(format) ? S_OK : E_FAIL;
}
Example #6
XnVideoStream::Mode XnVideoStream::MediaTypeToMode(const CMediaType& mediaType)
{
	Mode result = {0};

	if (*mediaType.Type() != MEDIATYPE_Video)   // we only output video
	{                                                  
		xnLogError(XN_MASK_FILTER, "bad type");
		return result;
	}

	// Check for the subtypes we support
	const GUID *SubType = mediaType.Subtype();

	if (SubType && *SubType != GUID_NULL) 
	{
		if (*SubType == MEDIASUBTYPE_RGB24)
		{
			result.Format = XN_PIXEL_FORMAT_RGB24;
		}
		else if (*SubType == MEDIASUBTYPE_MJPG)
		{
			result.Format = XN_PIXEL_FORMAT_MJPEG;
		}
		else
		{
			xnLogVerbose(XN_MASK_FILTER, "bad subtype");
			return result;
		}
	}

	// Get the format area of the media type
	VIDEOINFO *pvi = (VIDEOINFO*)mediaType.Format();
	if (pvi == NULL || pvi->AvgTimePerFrame == 0)
	{
		// no format block, or a zero frame duration that would divide by zero below
		return result;
	}

	result.OutputMode.nFPS = (XnUInt32)(10000000ULL / pvi->AvgTimePerFrame);
	result.OutputMode.nXRes = pvi->bmiHeader.biWidth;
	result.OutputMode.nYRes = pvi->bmiHeader.biHeight;
	return result;
}
Example #7
HRESULT CLAVAudio::DeliverBitstream(AVCodecID codec, const BYTE *buffer, DWORD dwSize, DWORD dwFrameSize, REFERENCE_TIME rtStartInput, REFERENCE_TIME rtStopInput)
{
  HRESULT hr = S_OK;

  CMediaType mt = CreateBitstreamMediaType(codec, m_bsParser.m_dwSampleRate);
  WAVEFORMATEX* wfe = (WAVEFORMATEX*)mt.Format();

  if(FAILED(hr = ReconnectOutput(dwSize, mt))) {
    return hr;
  }

  IMediaSample *pOut;
  BYTE *pDataOut = NULL;
  if(FAILED(GetDeliveryBuffer(&pOut, &pDataOut))) {
    return E_FAIL;
  }

  REFERENCE_TIME rtStart = m_rtStart, rtStop = AV_NOPTS_VALUE;
  // TrueHD timings
  // Since the SPDIF muxer takes 24 frames and puts them into one IEC61937 frame, we use the cached timestamp from before.
  if (codec == AV_CODEC_ID_TRUEHD) {
    // long-term cache is valid
    if (m_rtBitstreamCache != AV_NOPTS_VALUE)
      rtStart = m_rtBitstreamCache;
    // Duration - stop time of the current frame is valid
    if (rtStopInput != AV_NOPTS_VALUE)
      rtStop = rtStopInput;
    else // no stop time for the current frame; assume the typical TrueHD frame duration, 24 * 0.83333ms = 20ms
      rtStop = rtStart + (REFERENCE_TIME)(200000 / m_dRate);
    m_rtStart = rtStop;
  } else {
    double dDuration = DBL_SECOND_MULT * (double)m_bsParser.m_dwSamples / m_bsParser.m_dwSampleRate / m_dRate;
    m_dStartOffset += fmod(dDuration, 1.0);

    // Add rounded duration to rtStop
    rtStop = rtStart + (REFERENCE_TIME)(dDuration + 0.5);
    // and unrounded to m_rtStart..
    m_rtStart += (REFERENCE_TIME)dDuration;
    // and accumulate error..
    if (m_dStartOffset > 0.5) {
      m_rtStart++;
      m_dStartOffset -= 1.0;
    }
  }

  REFERENCE_TIME rtJitter = rtStart - m_rtBitstreamCache;
  m_faJitter.Sample(rtJitter);

  REFERENCE_TIME rtJitterMin = m_faJitter.AbsMinimum();
  if (m_settings.AutoAVSync && abs(rtJitterMin) > m_JitterLimit && m_bHasVideo) {
    DbgLog((LOG_TRACE, 10, L"::Deliver(): corrected A/V sync by %I64d", rtJitterMin));
    m_rtStart -= rtJitterMin;
    m_faJitter.OffsetValues(-rtJitterMin);
    m_bDiscontinuity = TRUE;
  }

#ifdef DEBUG
  DbgLog((LOG_CUSTOM5, 20, L"Bitstream Delivery, rtStart(calc): %I64d, rtStart(input): %I64d, duration: %I64d, diff: %I64d", rtStart, m_rtBitstreamCache, rtStop-rtStart, rtJitter));

  if (m_faJitter.CurrentSample() == 0) {
    DbgLog((LOG_TRACE, 20, L"Jitter Stats: min: %I64d - max: %I64d - avg: %I64d", rtJitterMin, m_faJitter.AbsMaximum(), m_faJitter.Average()));
  }
#endif
  m_rtBitstreamCache = AV_NOPTS_VALUE;

  if(m_settings.AudioDelayEnabled) {
    REFERENCE_TIME rtDelay = (REFERENCE_TIME)((m_settings.AudioDelay * 10000i64) / m_dRate);
    rtStart += rtDelay;
    rtStop += rtDelay;
  }

  pOut->SetTime(&rtStart, &rtStop);
  pOut->SetMediaTime(NULL, NULL);

  pOut->SetPreroll(FALSE);
  pOut->SetDiscontinuity(m_bDiscontinuity);
  m_bDiscontinuity = FALSE;
  pOut->SetSyncPoint(TRUE);

  pOut->SetActualDataLength(dwSize);

  memcpy(pDataOut, buffer, dwSize);

  if(hr == S_OK) {
    hr = m_pOutput->GetConnected()->QueryAccept(&mt);
    if (hr == S_FALSE && m_nCodecId == AV_CODEC_ID_DTS && m_bDTSHD) {
      DbgLog((LOG_TRACE, 1, L"DTS-HD Media Type failed with %0#.8x, trying fallback to DTS core", hr));
      m_bForceDTSCore = TRUE;
      UpdateBitstreamContext();
      goto done;
    }
    DbgLog((LOG_TRACE, 1, L"Sending new Media Type (QueryAccept: %0#.8x)", hr));
    m_pOutput->SetMediaType(&mt);
    pOut->SetMediaType(&mt);
  }

  hr = m_pOutput->Deliver(pOut);
  if (FAILED(hr)) {
    DbgLog((LOG_ERROR, 10, L"::DeliverBitstream failed with code: %0#.8x", hr));
  }

done:
  SafeRelease(&pOut);
  return hr;
}
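The A/V sync correction above leans on the floating-average helper m_faJitter, which isn't shown. A minimal sketch of such a helper, assuming a fixed-size ring of samples and reading "AbsMinimum" as the signed sample closest to zero (the real class may differ):

#include <cstdlib>
#include <vector>

// Hypothetical stand-in for m_faJitter: a floating average over the last N
// jitter samples (values correspond to REFERENCE_TIME, i.e. 100ns units).
class CFloatingAverage
{
  std::vector<long long> m_samples;
  size_t m_pos;
public:
  explicit CFloatingAverage(size_t count) : m_samples(count, 0), m_pos(0) {}

  void Sample(long long v) { m_samples[m_pos] = v; m_pos = (m_pos + 1) % m_samples.size(); }
  size_t CurrentSample() const { return m_pos; }

  // signed sample closest to zero
  long long AbsMinimum() const {
    long long m = m_samples[0];
    for (long long v : m_samples) if (std::llabs(v) < std::llabs(m)) m = v;
    return m;
  }
  // signed sample farthest from zero
  long long AbsMaximum() const {
    long long m = m_samples[0];
    for (long long v : m_samples) if (std::llabs(v) > std::llabs(m)) m = v;
    return m;
  }
  long long Average() const {
    long long sum = 0;
    for (long long v : m_samples) sum += v;
    return sum / (long long)m_samples.size();
  }
  // shift every stored sample, used after a sync correction
  void OffsetValues(long long offset) { for (size_t i = 0; i < m_samples.size(); i++) m_samples[i] += offset; }
};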
Example #8
HRESULT CAudioPin::FillBuffer(IMediaSample *pSample)
{
  try
  {
    CDeMultiplexer& demux=m_pTsReaderFilter->GetDemultiplexer();
    CBuffer* buffer=NULL;
    bool earlyStall = false;
    
    //get file-duration and set m_rtDuration
    GetDuration(NULL);
    
    do
    {
      //Check if we need to wait for a while
      DWORD timeNow = GET_TIME_NOW();
      while (timeNow < (m_LastFillBuffTime + m_FillBuffSleepTime))
      {      
        Sleep(1);
        timeNow = GET_TIME_NOW();
      }
      m_LastFillBuffTime = timeNow;

      //did we reach the end of the file
      if (demux.EndOfFile())
      {
        int ACnt, VCnt;
        demux.GetBufferCounts(&ACnt, &VCnt);
        if (ACnt <= 0 && VCnt <= 0) //have we used all the data ?
        {
          LogDebug("audPin:set eof");
          m_FillBuffSleepTime = 5;
          CreateEmptySample(pSample);
          m_bInFillBuffer = false;
          return S_FALSE; //S_FALSE will notify the graph that end of file has been reached
        }
      }

      //if the filter is currently seeking to a new position
      //or this pin is currently seeking to a new position then
      //we dont try to read any packets, but simply return...
      if (m_pTsReaderFilter->IsSeeking() || m_pTsReaderFilter->IsStopping() || demux.m_bFlushRunning || !m_pTsReaderFilter->m_bStreamCompensated)
      {
        m_FillBuffSleepTime = 5;
        CreateEmptySample(pSample);
        m_bInFillBuffer = false;
        if (demux.m_bFlushRunning || !m_pTsReaderFilter->m_bStreamCompensated)
        {
          //Force discon on next good sample
          m_sampleCount = 0;
          m_bDiscontinuity=true;
        }
        if (!m_pTsReaderFilter->m_bStreamCompensated && (m_nNextAFT != 0))
        {
          ClearAverageFtime();
        }
        return NOERROR;
      }
      else
      {
        m_FillBuffSleepTime = 1;
        m_bInFillBuffer = true;
      }     
                  
      // Get next audio buffer from demultiplexer
      buffer=demux.GetAudio(earlyStall, m_rtStart);

      if (buffer==NULL)
      {
        m_FillBuffSleepTime = 5;
      }
      else
      {
        m_bPresentSample = true ;
        
        if (buffer->GetForcePMT())
        {
          m_bAddPMT = true;
        }
        if (buffer->GetDiscontinuity())
        {
          m_bDiscontinuity = true;
        }
        
        CRefTime RefTime,cRefTime ;
        bool HasTimestamp ;
        double fTime = 0.0;
        double clock = 0.0;
        double stallPoint = AUDIO_STALL_POINT;
        //check if it has a timestamp
        if ((HasTimestamp=buffer->MediaTime(RefTime)))
        {
          cRefTime = RefTime ;
          cRefTime -= m_rtStart ;
          //adjust the timestamp with the compensation
          cRefTime -= m_pTsReaderFilter->GetCompensation() ;
          
          //Check if total compensation offset is more than +/-10ms
          if (abs(m_pTsReaderFilter->GetTotalDeltaComp()) > 100000)
          {
            if (!m_bDisableSlowPlayDiscontinuity)
            {
              //Force downstream filters to resync by setting discontinuity flag
              pSample->SetDiscontinuity(TRUE);
            }
            m_pTsReaderFilter->ClearTotalDeltaComp();
          }

          REFERENCE_TIME RefClock = 0;
          m_pTsReaderFilter->GetMediaPosition(&RefClock) ;
          clock = (double)(RefClock-m_rtStart.m_time)/10000000.0 ;
          fTime = ((double)cRefTime.m_time/10000000.0) - clock ;
          
          //Calculate a mean 'fTime' value using 'raw' fTime data
          CalcAverageFtime(fTime);
          if (timeNow < (m_pTsReaderFilter->m_lastPauseRun + (30*1000)))
          {
            //do this for 30s after start of play, a flush or pause
            m_fAFTMeanRef = m_fAFTMean;
          }
          
          //Add compensation time for external downstream audio delay
          //to stop samples becoming 'late' (note: this does NOT change the actual sample timestamps)
          fTime -= m_fAFTMeanRef;  //remove the 'mean' offset
          fTime += ((AUDIO_STALL_POINT/2.0) + 0.2); //re-centre the timing                 

          //Discard late samples at start of play,
          //and samples outside a sensible timing window during play 
          //(helps with signal corruption recovery)
          cRefTime -= m_pTsReaderFilter->m_ClockOnStart.m_time;

          if (fTime < -2.0)
          {                          
            if ((m_dRateSeeking == 1.0) && (m_pTsReaderFilter->State() == State_Running) && (clock > 8.0) && !demux.m_bFlushDelegated)
            { 
              //Very late - request internal flush and re-sync to stream
              demux.DelegatedFlush(false, false);
              LogDebug("audPin : Audio to render very late, flushing") ;
            }
          }
          
          if ((cRefTime.m_time >= PRESENT_DELAY) && 
              (fTime > ((cRefTime.m_time >= FS_TIM_LIM) ? -0.3 : -0.5)) && (fTime < 2.5))
          {
            if ((fTime > stallPoint) && (m_sampleCount > 2))
            {
              //Too early - stall to avoid over-filling of audio decode/renderer buffers,
              //but don't enable at start of play to make sure graph starts properly
              m_FillBuffSleepTime = 10;
              buffer = NULL;
              earlyStall = true;
              continue;
            }           
          }
          else //Don't drop samples normally - it upsets the rate matching in the audio renderer
          {
            // Sample is too late.
            m_bPresentSample = false ;
          }
          cRefTime += m_pTsReaderFilter->m_ClockOnStart.m_time;         
        }

        if (m_bPresentSample && (m_dRateSeeking == 1.0) && (buffer->Length() > 0))
        {
          //do we need to set the discontinuity flag?
          if (m_bDiscontinuity)
          {
            //if so, set it
            pSample->SetDiscontinuity(TRUE);
            
            LogDebug("audPin: Set discontinuity L:%d B:%d fTime:%03.3f SampCnt:%d", m_bDiscontinuity, buffer->GetDiscontinuity(), (float)fTime, m_sampleCount);
            m_bDiscontinuity=FALSE;
          }

          if (m_bAddPMT && !m_pTsReaderFilter->m_bDisableAddPMT && !m_bPinNoAddPMT)
          {
            //Add MediaType info to sample
            CMediaType mt; 
            int audioIndex = 0;
            demux.GetAudioStream(audioIndex);
            demux.GetAudioStreamType(audioIndex, mt, m_iPosition);
            pSample->SetMediaType(&mt); 
            SetMediaType(&mt);               
            WAVEFORMATEX* wfe = (WAVEFORMATEX*)mt.Format();         
            LogDebug("audPin: Add pmt, fTime:%03.3f SampCnt:%d, Ch:%d, Sr:%d", (float)fTime, m_sampleCount, wfe->nChannels, wfe->nSamplesPerSec);
            m_bAddPMT = false; //Only add once
          }   

          if (HasTimestamp)
          {
            //now we have the final timestamp, set timestamp in sample
            REFERENCE_TIME refTime=(REFERENCE_TIME)cRefTime;
            refTime = (REFERENCE_TIME)((double)refTime/m_dRateSeeking);
            refTime += m_pTsReaderFilter->m_regAudioDelay; //add offset (to produce delay relative to video)

            pSample->SetSyncPoint(TRUE);
            pSample->SetTime(&refTime,&refTime);
            
            if (m_pTsReaderFilter->m_ShowBufferAudio || fTime < 0.02 || (m_sampleCount < 3))
            {
              int cntA, cntV;
              CRefTime firstAudio, lastAudio;
              CRefTime firstVideo, lastVideo, zeroVideo;
              cntA = demux.GetAudioBufferPts(firstAudio, lastAudio); 
              cntV = demux.GetVideoBufferPts(firstVideo, lastVideo, zeroVideo);
              
              LogDebug("Aud/Ref : %03.3f, Compensated = %03.3f ( %0.3f A/V buffers=%02d/%02d), Clk : %f, SampCnt %d, Sleep %d ms, stallPt %03.3f", (float)RefTime.Millisecs()/1000.0f, (float)cRefTime.Millisecs()/1000.0f, fTime,cntA,cntV, clock, m_sampleCount, m_FillBuffSleepTime, (float)stallPoint);
            }
            if (m_pTsReaderFilter->m_ShowBufferAudio) m_pTsReaderFilter->m_ShowBufferAudio--;
            // CalcAverageFtime(fTime);
              
            if (((float)cRefTime.Millisecs()/1000.0f) > AUDIO_READY_POINT)
            {
              m_pTsReaderFilter->m_audioReady = true;
            }
          }
          else
          {
            //buffer has no timestamp
            pSample->SetTime(NULL,NULL);
            pSample->SetSyncPoint(FALSE);
          }

          //copy buffer in sample
          BYTE* pSampleBuffer;
          pSample->SetActualDataLength(buffer->Length());
          pSample->GetPointer(&pSampleBuffer);
          memcpy(pSampleBuffer,buffer->Data(),buffer->Length());
          //delete the buffer and return
          delete buffer;
          demux.EraseAudioBuff();
        }
        else
        { // Buffer was not displayed because it was out of date, search for next.
          delete buffer;
          demux.EraseAudioBuff();
          buffer=NULL ;
          m_FillBuffSleepTime = (m_dRateSeeking == 1.0) ? 1 : 2;
          m_bDiscontinuity = TRUE; //Next good sample will be discontinuous
        }
      }      
      earlyStall = false;
    } while (buffer==NULL);

    m_bInFillBuffer = false;
    return NOERROR;
  }

  // Should we return something other than NOERROR when hitting an exception?
  catch(int e)
  {
    LogDebug("audPin:fillbuffer exception %d", e);
  }
  catch(...)
  {
    LogDebug("audPin:fillbuffer exception ...");
  }
  m_FillBuffSleepTime = 5;
  CreateEmptySample(pSample);
  m_bDiscontinuity = TRUE; //Next good sample will be discontinuous  
  m_bInFillBuffer = false; 
  return NOERROR;
}
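Several bail-out paths above rely on CreateEmptySample() to keep the graph pulling without rendering stale data. A plausible sketch of what it does (the real TsReader implementation may differ):

void CAudioPin::CreateEmptySample(IMediaSample *pSample)
{
  if (pSample)
  {
    pSample->SetTime(NULL, NULL);       // no timestamp
    pSample->SetActualDataLength(0);    // zero-length payload
    pSample->SetSyncPoint(FALSE);
    pSample->SetDiscontinuity(TRUE);    // flag the gap to downstream filters
  }
}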
Example #9
bool mt2spk(CMediaType mt, Speakers &spk)
{
  const GUID type = *mt.Type();
  const GUID subtype = *mt.Subtype();
  const GUID formattype = *mt.FormatType();

  WAVEFORMAT *wf = 0;
  size_t wf_size = 0;
  int sample_rate = 0;

  if ((formattype == FORMAT_WaveFormatEx) &&
      (mt.FormatLength() > sizeof(WAVEFORMAT)))
  {
    wf = (WAVEFORMAT *)mt.Format();
    wf_size = mt.FormatLength();
    sample_rate = wf->nSamplesPerSec;
  }

  /////////////////////////////////////////////////////////
  // HD LPCM

  if (type == MEDIATYPE_Audio &&
      subtype == MEDIASUBTYPE_HDMV_LPCM_AUDIO &&
      wf && wf->wFormatTag == 1)
  {
    spk = wf2spk(wf, wf_size);
    switch (spk.format)
    {
      case FORMAT_PCM16: spk.format = FORMAT_PCM16_BE; return true;
      case FORMAT_PCM24: spk.format = FORMAT_PCM24_BE; return true;
      case FORMAT_PCM32: spk.format = FORMAT_PCM32_BE; return true;
      default: return false;
    }
  }

  /////////////////////////////////////////////////////////
  // Compressed formats

  if (type == MEDIATYPE_MPEG2_PES ||
      type == MEDIATYPE_MPEG2_PACK ||
      type == MEDIATYPE_DVD_ENCRYPTED_PACK)
    if (subtype == MEDIASUBTYPE_DOLBY_AC3 ||
        subtype == MEDIASUBTYPE_DTS ||
        subtype == MEDIASUBTYPE_MPEG1AudioPayload ||
        subtype == MEDIASUBTYPE_MPEG2_AUDIO ||
        subtype == MEDIASUBTYPE_DVD_LPCM_AUDIO)
    {
      spk = Speakers(FORMAT_PES, 0, sample_rate);
      return true;
    }

  if (subtype == MEDIASUBTYPE_DOLBY_AC3 || 
      subtype == MEDIASUBTYPE_AVI_AC3)
  {
    // It may be AC3 or EAC3
    spk = Speakers(FORMAT_DOLBY, 0, sample_rate);
    return true;
  }

  if (subtype == MEDIASUBTYPE_DOLBY_DDPLUS)
  {
    spk = Speakers(FORMAT_EAC3, 0, sample_rate);
    return true;
  }

  if (subtype == MEDIASUBTYPE_DOLBY_TRUEHD)
  {
    spk = Speakers(FORMAT_TRUEHD, 0, sample_rate);
    return true;
  }

  if (subtype == MEDIASUBTYPE_DTS || 
      subtype == MEDIASUBTYPE_DTS_HD ||
      subtype == MEDIASUBTYPE_AVI_DTS)
  {
    spk = Speakers(FORMAT_DTS, 0, sample_rate);
    return true;
  }

  if (subtype == MEDIASUBTYPE_MPEG1AudioPayload ||
      subtype == MEDIASUBTYPE_MPEG2_AUDIO)
  {
    spk = Speakers(FORMAT_MPA, 0, sample_rate);
    return true;
  }

  if (subtype == MEDIASUBTYPE_DOLBY_AC3_SPDIF)
  {
    spk = Speakers(FORMAT_SPDIF, 0, sample_rate);
    return true;
  }
/*
  if (subtype == MEDIASUBTYPE_Vorbis &&
      formattype == FORMAT_Vorbis && 
      mt.FormatLength() > sizeof(VORBISFORMAT))
  {
    VORBISFORMAT *format = (VORBISFORMAT *)mt.Format();
    spk = Speakers(FORMAT_VORBIS, 0, format->samplesPerSec);
    spk.set_format_data(mt.Format(), mt.FormatLength());
  }
*/
  if (subtype == MEDIASUBTYPE_Vorbis2 &&
      formattype == FORMAT_Vorbis2 &&
      mt.FormatLength() > sizeof(VORBISFORMAT2))
  {
    VORBISFORMAT2 *format = (VORBISFORMAT2 *)mt.Format();
    spk = Speakers(FORMAT_VORBIS, 0, format->samplesPerSec);
    spk.set_format_data(mt.Format(), mt.FormatLength());
    return true;
  }

  /////////////////////////////////////////////////////////
  // LPCM

  if (subtype == MEDIASUBTYPE_DVD_LPCM_AUDIO)
  {
    PCMWAVEFORMAT *pcmwf = wf_cast<PCMWAVEFORMAT>(wf, wf_size);
    if (!pcmwf) return false;

    int format, mode;
    switch (pcmwf->wBitsPerSample)
    {
      case 16: format = FORMAT_PCM16_BE; break;
      case 20: format = FORMAT_LPCM20; break;
      case 24: format = FORMAT_LPCM24; break;
      default: return false;
    }
    switch (pcmwf->wf.nChannels)
    {
      case 1: mode = MODE_MONO; break;
      case 2: mode = MODE_STEREO; break;
      default: return false;
    }
    spk = Speakers(format, mode, sample_rate);
    return true;
  }

  /////////////////////////////////////////////////////////
  // General WAVEFORMAT conversion

  spk = Speakers();
  if (wf)
    spk = wf2spk(wf, wf_size);
  return !spk.is_unknown();
}
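The wf_cast<> helper used in the LPCM branch isn't shown; presumably it is a size-checked cast of the raw format block, along these lines:

// Hypothetical sketch: cast a WAVEFORMAT blob to a concrete structure only
// if the buffer is actually large enough to hold one.
template <class T>
T *wf_cast(WAVEFORMAT *wf, size_t size)
{
  if (!wf || size < sizeof(T))
    return 0;
  return (T *)wf;
}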
Example #10
HRESULT CMatroskaSplitterFilter::CreateOutputs(IAsyncReader* pAsyncReader)
{
	CheckPointer(pAsyncReader, E_POINTER);

	HRESULT hr = E_FAIL;

	m_pFile.Free();
	m_pTrackEntryMap.RemoveAll();
	m_pOrderedTrackArray.RemoveAll();

	CAtlArray<CMatroskaSplitterOutputPin*> pinOut;
	CAtlArray<TrackEntry*> pinOutTE;

	m_pFile.Attach(DNew CMatroskaFile(pAsyncReader, hr));
	if(!m_pFile) return E_OUTOFMEMORY;
	if(FAILED(hr)) {m_pFile.Free(); return hr;}

	m_rtNewStart = m_rtCurrent = 0;
	m_rtNewStop = m_rtStop = m_rtDuration = 0;

	int iVideo = 1, iAudio = 1, iSubtitle = 1;

	POSITION pos = m_pFile->m_segment.Tracks.GetHeadPosition();
	while(pos)
	{
		Track* pT = m_pFile->m_segment.Tracks.GetNext(pos);

		POSITION pos2 = pT->TrackEntries.GetHeadPosition();
		while(pos2)
		{
			TrackEntry* pTE = pT->TrackEntries.GetNext(pos2);

			bool isSub = false;

			if(!pTE->Expand(pTE->CodecPrivate, ContentEncoding::TracksPrivateData))
				continue;

			CStringA CodecID = pTE->CodecID.ToString();

			CStringW Name;
			Name.Format(L"Output %I64d", (UINT64)pTE->TrackNumber);

			CMediaType mt;
			CAtlArray<CMediaType> mts;

			mt.SetSampleSize(1);

			if(pTE->TrackType == TrackEntry::TypeVideo)
			{
				Name.Format(L"Video %d", iVideo++);

				mt.majortype = MEDIATYPE_Video;

				if(CodecID == "V_MS/VFW/FOURCC")
				{
					mt.formattype = FORMAT_VideoInfo;
					VIDEOINFOHEADER* pvih = (VIDEOINFOHEADER*)mt.AllocFormatBuffer(sizeof(VIDEOINFOHEADER) + pTE->CodecPrivate.GetCount() - sizeof(BITMAPINFOHEADER));
					memset(mt.Format(), 0, mt.FormatLength());
					memcpy(&pvih->bmiHeader, pTE->CodecPrivate.GetData(), pTE->CodecPrivate.GetCount());
					mt.subtype = FOURCCMap(pvih->bmiHeader.biCompression);
					switch(pvih->bmiHeader.biCompression)
					{
					case BI_RGB: case BI_BITFIELDS: mt.subtype = 
						pvih->bmiHeader.biBitCount == 1 ? MEDIASUBTYPE_RGB1 :
						pvih->bmiHeader.biBitCount == 4 ? MEDIASUBTYPE_RGB4 :
						pvih->bmiHeader.biBitCount == 8 ? MEDIASUBTYPE_RGB8 :
						pvih->bmiHeader.biBitCount == 16 ? MEDIASUBTYPE_RGB565 :
						pvih->bmiHeader.biBitCount == 24 ? MEDIASUBTYPE_RGB24 :
						pvih->bmiHeader.biBitCount == 32 ? MEDIASUBTYPE_ARGB32 :
						MEDIASUBTYPE_NULL;
						break;
//					case BI_RLE8: mt.subtype = MEDIASUBTYPE_RGB8; break;
//					case BI_RLE4: mt.subtype = MEDIASUBTYPE_RGB4; break;
					}
					mts.Add(mt);
				}
				else if(CodecID == "V_UNCOMPRESSED")
				{
				}
				else if(CodecID.Find("V_MPEG4/ISO/AVC") == 0 && pTE->CodecPrivate.GetCount() >= 6)
				{
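					// avcC extradata layout being parsed here (ISO/IEC 14496-15):
					//   byte 1: AVCProfileIndication (-> dwProfile), byte 3: AVCLevelIndication (-> dwLevel),
					//   byte 4: lengthSizeMinusOne in the low 2 bits (-> dwFlags = NALU length size),
					//   byte 5: SPS count in the low 5 bits, then length-prefixed (16-bit BE) SPS NALs,
					//   then one PPS count byte and length-prefixed PPS NALs.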
					BYTE sps = pTE->CodecPrivate[5] & 0x1f;

					std::vector<BYTE> avcC;
					for(int i = 0, j = pTE->CodecPrivate.GetCount(); i < j; i++)
						avcC.push_back(pTE->CodecPrivate[i]);

					std::vector<BYTE> sh;

					unsigned jj = 6;

					while (sps--) {
						if (jj + 2 > avcC.size())
							goto avcfail;
						unsigned spslen = ((unsigned)avcC[jj] << 8) | avcC[jj+1];
						if (jj + 2 + spslen > avcC.size())
							goto avcfail;
						unsigned cur = sh.size();
						sh.resize(cur + spslen + 2, 0);
						std::copy(avcC.begin() + jj, avcC.begin() + jj + 2 + spslen, sh.begin() + cur);
						jj += 2 + spslen;
					}

					if (jj + 1 > avcC.size())
						continue;

					unsigned pps = avcC[jj++];

					while (pps--) {
						if (jj + 2 > avcC.size())
							goto avcfail;
						unsigned ppslen = ((unsigned)avcC[jj] << 8) | avcC[jj+1];
						if (jj + 2 + ppslen > avcC.size())
							goto avcfail;
						unsigned cur = sh.size();
						sh.resize(cur + ppslen + 2, 0);
						std::copy(avcC.begin() + jj, avcC.begin() + jj + 2 + ppslen, sh.begin() + cur);
						jj += 2 + ppslen;
					}

					goto avcsuccess;
avcfail:
					continue;
avcsuccess:

					CAtlArray<BYTE> data;
					data.SetCount(sh.size());
					std::copy(sh.begin(), sh.end(), data.GetData());

					mt.subtype = FOURCCMap('1CVA');
					mt.formattype = FORMAT_MPEG2Video;
					MPEG2VIDEOINFO* pm2vi = (MPEG2VIDEOINFO*)mt.AllocFormatBuffer(FIELD_OFFSET(MPEG2VIDEOINFO, dwSequenceHeader) + data.GetCount());
					memset(mt.Format(), 0, mt.FormatLength());
					pm2vi->hdr.bmiHeader.biSize = sizeof(pm2vi->hdr.bmiHeader);
					pm2vi->hdr.bmiHeader.biWidth = (LONG)pTE->v.PixelWidth;
					pm2vi->hdr.bmiHeader.biHeight = (LONG)pTE->v.PixelHeight;
					pm2vi->hdr.bmiHeader.biCompression = '1CVA';
					pm2vi->hdr.bmiHeader.biPlanes = 1;
					pm2vi->hdr.bmiHeader.biBitCount = 24;
					pm2vi->dwProfile = pTE->CodecPrivate[1];
					pm2vi->dwLevel = pTE->CodecPrivate[3];
					pm2vi->dwFlags = (pTE->CodecPrivate[4] & 3) + 1;
					BYTE* pSequenceHeader = (BYTE*)pm2vi->dwSequenceHeader;
					memcpy(pSequenceHeader, data.GetData(), data.GetCount());
					pm2vi->cbSequenceHeader = data.GetCount();
					mts.Add(mt);
				}
				else if(CodecID.Find("V_MPEG4/") == 0)
				{
					mt.subtype = FOURCCMap('V4PM');
					mt.formattype = FORMAT_MPEG2Video;
					MPEG2VIDEOINFO* pm2vi = (MPEG2VIDEOINFO*)mt.AllocFormatBuffer(FIELD_OFFSET(MPEG2VIDEOINFO, dwSequenceHeader) + pTE->CodecPrivate.GetCount());
					memset(mt.Format(), 0, mt.FormatLength());
					pm2vi->hdr.bmiHeader.biSize = sizeof(pm2vi->hdr.bmiHeader);
					pm2vi->hdr.bmiHeader.biWidth = (LONG)pTE->v.PixelWidth;
					pm2vi->hdr.bmiHeader.biHeight = (LONG)pTE->v.PixelHeight;
					pm2vi->hdr.bmiHeader.biCompression = 'V4PM';
					pm2vi->hdr.bmiHeader.biPlanes = 1;
					pm2vi->hdr.bmiHeader.biBitCount = 24;
					BYTE* pSequenceHeader = (BYTE*)pm2vi->dwSequenceHeader;
					memcpy(pSequenceHeader, pTE->CodecPrivate.GetData(), pTE->CodecPrivate.GetCount());
					pm2vi->cbSequenceHeader = pTE->CodecPrivate.GetCount();
					mts.Add(mt);
				}
				else if(CodecID.Find("V_REAL/RV") == 0)
				{
					mt.subtype = FOURCCMap('00VR' + ((CodecID[9]-0x30)<<16));
					mt.formattype = FORMAT_VideoInfo;
					VIDEOINFOHEADER* pvih = (VIDEOINFOHEADER*)mt.AllocFormatBuffer(sizeof(VIDEOINFOHEADER) + pTE->CodecPrivate.GetCount());
					memset(mt.Format(), 0, mt.FormatLength());
					memcpy(mt.Format() + sizeof(VIDEOINFOHEADER), pTE->CodecPrivate.GetData(), pTE->CodecPrivate.GetCount());
					pvih->bmiHeader.biSize = sizeof(pvih->bmiHeader);
					pvih->bmiHeader.biWidth = (LONG)pTE->v.PixelWidth;
					pvih->bmiHeader.biHeight = (LONG)pTE->v.PixelHeight;
					pvih->bmiHeader.biCompression = mt.subtype.Data1;
					mts.Add(mt);
				}
				else if(CodecID == "V_DIRAC")
				{
					mt.subtype = MEDIASUBTYPE_DiracVideo;
					mt.formattype = FORMAT_DiracVideoInfo;
					DIRACINFOHEADER* dvih = (DIRACINFOHEADER*)mt.AllocFormatBuffer(FIELD_OFFSET(DIRACINFOHEADER, dwSequenceHeader) + pTE->CodecPrivate.GetCount());
					memset(mt.Format(), 0, mt.FormatLength());
					dvih->hdr.bmiHeader.biSize = sizeof(dvih->hdr.bmiHeader);
					dvih->hdr.bmiHeader.biWidth = (LONG)pTE->v.PixelWidth;
					dvih->hdr.bmiHeader.biHeight = (LONG)pTE->v.PixelHeight;
					dvih->hdr.dwPictAspectRatioX = dvih->hdr.bmiHeader.biWidth;
					dvih->hdr.dwPictAspectRatioY = dvih->hdr.bmiHeader.biHeight;

					BYTE* pSequenceHeader = (BYTE*)dvih->dwSequenceHeader;
					memcpy(pSequenceHeader, pTE->CodecPrivate.GetData(), pTE->CodecPrivate.GetCount());
					dvih->cbSequenceHeader = pTE->CodecPrivate.GetCount();

					mts.Add(mt);
				}
				else if(CodecID == "V_MPEG2")
				{
					BYTE* seqhdr = pTE->CodecPrivate.GetData();
					DWORD len = pTE->CodecPrivate.GetCount();
					int w = pTE->v.PixelWidth;
					int h = pTE->v.PixelHeight;

					if(MakeMPEG2MediaType(mt, seqhdr, len, w, h))
						mts.Add(mt);
				}
				else if(CodecID == "V_THEORA")
				{
					BYTE* thdr = pTE->CodecPrivate.GetData() + 3;

					mt.majortype		= MEDIATYPE_Video;
					mt.subtype			= FOURCCMap('OEHT');
					mt.formattype		= FORMAT_MPEG2_VIDEO;
					MPEG2VIDEOINFO* vih = (MPEG2VIDEOINFO*)mt.AllocFormatBuffer(sizeof(MPEG2VIDEOINFO) + pTE->CodecPrivate.GetCount());
					memset(mt.Format(), 0, mt.FormatLength());
					vih->hdr.bmiHeader.biSize		 = sizeof(vih->hdr.bmiHeader);
					vih->hdr.bmiHeader.biWidth		 = *(WORD*)&thdr[10] >> 4;
					vih->hdr.bmiHeader.biHeight		 = *(WORD*)&thdr[12] >> 4;
					vih->hdr.bmiHeader.biCompression = 'OEHT';
					vih->hdr.bmiHeader.biPlanes		 = 1;
					vih->hdr.bmiHeader.biBitCount	 = 24;
					int nFpsNum	= (thdr[22]<<24)|(thdr[23]<<16)|(thdr[24]<<8)|thdr[25];
					int nFpsDenum	= (thdr[26]<<24)|(thdr[27]<<16)|(thdr[28]<<8)|thdr[29];
					if(nFpsNum) vih->hdr.AvgTimePerFrame = (REFERENCE_TIME)(10000000.0 * nFpsDenum / nFpsNum);
					vih->hdr.dwPictAspectRatioX = (thdr[14]<<16)|(thdr[15]<<8)|thdr[16];
					vih->hdr.dwPictAspectRatioY = (thdr[17]<<16)|(thdr[18]<<8)|thdr[19];
					mt.bFixedSizeSamples = 0;

					vih->cbSequenceHeader = pTE->CodecPrivate.GetCount();
					memcpy (&vih->dwSequenceHeader, pTE->CodecPrivate.GetData(), vih->cbSequenceHeader);

					mts.Add(mt);
				}
				else if(CodecID.Find("V_VP8") == 0) 
				{ 
					mt.subtype = FOURCCMap('08PV'); 
					mt.formattype = FORMAT_VideoInfo; 
					VIDEOINFOHEADER* pvih = (VIDEOINFOHEADER*)mt.AllocFormatBuffer(sizeof(VIDEOINFOHEADER) + pTE->CodecPrivate.GetCount()); 
					memset(mt.Format(), 0, mt.FormatLength()); 
					memcpy(mt.Format() + sizeof(VIDEOINFOHEADER), pTE->CodecPrivate.GetData(), pTE->CodecPrivate.GetCount()); 
					pvih->bmiHeader.biSize = sizeof(pvih->bmiHeader); 
					pvih->bmiHeader.biWidth = (LONG)pTE->v.PixelWidth; 
					pvih->bmiHeader.biHeight = (LONG)pTE->v.PixelHeight; 
					pvih->bmiHeader.biCompression = mt.subtype.Data1; 
					mts.Add(mt); 
				} 
/*
				else if(CodecID == "V_DSHOW/MPEG1VIDEO") // V_MPEG1
				{
					mt.majortype = MEDIATYPE_Video;
					mt.subtype = MEDIASUBTYPE_MPEG1Payload;
					mt.formattype = FORMAT_MPEGVideo;
					MPEG1VIDEOINFO* pm1vi = (MPEG1VIDEOINFO*)mt.AllocFormatBuffer(pTE->CodecPrivate.GetCount());
					memcpy(pm1vi, pTE->CodecPrivate.GetData(), pTE->CodecPrivate.GetCount());
					mt.SetSampleSize(pm1vi->hdr.bmiHeader.biWidth*pm1vi->hdr.bmiHeader.biHeight*4);
					mts.Add(mt);
				}
*/
				REFERENCE_TIME AvgTimePerFrame = 0;

				if(pTE->v.FramePerSec > 0)
					AvgTimePerFrame = (REFERENCE_TIME)(10000000i64 / pTE->v.FramePerSec);
				else if(pTE->DefaultDuration > 0)
					AvgTimePerFrame = (REFERENCE_TIME)pTE->DefaultDuration / 100;

				if(AvgTimePerFrame)
				{
					for(int i = 0; i < mts.GetCount(); i++)
					{
						if(mts[i].formattype == FORMAT_VideoInfo
						|| mts[i].formattype == FORMAT_VideoInfo2
						|| mts[i].formattype == FORMAT_MPEG2Video)
						{
							((VIDEOINFOHEADER*)mts[i].Format())->AvgTimePerFrame = AvgTimePerFrame;
						}
					}
				}

				if(pTE->v.DisplayWidth != 0 && pTE->v.DisplayHeight != 0)
				{
					for(int i = 0; i < mts.GetCount(); i++)
					{
						if(mts[i].formattype == FORMAT_VideoInfo)
						{
							DWORD vih1 = FIELD_OFFSET(VIDEOINFOHEADER, bmiHeader);
							DWORD vih2 = FIELD_OFFSET(VIDEOINFOHEADER2, bmiHeader);
							DWORD bmi = mts[i].FormatLength() - FIELD_OFFSET(VIDEOINFOHEADER, bmiHeader);
							mt.formattype = FORMAT_VideoInfo2;
							mt.AllocFormatBuffer(vih2 + bmi);
							memcpy(mt.Format(), mts[i].Format(), vih1);
							memset(mt.Format() + vih1, 0, vih2 - vih1);
							memcpy(mt.Format() + vih2, mts[i].Format() + vih1, bmi);
							((VIDEOINFOHEADER2*)mt.Format())->dwPictAspectRatioX = (DWORD)pTE->v.DisplayWidth;
							((VIDEOINFOHEADER2*)mt.Format())->dwPictAspectRatioY = (DWORD)pTE->v.DisplayHeight;
							mts.InsertAt(i++, mt);
						}
						else if(mts[i].formattype == FORMAT_MPEG2Video)
						{
							((MPEG2VIDEOINFO*)mts[i].Format())->hdr.dwPictAspectRatioX = (DWORD)pTE->v.DisplayWidth;
							((MPEG2VIDEOINFO*)mts[i].Format())->hdr.dwPictAspectRatioY = (DWORD)pTE->v.DisplayHeight;
						}
					}
				}
			}
Example #11
STDMETHODIMP CDecDXVA2::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
  HRESULT hr = S_OK;
  DbgLog((LOG_TRACE, 10, L"CDecDXVA2::InitDecoder(): Initializing DXVA2 decoder"));

  // Hack-ish check to avoid re-creating the full decoder when only the aspect ratio changes.
  // Re-creating the DXVA2 decoder can lead to issues like missing frames or a delay of several seconds.
  if (m_pDecoder) {
    CMediaType mediaTypeCheck = m_MediaType;
    if (mediaTypeCheck.formattype == FORMAT_VideoInfo2 && pmt->formattype == FORMAT_VideoInfo2) {
      VIDEOINFOHEADER2 *vih2Old = (VIDEOINFOHEADER2 *)mediaTypeCheck.Format();
      VIDEOINFOHEADER2 *vih2New = (VIDEOINFOHEADER2 *)pmt->Format();

      vih2Old->dwPictAspectRatioX = vih2New->dwPictAspectRatioX;
      vih2Old->dwPictAspectRatioY = vih2New->dwPictAspectRatioY;

      if (mediaTypeCheck == *pmt) {
        DbgLog((LOG_TRACE, 10, L"-> Skipping re-init because media type is unchanged."));
        m_MediaType = *pmt;
        return S_OK;
      }
    }
  }

  DestroyDecoder(false);

  m_DisplayDelay = DXVA2_QUEUE_SURFACES;

  // Intel GPUs don't like the display delay and performance goes way down, so disable it.
  if (m_dwVendorId == VEND_ID_INTEL)
    m_DisplayDelay = 0;

  // Reduce display delay for DVD decoding for lower decode latency
  if (m_pCallback->GetDecodeFlags() & LAV_VIDEO_DEC_FLAG_DVD)
    m_DisplayDelay /= 2;

  // If we have a DXVA decoder service, check if it's capable of decoding this codec.
  // If we don't have one yet, it may be handed to us later, and compatibility is checked at that point.
  GUID input = GUID_NULL;
  if (m_pDXVADecoderService) {
    D3DFORMAT output;
    hr = FindVideoServiceConversion(codec, &input, &output);
    if (FAILED(hr)) {
      DbgLog((LOG_TRACE, 10, L"-> No decoder device available that can decode codec '%S' to NV12", avcodec_get_name(codec)));
      return E_FAIL;
    }
  }

  m_bFailHWDecode = FALSE;

  DbgLog((LOG_TRACE, 10, L"-> Creation of DXVA2 decoder successfull, initializing ffmpeg"));
  hr = CDecAvcodec::InitDecoder(codec, pmt);
  if (FAILED(hr)) {
    return hr;
  }

  if (((codec == AV_CODEC_ID_H264 || codec == AV_CODEC_ID_MPEG2VIDEO || codec == AV_CODEC_ID_HEVC) && m_pAVCtx->pix_fmt != AV_PIX_FMT_YUV420P && m_pAVCtx->pix_fmt != AV_PIX_FMT_YUVJ420P && m_pAVCtx->pix_fmt != AV_PIX_FMT_DXVA2_VLD && m_pAVCtx->pix_fmt != AV_PIX_FMT_NONE)
    || (codec == AV_CODEC_ID_H264 && m_pAVCtx->profile != FF_PROFILE_UNKNOWN && !H264_CHECK_PROFILE(m_pAVCtx->profile))
    || ((codec == AV_CODEC_ID_WMV3 || codec == AV_CODEC_ID_VC1) && m_pAVCtx->profile == FF_PROFILE_VC1_COMPLEX)
    || (codec == AV_CODEC_ID_HEVC && !HEVC_CHECK_PROFILE(m_pAVCtx->profile))) {
    DbgLog((LOG_TRACE, 10, L"-> Incompatible profile detected, falling back to software decoding"));
    return E_FAIL;
  }

  m_dwSurfaceWidth = GetAlignedDimension(m_pAVCtx->coded_width);
  m_dwSurfaceHeight = GetAlignedDimension(m_pAVCtx->coded_height);

  if (FAILED(CheckHWCompatConditions(input))) {
    return E_FAIL;
  }

  m_MediaType = *pmt;

  return S_OK;
}
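m_dwSurfaceWidth/m_dwSurfaceHeight come from GetAlignedDimension(), which isn't shown. A plausible sketch, assuming 16-pixel DXVA surface alignment with a wider 32-pixel assumption for HEVC (the real implementation and its per-codec/per-GPU rules may differ):

DWORD CDecDXVA2::GetAlignedDimension(DWORD dim)
{
  DWORD dwAlign = 16;                 // assumed DXVA2 surface alignment
  if (m_nCodecId == AV_CODEC_ID_HEVC)
    dwAlign = 32;                     // assumption: HEVC CTB sizes need wider alignment
  // round up to the next multiple of dwAlign (dwAlign is a power of two)
  return (dim + dwAlign - 1) & ~(dwAlign - 1);
}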