Code Example #1
// MSDN
static DWORD CalculateMaxAudioDataSize(
    IMFMediaType *pAudioType,    // The PCM audio format.
    DWORD cbHeader,              // The size of the WAVE file header.
    DWORD msecAudioData          // Maximum duration, in milliseconds.
    )
{
    UINT32 cbBlockSize = 0;         // Audio frame size, in bytes.
    UINT32 cbBytesPerSecond = 0;    // Bytes per second.

    // Get the audio block size and number of bytes/second from the audio format.

    cbBlockSize = MFGetAttributeUINT32(pAudioType, MF_MT_AUDIO_BLOCK_ALIGNMENT, 0);
    cbBytesPerSecond = MFGetAttributeUINT32(pAudioType, MF_MT_AUDIO_AVG_BYTES_PER_SECOND, 0);

    if (cbBlockSize == 0 || cbBytesPerSecond == 0)
    {
        return 0;   // Incomplete PCM type; avoids the divide-by-zero below.
    }

    // Calculate the maximum amount of audio data to write.
    // This value equals (duration in seconds x bytes/second), but cannot
    // exceed the maximum size of the data chunk in the WAVE file.

    // Size of the desired audio clip in bytes:
    DWORD cbAudioClipSize = (DWORD)MulDiv(cbBytesPerSecond, msecAudioData, 1000);

    // Largest possible size of the data chunk:
    DWORD cbMaxSize = MAXDWORD - cbHeader;

    // Maximum size altogether.
    cbAudioClipSize = min(cbAudioClipSize, cbMaxSize);

    // Round to the audio block size, so that we do not write a partial audio frame.
    cbAudioClipSize = (cbAudioClipSize / cbBlockSize) * cbBlockSize;

    return cbAudioClipSize;
}
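
A minimal call-site sketch for the helper above; the 44-byte header size and the
20-second duration are illustrative assumptions, not part of the MSDN sample:

static DWORD ComputeClipSize(IMFMediaType *pPCMType)
{
    const DWORD cbWaveHeader = 44;      // Canonical PCM WAVE header size.
    const DWORD msecDuration = 20000;   // Write at most 20 seconds of audio.
    return CalculateMaxAudioDataSize(pPCMType, cbWaveHeader, msecDuration);
}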
Code Example #2
bool FFmpegDecodeServices::VerifyVideoMediaType(IMFMediaType* pMediaType)
{
	GUID subType = GUID_NULL;
	pMediaType->GetGUID(MF_MT_SUBTYPE, &subType);

	UINT32 width = 0, height = 0;
	MFGetAttributeSize(pMediaType, MF_MT_FRAME_SIZE, &width, &height);
	if (width == 0 ||
		height == 0)
		return false;

	if (subType == MFVideoFormat_H264 || subType == MFVideoFormat_HEVC ||
		subType == MFVideoFormat_MPG1 || subType == MFVideoFormat_MPEG2) {
		if ((MFGetAttributeUINT32(pMediaType, MF_MT_MPEG2_PROFILE, 0) == 0 ||
			MFGetAttributeUINT32(pMediaType, MF_MT_MPEG2_LEVEL, 0) == 0) &&
			subType != MFVideoFormat_MPG1)
			return false;

		UINT32 seqSize = 0;
		pMediaType->GetBlobSize(MF_MT_MPEG_SEQUENCE_HEADER, &seqSize);
		if (seqSize == 0)
			return false;
	}
	return true;
}
Code Example #3
bool FFmpegDecodeServices::VerifyAudioMediaType(IMFMediaType* pMediaType)
{
	if (MFGetAttributeUINT32(pMediaType, MF_MT_AUDIO_NUM_CHANNELS, 0) == 0)
		return false;
	if (MFGetAttributeUINT32(pMediaType, MF_MT_AUDIO_SAMPLES_PER_SECOND, 0) < 8000)
		return false;
	if (MFGetAttributeUINT32(pMediaType, MF_MT_AUDIO_BITS_PER_SAMPLE, 0) > 32)
		return false;
	return true;
}
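
Both verify functions lean on a property of MFGetAttributeUINT32 worth spelling
out: it cannot fail. If the attribute is missing or has the wrong type, it
returns the caller-supplied default, so passing 0 and then testing for 0 doubles
as a presence check. A sketch of the equivalent long-form code, assuming
pMediaType as above:

UINT32 channels = 0;
if (FAILED(pMediaType->GetUINT32(MF_MT_AUDIO_NUM_CHANNELS, &channels)))
    channels = 0;   // Same result as MFGetAttributeUINT32(pMediaType, MF_MT_AUDIO_NUM_CHANNELS, 0)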
Code Example #4
File: MediaInfo.cpp Project: carlnome/SYEngine
HRESULT MediaInfo::InternalInitVideo(IMFMediaType* mediaType, StreamInfo& info)
{
	MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &info.video.width, &info.video.height);
	MFGetAttributeRatio(mediaType, MF_MT_PIXEL_ASPECT_RATIO, &info.video.pixelAR0, &info.video.pixelAR1);

	UINT32 fps_den = 0, fps_num = 0;
	MFGetAttributeRatio(mediaType, MF_MT_FRAME_RATE, &fps_num, &fps_den);
	info.video.frameRate = (fps_den != 0) ? float(fps_num) / float(fps_den) : 0.0f;
	info.video.fps_den = fps_den;
	info.video.fps_num = fps_num;

	info.video.profile = MFGetAttributeUINT32(mediaType, MF_MT_MPEG2_PROFILE, 0);
	info.video.profileLevel = MFGetAttributeUINT32(mediaType, MF_MT_MPEG2_LEVEL, 0);
	return S_OK;
}
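
For reference, MFGetAttributeRatio is a thin wrapper over a packed UINT64
attribute: the numerator lives in the high 32 bits and the denominator in the
low 32 bits. An equivalent manual unpacking, assuming mediaType as above:

UINT64 packed = MFGetAttributeUINT64(mediaType, MF_MT_FRAME_RATE, 0);
UINT32 fps_num = (UINT32)(packed >> 32);
UINT32 fps_den = (UINT32)(packed & 0xFFFFFFFF);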
Code Example #5
File: WMFReader.cpp Project: LordJZ/gecko-dev
HRESULT
WMFReader::ConfigureAudioDecoder()
{
  NS_ASSERTION(mSourceReader, "Must have a SourceReader before configuring decoders!");

  if (!mSourceReader ||
      !SourceReaderHasStream(mSourceReader, MF_SOURCE_READER_FIRST_AUDIO_STREAM)) {
    // No stream, no error.
    return S_OK;
  }

  const GUID* codecs;
  uint32_t numCodecs = 0;
  GetSupportedAudioCodecs(&codecs, &numCodecs);

  HRESULT hr = ConfigureSourceReaderStream(mSourceReader,
                                           MF_SOURCE_READER_FIRST_AUDIO_STREAM,
                                           MFAudioFormat_Float,
                                           codecs,
                                           numCodecs);
  if (FAILED(hr)) {
    NS_WARNING("Failed to configure WMF Audio decoder for PCM output");
    return hr;
  }

  RefPtr<IMFMediaType> mediaType;
  hr = mSourceReader->GetCurrentMediaType(MF_SOURCE_READER_FIRST_AUDIO_STREAM,
                                          byRef(mediaType));
  if (FAILED(hr)) {
    NS_WARNING("Failed to get configured audio media type");
    return hr;
  }

  mAudioRate = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_SAMPLES_PER_SECOND, 0);
  mAudioChannels = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_NUM_CHANNELS, 0);
  mAudioBytesPerSample = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_BITS_PER_SAMPLE, 16) / 8;

  mInfo.mAudio.mChannels = mAudioChannels;
  mInfo.mAudio.mRate = mAudioRate;
  mHasAudio = true;

  DECODER_LOG("Successfully configured audio stream. rate=%u channels=%u bitsPerSample=%u",
              mAudioRate, mAudioChannels, mAudioBytesPerSample);

  return S_OK;
}
Code Example #6
File: sfmf.cpp Project: sfpgmr/sfmf2
	audio_reader::audio_reader(std::wstring& source)
	{
		{
			IMFByteStreamPtr stream;
			CHK(MFCreateFile(MF_FILE_ACCESSMODE::MF_ACCESSMODE_READ, MF_FILE_OPENMODE::MF_OPENMODE_FAIL_IF_NOT_EXIST, MF_FILE_FLAGS::MF_FILEFLAGS_NONE, source.c_str(), &stream));

			IMFAttributesPtr attr;
			CHK(MFCreateAttributes(&attr, 10));
			CHK(attr->SetUINT32(MF_READWRITE_ENABLE_HARDWARE_TRANSFORMS, true));
			IMFSourceReaderPtr reader;
			CHK(MFCreateSourceReaderFromByteStream(stream.Get(), attr.Get(), &reader));

			//      CHK(MFCreateSourceReaderFromURL(source.c_str(),attr.Get(), &reader));
			CHK(reader.As(&reader_));
			QWORD length;
			CHK(stream->GetLength(&length));
			fileSize_ = length;
		}

		//CHK(reader_->GetServiceForStream(0, MF_WRAPPED_OBJECT, __uuidof(IMFByteStream), &stream));

		CHK(reader_->GetNativeMediaType(0, 0, &native_media_type_));
		CHK(MFCreateMediaType(&current_media_type_));
		CHK(current_media_type_->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio));
		CHK(current_media_type_->SetGUID(MF_MT_SUBTYPE, MFAudioFormat_PCM));
		CHK(current_media_type_->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE,
			MFGetAttributeUINT32(native_media_type_.Get(), MF_MT_AUDIO_BITS_PER_SAMPLE, bits_per_sample)
			)
			);
		CHK(current_media_type_->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND,
			MFGetAttributeUINT32(native_media_type_.Get(), MF_MT_AUDIO_SAMPLES_PER_SECOND, samples_per_second)
			));
		CHK(current_media_type_->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS,
			MFGetAttributeUINT32(native_media_type_.Get(), MF_MT_AUDIO_NUM_CHANNELS, channel_count)
			));
		//DWORD blockAlign;
		//CHK(native_media_type_->GetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, &blockAlign));
		//CHK(current_media_type_->SetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT,blockAlign ));
		CHK(reader_->SetCurrentMediaType(0, nullptr, current_media_type_.Get()));
		CHK(reader_->GetCurrentMediaType(0, current_media_type_.ReleaseAndGetAddressOf()));
		UINT32 blockAlign;
		CHK(current_media_type_->GetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, &blockAlign));

		DOUT(boost::wformat(L"Block Align: %10d %10x") % blockAlign % blockAlign);

	}
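
The pattern above, SetCurrentMediaType with a partial PCM type followed by
GetCurrentMediaType to read the negotiated type back, is why block alignment can
be queried afterwards even though it was never set: the source reader fills in
derived attributes during negotiation. A sketch of reading another derived
attribute at that point:

UINT32 avgBytesPerSec = MFGetAttributeUINT32(current_media_type_.Get(),
                                             MF_MT_AUDIO_AVG_BYTES_PER_SECOND, 0);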
Code Example #7
File: WMFUtils.cpp Project: ShakoHo/gecko-dev
// Gets the sub-region of the video frame that should be displayed.
// See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb530115(v=vs.85).aspx
HRESULT
GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion)
{
  // Determine if "pan and scan" is enabled for this media. If it is, we
  // only display a region of the video frame, not the entire frame.
  BOOL panScan = MFGetAttributeUINT32(aMediaType, MF_MT_PAN_SCAN_ENABLED, FALSE);

  // If pan-and-scan mode is enabled, try to get the display region.
  HRESULT hr = E_FAIL;
  MFVideoArea videoArea;
  memset(&videoArea, 0, sizeof(MFVideoArea));
  if (panScan) {
    hr = aMediaType->GetBlob(MF_MT_PAN_SCAN_APERTURE,
                             (UINT8*)&videoArea,
                             sizeof(MFVideoArea),
                             nullptr);
  }

  // If we're not in pan-and-scan mode, or the pan-and-scan region is not set,
  // check for a minimum display aperture.
  if (!panScan || hr == MF_E_ATTRIBUTENOTFOUND) {
    hr = aMediaType->GetBlob(MF_MT_MINIMUM_DISPLAY_APERTURE,
                             (UINT8*)&videoArea,
                             sizeof(MFVideoArea),
                             nullptr);
  }

  if (hr == MF_E_ATTRIBUTENOTFOUND) {
    // The minimum display aperture is not set. For "backward compatibility
    // with some components", check for a geometric aperture.
    hr = aMediaType->GetBlob(MF_MT_GEOMETRIC_APERTURE,
                             (UINT8*)&videoArea,
                             sizeof(MFVideoArea),
                             nullptr);
  }

  if (SUCCEEDED(hr)) {
    // The media specified a picture region, return it.
    aOutPictureRegion = nsIntRect(MFOffsetToInt32(videoArea.OffsetX),
                                  MFOffsetToInt32(videoArea.OffsetY),
                                  videoArea.Area.cx,
                                  videoArea.Area.cy);
    return S_OK;
  }

  // No picture region defined, fall back to using the entire video area.
  UINT32 width = 0, height = 0;
  hr = MFGetAttributeSize(aMediaType, MF_MT_FRAME_SIZE, &width, &height);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  aOutPictureRegion = nsIntRect(0, 0, width, height);
  return S_OK;
}
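
MFOffsetToInt32 is not shown here. MFOffset is a 16.16 fixed-point type
(a short integer part plus a WORD fractional part), so a plausible sketch of
the helper, assuming this is how the project defines it, is:

static int32_t MFOffsetToInt32(const MFOffset& aOffset)
{
  return int32_t(aOffset.value + (aOffset.fract / 65536.0f));
}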
Code Example #8
HRESULT CASFManager::SendKeyFrameToDecoder(
    IMFSample* pSample,
    const MFTIME& hnsSeekTime,
    BOOL bReverse,
    BOOL* fDecodedKeyFrame,
    SAMPLE_INFO* pSampleInfo,
    void (*FuncPtrToDisplaySampleInfo)(SAMPLE_INFO*))
{
    if (!pSample)
    {
        return E_INVALIDARG;
    }
    
    HRESULT hr = S_OK;
    
    MFTIME hnsCurrentSampleTime = 0;

    BOOL   fShouldDecode = FALSE;
    UINT32 fIsKeyFrame = 0;

    IMFSample* pKeyFrameSample = NULL;

    // Get the time stamp on the sample
    CHECK_HR (hr = pSample->GetSampleTime (&hnsCurrentSampleTime));

    if ((UINT64)hnsCurrentSampleTime > m_fileinfo->cbPreroll)
    {
        hnsCurrentSampleTime -= m_fileinfo->cbPreroll;
    }

    // Check if the key-frame attribute is set on the sample
    fIsKeyFrame = MFGetAttributeUINT32(pSample, MFSampleExtension_CleanPoint, FALSE);

    if (!fIsKeyFrame)
    {
        return hr;
    }

    // Should we decode this sample?
    if (bReverse)
    {
        // Reverse playback: 
        // Is the sample *prior* to the seek time, and a key frame?
        fShouldDecode = (hnsCurrentSampleTime <= hnsSeekTime) ;
    }
    else
    {
        // Forward playback:
        // Is the sample *after* the seek time, and a key frame?
        fShouldDecode = (hnsCurrentSampleTime >= hnsSeekTime);
    }

    if (fShouldDecode)
    {
        // We found the key frame closest to the seek time.
        // Start the decoder if not already started.
        if ( m_pDecoder->GetDecoderStatus() != STREAMING)
        {
            CHECK_HR (hr =  m_pDecoder->StartDecoding());
        }

        // Set the discontinuity attribute.
        CHECK_HR (hr = pSample->SetUINT32(MFSampleExtension_Discontinuity, TRUE)); 

        //Send the sample to the decoder.
        CHECK_HR (hr =  m_pDecoder->ProcessVideo(pSample));

        *fDecodedKeyFrame = TRUE;
        
        //Get sample information
        (void)GetSampleInfo(pSample, pSampleInfo);
        pSampleInfo->fSeekedKeyFrame = *fDecodedKeyFrame;

        //Send it to callback to display
        FuncPtrToDisplaySampleInfo(pSampleInfo);
            
        CHECK_HR (hr =  m_pDecoder->StopDecoding());
    }


done:
    return hr;
}
Code Example #9
File: DrawDevice.cpp Project: n-n-n/ImageVideo
HRESULT DrawDevice::SetVideoType(IMFMediaType *pType)
{
    HRESULT hr = S_OK;
    GUID subtype = { 0 };
    MFRatio PAR = { 0 };

    // Find the video subtype.
    hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);

    if (FAILED(hr)) { goto done; }

    // Choose a conversion function.
    // (This also validates the format type.)

    hr = SetConversionFunction(subtype); 
    
    if (FAILED(hr)) { goto done; }

    //
    // Get some video attributes.
    //

    // Get the frame size.
    hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &m_width, &m_height);
    
    if (FAILED(hr)) { goto done; }

    
    // Get the interlace mode. Default: assume progressive.
    m_interlace = (MFVideoInterlaceMode)MFGetAttributeUINT32(
        pType,
        MF_MT_INTERLACE_MODE, 
        MFVideoInterlace_Progressive
        );

    // Get the image stride.
    hr = GetDefaultStride(pType, &m_lDefaultStride);

    if (FAILED(hr)) { goto done; }

    // Get the pixel aspect ratio. Default: Assume square pixels (1:1)
    hr = MFGetAttributeRatio(
        pType, 
        MF_MT_PIXEL_ASPECT_RATIO, 
        (UINT32*)&PAR.Numerator, 
        (UINT32*)&PAR.Denominator
        );

    if (SUCCEEDED(hr))
    {
        m_PixelAR = PAR;
    }
    else
    {
        m_PixelAR.Numerator = m_PixelAR.Denominator = 1;
    }

    m_format = (D3DFORMAT)subtype.Data1;

    // Create Direct3D swap chains.

    hr = CreateSwapChains();

    if (FAILED(hr)) { goto done; }


    // Update the destination rectangle for the correct
    // aspect ratio.

    UpdateDestinationRect();

	if (m_pBuf) delete [] m_pBuf;
	m_pBuf = new BYTE[m_height * m_width * 3];

done:
    if (FAILED(hr))
    {
        m_format = D3DFMT_UNKNOWN;
        m_convertFn = NULL;
    }
    return hr;
}
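
GetDefaultStride is another helper whose body isn't shown. A sketch following
the standard MSDN pattern (try the MF_MT_DEFAULT_STRIDE attribute first,
otherwise derive the stride from the subtype and width) could look like:

HRESULT GetDefaultStride(IMFMediaType *pType, LONG *plStride)
{
    LONG lStride = 0;

    // Try the attribute first.
    HRESULT hr = pType->GetUINT32(MF_MT_DEFAULT_STRIDE, (UINT32*)&lStride);
    if (FAILED(hr))
    {
        // Attribute not set; compute the stride from the subtype and width.
        GUID subtype = GUID_NULL;
        UINT32 width = 0, height = 0;

        hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);
        if (SUCCEEDED(hr))
            hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &width, &height);
        if (SUCCEEDED(hr))
            hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, width, &lStride);

        // Cache the result on the media type for next time.
        if (SUCCEEDED(hr))
            (void)pType->SetUINT32(MF_MT_DEFAULT_STRIDE, UINT32(lStride));
    }

    if (SUCCEEDED(hr))
        *plStride = lStride;
    return hr;
}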
Code Example #10
File: wmv9mft.cpp Project: kasper93/LAVFilters
STDMETHODIMP CDecWMV9MFT::ProcessOutput()
{
  HRESULT hr = S_OK;
  DWORD dwStatus = 0;

  MFT_OUTPUT_STREAM_INFO outputInfo = {0};
  m_pMFT->GetOutputStreamInfo(0, &outputInfo);

  IMFMediaBuffer *pMFBuffer = nullptr;
  ASSERT(!(outputInfo.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES));

  MFT_OUTPUT_DATA_BUFFER OutputBuffer = {0};
  if (!(outputInfo.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES)) {
    pMFBuffer = GetBuffer(outputInfo.cbSize);
    if (!pMFBuffer) { DbgLog((LOG_TRACE, 10, L"Unable to allocate media buffer")); return E_FAIL; }
  
    IMFSample *pSampleOut = nullptr;
    hr = MF.CreateSample(&pSampleOut);
    if (FAILED(hr)) { DbgLog((LOG_TRACE, 10, L"Unable to allocate MF sample, hr: 0x%x", hr)); ReleaseBuffer(pMFBuffer); return E_FAIL; }
    
    pSampleOut->AddBuffer(pMFBuffer);
    OutputBuffer.pSample = pSampleOut;
  }
  hr = m_pMFT->ProcessOutput(0, 1, &OutputBuffer, &dwStatus);

  // We don't process events, just release them
  SafeRelease(&OutputBuffer.pEvents);

  // handle stream format changes
  if (hr == MF_E_TRANSFORM_STREAM_CHANGE || OutputBuffer.dwStatus == MFT_OUTPUT_DATA_BUFFER_FORMAT_CHANGE ) {
    SafeRelease(&OutputBuffer.pSample);
    ReleaseBuffer(pMFBuffer);
    hr = SelectOutputType();
    if (FAILED(hr)) {
      DbgLog((LOG_TRACE, 10, L"-> Failed to handle stream change, hr: %x", hr));
      return E_FAIL;
    }
    // try again with the new type, it should work now!
    return ProcessOutput();
  }
  
  // the MFT generated no output, discard the sample and return
  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT || OutputBuffer.dwStatus == MFT_OUTPUT_DATA_BUFFER_NO_SAMPLE) {
    SafeRelease(&OutputBuffer.pSample);
    ReleaseBuffer(pMFBuffer);
    return S_FALSE;
  }
  
  // unknown error condition
  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> ProcessOutput failed with hr: %x", hr));
    SafeRelease(&OutputBuffer.pSample);
    ReleaseBuffer(pMFBuffer);
    return E_FAIL;
  }

  LAVFrame *pFrame = nullptr;
  AllocateFrame(&pFrame);

  IMFMediaType *pMTOut = nullptr;
  m_pMFT->GetOutputCurrentType(0, &pMTOut);

  MFGetAttributeSize(pMTOut, MF_MT_FRAME_SIZE, (UINT32 *)&pFrame->width, (UINT32 *)&pFrame->height);
  pFrame->format = m_OutPixFmt;

  AVRational pixel_aspect_ratio = {1, 1};
  MFGetAttributeRatio(pMTOut, MF_MT_PIXEL_ASPECT_RATIO, (UINT32*)&pixel_aspect_ratio.num, (UINT32*)&pixel_aspect_ratio.den);

  AVRational display_aspect_ratio = {0, 0};
  av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, (int64_t)pixel_aspect_ratio.num * pFrame->width, (int64_t)pixel_aspect_ratio.den * pFrame->height, INT_MAX);
  pFrame->aspect_ratio = display_aspect_ratio;

  pFrame->interlaced = MFGetAttributeUINT32(OutputBuffer.pSample, MFSampleExtension_Interlaced,       FALSE);
  pFrame->repeat     = MFGetAttributeUINT32(OutputBuffer.pSample, MFSampleExtension_RepeatFirstField, FALSE);

  LAVDeintFieldOrder fo = m_pSettings->GetDeintFieldOrder();
  pFrame->tff = (fo == DeintFieldOrder_Auto) ? !MFGetAttributeUINT32(OutputBuffer.pSample, MFSampleExtension_BottomFieldFirst, FALSE) : (fo == DeintFieldOrder_TopFieldFirst);

  if (pFrame->interlaced && !m_bInterlaced)
    m_bInterlaced = TRUE;

  pFrame->interlaced = (pFrame->interlaced || (m_bInterlaced && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);

  pFrame->ext_format.VideoPrimaries         = MFGetAttributeUINT32(pMTOut, MF_MT_VIDEO_PRIMARIES,     MFVideoPrimaries_Unknown);
  pFrame->ext_format.VideoTransferFunction  = MFGetAttributeUINT32(pMTOut, MF_MT_TRANSFER_FUNCTION,   MFVideoTransFunc_Unknown);
  pFrame->ext_format.VideoTransferMatrix    = MFGetAttributeUINT32(pMTOut, MF_MT_YUV_MATRIX,          MFVideoTransferMatrix_Unknown);
  pFrame->ext_format.VideoChromaSubsampling = MFGetAttributeUINT32(pMTOut, MF_MT_VIDEO_CHROMA_SITING, MFVideoChromaSubsampling_Unknown);
  pFrame->ext_format.NominalRange           = MFGetAttributeUINT32(pMTOut, MF_MT_VIDEO_NOMINAL_RANGE, MFNominalRange_Unknown);

  // HACK: Don't flag range=limited if it's the only value set; since it's also the
  // implied default, this helps avoid a reconnect. The MFT always sets this value,
  // even if the bitstream says nothing about it, causing a reconnect on every vc1/wmv3 file.
  if (pFrame->ext_format.value == 0x2000)
    pFrame->ext_format.value = 0;

  // Timestamps
  if (m_bManualReorder) {
    if (!m_timestampQueue.empty()) {
      pFrame->rtStart = m_timestampQueue.front();
      m_timestampQueue.pop();
      
      LONGLONG llDuration = 0;
      hr = OutputBuffer.pSample->GetSampleDuration(&llDuration);
      if (SUCCEEDED(hr) && llDuration > 0) {
        pFrame->rtStop = pFrame->rtStart + llDuration;
      }
    }
  } else {
    LONGLONG llTimestamp = 0;
    hr = OutputBuffer.pSample->GetSampleTime(&llTimestamp);
    if (SUCCEEDED(hr)) {
      pFrame->rtStart = llTimestamp;
      
      LONGLONG llDuration = 0;
      hr = OutputBuffer.pSample->GetSampleDuration(&llDuration);
      if (SUCCEEDED(hr) && llDuration > 0) {
        pFrame->rtStop = pFrame->rtStart + llDuration;
      }
    }
  }

  SafeRelease(&pMTOut);

  // Lock memory in the buffer
  BYTE *pBuffer = nullptr;
  pMFBuffer->Lock(&pBuffer, NULL, NULL);

  // Check alignment
  // If not properly aligned, we need to make the data aligned.
  int alignment = (m_OutPixFmt == LAVPixFmt_NV12) ? 16 : 32;
  if ((pFrame->width % alignment) != 0) {
    hr = AllocLAVFrameBuffers(pFrame);
    if (FAILED(hr)) {
      pMFBuffer->Unlock();
      ReleaseBuffer(pMFBuffer);
      SafeRelease(&OutputBuffer.pSample);
      return hr;
    }
    size_t ySize = pFrame->width * pFrame->height;
    
    memcpy_plane(pFrame->data[0], pBuffer, pFrame->width, pFrame->stride[0], pFrame->height);
    if (m_OutPixFmt == LAVPixFmt_NV12) {
      memcpy_plane(pFrame->data[1], pBuffer + ySize, pFrame->width, pFrame->stride[1], pFrame->height / 2);
    } else if (m_OutPixFmt == LAVPixFmt_YUV420) {
      size_t uvSize = ySize / 4;
      memcpy_plane(pFrame->data[2], pBuffer + ySize, pFrame->width / 2, pFrame->stride[2], pFrame->height / 2);
      memcpy_plane(pFrame->data[1], pBuffer + ySize + uvSize, pFrame->width / 2, pFrame->stride[1], pFrame->height / 2);
    }
    pMFBuffer->Unlock();
    ReleaseBuffer(pMFBuffer);
  } else {
    if (m_OutPixFmt == LAVPixFmt_NV12) {
      pFrame->data[0] = pBuffer;
      pFrame->data[1] = pBuffer + pFrame->width * pFrame->height;
      pFrame->stride[0] = pFrame->stride[1] = pFrame->width;
    } else if (m_OutPixFmt == LAVPixFmt_YUV420) {
      pFrame->data[0] = pBuffer;
      pFrame->data[2] = pBuffer + pFrame->width * pFrame->height;
      pFrame->data[1] = pFrame->data[2] + (pFrame->width / 2) * (pFrame->height / 2);
      pFrame->stride[0] = pFrame->width;
      pFrame->stride[1] = pFrame->stride[2] = pFrame->width / 2;
    }
    pFrame->data[3] = (BYTE *)pMFBuffer;
    pFrame->destruct = wmv9_buffer_destruct;
    pFrame->priv_data = this;
  }
  pFrame->flags |= LAV_FRAME_FLAG_BUFFER_MODIFY;
  Deliver(pFrame);

  SafeRelease(&OutputBuffer.pSample);

  if (OutputBuffer.dwStatus == MFT_OUTPUT_DATA_BUFFER_INCOMPLETE)
    return ProcessOutput();
  return hr;
}
Code Example #11
File: wmv9mft.cpp Project: kasper93/LAVFilters
STDMETHODIMP CDecWMV9MFT::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
  HRESULT hr = S_OK;
  DbgLog((LOG_TRACE, 10, L"CDecWMV9MFT::InitDecoder(): Initializing WMV9 MFT decoder"));

  DestroyDecoder(false);

  BITMAPINFOHEADER *pBMI = nullptr;
  REFERENCE_TIME rtAvg = 0;
  DWORD dwARX = 0, dwARY = 0;
  videoFormatTypeHandler(*pmt, &pBMI, &rtAvg, &dwARX, &dwARY);

  size_t extralen = 0;
  BYTE *extra = nullptr;
  getExtraData(*pmt, nullptr, &extralen);
  if (extralen > 0) {
    extra = (BYTE *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
    getExtraData(*pmt, extra, &extralen);
  }

  if (codec == AV_CODEC_ID_VC1 && extralen) {
    size_t i = 0;
    for (i = 0; i < (extralen - 4); i++) {
      uint32_t code = AV_RB32(extra + i);
      if ((code & ~0xFF) == 0x00000100)
        break;
    }
    if (i == 0) {
      memmove(extra + 1, extra, extralen);
      *extra = 0;
      extralen++;
    } else if (i > 1) {
      DbgLog((LOG_TRACE, 10, L"-> VC-1 Header at position %u (should be 0 or 1)", i));
    }
  }

  if (extralen > 0) {
    m_vc1Header = new CVC1HeaderParser(extra, extralen, codec);
  }

  /* Create input type */
  m_nCodecId = codec;

  IMFMediaType *pMTIn = nullptr;
  MF.CreateMediaType(&pMTIn);
  
  pMTIn->SetUINT32(MF_MT_COMPRESSED, TRUE);
  pMTIn->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, FALSE);
  pMTIn->SetUINT32(MF_MT_FIXED_SIZE_SAMPLES, FALSE);

  pMTIn->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  pMTIn->SetGUID(MF_MT_SUBTYPE,    VerifySubtype(codec, pmt->subtype));

  MFSetAttributeSize(pMTIn, MF_MT_FRAME_SIZE, pBMI->biWidth, pBMI->biHeight);

  UINT32 rateNum = 0, rateDen = 0;
  MF.AverageTimePerFrameToFrameRate(rtAvg, &rateNum, &rateDen);
  MFSetAttributeRatio(pMTIn, MF_MT_FRAME_RATE, rateNum, rateDen);
  
  pMTIn->SetBlob(MF_MT_USER_DATA, extra, (UINT32)extralen);
  av_freep(&extra);

  hr = m_pMFT->SetInputType(0, pMTIn, 0);
  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> Failed to set input type on MFT"));
    return hr;
  }

  /* Create output type */
  hr = SelectOutputType();
  SafeRelease(&pMTIn);

  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> Failed to set output type on MFT"));
    return hr;
  }

  IMFMediaType *pMTOut = nullptr;
  m_pMFT->GetOutputCurrentType(0, &pMTOut);
  m_bInterlaced = MFGetAttributeUINT32(pMTOut, MF_MT_INTERLACE_MODE, MFVideoInterlace_Unknown) > MFVideoInterlace_Progressive;
  SafeRelease(&pMTOut);

  m_bManualReorder = (codec == AV_CODEC_ID_VC1) && !(m_pCallback->GetDecodeFlags() & LAV_VIDEO_DEC_FLAG_ONLY_DTS);

  return S_OK;
}
Code Example #12
/*
Description:

    This is used whenever there is a media type change on an output pin and the
    output queue is being reconfigured.
    The possible return values for the function are as follows:

    DeviceMftTransformXVPIllegal        -> Either or both of the media types are NULL
    DeviceMftTransformXVPDisruptiveIn   -> The media type at the output pin is larger than at the input pin; the input media type must change
    DeviceMftTransformXVPDisruptiveOut  -> The XVP in the output pin queue must be added or reconfigured
    DeviceMftTransformXVPCurrent        -> No XVP needed at all
    Note: This iteration doesn't support a decoder. The next one will, and this function will change accordingly.
*/
STDMETHODIMP CompareMediaTypesForXVP(
    _In_opt_ IMFMediaType *inMediaType,
    _In_    IMFMediaType                *newMediaType,
    _Inout_ MF_TRANSFORM_XVP_OPERATION  *operation 
    )
{
    UINT32  unWidthin = 0, unHeightin = 0, unWidthNew = 0, unHeightNew = 0;
    HRESULT hr          = S_OK;
    GUID    guidTypeA   = GUID_NULL;
    GUID    guidTypeB   = GUID_NULL;
    
    *operation = DeviceMftTransformXVPIllegal;
    if ((!inMediaType) || (!newMediaType))
    {
       goto done;
    }

    DMFTCHECKHR_GOTO( MFGetAttributeSize( inMediaType, MF_MT_FRAME_SIZE, &unWidthin, &unHeightin ), done );
    DMFTCHECKHR_GOTO( MFGetAttributeSize( newMediaType, MF_MT_FRAME_SIZE, &unWidthNew, &unHeightNew ), done );


    if ( SUCCEEDED( inMediaType->GetGUID(  MF_MT_MAJOR_TYPE, &guidTypeA ) ) &&
         SUCCEEDED( newMediaType->GetGUID( MF_MT_MAJOR_TYPE, &guidTypeB ) ) &&
        IsEqualGUID( guidTypeA, guidTypeB ) )
    {
        if ( SUCCEEDED( inMediaType->GetGUID ( MF_MT_SUBTYPE, &guidTypeA ) ) &&
             SUCCEEDED( newMediaType->GetGUID( MF_MT_SUBTYPE, &guidTypeB ) ) &&
            IsEqualGUID( guidTypeA, guidTypeB ) )
        {
            // Comparing MF_MT_AM_FORMAT_TYPE against the DirectShow format GUID (currently disabled).
#if 0
            if (SUCCEEDED(inMediaType->GetGUID(MF_MT_AM_FORMAT_TYPE, &guidTypeA)) &&
                SUCCEEDED(newMediaType->GetGUID(MF_MT_AM_FORMAT_TYPE, &guidTypeB)) &&
                IsEqualGUID(guidTypeA, guidTypeB))
#endif
            {

                if (!(( unWidthin == unWidthNew ) &&
                    ( unHeightin == unHeightNew ) ) )
                {
                    if ( ( unWidthNew > unWidthin ) || ( unHeightNew > unHeightin ) )
                    {
                      *operation = DeviceMftTransformXVPDisruptiveIn; //Media type needs to change at input
                    }
                    else
                    {
                        *operation = DeviceMftTransformXVPDisruptiveOut; //Media type needs to change at output
                    }
                    goto done;
                }

                if ( MFGetAttributeUINT32( inMediaType,  MF_MT_SAMPLE_SIZE, 0 ) !=
                     MFGetAttributeUINT32( newMediaType, MF_MT_SAMPLE_SIZE, 0 ) )
                {
                    hr = S_FALSE; //Sample sizes differ. 
                    goto done;
                }
                else
                {
                    //Same media type.. No XVP needed or the current XVP is fine!
                    *operation = DeviceMftTransformXVPCurrent;
                }
            }
        }
        else
        {
            //This is a disruptive operation. Actually a decoder operation!
            *operation = DeviceMftTransformXVPDisruptiveIn;
        }
    }
 done:
    return hr;
}
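
A hypothetical call site (pCurrentType and pRequestedType are illustrative
names, not part of the source):

MF_TRANSFORM_XVP_OPERATION op = DeviceMftTransformXVPIllegal;
HRESULT hrCmp = CompareMediaTypesForXVP(pCurrentType, pRequestedType, &op);
if (SUCCEEDED(hrCmp) && op == DeviceMftTransformXVPDisruptiveIn)
{
    // The requested output is larger than the input; renegotiate the input type.
}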
Code Example #13
File: MediaInfo.cpp Project: carlnome/SYEngine
HRESULT MediaInfo::InternalInitAudio(IMFMediaType* mediaType, StreamInfo& info)
{
	info.audio.channels = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_NUM_CHANNELS, 0);
	info.audio.sampleRate = MFGetAttributeUINT32(mediaType, MF_MT_AUDIO_SAMPLES_PER_SECOND, 0);
	return S_OK;
}
Code Example #14
File: capture_device.cpp Project: bmharper/mvision
bool WinCaptureDevice::InitializeFirst(std::string& error)
{
	HRESULT hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
	if (!SUCCEEDED(hr))
	{
		error = "CoInitializeEx failed";
		return false;
	}

	hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
	if (!SUCCEEDED(hr))
	{
		error = "MFStartup failed";
		return false;
	}

	Close();

	memset(&InputType, 0, sizeof(InputType));

	IMFActivate* activate = WinCaptureDevice::ChooseFirst(error);
	if (!activate)
		return false;

	IMFMediaSource  *pSource = NULL;
	IMFAttributes   *pAttributes = NULL;
	IMFMediaType    *pType = NULL;

	UINT32 m_cchSymbolicLink = 0;

	// Create the media source for the device.
	if (SUCCEEDED(hr))
		hr = activate->ActivateObject(__uuidof(IMFMediaSource), (void**) &pSource);

	// Get the symbolic link.
	if (SUCCEEDED(hr))
		hr = activate->GetAllocatedString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &SymbolicLink, &m_cchSymbolicLink);

	//
	// Create the source reader.
	//

	// Create an attribute store to hold initialization settings.

	if (SUCCEEDED(hr))
		hr = MFCreateAttributes(&pAttributes, 2);

	if (SUCCEEDED(hr))
		hr = pAttributes->SetUINT32(MF_READWRITE_DISABLE_CONVERTERS, TRUE);

	// Set the callback pointer.
	if (SUCCEEDED(hr))
		hr = pAttributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, this);

	if (SUCCEEDED(hr))
		hr = MFCreateSourceReaderFromMediaSource(pSource, pAttributes, &Reader);

	// Try to find a suitable input type.
	if (SUCCEEDED(hr))
	{
		for (uint i = 0; ; i++)
		{
			hr = Reader->GetNativeMediaType((DWORD) MF_SOURCE_READER_FIRST_VIDEO_STREAM, i, &pType);
			if (FAILED(hr))
			{
				error = "Failed to find a supported output format (ie RGB24)";
				break;
			}
			memset(&InputType, 0, sizeof(InputType));
			bool isTypeOK = IsMediaTypeSupported(pType, InputType);
			if (isTypeOK)
			{
				// Get the frame size.
				hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &InputWidth, &InputHeight);
				// Get the image stride.
				hr = GetDefaultStride(pType, &InputDefaultStride);
				// Get the interlace mode. Default: assume progressive.
				InputInterlaceMode = (MFVideoInterlaceMode) MFGetAttributeUINT32(pType, MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
			}
			SafeRelease(&pType);
			if (isTypeOK)
				break;
		}
	}

	if (SUCCEEDED(hr))
	{
		// Ask for the first sample.
		EnableCapture = 1;
		hr = Reader->ReadSample((DWORD) MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0, NULL, NULL, NULL, NULL);
	}

	if (FAILED(hr))
	{
		if (pSource)
		{
			pSource->Shutdown();
			// NOTE: The source reader shuts down the media source by default, but we might not have gotten that far.
		}
		Close();
	}

	SafeRelease(&pSource);
	SafeRelease(&pAttributes);
	SafeRelease(&pType);
	SafeRelease(&activate);

	if (FAILED(hr) && error.length() == 0)
		error = ErrorMessage(L"Failed to initialize video capture device", hr);

	return SUCCEEDED(hr);
}
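
A hypothetical call site for the initializer above, assuming a console host:

WinCaptureDevice device;
std::string err;
if (!device.InitializeFirst(err))
    fprintf(stderr, "capture init failed: %s\n", err.c_str());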