Example #1
/*static*/ void AVInfo::StaticMakeUncompressed(AM_MEDIA_TYPE *type)
{
    _ASSERT(type->majortype == MEDIATYPE_Audio || type->majortype == MEDIATYPE_Video);

    type->bFixedSizeSamples = true;
    type->bTemporalCompression = false;

    if (type->majortype == MEDIATYPE_Audio)
    {
        WAVEFORMATEX *pWavHeader = (WAVEFORMATEX *)type->pbFormat;
        _ASSERT(pWavHeader);

        bool isPcm = pWavHeader->wFormatTag == WAVE_FORMAT_PCM;

        if (!isPcm)
        {
            pWavHeader->wFormatTag = WAVE_FORMAT_PCM;
            pWavHeader->cbSize = 0; // PCM carries no extra format bytes after WAVEFORMATEX

            int uncompressedBitCount = pWavHeader->wBitsPerSample == 8 ? 8 : 16;

            type->subtype = MEDIASUBTYPE_PCM;

            pWavHeader->wBitsPerSample = uncompressedBitCount;
            pWavHeader->nBlockAlign =
                (pWavHeader->wBitsPerSample*pWavHeader->nChannels)/8;

            type->lSampleSize =
                (pWavHeader->nSamplesPerSec*pWavHeader->nBlockAlign);

            pWavHeader->nAvgBytesPerSec = type->lSampleSize;

        }
    }
    else if (type->majortype == MEDIATYPE_Video)
    {
        VIDEOINFOHEADER *pVideoHeader = (VIDEOINFOHEADER *)type->pbFormat;
        _ASSERT(pVideoHeader);

        // TODO: these are not all of the uncompressed cases:
        if (type->subtype != MEDIASUBTYPE_RGB24)
        {
            type->subtype = MEDIASUBTYPE_RGB24;

            pVideoHeader->bmiHeader.biCompression = BI_RGB;
            pVideoHeader->bmiHeader.biBitCount = 24;
            pVideoHeader->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
            pVideoHeader->bmiHeader.biSizeImage = DIBSIZE(pVideoHeader->bmiHeader);

            type->lSampleSize = DIBSIZE(pVideoHeader->bmiHeader);
        }
    }
}
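
A quick check of the PCM bookkeeping above (a minimal sketch, not part of the original source): for uncompressed PCM the derived WAVEFORMATEX fields follow directly from sample rate, channel count, and bit depth, exactly as StaticMakeUncompressed assigns them.

#include <cstdint>

// Sketch: derived PCM fields. For 44100 Hz stereo 16-bit audio this gives
// blockAlign = 4 bytes per frame and avgBytesPerSec = 176400.
struct PcmDerived { uint16_t blockAlign; uint32_t avgBytesPerSec; };

PcmDerived DerivePcmFields(uint32_t samplesPerSec, uint16_t channels, uint16_t bitsPerSample)
{
    PcmDerived d;
    d.blockAlign = (uint16_t)((bitsPerSample * channels) / 8); // bytes per audio frame
    d.avgBytesPerSec = samplesPerSec * d.blockAlign;           // bytes per second of audio
    return d;
}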
Example #2
static HRESULT FFMVWrapper_GetOutputTypes( CTransformBaseImpl* pImpl, const AM_MEDIA_TYPE* pmtIn, const AM_MEDIA_TYPE** ppmtAcceptTypes, ULONG* pcAcceptTypes )
{
	CFFMVWrapperImpl*	This = pImpl->m_pUserData;
	HRESULT hr;
	BITMAPINFO*	pbiIn = NULL;
	BITMAPINFO*	pbiOut = NULL;
	LONG	width, height;

	TRACE("(%p)\n",This);
	hr = FFMVWrapper_CheckMediaType( pImpl, pmtIn, NULL );
	if ( FAILED(hr) )
		return hr;

	pbiIn = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtIn->pbFormat)->bmiHeader);

	width = pbiIn->bmiHeader.biWidth;
	height = (pbiIn->bmiHeader.biHeight < 0) ?
		 -pbiIn->bmiHeader.biHeight :
		  pbiIn->bmiHeader.biHeight;

	QUARTZ_MediaType_Free( &This->m_mtOut );
	ZeroMemory( &This->m_mtOut, sizeof(AM_MEDIA_TYPE) );

	memcpy( &This->m_mtOut.majortype, &MEDIATYPE_Video, sizeof(GUID) );
	memcpy( &This->m_mtOut.formattype, &FORMAT_VideoInfo, sizeof(GUID) );
	This->m_mtOut.cbFormat = sizeof(VIDEOINFO);
	This->m_mtOut.pbFormat = (BYTE*)CoTaskMemAlloc(This->m_mtOut.cbFormat);
	if ( This->m_mtOut.pbFormat == NULL )
		return E_OUTOFMEMORY;
	ZeroMemory( This->m_mtOut.pbFormat, This->m_mtOut.cbFormat );

	pbiOut = (BITMAPINFO*)(&((VIDEOINFOHEADER*)This->m_mtOut.pbFormat)->bmiHeader);

	/* suggest 24bpp RGB output */
	pbiOut->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
	pbiOut->bmiHeader.biWidth = width;
	pbiOut->bmiHeader.biHeight = -height;
	pbiOut->bmiHeader.biPlanes = 1;
	pbiOut->bmiHeader.biBitCount = 24;
	pbiOut->bmiHeader.biSizeImage = DIBSIZE(pbiOut->bmiHeader);
	memcpy( &This->m_mtOut.subtype, &MEDIASUBTYPE_RGB24, sizeof(GUID) );

	This->m_mtOut.bFixedSizeSamples = 1;
	This->m_mtOut.lSampleSize = DIBSIZE(pbiOut->bmiHeader);

	TRACE("(%p) - return format\n",This);
	*ppmtAcceptTypes = &This->m_mtOut;
	*pcAcceptTypes = 1;

	return NOERROR;
}
Example #3
static HRESULT FFMVWrapper_GetAllocProp( CTransformBaseImpl* pImpl, const AM_MEDIA_TYPE* pmtIn, const AM_MEDIA_TYPE* pmtOut, ALLOCATOR_PROPERTIES* pProp, BOOL* pbTransInPlace, BOOL* pbTryToReuseSample )
{
	CFFMVWrapperImpl*	This = pImpl->m_pUserData;
	BITMAPINFO*	pbiOut = NULL;
	HRESULT hr;

	TRACE("(%p)\n",This);

	if ( This == NULL )
		return E_UNEXPECTED;

	hr = FFMVWrapper_CheckMediaType( pImpl, pmtIn, pmtOut );
	if ( FAILED(hr) )
		return hr;

	pbiOut = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtOut->pbFormat)->bmiHeader);

	pProp->cBuffers = 1;
	if ( pbiOut->bmiHeader.biCompression == 0 ) /* BI_RGB */
		pProp->cbBuffer = DIBSIZE(pbiOut->bmiHeader);
	else
		pProp->cbBuffer = pbiOut->bmiHeader.biSizeImage;

	*pbTransInPlace = FALSE;
	*pbTryToReuseSample = TRUE;

	return NOERROR;
}
Example #4
HRESULT TffdshowDecVideoDXVA::setOutputMediaType(const CMediaType &mt)
{
    DPRINTF(_l("TffdshowDecVideoDXVA::setOutputMediaType"));
    TvideoCodecDec *pDecoder=NULL;
    getMovieSource((const TvideoCodecDec**)&pDecoder);

    TcspInfos ocsps;
    // DXVA mode : special output format
    TvideoCodecLibavcodecDxva *pDecoderDxva = (TvideoCodecLibavcodecDxva*)pDecoder;
    pDecoderDxva->getDXVAOutputFormats(ocsps);


    for (int i=0; cspFccs[i].name; i++) {
        const TcspInfo *cspInfo;
        // Look for the right DXVA colorspace
        bool ok=false;
        for (TcspInfos::const_iterator oc=ocsps.begin(); oc!=ocsps.end(); oc++) {
            if (mt.subtype==*(*oc)->subtype) {
                cspInfo=(const TcspInfo *)(*oc);
                ok=true;
                break;
            }
        }
        if (!ok) {
            continue;
        }
        m_frame.dstColorspace=FF_CSP_NV12;

        int biWidth,outDy;
        BITMAPINFOHEADER *bih;
        if (mt.formattype==FORMAT_VideoInfo && mt.pbFormat) { // checking mt.pbFormat works around a bug in other filters
            VIDEOINFOHEADER *vih=(VIDEOINFOHEADER*)mt.pbFormat;
            m_frame.dstStride=calcBIstride(biWidth=vih->bmiHeader.biWidth,cspInfo->Bpp*8);
            outDy=vih->bmiHeader.biHeight;
            bih=&vih->bmiHeader;
        } else if (mt.formattype==FORMAT_VideoInfo2 && mt.pbFormat) {
            VIDEOINFOHEADER2 *vih2=(VIDEOINFOHEADER2*)mt.pbFormat;
            m_frame.dstStride=calcBIstride(biWidth=vih2->bmiHeader.biWidth,cspInfo->Bpp*8);
            outDy=vih2->bmiHeader.biHeight;
            bih=&vih2->bmiHeader;
        } else {
            return VFW_E_TYPE_NOT_ACCEPTED;    //S_FALSE;
        }
        m_frame.dstSize=DIBSIZE(*bih);

        char_t s[256];
        DPRINTF(_l("TffdshowDecVideoDXVA::setOutputMediaType: colorspace:%s, biWidth:%i, dstStride:%i, Bpp:%i, dstSize:%i"),csp_getName(m_frame.dstColorspace,s,256),biWidth,m_frame.dstStride,cspInfo->Bpp,m_frame.dstSize);
        if (csp_isRGB(m_frame.dstColorspace) && outDy>0) {
            m_frame.dstColorspace|=FF_CSP_FLAGS_VFLIP;
        }
        //else if (biheight<0)
        // m_frame.colorspace|=FF_CSP_FLAGS_VFLIP;
        return S_OK;
    }
    m_frame.dstColorspace=FF_CSP_NULL;
    DPRINTF(_l("TffdshowDecVideoDXVA::setOutputMediaType Type not supported by FFDShow DXVA"));
    return VFW_E_TYPE_NOT_ACCEPTED; //S_FALSE;
}
Example #5
STDAPI_(DWORD) GetBitmapSize(const BITMAPINFOHEADER *pHeader)
{
    return DIBSIZE(*pHeader);
}
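
Example #5 is just a thin wrapper, so it is a convenient place to note what DIBSIZE actually computes. The macro comes from the DirectShow header amvideo.h; the sketch below reproduces its arithmetic (an illustration, not the SDK macro itself): rows are padded to DWORD (4-byte) boundaries, and a negative biHeight, which marks a top-down DIB, contributes its absolute value.

#include <windows.h>

// Sketch of what DIBSIZE(bih) evaluates to: DWORD-aligned stride times
// the absolute height.
DWORD DibSizeSketch(const BITMAPINFOHEADER &bih)
{
    DWORD strideBytes = (((DWORD)bih.biWidth * bih.biBitCount + 31) & ~31u) / 8;
    DWORD rows        = (DWORD)(bih.biHeight < 0 ? -bih.biHeight : bih.biHeight);
    return strideBytes * rows;
}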
Example #6
VIDEOINFOHEADER *CLAVFVideoHelper::CreateVIH(const AVStream* avstream, ULONG *size, std::string container)
{
  VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER*)CoTaskMemAlloc(ULONG(sizeof(VIDEOINFOHEADER) + avstream->codec->extradata_size));
  if (!pvi) return NULL;
  memset(pvi, 0, sizeof(VIDEOINFOHEADER));
  // Get the frame rate
  REFERENCE_TIME r_avg = 0, avg_avg = 0, tb_avg = 0;
  if (avstream->r_frame_rate.den > 0 &&  avstream->r_frame_rate.num > 0) {
    r_avg = av_rescale(DSHOW_TIME_BASE, avstream->r_frame_rate.den, avstream->r_frame_rate.num);
  }
  if (avstream->avg_frame_rate.den > 0 &&  avstream->avg_frame_rate.num > 0) {
    avg_avg = av_rescale(DSHOW_TIME_BASE, avstream->avg_frame_rate.den, avstream->avg_frame_rate.num);
  }
  if (avstream->codec->time_base.den > 0 &&  avstream->codec->time_base.num > 0 && avstream->codec->ticks_per_frame > 0) {
    tb_avg = av_rescale(DSHOW_TIME_BASE, avstream->codec->time_base.num * avstream->codec->ticks_per_frame, avstream->codec->time_base.den);
  }

  DbgLog((LOG_TRACE, 10, L"CreateVIH: r_avg: %I64d, avg_avg: %I64d, tb_avg: %I64d", r_avg, avg_avg, tb_avg));
  if (r_avg >= MIN_TIME_PER_FRAME && r_avg <= MAX_TIME_PER_FRAME)
    pvi->AvgTimePerFrame = r_avg;
  else if (avg_avg >= MIN_TIME_PER_FRAME && avg_avg <= MAX_TIME_PER_FRAME)
    pvi->AvgTimePerFrame = avg_avg;
  else if (tb_avg >= MIN_TIME_PER_FRAME && tb_avg <= MAX_TIME_PER_FRAME)
    pvi->AvgTimePerFrame = tb_avg;

  if (container == "matroska" && r_avg && tb_avg && (avstream->codec->codec_id == AV_CODEC_ID_H264 || avstream->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)) {
    float factor = (float)r_avg / (float)tb_avg;
    if ((factor > 0.4 && factor < 0.6) || (factor > 1.9 && factor < 2.1)) {
      pvi->AvgTimePerFrame = tb_avg;
    }
  }

  pvi->dwBitErrorRate = 0;
  pvi->dwBitRate = avstream->codec->bit_rate;
  RECT empty_tagrect = {0,0,0,0};
  pvi->rcSource = empty_tagrect;//Some codecs like wmv are setting that value to the video current value
  pvi->rcTarget = empty_tagrect;
  pvi->rcTarget.right = pvi->rcSource.right = avstream->codec->width;
  pvi->rcTarget.bottom = pvi->rcSource.bottom = avstream->codec->height;

  memcpy((BYTE*)&pvi->bmiHeader + sizeof(BITMAPINFOHEADER), avstream->codec->extradata, avstream->codec->extradata_size);
  pvi->bmiHeader.biSize = ULONG(sizeof(BITMAPINFOHEADER) + avstream->codec->extradata_size);

  pvi->bmiHeader.biWidth = avstream->codec->width;
  pvi->bmiHeader.biHeight = avstream->codec->height;
  pvi->bmiHeader.biBitCount = avstream->codec->bits_per_coded_sample;
  // Validate biBitCount is set to something useful
  if ((pvi->bmiHeader.biBitCount == 0 || avstream->codec->codec_id == AV_CODEC_ID_RAWVIDEO) && avstream->codec->pix_fmt != AV_PIX_FMT_NONE) {
    const AVPixFmtDescriptor *pixdecs = av_pix_fmt_desc_get(avstream->codec->pix_fmt);
    if (pixdecs)
      pvi->bmiHeader.biBitCount = av_get_bits_per_pixel(pixdecs);
  }
  pvi->bmiHeader.biSizeImage = DIBSIZE(pvi->bmiHeader); // Calculating this value doesn't really make a lot of sense, but apparently some decoders freak out if it's 0

  pvi->bmiHeader.biCompression = avstream->codec->codec_tag;
  // TODO: the number of bit planes depends on the subtype
  pvi->bmiHeader.biPlanes = 1;
  pvi->bmiHeader.biClrUsed = 0;
  pvi->bmiHeader.biClrImportant = 0;
  pvi->bmiHeader.biYPelsPerMeter = 0;
  pvi->bmiHeader.biXPelsPerMeter = 0;

  *size = sizeof(VIDEOINFOHEADER) + avstream->codec->extradata_size;
  return pvi;
}
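
The AvgTimePerFrame candidates above are expressed in DirectShow's 100 ns units; here is a minimal sketch of the rescale being performed (assuming DSHOW_TIME_BASE is the usual 10,000,000 ticks per second):

#include <cstdint>

// Sketch: frame rate (num/den fps) to AvgTimePerFrame in 100 ns ticks,
// mirroring av_rescale(DSHOW_TIME_BASE, den, num) above. For 30000/1001 fps
// this is about 333667 ticks (~33.4 ms per frame); note av_rescale rounds
// to nearest while this plain integer division truncates.
int64_t AvgTimePerFrameSketch(int64_t fpsNum, int64_t fpsDen)
{
    const int64_t kDShowTimeBase = 10000000; // 100 ns ticks per second (assumed)
    return kDShowTimeBase * fpsDen / fpsNum;
}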
Example #7
static gboolean
gst_dshowvideosrc_set_caps (GstBaseSrc * bsrc, GstCaps * caps)
{
    HRESULT hres;
    IPin *input_pin = NULL;
    IPin *output_pin = NULL;
    GstDshowVideoSrc *src = GST_DSHOWVIDEOSRC (bsrc);
    GstStructure *s = gst_caps_get_structure (caps, 0);
    OAFilterState ds_graph_state;
    GstCaps *current_caps;

    /* search the negotiated caps in our caps list to get its index and the corresponding mediatype */
    if (gst_caps_is_subset (caps, src->caps)) {
        guint i = 0;
        gint res = -1;

        hres = src->media_control->GetState(0, &ds_graph_state);
        if(SUCCEEDED(hres) && ds_graph_state == State_Running) {
            GST_INFO("Setting caps while DirectShow graph is already running");
            current_caps = gst_pad_get_current_caps(GST_BASE_SRC_PAD(src));

            if(gst_caps_is_equal(current_caps, caps)) {
                /* no need to set caps, just return */
                GST_INFO("Not resetting caps");
                gst_caps_unref(current_caps);
                return TRUE;
            }
            else {
                /* stop graph and disconnect filters so new caps can be set */
                GST_INFO("Different caps, stopping DirectShow graph");
                hres = src->media_control->Stop();
                hres = src->media_control->GetState(2000, &ds_graph_state);
                if(hres != S_OK) {
                    GST_ERROR("Could not stop DirectShow graph. Cannot renegoiate pins.");
                    goto error;
                }
                gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT,
                                               &input_pin);
                if (!input_pin) {
                    GST_ERROR ("Can't get input pin from our dshow fakesink");
                    goto error;
                }
                input_pin->ConnectedTo(&output_pin);
                hres = input_pin->Disconnect();
                hres = output_pin->Disconnect();
                input_pin->Release();
                output_pin->Release();
            }
            gst_caps_unref(current_caps);
        }

        for (; i < gst_caps_get_size (src->caps) && res == -1; i++) {
            GstCaps *capstmp = gst_caps_copy_nth (src->caps, i);

            if (gst_caps_is_subset (caps, capstmp)) {
                res = i;
            }
            gst_caps_unref (capstmp);
        }

        if (res != -1 && src->pins_mediatypes) {
            /* get the corresponding media type and build the dshow graph */
            GList *type_pin_mediatype = g_list_nth (src->pins_mediatypes, res);

            if (type_pin_mediatype) {
                GstCapturePinMediaType *pin_mediatype =
                    (GstCapturePinMediaType *) type_pin_mediatype->data;
                gchar *src_caps_string = NULL;
                const gchar *format_string = NULL;

                /* retrieve the desired video size */
                VIDEOINFOHEADER *video_info = NULL;
                gint width = 0;
                gint height = 0;
                gint numerator = 0;
                gint denominator = 0;
                gst_structure_get_int (s, "width", &width);
                gst_structure_get_int (s, "height", &height);
                gst_structure_get_fraction (s, "framerate", &numerator, &denominator);

                /* check that the desired video size is valid with respect to the granularity */
                /* This check will be removed when GST_TYPE_INT_RANGE_STEP exists */
                /* See remarks in gst_dshow_new_video_caps function */
                if (pin_mediatype->granularityWidth != 0
                        && width % pin_mediatype->granularityWidth != 0)
                    g_warning ("your desired video size is not valid : %d mod %d !=0\n",
                               width, pin_mediatype->granularityWidth);
                if (pin_mediatype->granularityHeight != 0
                        && height % pin_mediatype->granularityHeight != 0)
                    g_warning ("your desired video size is not valid : %d mod %d !=0\n",
                               height, pin_mediatype->granularityHeight);

                /* update mediatype */
                video_info = (VIDEOINFOHEADER *) pin_mediatype->mediatype->pbFormat;
                video_info->bmiHeader.biWidth = width;
                video_info->bmiHeader.biHeight = height;
                video_info->AvgTimePerFrame =
                    (LONGLONG) (10000000 * denominator / (double) numerator);
                video_info->bmiHeader.biSizeImage = DIBSIZE (video_info->bmiHeader);
                pin_mediatype->mediatype->lSampleSize = DIBSIZE (video_info->bmiHeader);

                src->dshow_fakesink->gst_set_media_type (pin_mediatype->mediatype);
                src->dshow_fakesink->gst_set_buffer_callback (
                    (push_buffer_func) gst_dshowvideosrc_push_buffer, src);

                gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT,
                                               &input_pin);
                if (!input_pin) {
                    GST_ERROR ("Can't get input pin from our dshow fakesink");
                    goto error;
                }

                hres = src->filter_graph->ConnectDirect (pin_mediatype->capture_pin,
                        input_pin, pin_mediatype->mediatype);
                input_pin->Release ();

                if (hres != S_OK) {
                    GST_ERROR
                    ("Can't connect capture filter with fakesink filter (error=0x%x)",
                     hres);
                    goto error;
                }

                /* save width and height negotiated */
                gst_structure_get_int (s, "width", &src->width);
                gst_structure_get_int (s, "height", &src->height);

                src->is_rgb = FALSE;
                format_string = gst_structure_get_string (s, "format");
                if(format_string) {
                    if(!strcmp(format_string, "BGR")) {
                        src->is_rgb = TRUE;
                    }
                    else {
                        src->is_rgb = FALSE;
                    }
                }

                hres = src->media_control->Run();

                hres = src->media_control->GetState(5000, &ds_graph_state);
                if(hres != S_OK || ds_graph_state != State_Running) {
                    GST_ERROR("Could not run graph");
                    goto error;
                }
            }
        }
    }

    return TRUE;

error:
    return FALSE;
}
Example #8
void DirectShowGrabber::setCaptureOutputFormat() {
   IAMStreamConfig          *pConfig;
   int                      iCount;
   int                      iSize;
   VIDEOINFOHEADER          *pVih;
   VIDEO_STREAM_CONFIG_CAPS scc;
   AM_MEDIA_TYPE            *pmtConfig;
   int                      formatSet;
   HRESULT                  hr;

   // Reference http://msdn.microsoft.com/library/default.asp?url=/library/en-us/directshow/htm/configurethevideooutputformat.asp

   debug_msg("DirectShowGrabber::setCaptureOutputFormat(): enter...\n");

   formatSet = 0;
   pConfig   = NULL;
   hr        = pBuild_->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video,
                                     pCaptureFilter_, IID_IAMStreamConfig, (void**)&pConfig);
   if (FAILED(hr)) {
      Grabber::status_ = -1;
      return;
   }

   debug_msg("DirectShowGrabber::setCaptureOutputFormat(): IAMStreamConfig interface acquired\n");

   iCount = iSize = 0;
   hr     = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
   // Check the size to make sure we pass in the correct structure.
   // The alternative output of iSize is AUDIO_STREAM_CONFIG_CAPS, btw.
   if ( iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS) ) {

      for (int iFormat = 0; iFormat < iCount; iFormat++) {
         hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE *)&scc);
         //showErrorMessage(hr);

         if( SUCCEEDED(hr) ) {
            if ((pmtConfig->majortype  == MEDIATYPE_Video)            &&
                  (pmtConfig->subtype    == MEDIASUBTYPE_RGB24)       &&
                  (pmtConfig->formattype == FORMAT_VideoInfo)         &&
                  (pmtConfig->cbFormat   >= sizeof (VIDEOINFOHEADER)) &&
                  (pmtConfig->pbFormat   != NULL)) {

               pVih                        = (VIDEOINFOHEADER *)pmtConfig->pbFormat;
               pVih->bmiHeader.biWidth     = 320;
               pVih->bmiHeader.biHeight    = 240;
               pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);

               debug_msg("Windows GDI BITMAPINFOHEADER follows:\n");
               debug_msg("biWidth=        %d\n", pVih->bmiHeader.biWidth);
               debug_msg("biHeight=       %d\n", pVih->bmiHeader.biHeight);
               debug_msg("biSize=         %d\n", pVih->bmiHeader.biSize);
               debug_msg("biPlanes=       %d\n", pVih->bmiHeader.biPlanes);
               debug_msg("biBitCount=     %d\n", pVih->bmiHeader.biBitCount);
               debug_msg("biCompression=  %d\n", pVih->bmiHeader.biCompression);
               debug_msg("biSizeImage=    %d\n", pVih->bmiHeader.biSizeImage);
               debug_msg("biXPelsPerMeter=%d\n", pVih->bmiHeader.biXPelsPerMeter);
               debug_msg("biYPelsPerMeter=%d\n", pVih->bmiHeader.biYPelsPerMeter);
               debug_msg("biClrUsed=      %d\n", pVih->bmiHeader.biClrUsed);
               debug_msg("biClrImportant= %d\n", pVih->bmiHeader.biClrImportant);

               hr = pConfig->SetFormat(pmtConfig);
               //showErrorMessage(hr);

               // XXX:  leak.  need to deal with this - msp
               //DeleteMediaType(pmtConfig);

               formatSet = 1;
               break;

            }
         }
      }
   }
   pConfig->Release();

   if( formatSet )
      debug_msg("DirectShowGrabber::setCaptureOutputFormat:  format set\n");
   else
      debug_msg("DirectShowGrabber::setCaptureOutputFormat:  format not set\n");
}
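
The leak flagged by the XXX comment above (and present in several other snippets here) is that GetStreamCaps allocates an AM_MEDIA_TYPE the caller must fully release. Below is a cleanup helper modeled on DeleteMediaType from the DirectShow base classes, sketched for code that doesn't link those classes in:

#include <dshow.h>

// Releases the format block, any attached pUnk, and the AM_MEDIA_TYPE
// container itself, mirroring what the base-class DeleteMediaType does.
static void ReleaseMediaType(AM_MEDIA_TYPE *pmt)
{
    if (pmt == NULL)
        return;
    if (pmt->cbFormat != 0) {
        CoTaskMemFree(pmt->pbFormat);
        pmt->pbFormat = NULL;
    }
    if (pmt->pUnk != NULL) {
        pmt->pUnk->Release();
        pmt->pUnk = NULL;
    }
    CoTaskMemFree(pmt);
}

With a helper like this in place, the loop above could call ReleaseMediaType(pmtConfig) after SetFormat instead of leaking the type.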
Example #9
static gboolean
gst_dshowvideosrc_set_caps (GstBaseSrc * bsrc, GstCaps * caps)
{
  HRESULT hres;
  IPin *input_pin = NULL;
  GstDshowVideoSrc *src = GST_DSHOWVIDEOSRC (bsrc);
  GstStructure *s = gst_caps_get_structure (caps, 0);
  GstCaps *current_caps = gst_pad_get_current_caps (GST_BASE_SRC_PAD (bsrc));

  if (current_caps) {
    if (gst_caps_is_equal (caps, current_caps)) {
      gst_caps_unref (current_caps);
      return TRUE;
    }
    gst_caps_unref (current_caps);
  }

  /* Same remark as in gstdshowaudiosrc. */
  gboolean was_running = src->is_running;
  if (was_running) {
    HRESULT hres = src->media_filter->Stop ();
    if (hres != S_OK) {
      GST_ERROR ("Can't STOP the directshow capture graph (error=0x%x)", hres);
      return FALSE;
    }
    src->is_running = FALSE;
  }

  /* search the negotiated caps in our caps list to get its index and the corresponding mediatype */
  if (gst_caps_is_subset (caps, src->caps)) {
    guint i = 0;
    gint res = -1;

    for (; i < gst_caps_get_size (src->caps) && res == -1; i++) {
      GstCaps *capstmp = gst_caps_copy_nth (src->caps, i);

      if (gst_caps_is_subset (caps, capstmp)) {
        res = i;
      }
      gst_caps_unref (capstmp);
    }

    if (res != -1 && src->pins_mediatypes) {
      /* get the corresponding media type and build the dshow graph */
      GList *type_pin_mediatype = g_list_nth (src->pins_mediatypes, res);

      if (type_pin_mediatype) {
        GstCapturePinMediaType *pin_mediatype =
            (GstCapturePinMediaType *) type_pin_mediatype->data;
        gchar *caps_string = NULL;
        gchar *src_caps_string = NULL;

        /* retrieve the desired video size */
        VIDEOINFOHEADER *video_info = NULL;
        gint width = 0;
        gint height = 0;
        gint numerator = 0;
        gint denominator = 0;
        gst_structure_get_int (s, "width", &width);
        gst_structure_get_int (s, "height", &height);
        gst_structure_get_fraction (s, "framerate", &numerator, &denominator);

        /* check that the desired video size is valid with respect to the granularity */
        /* This check will be removed when GST_TYPE_INT_RANGE_STEP exists */
        /* See remarks in gst_dshow_new_video_caps function */
        if (pin_mediatype->granularityWidth != 0
            && width % pin_mediatype->granularityWidth != 0)
          g_warning ("your desired video size is not valid : %d mod %d !=0\n",
              width, pin_mediatype->granularityWidth);
        if (pin_mediatype->granularityHeight != 0
            && height % pin_mediatype->granularityHeight != 0)
          g_warning ("your desired video size is not valid : %d mod %d !=0\n",
              height, pin_mediatype->granularityHeight);

        /* update mediatype */
        video_info = (VIDEOINFOHEADER *) pin_mediatype->mediatype->pbFormat;
        video_info->bmiHeader.biWidth = width;
        video_info->bmiHeader.biHeight = height;
        video_info->AvgTimePerFrame =
            (LONGLONG) (10000000 * denominator / (double) numerator);
        video_info->bmiHeader.biSizeImage = DIBSIZE (video_info->bmiHeader);
        pin_mediatype->mediatype->lSampleSize = DIBSIZE (video_info->bmiHeader);

        src->dshow_fakesink->gst_set_media_type (pin_mediatype->mediatype);
        src->dshow_fakesink->gst_set_buffer_callback (
            (push_buffer_func) gst_dshowvideosrc_push_buffer, src);

        gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT,
            &input_pin);
        if (!input_pin) {
          GST_ERROR ("Can't get input pin from our dshow fakesink");
          goto error;
        }

        if (gst_dshow_is_pin_connected (pin_mediatype->capture_pin)) {
          GST_DEBUG_OBJECT (src,
              "capture_pin already connected, disconnecting");
          src->filter_graph->Disconnect (pin_mediatype->capture_pin);
        }

        if (gst_dshow_is_pin_connected (input_pin)) {
          GST_DEBUG_OBJECT (src, "input_pin already connected, disconnecting");
          src->filter_graph->Disconnect (input_pin);
        }

        hres = src->filter_graph->ConnectDirect (pin_mediatype->capture_pin,
            input_pin, pin_mediatype->mediatype);
        input_pin->Release ();

        if (hres != S_OK) {
          GST_ERROR
              ("Can't connect capture filter with fakesink filter (error=0x%x)",
              hres);
          goto error;
        }

        /* save width and height negotiated */
        gst_structure_get_int (s, "width", &src->width);
        gst_structure_get_int (s, "height", &src->height);

        GstVideoInfo info;
        gst_video_info_from_caps (&info, caps);
        switch (GST_VIDEO_INFO_FORMAT (&info)) {
          case GST_VIDEO_FORMAT_RGB:
          case GST_VIDEO_FORMAT_BGR:
            src->is_rgb = TRUE;
            break;
          default:
            src->is_rgb = FALSE;
            break;
        }
      }
    }
  }

  if (was_running) {
    HRESULT hres = src->media_filter->Run (0);
    if (hres != S_OK) {
      GST_ERROR ("Can't RUN the directshow capture graph (error=0x%x)", hres);
      return FALSE;
    }
    src->is_running = TRUE;
  }

  return TRUE;

error:
  return FALSE;
}
Example #10
bool directx_camera_server::open_and_find_parameters(const int which, unsigned width, unsigned height)
{
  HRESULT hr;

  //-------------------------------------------------------------------
  // Create COM and DirectX objects needed to access a video stream.

  // Initialize COM.  This must have a matching uninitialize somewhere before
  // the object is destroyed.
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before CoInitialize\n");
#endif
  CoInitialize(NULL);

  // Create the filter graph manager
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before CoCreateInstance FilterGraph\n");
#endif
  CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER, 
		      IID_IGraphBuilder, (void **)&_pGraph);
  if (_pGraph == NULL) {
    fprintf(stderr, "directx_camera_server::open_and_find_parameters(): Can't create graph manager\n");
    return false;
  }
  _pGraph->QueryInterface(IID_IMediaControl, (void **)&_pMediaControl);
  _pGraph->QueryInterface(IID_IMediaEvent, (void **)&_pEvent);

  // Create the Capture Graph Builder.
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before CoCreateInstance CaptureGraphBuilder2\n");
#endif
  CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC, 
      IID_ICaptureGraphBuilder2, (void **)&_pBuilder);
  if (_pBuilder == NULL) {
    fprintf(stderr, "directx_camera_server::open_and_find_parameters(): Can't create graph builder\n");
    return false;
  }

  // Associate the graph with the builder.
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before SetFilterGraph\n");
#endif
  _pBuilder->SetFiltergraph(_pGraph);

  //-------------------------------------------------------------------
  // Go find a video device to use: in this case, we are using the Nth
  // one we find, where the number N is the "which" parameter.

  // Create the system device enumerator.
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before CoCreateInstance SystemDeviceEnum\n");
#endif
  ICreateDevEnum *pDevEnum = NULL;
  CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC, 
      IID_ICreateDevEnum, (void **)&pDevEnum);
  if (pDevEnum == NULL) {
    fprintf(stderr, "directx_camera_server::open_and_find_parameters(): Can't create device enumerator\n");
    return false;
  }

  // Create an enumerator for video capture devices.
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before CreateClassEnumerator\n");
#endif
  IEnumMoniker *pClassEnum = NULL;
  pDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pClassEnum, 0);
  if (pClassEnum == NULL) {
    fprintf(stderr, "directx_camera_server::open_and_find_parameters(): Can't create video enumerator (no cameras?)\n");
    pDevEnum->Release();
    return false;
  }

#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before Loop over enumerators\n");
#endif
  ULONG cFetched;
  IMoniker *pMoniker = NULL;
  IBaseFilter *pSrc = NULL;
  // Skip (which - 1) cameras
  int i;
  for (i = 0; i < which-1 ; i++) {
    if (pClassEnum->Next(1, &pMoniker, &cFetched) != S_OK) {
      fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't open camera (not enough cameras)\n");
      return false;
    }
    pMoniker->Release();  // release each skipped moniker before advancing
    pMoniker = NULL;
  }
  // Take the next camera and bind it
  if (pClassEnum->Next(1, &pMoniker, &cFetched) == S_OK) {
    // Bind the first moniker to a filter object.
    pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&pSrc);
    pMoniker->Release();
  } else {
    // Next failed, so pMoniker was never set; there is nothing to Release.
    fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't open camera (not enough cameras)\n");
    return false;
  }

  pClassEnum->Release();
  pDevEnum->Release();

  //-------------------------------------------------------------------
  // Construct the sample grabber callback handler that will be used
  // to receive image data from the sample grabber.
  if ( (_pCallback = new directx_samplegrabber_callback()) == NULL) {
    fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't create sample grabber callback handler (out of memory?)\n");
    return false;
  }

  //-------------------------------------------------------------------
  // Construct the sample grabber that will be used to snatch images from
  // the video stream as they go by.  Set its media type and callback.

  // Create the Sample Grabber.
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before CoCreateInstance SampleGrabber\n");
#endif
  CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
      IID_IBaseFilter, reinterpret_cast<void**>(&_pSampleGrabberFilter));
  if (_pSampleGrabberFilter == NULL) {
    fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't get SampleGrabber filter (not DirectX 8.1+?)\n");
    return false;
  }
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before QueryInterface\n");
#endif
  _pSampleGrabberFilter->QueryInterface(IID_ISampleGrabber,
      reinterpret_cast<void**>(&_pGrabber));

  // Set the media type to video
#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before SetMediaType\n");
#endif
  AM_MEDIA_TYPE mt;
  // Ask for video media producers that produce 8-bit RGB
  ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
  mt.majortype = MEDIATYPE_Video;	  // Ask for video media producers
  mt.subtype = MEDIASUBTYPE_RGB24;	  // Ask for 8 bit RGB
  _pGrabber->SetMediaType(&mt);

  //-------------------------------------------------------------------
  // Ask for the video resolution that has been passed in.
  // This code is based on
  // intuiting that we need to use the SetFormat call on the IAMStreamConfig
  // interface; this interface is described in the help pages.
  // If the width and height are specified as 0, then they are not set
  // in the header, letting them use whatever is the default.
  if ( (width != 0) && (height != 0) ) {
    _pBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, pSrc,
			      IID_IAMStreamConfig, (void **)&_pStreamConfig);
    if (_pStreamConfig == NULL) {
      fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't get StreamConfig interface\n");
      return false;
    }

    ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
    mt.majortype = MEDIATYPE_Video;	  // Ask for video media producers
    mt.subtype = MEDIASUBTYPE_RGB24;	  // Ask for 8 bit RGB
    mt.pbFormat = (BYTE*)CoTaskMemAlloc(sizeof(VIDEOINFOHEADER));
    VIDEOINFOHEADER *pVideoHeader = (VIDEOINFOHEADER*)mt.pbFormat;
    ZeroMemory(pVideoHeader, sizeof(VIDEOINFOHEADER));
    pVideoHeader->bmiHeader.biBitCount = 24;
    pVideoHeader->bmiHeader.biWidth = width;
    pVideoHeader->bmiHeader.biHeight = height;
    pVideoHeader->bmiHeader.biPlanes = 1;
    pVideoHeader->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    pVideoHeader->bmiHeader.biSizeImage = DIBSIZE(pVideoHeader->bmiHeader);

    // Set the format type and size.
    mt.formattype = FORMAT_VideoInfo;
    mt.cbFormat = sizeof(VIDEOINFOHEADER);

    // Set the sample size.
    mt.bFixedSizeSamples = TRUE;
    mt.lSampleSize = DIBSIZE(pVideoHeader->bmiHeader);

    // Make the call to actually set the video type to what we want.
    if (_pStreamConfig->SetFormat(&mt) != S_OK) {
      fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't set resolution to %dx%d\n",
	pVideoHeader->bmiHeader.biWidth, pVideoHeader->bmiHeader.biHeight);
      return false;
    }

    // Clean up the pbFormat header memory we allocated above.
    CoTaskMemFree(mt.pbFormat);
  }

  //-------------------------------------------------------------------
  // Create a NULL renderer that will be used to discard the video frames
  // on the output pin of the sample grabber

#ifdef	DEBUG
  printf("directx_camera_server::open_and_find_parameters(): Before CoCreateInstance NullRenderer\n");
#endif
  IBaseFilter *pNull = NULL;
  CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER,
      IID_IBaseFilter, reinterpret_cast<void**>(&pNull));

  //-------------------------------------------------------------------
  // Build the filter graph.  First add the filters and then connect them.

  // pSrc is the capture filter for the video device we found above.
  _pGraph->AddFilter(pSrc, L"Video Capture");

  // Add the sample grabber filter
  _pGraph->AddFilter(_pSampleGrabberFilter, L"SampleGrabber");

  // Add the null renderer filter
  _pGraph->AddFilter(pNull, L"NullRenderer");

  // Connect the output of the video reader to the sample grabber input
  ConnectTwoFilters(_pGraph, pSrc, _pSampleGrabberFilter);

  // Connect the output of the sample grabber to the NULL renderer input
  ConnectTwoFilters(_pGraph, _pSampleGrabberFilter, pNull);

  //-------------------------------------------------------------------
  // XXX See if this is a video tuner card by querying for that interface.
  // Set it to read the video channel if it is one.
  IAMTVTuner  *pTuner = NULL;
  hr = _pBuilder->FindInterface(NULL, NULL, pSrc, IID_IAMTVTuner, (void**)&pTuner);
  if (pTuner != NULL) {
#ifdef	DEBUG
    printf("directx_camera_server::open_and_find_parameters(): Found a TV Tuner!\n");
#endif

    //XXX Put code here.
    // Set the first input pin to use the cable as input
    hr = pTuner->put_InputType(0, TunerInputCable);
    if (FAILED(hr)) {
      fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't set input to cable\n");
    }

    // Set the channel on the video to be baseband (is this channel zero?)
    hr = pTuner->put_Channel(0, -1, -1);
    if (FAILED(hr)) {
      fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't set channel\n");
    }

    pTuner->Release();
  }
  

  //-------------------------------------------------------------------
  // Find _num_rows and _num_columns in the video stream.
  _pGrabber->GetConnectedMediaType(&mt);
  VIDEOINFOHEADER *pVih;
  if (mt.formattype == FORMAT_VideoInfo) {
      pVih = reinterpret_cast<VIDEOINFOHEADER*>(mt.pbFormat);
  } else {
    fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Can't get video header type\n");
    return false;
  }

  // Number of rows and columns.  This is different if we are using a target
  // rectangle (rcTarget) than if we are not.
  if (IsRectEmpty(&pVih->rcTarget)) {
    _num_columns = pVih->bmiHeader.biWidth;
    _num_rows = pVih->bmiHeader.biHeight;
  } else {
    _num_columns = pVih->rcTarget.right;
    _num_rows = pVih->bmiHeader.biHeight;
    printf("XXX directx_camera_server::open_and_find_parameters(): Warning: may not work correctly with target rectangle\n");
  }
  _minX = 0;
  _maxX = _num_columns - 1;
  _minY = 0;
  _maxY = _num_rows - 1;
#ifdef DEBUG
  printf("Got %dx%d video\n", _num_columns, _num_rows);
#endif

  // Make sure that the image is not compressed and that we have 8 bits
  // per pixel.
  if (pVih->bmiHeader.biCompression != BI_RGB) {
    fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Compression not RGB\n");
    switch (pVih->bmiHeader.biCompression) {
      case BI_RLE8:
	fprintf(stderr,"  (It is BI_RLE8)\n");
	break;
      case BI_RLE4:
	fprintf(stderr,"  (It is BI_RLE4)\n");
	break;
      case BI_BITFIELDS:
	fprintf(stderr,"  (It is BI_BITFIELDS)\n");
	break;
      default:
	fprintf(stderr,"  (Unknown compression type)\n");
    }
    return false;
  }
  int BytesPerPixel = pVih->bmiHeader.biBitCount / 8;
  if (BytesPerPixel != 3) {
    fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Not 3 bytes per pixel (%d)\n",
      pVih->bmiHeader.biBitCount);
    return false;
  }

  // A negative height indicates that the images are stored non-inverted in Y
  // Not sure what to do with images that have negative height -- need to
  // read the book some more to find out.
  if (_num_rows < 0) {
    fprintf(stderr,"directx_camera_server::open_and_find_parameters(): Num Rows is negative (internal error)\n");
    return false;
  }

  // Find the stride to take when moving from one row of video to the
  // next.  This is rounded up to the nearest DWORD.
  _stride = (_num_columns * BytesPerPixel + 3) & ~3;

  // Set the callback, where '0' means 'use the SampleCB callback'
  _pGrabber->SetCallback(_pCallback, 0);

  //-------------------------------------------------------------------
  // Release resources that won't be used later and return
  pSrc->Release();
  pNull->Release();
  return true;
}
Example #11
bool MIPDirectShowCapture::setFormat(int w, int h, real_t rate)
{
	HRESULT hr;

	IAMStreamConfig *pConfig = 0;

	hr = m_pBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, 0, m_pCaptDevice, IID_IAMStreamConfig, (void**)&pConfig);
	if (HR_FAILED(hr))
	{
		setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTGETDEVICECONFIG);
		return false;
	}

	int count = 0;
	int s = 0;
	
	hr = pConfig->GetNumberOfCapabilities(&count, &s);
	if (HR_FAILED(hr))
	{
		pConfig->Release();
		setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTGETDEVICECAPS);
		return false;
	}

	if (s != sizeof(VIDEO_STREAM_CONFIG_CAPS))
	{
		pConfig->Release();
		setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_INVALIDCAPS);
		return false;
	}

	for (int i = 0; i < count; i++)
	{
        VIDEO_STREAM_CONFIG_CAPS caps;
        AM_MEDIA_TYPE *pMediaType;

        hr = pConfig->GetStreamCaps(i, &pMediaType, (BYTE*)&caps);
        if (HR_SUCCEEDED(hr))
        {
			if ((pMediaType->majortype == MEDIATYPE_Video) &&
				(pMediaType->subtype == m_selectedGuid) &&
				(pMediaType->formattype == FORMAT_VideoInfo) &&
				(pMediaType->cbFormat >= sizeof (VIDEOINFOHEADER)) &&
				(pMediaType->pbFormat != 0))
			{
				VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pMediaType->pbFormat;
				
				pVih->bmiHeader.biWidth = w;
				pVih->bmiHeader.biHeight = h;
				pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);
				pVih->AvgTimePerFrame = (REFERENCE_TIME)(10000000.0/rate);

				hr = pConfig->SetFormat(pMediaType);
				if (HR_SUCCEEDED(hr))
				{
					CoTaskMemFree(pMediaType->pbFormat);
					pConfig->Release();
					return true;
				}
			}

			if (pMediaType->pbFormat != 0)
				CoTaskMemFree(pMediaType->pbFormat);
		}
	}

	pConfig->Release();
	setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTSETCAPS);
	return false;
}
Example #12
// get list of supported output colorspaces
HRESULT TffdshowDecVideoDXVA::GetMediaType(int iPosition, CMediaType *mtOut)
{
    DPRINTF(_l("TffdshowDecVideoDXVA::GetMediaType"));
    CAutoLock lock(&inpin->m_csCodecs_and_imgFilters);

    if (m_pInput->IsConnected()==FALSE) {
        return E_UNEXPECTED;
    }

    if (!presetSettings) {
        initPreset();
    }

    bool isVIH2;

    if (m_pOutput->IsConnected()) {
        const CLSID &ref=GetCLSID(m_pOutput->GetConnected());
        if (ref==CLSID_VideoMixingRenderer || ref==CLSID_VideoMixingRenderer9) {
            isVIH2=true;
        }
    }

    // Note: the renderer check above is overridden here; even iPosition
    // values offer VIDEOINFOHEADER2, odd values VIDEOINFOHEADER.
    isVIH2 = (iPosition&1)==0;

    iPosition/=2;

    if (iPosition<0) {
        return E_INVALIDARG;
    }

    TvideoCodecDec *pDecoder=NULL;
    getMovieSource((const TvideoCodecDec**)&pDecoder);
    if (!pDecoder->useDXVA()) {
        return VFW_S_NO_MORE_ITEMS;
    }

    TcspInfos ocsps;
    size_t osize;


    // DXVA mode : special output format
    TvideoCodecLibavcodecDxva *pDecoderDxva = (TvideoCodecLibavcodecDxva*)pDecoder;
    pDecoderDxva->getDXVAOutputFormats(ocsps);
    osize=ocsps.size();

    if ((size_t)iPosition>=osize) {
        return VFW_S_NO_MORE_ITEMS;
    }

    TffPictBase pictOut;
    if (inReconnect) {
        pictOut=reconnectRect;
    } else {
        pictOut=inpin->pictIn;
    }

    // Support mediatype with unknown dimension. This is necessary to support MEDIASUBTYPE_H264.
    // http://msdn.microsoft.com/en-us/library/dd757808(VS.85).aspx
    // The downstream filter has to support reconnecting after this.
    if (pictOut.rectFull.dx == 0) {
        pictOut.rectFull.dx = 320;
    }
    if (pictOut.rectFull.dy == 0) {
        pictOut.rectFull.dy = 160;
    }

    oldRect=pictOut.rectFull;

    const TcspInfo *c=ocsps[iPosition];
    BITMAPINFOHEADER bih;
    memset(&bih,0,sizeof(bih));
    bih.biSize  =sizeof(BITMAPINFOHEADER);
    bih.biWidth =pDecoderDxva->pictWidthRounded();
    if(c->id == FF_CSP_420P) {  // YV12 and odd number lines.
        pictOut.rectFull.dy=odd2even(pictOut.rectFull.dy);
    }
    bih.biHeight=pDecoderDxva->pictHeightRounded();
    bih.biPlanes=WORD(c->numPlanes);
    bih.biCompression=c->fcc;
    bih.biBitCount=WORD(c->bpp);
    bih.biSizeImage=DIBSIZE(bih);// bih.biWidth*bih.biHeight*bih.biBitCount>>3;

    mtOut->majortype=MEDIATYPE_Video;
    mtOut->subtype=*c->subtype;
    mtOut->formattype=isVIH2?FORMAT_VideoInfo2:FORMAT_VideoInfo;
    mtOut->SetTemporalCompression(FALSE);
    mtOut->SetSampleSize(bih.biSizeImage);

    if (!isVIH2) {
        VIDEOINFOHEADER *vih=(VIDEOINFOHEADER*)mtOut->ReallocFormatBuffer(sizeof(VIDEOINFOHEADER));
        if (!vih) {
            return E_OUTOFMEMORY;
        }
        ZeroMemory(vih,sizeof(VIDEOINFOHEADER));

        vih->rcSource.left=0;
        vih->rcSource.right=pictOut.rectFull.dx;
        vih->rcSource.top=0;
        vih->rcSource.bottom=pictOut.rectFull.dy;
        vih->rcTarget=vih->rcSource;
        vih->AvgTimePerFrame=inpin->avgTimePerFrame;
        vih->bmiHeader=bih;
    } else {
        VIDEOINFOHEADER2 *vih2=(VIDEOINFOHEADER2*)mtOut->ReallocFormatBuffer(sizeof(VIDEOINFOHEADER2));
        if (!vih2) {
            return E_OUTOFMEMORY;
        }
        ZeroMemory(vih2,sizeof(VIDEOINFOHEADER2));
        if((presetSettings->resize && presetSettings->resize->is && presetSettings->resize->SARinternally && presetSettings->resize->mode==0)) {
            pictOut.rectFull.sar.num= 1;//pictOut.rectFull.dx; // VMR9 behaves better when this is set to 1(SAR). But in reconnectOutput, it is different(DAR) in my system.
            pictOut.rectFull.sar.den= 1;//pictOut.rectFull.dy;
        }
        setVIH2aspect(vih2,pictOut.rectFull,presetSettings->output->hwOverlayAspect);

        //DPRINTF(_l("AR getMediaType: %i:%i"),vih2->dwPictAspectRatioX,vih2->dwPictAspectRatioY);

        vih2->rcSource.left=0;
        vih2->rcSource.right=pictOut.rectFull.dx;
        vih2->rcSource.top=0;
        vih2->rcSource.bottom=pictOut.rectFull.dy;
        vih2->rcTarget=vih2->rcSource;
        vih2->AvgTimePerFrame=inpin->avgTimePerFrame;
        vih2->bmiHeader=bih;
        //vih2->dwControlFlags=AMCONTROL_USED | AMCONTROL_COLORINFO_PRESENT | (DXVA_NominalRange_Wide << DXVA_NominalRangeShift) | (DXVA_VideoTransferMatrix_BT601 << DXVA_VideoTransferMatrixShift);
        hwDeinterlace=1; // HW deinterlace for DXVA

        if (hwDeinterlace) {
            vih2->dwInterlaceFlags=AMINTERLACE_IsInterlaced|AMINTERLACE_DisplayModeBobOrWeave;
        }
    }
    return S_OK;
}
Example #13
HRESULT CConvert::AddVideoGroup(double dSourceFramerate, long nSourceWidth, long nSourceHeight)
{

    // make the root group/composition
	HRESULT hr = S_OK;
    hr = m_pTimeline->CreateEmptyNode(&m_pVideoGroupObj, TIMELINE_MAJOR_TYPE_GROUP);
    if(FAILED( hr )) 
    {
        return hr;
    }

	CComQIPtr<IAMTimelineGroup, &IID_IAMTimelineGroup> pVideoGroup(m_pVideoGroupObj);
 
	//// Set Media Type
	CMediaType VideoGroupType;
	VideoGroupType.SetType(&MEDIATYPE_Video);
	VideoGroupType.SetSubtype(&MEDIASUBTYPE_RGB24);
	VideoGroupType.SetFormatType(&FORMAT_VideoInfo);
	
	VIDEOINFOHEADER *pVideoHeader = (VIDEOINFOHEADER*)VideoGroupType.AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
	ZeroMemory(pVideoHeader, sizeof(VIDEOINFOHEADER));
	
	pVideoHeader->bmiHeader.biBitCount = 24;
	pVideoHeader->bmiHeader.biWidth = nSourceWidth;
	pVideoHeader->bmiHeader.biHeight = nSourceHeight;
	pVideoHeader->bmiHeader.biPlanes = 1;
	pVideoHeader->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
	pVideoHeader->bmiHeader.biSizeImage = DIBSIZE(pVideoHeader->bmiHeader);

	VideoGroupType.SetSampleSize(DIBSIZE(pVideoHeader->bmiHeader));
	
	hr = pVideoGroup->SetMediaType(&VideoGroupType);
    if(FAILED( hr )) 
    {
        return hr;
    }

	
//	double dRequiredInputFramerate = 0;
//	m_pMPEGWriterProps->SetSourceFramerate(dSourceFramerate, &dRequiredInputFramerate);

// 	hr = pVideoGroup->SetOutputFPS(15);


	/*
	if (GetOutputFPS() != 0)
	{
		// the user set a framerate
		hr = pVideoGroup->SetOutputFPS(GetOutputFPS());
		GetMPEGWriterProps()->OverrideSourceFPS(GetOutputFPS());
	}
	else if (IsFrameRateSupported((float)dSourceFramerate))
	{
		// the user did not set a framerate. If the source
		// framerate is supported, we use it.
		hr = pVideoGroup->SetOutputFPS(dSourceFramerate);
		GetMPEGWriterProps()->OverrideSourceFPS((float)dSourceFramerate);
	}
	else
	{
		// the user did not want a framerate, and the framerate
		// of the file is not supported. We use 25fps
		hr = pVideoGroup->SetOutputFPS(25);
		GetMPEGWriterProps()->OverrideSourceFPS(25);
	}
	*/

	hr = m_pTimeline->AddGroup(m_pVideoGroupObj);

	return hr;
}
Example #14
HRESULT CaptureVideo()
{
    HRESULT hr;
    IBaseFilter *pSrcFilter=NULL;

    // Get DirectShow interfaces
    hr = GetInterfaces();
    if (FAILED(hr))
    {
        Msg(TEXT("Failed to get video interfaces!  hr=0x%x"), hr);
        return hr;
    }

    // Attach the filter graph to the capture graph
    hr = g_pCapture->SetFiltergraph(g_pGraph);
    if (FAILED(hr))
    {
        Msg(TEXT("Failed to set capture filter graph!  hr=0x%x"), hr);
        return hr;
    }

    // Use the system device enumerator and class enumerator to find
    // a video capture/preview device, such as a desktop USB video camera.
    hr = FindCaptureDevice(&pSrcFilter);
    if (FAILED(hr))
    {
        // Don't display a message because FindCaptureDevice will handle it
        return hr;
    }
   
    // Add Capture filter to our graph.
    hr = g_pGraph->AddFilter(pSrcFilter, L"Video Capture");
    if (FAILED(hr))
    {
        Msg(TEXT("Couldn't add the capture filter to the graph!  hr=0x%x\r\n\r\n") 
            TEXT("If you have a working video capture device, please make sure\r\n")
            TEXT("that it is connected and is not being used by another application.\r\n\r\n")
            TEXT("The sample will now close."), hr);
        pSrcFilter->Release();
        return hr;
    }


    // Copied code
    //========================================
    IAMStreamConfig *pSC;

    hr = g_pCapture->FindInterface(&PIN_CATEGORY_PREVIEW,
                                      &MEDIATYPE_Interleaved,
                                      pSrcFilter, IID_IAMStreamConfig, (void **)&pSC);

    if(FAILED(hr))
        hr = g_pCapture->FindInterface(&PIN_CATEGORY_PREVIEW,
                                      &MEDIATYPE_Video, pSrcFilter,
                                      IID_IAMStreamConfig, (void **)&pSC);

    if (!pSC) {
        return hr;
    }

    int iCount = 0, iSize = 0;
    hr = pSC->GetNumberOfCapabilities(&iCount, &iSize);

    // Check the size to make sure we pass in the correct structure.
    if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
    {
        // Use the video capabilities structure.

        int i = 0;

        for (int iFormat = 0; iFormat < iCount; iFormat++)
        {
            VIDEO_STREAM_CONFIG_CAPS scc;
            AM_MEDIA_TYPE *pmtConfig;

            hr = pSC->GetFormat(&pmtConfig);

            VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *)pmtConfig->pbFormat;

            double fps = 30;

            pvi->AvgTimePerFrame = (LONGLONG)(10000000/fps);

            pvi->bmiHeader.biWidth = 1920;

            pvi->bmiHeader.biHeight = 1080;

            // Recompute the image size after changing the dimensions, not
            // before, so biSizeImage matches the new 1920x1080 frame.
            pvi->bmiHeader.biSizeImage = DIBSIZE(pvi->bmiHeader);

            hr = pSC->SetFormat(pmtConfig);

            

            //hr = pSC->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
            //if (SUCCEEDED(hr))
            //{
            //    /* Examine the format, and possibly use it. */
            //    if (pmtConfig->formattype == FORMAT_VideoInfo) {
            //        long width = HEADER(pmtConfig->pbFormat)->biWidth;
            //        long height = HEADER(pmtConfig->pbFormat)->biHeight;

            //        

            //        if (width == 1920 && height == 1080) {
            //            VIDEOINFOHEADER *info = (VIDEOINFOHEADER *)pmtConfig->pbFormat;

            //            if (i == 0) {
            //                pSC->SetFormat(pmtConfig);
            //                DeleteMediaType(pmtConfig);
            //                break;
            //            }
            //            i++;
            //        }
            //    }

            //    // Delete the media type when you are done.
            //    DeleteMediaType(pmtConfig);
            //}
        }
    }

    if(SUCCEEDED(hr)) {
        pSC->Release();
    }

    

    //========================================

    // Render the preview pin on the video capture filter
    // Use this instead of g_pGraph->RenderFile
    hr = g_pCapture->RenderStream (&PIN_CATEGORY_PREVIEW, &MEDIATYPE_Video,
                                   pSrcFilter, NULL, NULL);
    if (FAILED(hr))
    {
        Msg(TEXT("Couldn't render the video capture stream.  hr=0x%x\r\n")
            TEXT("The capture device may already be in use by another application.\r\n\r\n")
            TEXT("The sample will now close."), hr);
        pSrcFilter->Release();
        return hr;
    }

    // Now that the filter has been added to the graph and we have
    // rendered its stream, we can release this reference to the filter.
    pSrcFilter->Release();

    // Set video window style and position
    hr = SetupVideoWindow();
    if (FAILED(hr))
    {
        Msg(TEXT("Couldn't initialize video window!  hr=0x%x"), hr);
        return hr;
    }

#ifdef REGISTER_FILTERGRAPH
    // Add our graph to the running object table, which will allow
    // the GraphEdit application to "spy" on our graph
    hr = AddGraphToRot(g_pGraph, &g_dwGraphRegister);
    if (FAILED(hr))
    {
        Msg(TEXT("Failed to register filter graph with ROT!  hr=0x%x"), hr);
        g_dwGraphRegister = 0;
    }
#endif

    // Start previewing video data
    hr = g_pMC->Run();
    if (FAILED(hr))
    {
        Msg(TEXT("Couldn't run the graph!  hr=0x%x"), hr);
        return hr;
    }

    // Remember current state
    g_psCurrent = Running;
        
    return S_OK;
}
Example #15
static HRESULT FFMVWrapper_BeginTransform( CTransformBaseImpl* pImpl, const AM_MEDIA_TYPE* pmtIn, const AM_MEDIA_TYPE* pmtOut, BOOL bReuseSample )
{
	CFFMVWrapperImpl*	This = pImpl->m_pUserData;
	BITMAPINFO*	pbiIn = NULL;
	BITMAPINFO*	pbiOut = NULL;
	LONG	width, height;
	DWORD	dwFourCC;
	AVCodec*	codec;
	HRESULT hr;
	int	i;

	TRACE("(%p,%p,%p,%d)\n",This,pmtIn,pmtOut,bReuseSample);

	if ( This == NULL || This->ctx.codec )
		return E_UNEXPECTED;

	hr = FFMVWrapper_CheckMediaType( pImpl, pmtIn, pmtOut );
	if ( FAILED(hr) )
		return hr;

	FFMVWrapper_ReleaseDIBBuffers(This);

	if ( IsEqualGUID( &pmtIn->formattype, &FORMAT_VideoInfo ) )
	{
		pbiIn = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtIn->pbFormat)->bmiHeader);
		dwFourCC = pbiIn->bmiHeader.biCompression;
	}
	else if ( IsEqualGUID( &pmtIn->formattype, &FORMAT_MPEGVideo ) )
	{
		pbiIn = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtIn->pbFormat)->bmiHeader);
		dwFourCC = mmioFOURCC('P','I','M','1');
	}
	else return E_FAIL;

	width = pbiIn->bmiHeader.biWidth;
	height = (pbiIn->bmiHeader.biHeight < 0) ?
		 -pbiIn->bmiHeader.biHeight :
		  pbiIn->bmiHeader.biHeight;

	pbiOut = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtOut->pbFormat)->bmiHeader);

	This->m_pbiIn = FFMVWrapper_DuplicateBitmapInfo(pbiIn);
	This->m_pbiOut = FFMVWrapper_DuplicateBitmapInfo(pbiOut);
	if ( This->m_pbiIn == NULL || This->m_pbiOut == NULL )
		return E_OUTOFMEMORY;
	if ( This->m_pbiOut->bmiHeader.biCompression == 0 || This->m_pbiOut->bmiHeader.biCompression == 3 )
		This->m_pbiOut->bmiHeader.biSizeImage = DIBSIZE(This->m_pbiOut->bmiHeader);

	for (i=0; ff_codecs[i].dwFourCC && ff_codecs[i].dwFourCC != dwFourCC; i++);
	if (!ff_codecs[i].dwFourCC)
	{
		TRACE("couldn't find codec format\n");
		return E_FAIL;
	}

	codec = avcodec_find_decoder(ff_codecs[i].codec);
	if (!codec)
	{
		TRACE("couldn't open codec\n");
		return E_FAIL;
	}

	if ( !bReuseSample )
	{
		This->m_pOutBuf = QUARTZ_AllocMem(This->m_pbiOut->bmiHeader.biSizeImage);
		if ( This->m_pOutBuf == NULL )
			return E_OUTOFMEMORY;
		ZeroMemory( This->m_pOutBuf, This->m_pbiOut->bmiHeader.biSizeImage );
	}

	This->rtCur = 0;
	This->rtInternal = 0;

	EnterCriticalSection( &This->m_cs );

	avcodec_get_context_defaults( &This->ctx );
	This->ctx.width = width;
	This->ctx.height = height;
	if (codec->id == CODEC_ID_MPEG1VIDEO || codec->id == CODEC_ID_H264)
		This->ctx.flags |= CODEC_FLAG_TRUNCATED;
	TRACE("opening codec for %dx%d video\n", This->ctx.width, This->ctx.height);

	if (avcodec_open( &This->ctx, codec) < 0) {
		TRACE("couldn't open codec\n");
		LeaveCriticalSection( &This->m_cs );	/* don't leave the lock held on failure */
		return E_FAIL;
	}

	LeaveCriticalSection( &This->m_cs );

	return NOERROR;
}
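
Early returns while a critical section is held, like the E_FAIL path above, are easy to get wrong: every EnterCriticalSection needs a matching LeaveCriticalSection on each exit path. In C++ code a small RAII guard (a hypothetical helper, not from these sources) makes that automatic:

#include <windows.h>

// Minimal scope guard for a CRITICAL_SECTION: enters on construction,
// leaves on destruction, so every return path releases the lock.
class CsGuard {
public:
    explicit CsGuard(CRITICAL_SECTION *pcs) : m_pcs(pcs) { EnterCriticalSection(m_pcs); }
    ~CsGuard() { LeaveCriticalSection(m_pcs); }
    CsGuard(const CsGuard &) = delete;
    CsGuard &operator=(const CsGuard &) = delete;
private:
    CRITICAL_SECTION *m_pcs;
};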