示例#1
0
// IAMStreamConfig::GetStreamCaps: describe capability #iIndex as a media type
// plus a VIDEO_STREAM_CONFIG_CAPS giving the supported ranges.
// Capabilities are multiples of 80x60 up to 640x480 (iIndex 1..8).
HRESULT STDMETHODCALLTYPE CVCamStream::GetStreamCaps(int iIndex, AM_MEDIA_TYPE **pmt, BYTE *pSCC)
{
    // FIX: validate arguments per the IAMStreamConfig contract instead of
    // dereferencing blindly / writing a bogus capability for bad indices.
    if (pmt == NULL || pSCC == NULL)
        return E_POINTER;
    if (iIndex < 0)
        return E_INVALIDARG;
    if (iIndex > 8)
        return S_FALSE;                 // no more capabilities

    *pmt = CreateMediaType(&m_mt);
    if (*pmt == NULL)
        return E_OUTOFMEMORY;           // consistent with the other pins
    DECLARE_PTR(VIDEOINFOHEADER, pvi, (*pmt)->pbFormat);

    // index 0 doubles as the "default" capability: report the largest mode
//    if (iIndex == 0) iIndex = 4;
    if (iIndex == 0) iIndex = 8;

    // 24-bit uncompressed RGB at (80*iIndex) x (60*iIndex)
    pvi->bmiHeader.biCompression = BI_RGB;
    pvi->bmiHeader.biBitCount    = 24;
    pvi->bmiHeader.biSize       = sizeof(BITMAPINFOHEADER);
    pvi->bmiHeader.biWidth      = 80 * iIndex;
    pvi->bmiHeader.biHeight     = 60 * iIndex;
    pvi->bmiHeader.biPlanes     = 1;
    pvi->bmiHeader.biSizeImage  = GetBitmapSize(&pvi->bmiHeader);
    pvi->bmiHeader.biClrImportant = 0;

    SetRectEmpty(&(pvi->rcSource)); // we want the whole image area rendered.
    SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle

    (*pmt)->majortype = MEDIATYPE_Video;
    (*pmt)->subtype = MEDIASUBTYPE_RGB24;
    (*pmt)->formattype = FORMAT_VideoInfo;
    (*pmt)->bTemporalCompression = FALSE;
    (*pmt)->bFixedSizeSamples= FALSE;
    (*pmt)->lSampleSize = pvi->bmiHeader.biSizeImage;
    (*pmt)->cbFormat = sizeof(VIDEOINFOHEADER);
    
    DECLARE_PTR(VIDEO_STREAM_CONFIG_CAPS, pvscc, pSCC);
    
    // Many of these fields are deprecated per MSDN, but some hosts still read
    // them, so fill in sensible values.
    pvscc->guid = FORMAT_VideoInfo;
    pvscc->VideoStandard = AnalogVideo_None;
    pvscc->InputSize.cx = 640;
    pvscc->InputSize.cy = 480;
    pvscc->MinCroppingSize.cx = 80;
    pvscc->MinCroppingSize.cy = 60;
    pvscc->MaxCroppingSize.cx = 640;
    pvscc->MaxCroppingSize.cy = 480;
    pvscc->CropGranularityX = 80;
    pvscc->CropGranularityY = 60;
    pvscc->CropAlignX = 0;
    pvscc->CropAlignY = 0;

    pvscc->MinOutputSize.cx = 80;
    pvscc->MinOutputSize.cy = 60;
    pvscc->MaxOutputSize.cx = 640;
    pvscc->MaxOutputSize.cy = 480;
    pvscc->OutputGranularityX = 0;
    pvscc->OutputGranularityY = 0;
    pvscc->StretchTapsX = 0;
    pvscc->StretchTapsY = 0;
    pvscc->ShrinkTapsX = 0;
    pvscc->ShrinkTapsY = 0;
    // frame intervals are in 100ns units
    pvscc->MinFrameInterval = 200000;   //50 fps
    pvscc->MaxFrameInterval = 50000000; // 0.2 fps
    pvscc->MinBitsPerSecond = (80 * 60 * 3 * 8) / 5;
    pvscc->MaxBitsPerSecond = 640 * 480 * 3 * 8 * 50;

    return S_OK;
}
示例#2
0
HRESULT CKCamStream::CheckMediaType(const CMediaType *pMediaType)
{
	// The CheckMediaType method determines if the pin accepts a specific media type. 
	// MSDN says to "Only accept the preferred media type (See SetFormat for more information)"
	//	but there are applications that call CheckMediaType with a different media type than they call SetFormat (e.g. Flash in Chrome)
	//	=> we just check some crucial parameters of the requested media type match with something we offer
	DbgLog((LOG_TRACE, 1, "CheckMediaType"));

	if (!m_device)
		return E_FAIL;

	// FIX: guard against a null media type / missing or undersized format
	// block before dereferencing it as a VIDEOINFOHEADER (some hosts probe
	// with partially filled media types)
	if (pMediaType == NULL || pMediaType->pbFormat == NULL ||
	    pMediaType->cbFormat < sizeof(VIDEOINFOHEADER))
		return E_INVALIDARG;

    DECLARE_PTR(VIDEOINFOHEADER, f_pvi, pMediaType->pbFormat);
	DbgLog((LOG_TRACE, 1, "... CheckMediaType (%dx%dx%d)", f_pvi->bmiHeader.biWidth, f_pvi->bmiHeader.biHeight, f_pvi->bmiHeader.biBitCount));

	CAutoLock f_lock(m_pFilter->pStateLock());	// XXX not needed anymore ?
	bool f_ok = false;

	// accept the type iff one of the device's resolutions matches it
	// (abs() on biHeight: top-down DIBs carry a negative height)
	for (int f_idx = 0; !f_ok && f_idx < m_device->video_resolution_count(); ++f_idx)
	{
		auto f_res = m_device->video_resolution(f_idx);

		f_ok = (f_res.m_width == f_pvi->bmiHeader.biWidth &&
			    f_res.m_height == abs(f_pvi->bmiHeader.biHeight) &&
			    f_res.m_bits_per_pixel == f_pvi->bmiHeader.biBitCount &&
				CompressionFromPixelFormat(f_res.m_pixel_format) == f_pvi->bmiHeader.biCompression);
	}

	DbgLog((LOG_TRACE, 1, "... CheckMediaType (%s)", (f_ok) ? "OK" : "NOK"));
	return (f_ok) ? S_OK : E_INVALIDARG;
}
示例#3
0
// Called when a media type has been agreed: push the negotiated resolution,
// frame rate and pixel format down to the capture device.
HRESULT CKCamStream::SetMediaType(const CMediaType *pmt)
{
	DbgLog((LOG_TRACE, 1, "CKCamStream::SetMediaType : %x", pmt));
    DECLARE_PTR(VIDEOINFOHEADER, pvi, pmt->Format());
	// FIX: a media type without a format block would crash below
	if (pvi == NULL)
		return E_INVALIDARG;

	DbgLog((LOG_TRACE, 1, "CKCamStream::SetMediaType : %d x %d x %d", pvi->bmiHeader.biWidth, pvi->bmiHeader.biHeight, pvi->bmiHeader.biBitCount));

	// make sure the device outputs in the correct format
	device::DeviceVideoResolution	f_devres;
	f_devres.m_width			= pvi->bmiHeader.biWidth;
	f_devres.m_height			= pvi->bmiHeader.biHeight;
	f_devres.m_bits_per_pixel	= pvi->bmiHeader.biBitCount;
	f_devres.m_framerate		= FrameRateFromInterval(pvi->AvgTimePerFrame);
	f_devres.m_pixel_format		= PixelFormatFromMediaSubType(*pmt->Subtype());
	m_device->video_set_resolution(f_devres);

	// see documentation of BITMAPINFOHEADER (http://msdn.microsoft.com/en-us/library/windows/desktop/dd318229%28v=vs.85%29.aspx) for more details
	// - For uncompressed RGB bitmaps, if biHeight is positive, the bitmap is a bottom-up DIB with the origin at the lower left corner. 
	//	 If biHeight is negative, the bitmap is a top-down DIB with the origin at the upper left corner.
	// - For YUV bitmaps, the bitmap is always top-down, regardless of the sign of biHeight. 
	// BUGFIX: the old test ("== BI_RGB || biCompression") was always true
	// (BI_RGB is 0), so YUV types were flipped too, contradicting the rule
	// above; only uncompressed RGB honours the sign of biHeight.
	if (pvi->bmiHeader.biCompression == BI_RGB)
		m_device->video_flip_output(pvi->bmiHeader.biHeight > 0);
	else
		m_device->video_flip_output(false);

	return CSourceStream::SetMediaType(pmt);
}
示例#4
0
File: Filters.cpp  Project: Huppys/Spout2
// LJ version for one media type
//
// GetMediaType: This method tells the downstream pin what types we support.
//
// Here is how CSourceStream deals with media types:
//
// If you support exactly one type, override GetMediaType(CMediaType*). It will then be
// called when 
//		(a) our filter proposes a media type, 
//		(b) the other filter proposes a type and we have to check that type.
//
// If you support > 1 type, override GetMediaType(int,CMediaType*) AND CheckMediaType.
//
// In this case we support only one type, which we obtain from the bitmap.
//
// Can be called repeatedly
//
// LJ version for one media type
//
// GetMediaType: tells the downstream pin which (single) media type we offer.
//
// How CSourceStream uses this:
//  - override GetMediaType(CMediaType*) when exactly one type is supported;
//    it is called when (a) our filter proposes a type and (b) the peer
//    proposes a type that we then have to check.
//  - with more than one type, override GetMediaType(int,CMediaType*) plus
//    CheckMediaType instead.
//
// Here we support a single type derived from the bitmap dimensions.
// May be called repeatedly.
HRESULT CVCamStream::GetMediaType(CMediaType *pmt)
{
	if (pmt == NULL) {
		return E_POINTER;
	}

	DECLARE_PTR(VIDEOINFOHEADER, pvi, pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER)));
	ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));

	// Use the memory-share sender's dimensions when known, else 320x240.
	unsigned int width  = 320;
	unsigned int height = 240;
	if (g_Width != 0 && g_Height != 0) {
		width  = g_Width;	// as per sending app
		height = g_Height;
	}

	pvi->bmiHeader.biSize         = sizeof(BITMAPINFOHEADER);
	pvi->bmiHeader.biWidth        = (LONG)width;
	pvi->bmiHeader.biHeight       = (LONG)height;
	pvi->bmiHeader.biPlanes       = 1;
	pvi->bmiHeader.biBitCount     = 24;
	pvi->bmiHeader.biCompression  = 0;	// uncompressed
	pvi->bmiHeader.biClrImportant = 0;
	pvi->bmiHeader.biSizeImage    = GetBitmapSize(&pvi->bmiHeader);

	// Desired average display time per frame, in 100-nanosecond units:
	// 60fps = 166667, 50fps = 200000, 30fps = 333333
	pvi->AvgTimePerFrame = 166667; // 60fps

	SetRectEmpty(&(pvi->rcSource)); // render the whole image area
	SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle

	pmt->SetType(&MEDIATYPE_Video);
	pmt->SetFormatType(&FORMAT_VideoInfo);
	pmt->SetTemporalCompression(false);

	// Derive the subtype GUID from the completed bitmap header.
	const GUID SubTypeGUID = GetBitmapSubtype(&pvi->bmiHeader);
	pmt->SetSubtype(&SubTypeGUID);
	pmt->SetVariableSize(); // LJ - to be checked

	pmt->SetSampleSize(pvi->bmiHeader.biSizeImage);

	return NOERROR;
}
示例#5
0
//////////////////////////////////////////////////////////////////////////
// This is called when the output format has been negotiated
//////////////////////////////////////////////////////////////////////////
HRESULT UVCamStream::SetMediaType(const CMediaType *pmt)
{
	CheckPointer(pmt,E_POINTER); 
	CAutoLock cAutoLock(m_pFilter->pStateLock());
	// Nothing format-specific is cached here; just let the base class record
	// the agreed type.  (An unused local VIDEOINFOHEADER pointer was removed.)
	return CSourceStream::SetMediaType(pmt);
}
示例#6
0
File: Filters.cpp  Project: Huppys/Spout2
//////////////////////////////////////////////////////////////////////////
// This is called when the output format has been negotiated
// Called when a media type is agreed between filters
//////////////////////////////////////////////////////////////////////////
HRESULT CVCamStream::SetMediaType(const CMediaType *pmt)
{
	// Pass the call up to my base class; nothing else depends on the format
	// here.  (An unused local VIDEOINFOHEADER pointer was removed.)
	return CSourceStream::SetMediaType(pmt);
}
// returns the "range" of fps, etc. for this index
HRESULT STDMETHODCALLTYPE CPushPinDesktop::GetStreamCaps(int iIndex, AM_MEDIA_TYPE **pmt, BYTE *pSCC)
{
    CAutoLock cAutoLock(m_pFilter->pStateLock());
	HRESULT hr = GetMediaType(iIndex, &m_mt); // ensure setup/re-use m_mt ...
	// some are indeed shared, apparently.
    if(FAILED(hr))
    {
        return hr;
    }

    *pmt = CreateMediaType(&m_mt); // a windows lib method, also does a copy for us
	if (*pmt == NULL) return E_OUTOFMEMORY;

	
    DECLARE_PTR(VIDEO_STREAM_CONFIG_CAPS, pvscc, pSCC);
	
    /*
	  most of these are listed as deprecated by msdn... yet some still used, apparently. odd.
	*/

    pvscc->VideoStandard = AnalogVideo_None;
    pvscc->InputSize.cx = getCaptureDesiredFinalWidth();
	pvscc->InputSize.cy = getCaptureDesiredFinalHeight();

	// most of these values are fakes..
	pvscc->MinCroppingSize.cx = getCaptureDesiredFinalWidth();
    pvscc->MinCroppingSize.cy = getCaptureDesiredFinalHeight();

    pvscc->MaxCroppingSize.cx = getCaptureDesiredFinalWidth();
    pvscc->MaxCroppingSize.cy = getCaptureDesiredFinalHeight();

    pvscc->CropGranularityX = 1;
    pvscc->CropGranularityY = 1;
    pvscc->CropAlignX = 1;
    pvscc->CropAlignY = 1;

    pvscc->MinOutputSize.cx = 1;
    pvscc->MinOutputSize.cy = 1;
    pvscc->MaxOutputSize.cx = getCaptureDesiredFinalWidth();
    pvscc->MaxOutputSize.cy = getCaptureDesiredFinalHeight();
    pvscc->OutputGranularityX = 1;
    pvscc->OutputGranularityY = 1;

    pvscc->StretchTapsX = 1; // We do 1 tap. I guess...
    pvscc->StretchTapsY = 1;
    pvscc->ShrinkTapsX = 1;
    pvscc->ShrinkTapsY = 1;

	pvscc->MinFrameInterval = m_rtFrameLength; // the larger default is actually the MinFrameInterval, not the max
	pvscc->MaxFrameInterval = 500000000; // 0.02 fps :) [though it could go lower, really...]

    pvscc->MinBitsPerSecond = (LONG) 1*1*8*GetFps(); // if in 8 bit mode 1x1. I guess.
    pvscc->MaxBitsPerSecond = (LONG) getCaptureDesiredFinalWidth()*getCaptureDesiredFinalHeight()*32*GetFps() + 44; // + 44 header size? + the palette?

	return hr;
}
示例#8
0
// IAMStreamConfig::SetFormat: adopt the caller's media type and, if the pin
// is already connected, ask the graph to reconnect so the change takes effect.
HRESULT STDMETHODCALLTYPE CVCamPin::SetFormat(AM_MEDIA_TYPE *pmt)
{
    // FIX: guard against a null media type (consistent with the other pins)
    if (pmt == NULL) return E_POINTER;
    m_mt = *pmt;

    IPin* pin = NULL;
    ConnectedTo(&pin);          // on success this AddRef's the returned pin
    if(pin)
    {
        IFilterGraph *pGraph = m_pParent->GetGraph();
        pGraph->Reconnect(this);
        pin->Release();         // FIX: balance ConnectedTo's AddRef (was leaked)
    }
    return S_OK;
}
示例#9
0
// See Directshow help topic for IAMStreamConfig for details on this method
HRESULT CKCamStream::GetMediaType(int iPosition, CMediaType *pmt)
{
	DbgLog((LOG_TRACE, 1, "GetMediaType (iPosition = %d)", iPosition));

	if (!m_device)
		return E_FAIL;

	// valid positions: 0 = preferred type, 1..count = the device resolutions
	if (iPosition < 0)
		return E_INVALIDARG;
	if (iPosition > m_device->video_resolution_count())
		return VFW_S_NO_MORE_ITEMS;
	if (iPosition == 0)
	{
		*pmt = m_mt;	// the default (preferred) resolution — done
		return S_OK;
	}

	// query the device for this resolution and build a VIDEOINFOHEADER for it
	auto f_res = m_device->video_resolution(iPosition - 1);

	DECLARE_PTR(VIDEOINFOHEADER, f_hdr, pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER)));
	ZeroMemory(f_hdr, sizeof(VIDEOINFOHEADER));

	f_hdr->bmiHeader.biSize			= sizeof(BITMAPINFOHEADER);
	f_hdr->bmiHeader.biWidth		= f_res.m_width;
	f_hdr->bmiHeader.biHeight		= f_res.m_height;
	f_hdr->bmiHeader.biPlanes		= 1;
	f_hdr->bmiHeader.biBitCount		= f_res.m_bits_per_pixel;
	f_hdr->bmiHeader.biCompression	= CompressionFromPixelFormat(f_res.m_pixel_format);
	f_hdr->bmiHeader.biSizeImage	= GetBitmapSize(&f_hdr->bmiHeader);
	f_hdr->bmiHeader.biClrImportant	= 0;
	f_hdr->AvgTimePerFrame			= FrameIntervalFromRate(f_res.m_framerate);

	SetRectEmpty(&(f_hdr->rcSource));	// render the whole image area
	SetRectEmpty(&(f_hdr->rcTarget));	// no particular destination rectangle

	pmt->SetType(&MEDIATYPE_Video);
	pmt->SetFormatType(&FORMAT_VideoInfo);
	pmt->SetTemporalCompression(FALSE);

	// derive the subtype GUID from the completed bitmap header
	const GUID f_subtype = GetBitmapSubtype(&f_hdr->bmiHeader);
	pmt->SetSubtype(&f_subtype);
	pmt->SetSampleSize(f_hdr->bmiHeader.biSizeImage);

	DbgLog((LOG_TRACE, 1, "GetMediaType (iPosition = %d) <= %d x %d x %d", iPosition, f_hdr->bmiHeader.biWidth, f_hdr->bmiHeader.biHeight, f_hdr->bmiHeader.biBitCount));

	return S_OK;
}
示例#10
0
// IAMStreamConfig::SetFormat: adopt the requested media type and, if we are
// already wired into a graph, reconnect so the new format takes effect.
HRESULT STDMETHODCALLTYPE UVCamStream::SetFormat(AM_MEDIA_TYPE *pmt)
{
	if(!pmt) return E_POINTER;
	CAutoLock cAutoLock(m_pFilter->pStateLock());
	m_mt = *pmt;

	IPin* pin = NULL;
	ConnectedTo(&pin);			// on success this AddRef's the returned pin
	if(pin)
	{
		IFilterGraph *pGraph = m_pParent->GetGraph();
		pGraph->Reconnect(this);
		pin->Release();			// FIX: balance ConnectedTo's AddRef (was leaked)
	}
	return S_OK;
}
示例#11
0
// IAMStreamConfig::SetFormat: store the new media type in m_mt (the pin's
// CMediaType) and, when already connected, have the graph reconnect us.
HRESULT STDMETHODCALLTYPE CVCamStream::SetFormat(AM_MEDIA_TYPE *pmt)
{
	// FIX: guard against a null media type (consistent with the other pins)
	if (!pmt) return E_POINTER;
	m_mt = *pmt;

	IPin* pin = NULL;
	ConnectedTo(&pin);		// on success this AddRef's the returned pin
	// if we are currently connected to a pin...
	if (pin)
	{
		IFilterGraph *pGraph = m_pParent->GetGraph(); // the parent filter's graph
		pGraph->Reconnect(this); // renegotiate with the new format
		pin->Release();		// FIX: balance ConnectedTo's AddRef (was leaked)
	}
	return S_OK;
}
示例#12
0
// See Directshow help topic for IAMStreamConfig for details on this method
HRESULT CVCamStream::GetMediaType(int iPosition, CMediaType *pmt)
{
	// positions 1..8 describe 80x60 .. 640x480; position 0 is the current type
	if (iPosition < 0) return E_INVALIDARG;
	if (iPosition > 8) return VFW_S_NO_MORE_ITEMS;
	if (iPosition == 0)
	{
		*pmt = m_mt;
		return S_OK;
	}

	// TODO: the header below is mostly duplicated from GetStreamCaps
	DECLARE_PTR(VIDEOINFOHEADER, f_pvi, pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER)));
	ZeroMemory(f_pvi, sizeof(VIDEOINFOHEADER));

	// NOTE(review): the bitmap header advertises BI_RGB/24bpp while the
	// subtype below is H264 — presumably downstream only reads the subtype
	// and dimensions; confirm against the consumers of this pin.
	f_pvi->bmiHeader.biSize			= sizeof(BITMAPINFOHEADER);
	f_pvi->bmiHeader.biWidth		= 80 * iPosition;
	f_pvi->bmiHeader.biHeight		= 60 * iPosition;
	f_pvi->bmiHeader.biPlanes		= 1;
	f_pvi->bmiHeader.biBitCount		= 24;
	f_pvi->bmiHeader.biCompression	= BI_RGB;
	f_pvi->bmiHeader.biSizeImage	= GetBitmapSize(&f_pvi->bmiHeader);
	f_pvi->bmiHeader.biClrImportant	= 0;

	f_pvi->AvgTimePerFrame = 1000000;	// 100ns units

	SetRectEmpty(&(f_pvi->rcSource));	// render the whole image area
	SetRectEmpty(&(f_pvi->rcTarget));	// no particular destination rectangle

	pmt->SetType(&MEDIATYPE_Video);
	pmt->SetFormatType(&FORMAT_VideoInfo);
	pmt->SetTemporalCompression(TRUE);

	// Work out the GUID for the subtype from the header info.
	/*const GUID SubTypeGUID = GetBitmapSubtype(&pvi->bmiHeader);
	pmt->SetSubtype(&SubTypeGUID);*/
	pmt->SetSubtype(&MEDIASUBTYPE_H264);
	pmt->SetSampleSize(f_pvi->bmiHeader.biSizeImage);

	return NOERROR;

} // GetMediaType
// Audio pin GetStreamCaps: exactly one capability is published, describing
// the single fixed WAVEFORMATEX this pin produces.
HRESULT STDMETHODCALLTYPE CVCamStream::GetStreamCaps(int iIndex, AM_MEDIA_TYPE **ppMediaType, BYTE *pSCC)
{	 
	if(iIndex < 0)
		return E_INVALIDARG;
	if(iIndex > 0)
		return S_FALSE;			// only index 0 exists
	if(pSCC == NULL)
		return E_POINTER;

	*ppMediaType = CreateMediaType(&m_mt);
	if (*ppMediaType == NULL) return E_OUTOFMEMORY;

	AM_MEDIA_TYPE * pm = *ppMediaType;
	DECLARE_PTR(WAVEFORMATEX, pAudioFormat, pm->pbFormat);
	setupPwfex(pAudioFormat, pm);

	// One fixed format => every min equals the corresponding max.
	AUDIO_STREAM_CONFIG_CAPS* pASCC = (AUDIO_STREAM_CONFIG_CAPS*) pSCC;
	ZeroMemory(pSCC, sizeof(AUDIO_STREAM_CONFIG_CAPS)); 

	pASCC->guid = MEDIATYPE_Audio;
	pASCC->MinimumChannels = pAudioFormat->nChannels;
	pASCC->MaximumChannels = pAudioFormat->nChannels;
	pASCC->ChannelsGranularity = 1; // doesn't matter
	pASCC->MinimumSampleFrequency = pAudioFormat->nSamplesPerSec;
	pASCC->MaximumSampleFrequency = pAudioFormat->nSamplesPerSec;
	pASCC->SampleFrequencyGranularity = 11025; // doesn't matter
	pASCC->MinimumBitsPerSample = pAudioFormat->wBitsPerSample;
	pASCC->MaximumBitsPerSample = pAudioFormat->wBitsPerSample;
	pASCC->BitsPerSampleGranularity = 16; // doesn't matter

	return S_OK;
}
//////////////////////////////////////////////////////////////////////////
// This is called when the output format has been negotiated
//////////////////////////////////////////////////////////////////////////
HRESULT CVCamStream::SetMediaType(const CMediaType *pmt)
{
  // Defer to the base class; no stream-local state depends on the format
  // details here.  (An unused local VIDEOINFOHEADER pointer was removed.)
  return CSourceStream::SetMediaType(pmt);
}
示例#15
0
// IAMStreamConfig::GetStreamCaps: one capability entry is published per
// device resolution; fills in a media type plus the (largely fixed) caps.
HRESULT STDMETHODCALLTYPE CKCamStream::GetStreamCaps(int iIndex, AM_MEDIA_TYPE **pmt, BYTE *pSCC)
{
	if (!m_device)
		return E_FAIL;
	// FIX: validate arguments before indexing into the device's resolution
	// list (an out-of-range iIndex used to be passed straight through)
	if (pmt == NULL || pSCC == NULL)
		return E_POINTER;
	if (iIndex < 0)
		return E_INVALIDARG;
	if (iIndex >= m_device->video_resolution_count())
		return S_FALSE;					// no more capabilities

	DbgLog((LOG_TRACE, 1, "GetStreamCaps (iPosition = %d)", iIndex));

	auto f_devres = m_device->video_resolution(iIndex);

    *pmt = CreateMediaType(&m_mt);
	if (*pmt == NULL)
		return E_OUTOFMEMORY;
    DECLARE_PTR(VIDEOINFOHEADER, pvi, (*pmt)->pbFormat);

    pvi->bmiHeader.biCompression = CompressionFromPixelFormat(f_devres.m_pixel_format);
    pvi->bmiHeader.biBitCount    = f_devres.m_bits_per_pixel;
    pvi->bmiHeader.biSize        = sizeof(BITMAPINFOHEADER);
    pvi->bmiHeader.biWidth       = f_devres.m_width;
    pvi->bmiHeader.biHeight      = f_devres.m_height;
    pvi->bmiHeader.biPlanes      = 1;
    pvi->bmiHeader.biSizeImage   = GetBitmapSize(&pvi->bmiHeader);
    pvi->bmiHeader.biClrImportant = 0;
	// FIX: advertise this resolution's frame interval too, instead of leaving
	// whatever AvgTimePerFrame was copied over from m_mt
	pvi->AvgTimePerFrame		 = FrameIntervalFromRate(f_devres.m_framerate);

    SetRectEmpty(&(pvi->rcSource)); // we want the whole image area rendered.
    SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle

    (*pmt)->majortype				= MEDIATYPE_Video;
    (*pmt)->subtype					= MediaSubTypeFromPixelFormat(f_devres.m_pixel_format);
    (*pmt)->formattype				= FORMAT_VideoInfo;
    (*pmt)->bTemporalCompression	= FALSE;
    (*pmt)->bFixedSizeSamples		= FALSE;
    (*pmt)->lSampleSize				= pvi->bmiHeader.biSizeImage;
    (*pmt)->cbFormat				= sizeof(VIDEOINFOHEADER);
    
    DECLARE_PTR(VIDEO_STREAM_CONFIG_CAPS, pvscc, pSCC);
    
	// no cropping / stretching: every size field is pinned to this resolution
    pvscc->guid					= FORMAT_VideoInfo;
    pvscc->VideoStandard		= AnalogVideo_None;
    pvscc->InputSize.cx			= f_devres.m_width;
    pvscc->InputSize.cy			= f_devres.m_height;
    pvscc->MinCroppingSize.cx	= f_devres.m_width;
    pvscc->MinCroppingSize.cy	= f_devres.m_height;
    pvscc->MaxCroppingSize.cx	= f_devres.m_width;
    pvscc->MaxCroppingSize.cy	= f_devres.m_height;
    pvscc->CropGranularityX		= 0;
    pvscc->CropGranularityY		= 0;
    pvscc->CropAlignX			= 0;
    pvscc->CropAlignY			= 0;

    pvscc->MinOutputSize.cx		= f_devres.m_width;
    pvscc->MinOutputSize.cy		= f_devres.m_height;
    pvscc->MaxOutputSize.cx		= f_devres.m_width;
    pvscc->MaxOutputSize.cy		= f_devres.m_height;
    pvscc->OutputGranularityX	= 0;
    pvscc->OutputGranularityY	= 0;
    pvscc->StretchTapsX			= 0;
    pvscc->StretchTapsY			= 0;
    pvscc->ShrinkTapsX			= 0;
    pvscc->ShrinkTapsY			= 0;

	// bitrate bounds assume uncompressed frames at 10 fps .. the native rate
	int bitsPerFrame = f_devres.m_width * f_devres.m_height * f_devres.m_bits_per_pixel;
    pvscc->MinFrameInterval = FrameIntervalFromRate(f_devres.m_framerate);
    pvscc->MaxFrameInterval = FrameIntervalFromRate(10);
    pvscc->MinBitsPerSecond = bitsPerFrame * 10;
    pvscc->MaxBitsPerSecond = bitsPerFrame * f_devres.m_framerate;

    return S_OK;
}
示例#16
0
File: Filters.cpp  Project: Huppys/Spout2
// Spout variant: a single RGB24 capability whose dimensions track the sender.
HRESULT STDMETHODCALLTYPE CVCamStream::GetStreamCaps(int iIndex, AM_MEDIA_TYPE **pmt, BYTE *pSCC)
{
	*pmt = CreateMediaType(&m_mt);
	DECLARE_PTR(VIDEOINFOHEADER, pvi, (*pmt)->pbFormat);

	if (iIndex == 0) iIndex = 1;	// index 0 is treated as the default entry

	// Use the sending app's dimensions when known, else fall back to 320x240.
	unsigned int width  = 320;
	unsigned int height = 240;
	if (g_Width != 0 && g_Height != 0) {
		width  = g_Width;	// as per sending app
		height = g_Height;
	}

	pvi->bmiHeader.biSize			= sizeof(BITMAPINFOHEADER);
	pvi->bmiHeader.biWidth			= (LONG)width;
	pvi->bmiHeader.biHeight			= (LONG)height;
	pvi->bmiHeader.biPlanes			= 1;
	pvi->bmiHeader.biBitCount		= 24;
	pvi->bmiHeader.biCompression	= BI_RGB;
	pvi->bmiHeader.biSizeImage		= GetBitmapSize(&pvi->bmiHeader);
	pvi->bmiHeader.biClrImportant	= 0;

	SetRectEmpty(&(pvi->rcSource)); // render the whole image area
	SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle

	(*pmt)->majortype				= MEDIATYPE_Video;
	(*pmt)->subtype					= MEDIASUBTYPE_RGB24;
	(*pmt)->formattype				= FORMAT_VideoInfo;
	(*pmt)->bTemporalCompression	= false;
	(*pmt)->bFixedSizeSamples		= false;
	(*pmt)->lSampleSize				= pvi->bmiHeader.biSizeImage;
	(*pmt)->cbFormat				= sizeof(VIDEOINFOHEADER);

	DECLARE_PTR(VIDEO_STREAM_CONFIG_CAPS, pvscc, pSCC);

	pvscc->guid = FORMAT_VideoInfo;
	pvscc->VideoStandard = AnalogVideo_None;
	// InputSize: native size of the incoming signal — deprecated field, some
	// hosts still read it, so report up to 1080p.
	pvscc->InputSize.cx			= 1920;
	pvscc->InputSize.cy			= 1080;
	pvscc->MinCroppingSize.cx	= 0; // LJ was 80 but we don't want to limit it
	pvscc->MinCroppingSize.cy	= 0; // was 60
	pvscc->MaxCroppingSize.cx	= 1920;
	pvscc->MaxCroppingSize.cy	= 1080;
	pvscc->CropGranularityX		= 1; // seems 1 is not necessary
	pvscc->CropGranularityY		= 1;
	pvscc->CropAlignX = 0;
	pvscc->CropAlignY = 0;

	pvscc->MinOutputSize.cx		= 80; // LJ fair enough
	pvscc->MinOutputSize.cy		= 60;
	pvscc->MaxOutputSize.cx		= 1920; // 1080p
	pvscc->MaxOutputSize.cy		= 1080;
	pvscc->OutputGranularityX	= 1;
	pvscc->OutputGranularityY	= 1;
	pvscc->StretchTapsX			= 0;
	pvscc->StretchTapsY			= 0;
	pvscc->ShrinkTapsX			= 0;
	pvscc->ShrinkTapsY			= 0;
	pvscc->MinFrameInterval = 166667;   // 60 fps 333333; // 30fps  // LJ what is the consequence of this ?
	pvscc->MaxFrameInterval = 50000000; // 0.2 fps
	pvscc->MinBitsPerSecond = (80 * 60 * 3 * 8) / 5;
	pvscc->MaxBitsPerSecond = 1920 * 1080 * 3 * 8 * 30; // (integral overflow at 60 - anyway we lock on to 30fps and 1920 might not achieve 60fps)

	return S_OK;
}
示例#17
0
// See Directshow help topic for IAMStreamConfig for details on this method
HRESULT CVCamStream::GetMediaType(int iPosition, CMediaType *pmt)
{
	if(iPosition < 0) {
		return E_INVALIDARG;
	}
	if(iPosition > 8) { // TODO - needs work - only one position
		return VFW_S_NO_MORE_ITEMS;
	}
	if(iPosition == 0) {
		*pmt = m_mt;	// position 0: the current/preferred type
		return S_OK;
	}

	DECLARE_PTR(VIDEOINFOHEADER, pvi, pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER)));
	ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));

	// Use the Spout sender's size when one has been received, else 640x480.
	unsigned int width  = 640;
	unsigned int height = 480;
	if(g_Width != 0 && g_Height != 0) {
		width  = g_Width;	// as per Spout sender received
		height = g_Height;
	}
	// printf("GetMediaType [%d] (%dx%d)\n", iPosition, width, height);

	pvi->bmiHeader.biSize         = sizeof(BITMAPINFOHEADER);
	pvi->bmiHeader.biWidth        = (LONG)width;
	pvi->bmiHeader.biHeight       = (LONG)height;
	pvi->bmiHeader.biPlanes       = 1;
	pvi->bmiHeader.biBitCount     = 24;
	pvi->bmiHeader.biCompression  = 0; // defaults 
	pvi->bmiHeader.biClrImportant = 0;
	pvi->bmiHeader.biSizeImage    = GetBitmapSize(&pvi->bmiHeader);

	// Desired average display time per frame, in 100-nanosecond units:
	// 60fps = 166667, 50fps = 200000, 30fps = 333333
	pvi->AvgTimePerFrame = 166667; // 60fps

	SetRectEmpty(&(pvi->rcSource)); // render the whole image area
	SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle

	pmt->SetType(&MEDIATYPE_Video);
	pmt->SetFormatType(&FORMAT_VideoInfo);
	pmt->SetTemporalCompression(false);

	// Work out the GUID for the subtype from the header info.
	const GUID SubTypeGUID = GetBitmapSubtype(&pvi->bmiHeader);
	pmt->SetSubtype(&SubTypeGUID);
	pmt->SetVariableSize(); // LJ - to be checked

	pmt->SetSampleSize(pvi->bmiHeader.biSizeImage);

	return NOERROR;

} // GetMediaType