// Set the stream time at which this sample should start and finish. // NULL pointers means the time is reset STDMETHODIMP CMediaSample::SetTime( REFERENCE_TIME * pTimeStart, REFERENCE_TIME * pTimeEnd ) { if (pTimeStart == NULL) { ASSERT(pTimeEnd == NULL); m_dwFlags &= ~(Sample_TimeValid | Sample_StopValid); } else { if (pTimeEnd == NULL) { m_Start = *pTimeStart; m_dwFlags |= Sample_TimeValid; m_dwFlags &= ~Sample_StopValid; } else { ValidateReadPtr(pTimeStart,sizeof(REFERENCE_TIME)); ValidateReadPtr(pTimeEnd,sizeof(REFERENCE_TIME)); ASSERT(*pTimeEnd >= *pTimeStart); m_Start = *pTimeStart; m_End = *pTimeEnd; m_dwFlags |= Sample_TimeValid | Sample_StopValid; } } return NOERROR; }
// Store a private copy of the input pin's format block (CIN_HEADER) and
// rebuild the Huffman decoding trees from the table it carries.
// Output pin media types are accepted without action.
HRESULT CCINVideoDecompressor::SetMediaType(
    PIN_DIRECTION direction,
    const CMediaType *pmt
)
{
    CAutoLock lock(m_pLock);

    // Catch only the input pin's media type
    if (direction == PINDIR_INPUT) {

        // Check and validate the pointer
        CheckPointer(pmt, E_POINTER);
        ValidateReadPtr(pmt, sizeof(CMediaType));

        // FIX: the media type must carry a complete CIN format block --
        // bHuffmanTable is read below, so a missing or short format
        // block would be dereferenced out of bounds
        if ((pmt->Format() == NULL) || (pmt->FormatLength() < sizeof(CIN_HEADER)))
            return VFW_E_TYPE_NOT_ACCEPTED;

        // Free the old format block and allocate a new one
        if (m_pFormat)
            CoTaskMemFree(m_pFormat);
        m_pFormat = (CIN_HEADER*)CoTaskMemAlloc(pmt->FormatLength());
        if (m_pFormat == NULL)
            return E_OUTOFMEMORY;

        // Copy format block to the allocated storage
        CopyMemory(m_pFormat, pmt->Format(), pmt->FormatLength());

        // Set up Huffman nodes and tree for each of the 256 byte values
        for (int i = 0; i < 256; i++) {
            for (int j = 0; j < HUFFMAN_TOKENS; j++)
                m_HuffmanNodes[i][j].iCount = (int)m_pFormat->bHuffmanTable[i][j];
            HuffmanBuildTree(i);
        }
    }

    return NOERROR;
}
// Store a private copy of the input pin's format block (MVE_AUDIO_INFO),
// which Transform() later consults for channel/flag information.
// Output pin media types are accepted without action.
HRESULT CMVEADPCMDecompressor::SetMediaType(
    PIN_DIRECTION direction,
    const CMediaType *pmt
)
{
    CAutoLock lock(m_pLock);

    // Catch only the input pin's media type
    if (direction == PINDIR_INPUT) {

        // Check and validate the pointer
        CheckPointer(pmt, E_POINTER);
        ValidateReadPtr(pmt, sizeof(CMediaType));

        // FIX: the media type must carry a complete MVE audio format block --
        // a missing or short one would be copied (and later read) out of bounds
        if ((pmt->Format() == NULL) || (pmt->FormatLength() < sizeof(MVE_AUDIO_INFO)))
            return VFW_E_TYPE_NOT_ACCEPTED;

        // Free the old format block and allocate a new one
        if (m_pFormat)
            CoTaskMemFree(m_pFormat);
        m_pFormat = (MVE_AUDIO_INFO*)CoTaskMemAlloc(pmt->FormatLength());
        if (m_pFormat == NULL)
            return E_OUTOFMEMORY;

        // Copy format block to the allocated storage
        CopyMemory(m_pFormat, pmt->Format(), pmt->FormatLength());
    }

    return NOERROR;
}
// Attach a media type to this sample. A NULL argument clears any stored
// type and resets the "type changed" flag; otherwise a private copy of
// the type is taken and the "type changed" flag is raised.
STDMETHODIMP CMediaSample::SetMediaType(AM_MEDIA_TYPE *pMediaType)
{
    // Drop whatever media type this sample currently holds
    if (m_pMediaType != NULL) {
        DeleteMediaType(m_pMediaType);
        m_pMediaType = NULL;
    }

    // A NULL pointer is the mechanism for resetting the format type
    if (NULL == pMediaType) {
        m_dwFlags &= ~Sample_TypeChanged;
        return NOERROR;
    }

    ASSERT(pMediaType);
    ValidateReadPtr(pMediaType,sizeof(AM_MEDIA_TYPE));

    // Keep our own copy of the caller's media type
    m_pMediaType = CreateMediaType(pMediaType);
    if (NULL == m_pMediaType) {
        // Copy failed -- behave as if the type had been reset
        m_dwFlags &= ~Sample_TypeChanged;
        return E_OUTOFMEMORY;
    }

    m_dwFlags |= Sample_TypeChanged;
    return NOERROR;
}
// Install (or, with NULL, remove) the quality-control sink that will
// receive quality messages from this pin.
STDMETHODIMP CBasePin::SetSink(IQualityControl * piqc)
{
    CComAutoLock cObjectLock(m_pLock);

    // NULL simply clears the sink; a non-NULL pointer is sanity-checked first
    if (piqc != NULL) {
        ValidateReadPtr(piqc,sizeof(IQualityControl));
    }

    m_pQSink = piqc;
    return NOERROR;
} // SetSink
// Verify that the proposed output media type (8-bit RGB video) is a valid
// decompressed counterpart of the given CIN input type.
// Returns S_OK when compatible, VFW_E_TYPE_NOT_ACCEPTED otherwise.
HRESULT CCINVideoDecompressor::CheckTransform(
    const CMediaType *mtIn,
    const CMediaType *mtOut
)
{
    // Check and validate the pointers
    CheckPointer(mtIn, E_POINTER);
    ValidateReadPtr(mtIn, sizeof(CMediaType));
    CheckPointer(mtOut, E_POINTER);
    ValidateReadPtr(mtOut, sizeof(CMediaType));

    // Check if the input media type is acceptable
    HRESULT hr = CheckInputType(mtIn);
    if (hr != S_OK)
        return hr;

    // Check if the output format is acceptable
    if (
        !IsEqualGUID(*mtOut->Type(),       MEDIATYPE_Video   ) ||
        !IsEqualGUID(*mtOut->Subtype(),    MEDIASUBTYPE_RGB8 ) ||
        !IsEqualGUID(*mtOut->FormatType(), FORMAT_VideoInfo  )
    )
        return VFW_E_TYPE_NOT_ACCEPTED;

    // Get the media types' format blocks
    CIN_HEADER *pInFormat  = (CIN_HEADER*)mtIn->Format();
    VIDEOINFO  *pOutFormat = (VIDEOINFO*)mtOut->Format();

    // FIX: both format blocks must be present and of sufficient size --
    // CheckInputType only checks GUIDs, so pInFormat could be NULL here
    if ((pInFormat == NULL) || (mtIn->FormatLength() < sizeof(CIN_HEADER)))
        return VFW_E_TYPE_NOT_ACCEPTED;
    if ((pOutFormat == NULL) || (mtOut->FormatLength() < sizeof(VIDEOINFO)))
        return VFW_E_TYPE_NOT_ACCEPTED;

    // Check the compatibility of the formats.
    // Note: negative biHeight means a top-down DIB, matching decode order
    DWORD cbFrame = pInFormat->dwVideoWidth * pInFormat->dwVideoHeight;
    return (
        //(pOutFormat->AvgTimePerFrame         == UNITS / CIN_FPS               ) &&
        (pOutFormat->bmiHeader.biWidth       ==  (LONG)pInFormat->dwVideoWidth ) &&
        (pOutFormat->bmiHeader.biHeight      == -(LONG)pInFormat->dwVideoHeight) &&
        (pOutFormat->bmiHeader.biPlanes      == 1                              ) &&
        (pOutFormat->bmiHeader.biBitCount    == 8                              ) &&
        (pOutFormat->bmiHeader.biCompression == BI_RGB                         ) &&
        (pOutFormat->bmiHeader.biSizeImage   == cbFrame                        )
    ) ? S_OK : VFW_E_TYPE_NOT_ACCEPTED;
}
//------------------------------------------------------------------------------ // ReleaseBuffer // Final release of a CMediaSample will call this STDMETHODIMP CCustomAllocator::ReleaseBuffer(IMediaSample* pSample) { CheckPointer(pSample, E_POINTER); ValidateReadPtr(pSample, sizeof(IMediaSample)); BOOL bRelease = FALSE; { CAutoLock cal(this); BYTE* pBuffer = NULL; pSample->GetPointer(&pBuffer); ASSERT(pBuffer); m_lBuffers.AddTail(pBuffer); /* Put back on the free list */ m_lFree.AddTail((CMediaSample*)pSample); if (m_lWaitingFree != 0) { NotifySampleFree(); } DbgLog((LOG_TRACE, DBG_MEM, TEXT("CCustomAllocator::ReleaseBuffer(): m_lFree: %d m_lDeliver: %d"), m_lFree.GetCount(), m_lDeliver.GetCount())); // if there is a pending Decommit, then we need to complete it by // calling Free() when the last buffer is placed on the free list LONG l1 = m_lFree.GetCount(); if (m_bDecommitInProgress && (l1 == m_lAllocated)) { Free(); m_bDecommitInProgress = FALSE; bRelease = TRUE; } } if (m_pNotify) { ASSERT(m_fEnableReleaseCallback); // // Note that this is not synchronized with setting up a notification // method. // m_pNotify->NotifyRelease(); } /* For each buffer there is one AddRef, made in GetBuffer and released here. This may cause the allocator and all samples to be deleted */ if (bRelease) { Release(); } return NOERROR; }
// Set the media times for this sample STDMETHODIMP CMediaSample::SetMediaTime( LONGLONG * pTimeStart, LONGLONG * pTimeEnd ) { if (pTimeStart == NULL) { ASSERT(pTimeEnd == NULL); m_dwFlags &= ~Sample_MediaTimeValid; } else { ValidateReadPtr(pTimeStart,sizeof(LONGLONG)); ValidateReadPtr(pTimeEnd,sizeof(LONGLONG)); ASSERT(*pTimeEnd >= *pTimeStart); m_MediaStart = *pTimeStart; m_MediaEnd = (LONG)(*pTimeEnd - *pTimeStart); m_dwFlags |= Sample_MediaTimeValid; } return NOERROR; }
// Decide whether the proposed input media type is CIN-compressed video.
// Returns S_OK when acceptable, VFW_E_TYPE_NOT_ACCEPTED otherwise.
HRESULT CCINVideoDecompressor::CheckInputType(const CMediaType *mtIn)
{
    // Check and validate the pointer
    CheckPointer(mtIn, E_POINTER);
    ValidateReadPtr(mtIn, sizeof(CMediaType));

    // All three GUIDs must identify CIN video
    BOOL bIsCinVideo =
        IsEqualGUID(*mtIn->Type(),       MEDIATYPE_Video     ) &&
        IsEqualGUID(*mtIn->Subtype(),    MEDIASUBTYPE_CINVideo) &&
        IsEqualGUID(*mtIn->FormatType(), FORMAT_CINVideo     );

    return bIsCinVideo ? S_OK : VFW_E_TYPE_NOT_ACCEPTED;
}
// Decide whether the proposed input media type is a byte stream coming
// from an FST file. Returns S_OK when acceptable, S_FALSE otherwise.
HRESULT CFSTSplitterFilter::CheckInputType(const CMediaType* pmt)
{
    // Check and validate the pointer
    CheckPointer(pmt, E_POINTER);
    ValidateReadPtr(pmt, sizeof(CMediaType));

    // We accept only a byte stream ...
    if (!IsEqualGUID(*pmt->Type(), MEDIATYPE_Stream))
        return S_FALSE;

    // ... originating from an FST file
    if (!IsEqualGUID(*pmt->Subtype(), MEDIASUBTYPE_FST))
        return S_FALSE;

    return S_OK;
}
// Verify that the proposed output media type (PCM audio) is a valid
// decompressed counterpart of the given MVE ADPCM input type.
// Returns S_OK when compatible, VFW_E_TYPE_NOT_ACCEPTED otherwise.
HRESULT CMVEADPCMDecompressor::CheckTransform(
    const CMediaType *mtIn,
    const CMediaType *mtOut
)
{
    // Check and validate the pointers
    CheckPointer(mtIn, E_POINTER);
    ValidateReadPtr(mtIn, sizeof(CMediaType));
    CheckPointer(mtOut, E_POINTER);
    ValidateReadPtr(mtOut, sizeof(CMediaType));

    // Check if the input media type is acceptable
    HRESULT hr = CheckInputType(mtIn);
    if (hr != S_OK)
        return hr;

    // Check if the output format is acceptable
    if (
        !IsEqualGUID(*mtOut->Type(),       MEDIATYPE_Audio    ) ||
        !IsEqualGUID(*mtOut->Subtype(),    MEDIASUBTYPE_PCM   ) ||
        !IsEqualGUID(*mtOut->FormatType(), FORMAT_WaveFormatEx)
    )
        return VFW_E_TYPE_NOT_ACCEPTED;

    // Get the media types' format blocks
    MVE_AUDIO_INFO *pInFormat  = (MVE_AUDIO_INFO*)mtIn->Format();
    WAVEFORMATEX   *pOutFormat = (WAVEFORMATEX*)mtOut->Format();

    // Both format blocks must be present and of sufficient size
    if ((pInFormat == NULL) || (mtIn->FormatLength() < sizeof(MVE_AUDIO_INFO)))
        return VFW_E_TYPE_NOT_ACCEPTED;
    if ((pOutFormat == NULL) || (mtOut->FormatLength() < sizeof(WAVEFORMATEX)))
        return VFW_E_TYPE_NOT_ACCEPTED;

    // Compare the format blocks.
    // FIX: the ?: expressions must be parenthesized -- '==' binds tighter
    // than '?:', so the original "x == (flags & F) ? 2 : 1" evaluated to
    // 2 or 1 (always true), disabling the channel and bit-depth checks
    return (
        (pOutFormat->wFormatTag     == WAVE_FORMAT_PCM                                  ) &&
        (pOutFormat->nChannels      == ((pInFormat->wFlags & MVE_AUDIO_STEREO) ? 2 : 1) ) &&
        (pOutFormat->nSamplesPerSec == pInFormat->wSampleRate                           ) &&
        (pOutFormat->wBitsPerSample == ((pInFormat->wFlags & MVE_AUDIO_16BIT) ? 16 : 8) )
    ) ? S_OK : VFW_E_TYPE_NOT_ACCEPTED;
}
// Decide whether the proposed input media type is MVE ADPCM audio that
// this filter can decode (16-bit, compressed).
// Returns S_OK when acceptable, VFW_E_TYPE_NOT_ACCEPTED otherwise.
HRESULT CMVEADPCMDecompressor::CheckInputType(const CMediaType *mtIn)
{
    // Check and validate the pointer
    CheckPointer(mtIn, E_POINTER);
    ValidateReadPtr(mtIn, sizeof(CMediaType));

    // Check for proper audio media type
    if (
        !IsEqualGUID(*mtIn->Type(),       MEDIATYPE_Audio      ) ||
        !IsEqualGUID(*mtIn->Subtype(),    MEDIASUBTYPE_MVEADPCM) ||
        !IsEqualGUID(*mtIn->FormatType(), FORMAT_MVEADPCM      )
    )
        return VFW_E_TYPE_NOT_ACCEPTED;

    // FIX: the format block must be present and complete before its
    // flags can be inspected -- the original dereferenced Format()
    // unconditionally, which may be NULL even when the GUIDs match
    MVE_AUDIO_INFO *pInfo = (MVE_AUDIO_INFO*)mtIn->Format();
    if ((pInfo == NULL) || (mtIn->FormatLength() < sizeof(MVE_AUDIO_INFO)))
        return VFW_E_TYPE_NOT_ACCEPTED;

    // Only 16-bit compressed (ADPCM) audio is handled by this filter
    return (
        (pInfo->wFlags & MVE_AUDIO_16BIT) &&
        (pInfo->wFlags & MVE_AUDIO_COMPRESSED)
    ) ? S_OK : VFW_E_TYPE_NOT_ACCEPTED;
}
// Initiate a connection from this (output) pin to pReceivePin.
// pmt is an optional media type: NULL or partially specified lets
// AgreeMediaType enumerate candidates; a fully specified type is tried
// as-is. Fails if already connected or the filter is active (unless
// reconnection while active is allowed).
STDMETHODIMP CBasePin::Connect(
    IPin * pReceivePin,
    const AM_MEDIA_TYPE *pmt   // optional media type
)
{
    CheckPointer(pReceivePin,E_POINTER);
    ValidateReadPtr(pReceivePin,sizeof(IPin));
    CComAutoLock cObjectLock(m_pLock);
    DisplayPinInfo(pReceivePin);

    /* See if we are already connected */

    if (m_Connected) {
        DbgLog((LOG_TRACE, CONNECT_TRACE_LEVEL, TEXT("Already connected")));
        return VFW_E_ALREADY_CONNECTED;
    }

    /* See if the filter is active */
    if (!IsStopped() && !m_bCanReconnectWhenActive) {
        return VFW_E_NOT_STOPPED;
    }

    // Find a mutually agreeable media type -
    // Pass in the template media type. If this is partially specified,
    // each of the enumerated media types will need to be checked against
    // it. If it is non-null and fully specified, we will just try to connect
    // with this.

    const CMediaType * ptype = (CMediaType*)pmt;
    HRESULT hr = AgreeMediaType(pReceivePin, ptype);
    if (FAILED(hr)) {
        DbgLog((LOG_TRACE, CONNECT_TRACE_LEVEL, TEXT("Failed to agree type")));

        // Since the procedure is already returning an error code, there
        // is nothing else this function can do to report the error.
        EXECUTE_ASSERT( SUCCEEDED( BreakConnect() ) );

        return hr;
    }

    DbgLog((LOG_TRACE, CONNECT_TRACE_LEVEL, TEXT("Connection succeeded")));

    return NOERROR;
}
// Quality-control notification from the downstream filter.
// Gives the owning filter first refusal via AlterQuality(); anything it
// does not handle (S_FALSE) is currently answered with S_OK rather than
// being forwarded upstream (the PassNotify call is commented out).
STDMETHODIMP CParserOutputPin::Notify(IBaseFilter * pSender, Quality q)
{
    // ValidateReadPtr compiles away in retail builds, so pSender would
    // otherwise be unreferenced there -- keep the macro
    UNREFERENCED_PARAMETER(pSender);
    ValidateReadPtr(pSender,sizeof(IBaseFilter));

    // First see if we want to handle this ourselves
    HRESULT hr = m_pTransformFilter->AlterQuality(q);
    if(hr!=S_FALSE) {
        return hr;        // either S_OK or a failure
    }

    // S_FALSE means we pass the message on.
    // Find the quality sink for our input pin and send it there
    ASSERT(m_pTransformFilter->m_pInput != NULL);

    // NOTE(review): upstream forwarding is disabled -- quality messages
    // stop here and S_OK is returned instead. Looks deliberate; confirm
    // the upstream filter need not see these notifications.
    //return m_pTransformFilter->m_pInput->PassNotify(q);
    return S_OK;

} // Notify
// Ask the pin whether it would accept the given media type.
// Per the QueryAccept contract the answer is only ever S_OK or S_FALSE:
// any failure code from CheckMediaType is folded into S_FALSE.
STDMETHODIMP CBasePin::QueryAccept(
    const AM_MEDIA_TYPE *pmt
)
{
    CheckPointer(pmt,E_POINTER);
    ValidateReadPtr(pmt,sizeof(AM_MEDIA_TYPE));

    /* The CheckMediaType method is valid to return error codes if the media
       type is horrible, an example might be E_INVALIDARG. What we do here
       is map all the error codes into either S_OK or S_FALSE regardless */

    HRESULT hr = CheckMediaType((CMediaType*)pmt);

    // note that the only defined success codes should be S_OK and S_FALSE...
    return FAILED(hr) ? S_FALSE : hr;
}
// The upstream output pin tells us which allocator it intends to use and
// whether the samples will be read-only. For an in-place transform the
// input and output pins must share an allocator unless we modify data,
// in which case a read-only shared allocator must be rejected and a
// separate writable output allocator arranged.
STDMETHODIMP CTransInPlaceInputPin::NotifyAllocator(
    IMemAllocator * pAllocator,
    BOOL bReadOnly)
{
    HRESULT hr = S_OK;
    CheckPointer(pAllocator,E_POINTER);
    ValidateReadPtr(pAllocator,sizeof(IMemAllocator));

    CAutoLock cObjectLock(m_pLock);

    m_bReadOnly = bReadOnly;

    //  If we modify data then don't accept the allocator if it's
    //  the same as the output pin's allocator

    //  If our output is not connected just accept the allocator
    //  We're never going to use this allocator because when our
    //  output pin is connected we'll reconnect this pin
    if (!m_pTIPFilter->OutputPin()->IsConnected()) {
        return CTransformInputPin::NotifyAllocator(pAllocator, bReadOnly);
    }

    //  If the allocator is read-only and we're modifying data
    //  and the allocator is the same as the output pin's
    //  then reject
    if (bReadOnly && m_pTIPFilter->m_bModifiesData) {
        IMemAllocator *pOutputAllocator =
            m_pTIPFilter->OutputPin()->PeekAllocator();

        //  Make sure we have an output allocator
        if (pOutputAllocator == NULL) {
            // Try the downstream input pin's allocator first,
            // falling back to a fresh memory allocator
            hr = m_pTIPFilter->OutputPin()->ConnectedIMemInputPin()->
                                      GetAllocator(&pOutputAllocator);
            if(FAILED(hr)) {
                hr = CreateMemoryAllocator(&pOutputAllocator);
            }
            if (SUCCEEDED(hr)) {
                // SetAllocator takes its own reference; drop ours
                m_pTIPFilter->OutputPin()->SetAllocator(pOutputAllocator);
                pOutputAllocator->Release();
            }
        }
        if (pAllocator == pOutputAllocator) {
            // Sharing a read-only allocator while modifying data is not allowed
            hr = E_FAIL;
        } else if(SUCCEEDED(hr)) {
            //  Must copy so set the allocator properties on the output
            ALLOCATOR_PROPERTIES Props, Actual;

            hr = pAllocator->GetProperties(&Props);
            if (SUCCEEDED(hr)) {
                hr = pOutputAllocator->SetProperties(&Props, &Actual);
            }
            if (SUCCEEDED(hr)) {
                //  The output allocator must match or exceed what the
                //  upstream allocator provides in every dimension
                if (  (Props.cBuffers > Actual.cBuffers)
                   || (Props.cbBuffer > Actual.cbBuffer)
                   || (Props.cbAlign  > Actual.cbAlign)
                   ) {
                    hr =  E_FAIL;
                }
            }

            //  Set the allocator on the output pin
            if (SUCCEEDED(hr)) {
                hr = m_pTIPFilter->OutputPin()->ConnectedIMemInputPin()
                                       ->NotifyAllocator( pOutputAllocator, FALSE );
            }
        }
    } else {
        // Data is not modified (or samples are writable): pass the
        // upstream allocator straight through to the downstream pin
        hr = m_pTIPFilter->OutputPin()->ConnectedIMemInputPin()
                                  ->NotifyAllocator( pAllocator, bReadOnly );
        if (SUCCEEDED(hr)) {
            m_pTIPFilter->OutputPin()->SetAllocator( pAllocator );
        }
    }

    if (SUCCEEDED(hr)) {

        // It's possible that the old and the new are the same thing.
        // AddRef before release ensures that we don't unload it.
        pAllocator->AddRef();

        if( m_pAllocator != NULL )
            m_pAllocator->Release();

        m_pAllocator = pAllocator;    // We have an allocator for the input pin
    }

    return hr;

} // NotifyAllocator
// Parse an FST file through pReader: read and verify the header, load the
// frame table, derive buffer sizes and seek limits, then create the video
// output pin and, when a soundtrack is present, the audio output pin.
// pPin is unused here. On any failure Shutdown() tears down partial state.
// NOTE(review): header fields (nVideoFrames etc.) are trusted without range
// validation -- confirm inputs are not attacker-controlled.
HRESULT CFSTSplitterFilter::Initialize(IPin *pPin, IAsyncReader *pReader)
{
    // Check and validate the pointer
    CheckPointer(pReader, E_POINTER);
    ValidateReadPtr(pReader, sizeof(IAsyncReader));

    // Read file header
    FST_HEADER header;
    HRESULT hr = pReader->SyncRead(0, sizeof(header), (BYTE*)&header);
    if (hr != S_OK)
        return hr;

    // Verify file header
    if (header.dwID != FST_ID_2TSF)
        return VFW_E_INVALID_FILE_FORMAT;

    // Protect the filter data
    CAutoLock datalock(&m_csData);

    // Set video stream info
    m_nVideoFrames = header.nVideoFrames;
    m_nFramesPerSecond = header.nFramesPerSecond;

    // Allocate frame table
    m_pFrameTable = (FST_FRAME_ENTRY*)CoTaskMemAlloc(m_nVideoFrames * sizeof(FST_FRAME_ENTRY));
    if (m_pFrameTable == NULL) {
        Shutdown();
        return E_OUTOFMEMORY;
    }

    // Read in the frame table (it immediately follows the header)
    hr = pReader->SyncRead(sizeof(header), m_nVideoFrames * sizeof(FST_FRAME_ENTRY), (BYTE*)m_pFrameTable);
    if (hr != S_OK) {
        Shutdown();
        return hr;
    }

    // Walk the frame table and determine the maximum frame data sizes
    // (used below to size the input and output allocator buffers)
    DWORD cbMaxImage = 0, cbMaxSound = 0;
    for (DWORD iFrame = 0; iFrame < m_nVideoFrames; iFrame++) {
        if (m_pFrameTable[iFrame].cbImage > cbMaxImage)
            cbMaxImage = m_pFrameTable[iFrame].cbImage;
        if (m_pFrameTable[iFrame].cbSound > cbMaxSound)
            cbMaxSound = m_pFrameTable[iFrame].cbSound;
    }

    // Set file positions
    m_llDefaultStart = (LONGLONG)sizeof(FST_HEADER) + m_nVideoFrames * sizeof(FST_FRAME_ENTRY); // Right after the header and frame table
    m_llDefaultStop = MAXLONGLONG; // Defaults to file end

    // Decide on the input pin properties
    m_cbInputAlign = 1;
    m_cbInputBuffer = cbMaxImage + cbMaxSound;

    // Protect the output pins state
    CAutoLock pinlock(&m_csPins);

    // Decide on the output pins count
    m_nOutputPins = 1; // Video is always present

    // Check if we have soundtrack
    if (
        (header.dwAudioSampleRate != 0) &&
        (header.nAudioBits != 0) &&
        (m_pFrameTable[0].cbSound != 0)
    )
        m_nOutputPins++;

    // Create output pins array
    ASSERT(m_ppOutputPin == NULL);
    m_ppOutputPin = new CParserOutputPin*[m_nOutputPins];
    if (m_ppOutputPin == NULL) {
        m_nOutputPins = 0;
        Shutdown();
        return E_OUTOFMEMORY;
    }

    // Reset the output pin array elements to NULLs
    for (int i = 0; i < m_nOutputPins; i++)
        m_ppOutputPin[i] = NULL;

    // Allocate video media type
    CMediaType *pmtVideo = new CMediaType();
    if (pmtVideo == NULL) {
        Shutdown();
        return E_OUTOFMEMORY;
    }

    // Initialize the video media type.
    // The whole FST header doubles as the video format block
    pmtVideo->InitMediaType();
    pmtVideo->SetType(&MEDIATYPE_Video);
    pmtVideo->SetSubtype(&MEDIASUBTYPE_FSTVideo);
    pmtVideo->SetSampleSize(0);
    pmtVideo->SetTemporalCompression(TRUE);
    pmtVideo->SetFormatType(&FORMAT_FSTVideo);
    if (!pmtVideo->SetFormat((BYTE*)&header, sizeof(header))) {
        delete pmtVideo;
        Shutdown();
        return E_FAIL;
    }

    // Allocate the video allocator properties
    ALLOCATOR_PROPERTIES *papVideo = (ALLOCATOR_PROPERTIES*)CoTaskMemAlloc(sizeof(ALLOCATOR_PROPERTIES));
    if (papVideo == NULL) {
        delete pmtVideo;
        Shutdown();
        return E_OUTOFMEMORY;
    }

    // Set the video allocator properties
    papVideo->cbAlign = 0;  // No matter
    papVideo->cbPrefix = 0; // No matter
    papVideo->cBuffers = 4; // TEMP: No need to set larger value?
    papVideo->cbBuffer = cbMaxImage;

    // Allocate time formats array. If we fail here, it's not an error,
    // we'll just set zero seeker parameters and may proceed
    DWORD dwVideoCapabilities = 0;
    int nVideoTimeFormats = 0;
    GUID *pVideoTimeFormats = (GUID*)CoTaskMemAlloc(3 * sizeof(GUID));
    if (pVideoTimeFormats) {
        nVideoTimeFormats = 3;
        // Fill in the time formats array
        pVideoTimeFormats[0] = TIME_FORMAT_MEDIA_TIME;
        pVideoTimeFormats[1] = TIME_FORMAT_FRAME;
        pVideoTimeFormats[2] = TIME_FORMAT_SAMPLE;
        dwVideoCapabilities = AM_SEEKING_CanGetCurrentPos | AM_SEEKING_CanGetStopPos | AM_SEEKING_CanGetDuration;
    }

    // Create video output pin (always the first one!)
    // The pin takes ownership of pmtVideo, papVideo and pVideoTimeFormats
    // on success; on failure we free whatever the pin did not adopt
    hr = NOERROR;
    m_ppOutputPin[0] = new CParserOutputPin(
        NAME("FST Splitter Video Output Pin"),
        this,
        &m_csFilter,
        pmtVideo,
        papVideo,
        dwVideoCapabilities,
        nVideoTimeFormats,
        pVideoTimeFormats,
        &hr,
        wszFSTVideoOutputName
    );
    if (
        (FAILED(hr)) ||
        (m_ppOutputPin[0] == NULL)
    ) {
        if (m_ppOutputPin[0]) {
            delete m_ppOutputPin[0];
            m_ppOutputPin[0] = NULL;
        } else {
            delete pmtVideo;
            CoTaskMemFree(papVideo);
            if (pVideoTimeFormats)
                CoTaskMemFree(pVideoTimeFormats);
        }
        Shutdown();
        if (FAILED(hr))
            return hr;
        else
            return E_OUTOFMEMORY;
    }
    // Hold a reference on the video output pin
    m_ppOutputPin[0]->AddRef();

    // We've created a new pin -- so increment pin version
    IncrementPinVersion();

    if (m_nOutputPins > 1) {

        // Allocate audio media type
        CMediaType *pmtAudio = new CMediaType();
        if (pmtAudio == NULL) {
            Shutdown();
            return E_OUTOFMEMORY;
        }

        // Initialize the audio media type
        pmtAudio->InitMediaType();
        pmtAudio->SetType(&MEDIATYPE_Audio);
        pmtAudio->SetSubtype(&MEDIASUBTYPE_PCM);
        pmtAudio->SetSampleSize(header.nAudioBits * (header.wAudioChannels + 1) / 8);
        pmtAudio->SetTemporalCompression(FALSE);
        pmtAudio->SetFormatType(&FORMAT_WaveFormatEx);
        WAVEFORMATEX *pFormat = (WAVEFORMATEX*)pmtAudio->AllocFormatBuffer(sizeof(WAVEFORMATEX));
        if (pFormat == NULL) {
            delete pmtAudio;
            Shutdown();
            return E_OUTOFMEMORY;
        }

        // Fill in the audio format block
        pFormat->wFormatTag = WAVE_FORMAT_PCM;
        pFormat->nChannels = header.wAudioChannels + 1; // TEMP: Is it really so?
        pFormat->nSamplesPerSec = header.dwAudioSampleRate;
        pFormat->nAvgBytesPerSec = pFormat->nChannels * header.nAudioBits * header.dwAudioSampleRate / 8;
        pFormat->nBlockAlign = pFormat->nChannels * header.nAudioBits / 8;
        pFormat->wBitsPerSample = header.nAudioBits;
        pFormat->cbSize = 0;

        // Allocate the audio allocator properties
        ALLOCATOR_PROPERTIES *papAudio = (ALLOCATOR_PROPERTIES*)CoTaskMemAlloc(sizeof(ALLOCATOR_PROPERTIES));
        if (papAudio == NULL) {
            delete pmtAudio;
            Shutdown();
            return E_OUTOFMEMORY;
        }

        // Set the audio allocator properties
        papAudio->cbAlign = 0;  // No matter
        papAudio->cbPrefix = 0; // No matter
        papAudio->cBuffers = 4; // No use to set different from video value
        papAudio->cbBuffer = cbMaxSound;

        // Set the wave format parameters needed for the calculation
        // of sample stream and media duration
        m_nSampleSize = pFormat->nBlockAlign;
        m_nAvgBytesPerSec = pFormat->nAvgBytesPerSec;

        // Allocate time formats array. If we fail here, it's not an error,
        // we'll just set zero seeker parameters and may proceed
        DWORD dwAudioCapabilities = 0;
        int nAudioTimeFormats = 0;
        GUID *pAudioTimeFormats = (GUID*)CoTaskMemAlloc(3 * sizeof(GUID));
        if (pAudioTimeFormats) {
            nAudioTimeFormats = 3;
            // Fill in the time formats array
            pAudioTimeFormats[0] = TIME_FORMAT_MEDIA_TIME;
            pAudioTimeFormats[1] = TIME_FORMAT_SAMPLE;
            pAudioTimeFormats[2] = TIME_FORMAT_BYTE;
            dwAudioCapabilities = AM_SEEKING_CanGetCurrentPos | AM_SEEKING_CanGetStopPos | AM_SEEKING_CanGetDuration;
        }

        // Create audio output pin (same ownership rules as the video pin)
        hr = NOERROR;
        m_ppOutputPin[1] = new CParserOutputPin(
            NAME("FST Splitter Audio Output Pin"),
            this,
            &m_csFilter,
            pmtAudio,
            papAudio,
            dwAudioCapabilities,
            nAudioTimeFormats,
            pAudioTimeFormats,
            &hr,
            wszFSTAudioOutputName
        );
        if (
            (FAILED(hr)) ||
            (m_ppOutputPin[1] == NULL)
        ) {
            if (m_ppOutputPin[1]) {
                delete m_ppOutputPin[1];
                m_ppOutputPin[1] = NULL;
            } else {
                delete pmtAudio;
                CoTaskMemFree(papAudio);
                if (pAudioTimeFormats)
                    CoTaskMemFree(pAudioTimeFormats);
            }
            Shutdown();
            if (FAILED(hr))
                return hr;
            else
                return E_OUTOFMEMORY;
        }
        // Hold a reference on the audio output pin
        m_ppOutputPin[1]->AddRef();

        // We've created a new pin -- so increment pin version
        IncrementPinVersion();
    }

    // Scope for the locking
    {
        // Protect media content information
        CAutoLock infolock(&m_csInfo);

        // Set the media content strings (allocation failure is tolerated:
        // the strings simply stay unset)
        m_wszAuthorName = (OLECHAR*)CoTaskMemAlloc(sizeof(OLECHAR) * (lstrlenW(wszFSTAuthorName) + 1));
        if (m_wszAuthorName)
            lstrcpyW(m_wszAuthorName, wszFSTAuthorName);
        m_wszDescription = (OLECHAR*)CoTaskMemAlloc(sizeof(OLECHAR) * (lstrlenW(wszFSTDescription) + 1));
        if (m_wszDescription)
            lstrcpyW(m_wszDescription, wszFSTDescription);
    }

    return NOERROR;
}
// Decode an MVE ADPCM sample into 16-bit PCM.
// Each channel starts with an uncompressed 16-bit sample; every following
// input byte is a step-table delta applied to the channel's running
// predictor, which is clamped to the 16-bit range.
HRESULT CMVEADPCMDecompressor::Transform(
    IMediaSample *pIn,
    IMediaSample *pOut
)
{
    // Check and validate the pointers
    CheckPointer(pIn, E_POINTER);
    ValidateReadPtr(pIn, sizeof(IMediaSample));
    CheckPointer(pOut, E_POINTER);
    ValidateReadPtr(pOut, sizeof(IMediaSample));

    // Get the input sample's buffer
    BYTE *pbInBuffer = NULL;
    HRESULT hr = pIn->GetPointer(&pbInBuffer);
    if (FAILED(hr))
        return hr;

    // Get the input sample's data length
    LONG lInDataLength = pIn->GetActualDataLength();

    // Get the output sample's buffer
    SHORT *piOutBuffer = NULL;
    hr = pOut->GetPointer((BYTE**)&piOutBuffer);
    if (FAILED(hr))
        return hr;

    // Set up the number of channels (mono or stereo only)
    WORD nChannels = (m_pFormat->wFlags & MVE_AUDIO_STEREO) ? 2 : 1;

    // The sample must at least contain the initial uncompressed
    // 16-bit sample for every channel
    if (lInDataLength < (LONG)(nChannels * 2))
        return E_FAIL;

    // Running predictor per channel. FIX: the original kept these in SHORT,
    // so the clip tests below could never fire (a SHORT is never > 32767)
    // and overflow wrapped instead of clamping; a LONG accumulator makes
    // the clamping effective. nChannels <= 2, so a fixed stack array also
    // replaces the per-call heap allocation.
    LONG iSample[2] = { 0, 0 };

    LONG lOutDataLength = 0;

    // Initialize sample values from the uncompressed leading samples
    for (WORD i = 0; i < nChannels; i++) {
        iSample[i] = *((SHORT*)pbInBuffer);
        pbInBuffer += 2;
        lInDataLength -= 2;
        *piOutBuffer++ = (SHORT)iSample[i];
        lOutDataLength += 2;
    }

    // Walk the input buffer until its length is exhausted
    while (lInDataLength > 0) {

        // Process a byte per channel for all channels
        for (WORD i = 0; i < nChannels; i++) {

            // Calculate the current sample
            iSample[i] += g_iStepTable[*pbInBuffer++];
            lInDataLength--;

            // Clip the sample value (if needed)
            if (iSample[i] > 32767)
                iSample[i] = 32767;
            else if (iSample[i] < -32768)
                iSample[i] = -32768;

            // Place the sample into the output buffer
            *piOutBuffer++ = (SHORT)iSample[i];
            lOutDataLength += 2;
        }
    }

    // Set the data length for the output sample
    hr = pOut->SetActualDataLength(lOutDataLength);
    if (FAILED(hr))
        return hr;

    // Each PCM sample is a sync point
    hr = pOut->SetSyncPoint(TRUE);
    if (FAILED(hr))
        return hr;

    // PCM sample should never be a preroll one
    hr = pOut->SetPreroll(FALSE);
    if (FAILED(hr))
        return hr;

    // We rely on the upstream filter (which is most likely
    // a parser or splitter) in the matter of stream and media
    // times setting. As to the discontinuity property, we should
    // not drop samples, so we just retain this property's value
    // set by the upstream filter
    return NOERROR;
}
// Called by the connecting (output) pin to propose a connection with the
// given media type. Verifies we are unconnected and stopped, lets the
// derived class vet the peer pin and the media type, then completes the
// connection (SetMediaType + CompleteConnect), rolling back on failure.
STDMETHODIMP CBasePin::ReceiveConnection(
    IPin * pConnector,      // this is the pin who we will connect to
    const AM_MEDIA_TYPE *pmt    // this is the media type we will exchange
)
{
    CheckPointer(pConnector,E_POINTER);
    CheckPointer(pmt,E_POINTER);
    ValidateReadPtr(pConnector,sizeof(IPin));
    ValidateReadPtr(pmt,sizeof(AM_MEDIA_TYPE));
    CComAutoLock cObjectLock(m_pLock);

#ifdef _DEBUG
    // Debug-only: log the peer pin and its owning filter
    PIN_INFO PinInfo;

    if(!FAILED(pConnector->QueryPinInfo(&PinInfo)))
    {
        CEasyString PinName=PinInfo.achName;
        PrintSystemLog(0,"RecvPin:%s",(LPCTSTR)PinName);
        if(PinInfo.pFilter)
        {
            FILTER_INFO FilterInfo;

            if (!FAILED(PinInfo.pFilter->QueryFilterInfo(&FilterInfo)))
            {
                CLSID ClassID;

                PinInfo.pFilter->GetClassID(&ClassID);

                CEasyString FilterName=FilterInfo.achName;
                PrintSystemLog(0,"RecvPinFilter:%s",(LPCTSTR)FilterName);
                // The FILTER_INFO structure holds a pointer to the Filter Graph
                // Manager, with a reference count that must be released.
                if (FilterInfo.pGraph != NULL)
                {
                    FilterInfo.pGraph->Release();
                }
            }
            PinInfo.pFilter->Release();
        }

        //MessageBox(NULL, (LPCTSTR)PinName, TEXT("Filter Name"), MB_OK);
    }
#endif

    /* Are we already connected */
    if (m_Connected) {
        return VFW_E_ALREADY_CONNECTED;
    }

    /* See if the filter is active */
    if (!IsStopped() && !m_bCanReconnectWhenActive) {
        return VFW_E_NOT_STOPPED;
    }

    HRESULT hr = CheckConnect(pConnector);
    if (FAILED(hr)) {
        // Since the procedure is already returning an error code, there
        // is nothing else this function can do to report the error.
        EXECUTE_ASSERT( SUCCEEDED( BreakConnect() ) );

        return hr;
    }

    /* Ask derived class if this media type is ok */

    CMediaType * pcmt = (CMediaType*) pmt;
    hr = CheckMediaType(pcmt);
    if (hr != NOERROR) {
        // no -we don't support this media type

        // Since the procedure is already returning an error code, there
        // is nothing else this function can do to report the error.
        EXECUTE_ASSERT( SUCCEEDED( BreakConnect() ) );

        // return a specific media type error if there is one
        // or map a general failure code to something more helpful
        // (in particular S_FALSE gets changed to an error code)
        if (SUCCEEDED(hr) ||
            (hr == E_FAIL) ||
            (hr == E_INVALIDARG)) {
            hr = VFW_E_TYPE_NOT_ACCEPTED;
        }

        return hr;
    }

    /* Complete the connection */

    m_Connected = pConnector;
    m_Connected->AddRef();
    hr = SetMediaType(pcmt);
    if (SUCCEEDED(hr)) {
        hr = CompleteConnect(pConnector);
        if (SUCCEEDED(hr)) {
            return NOERROR;
        }
    }

    // Roll back: release the peer reference and undo the partial connection
    DbgLog((LOG_TRACE, CONNECT_TRACE_LEVEL,
        TEXT("Failed to set the media type or failed to complete the connection.")));
    m_Connected->Release();
    m_Connected = NULL;

    // Since the procedure is already returning an error code, there
    // is nothing else this function can do to report the error.
    EXECUTE_ASSERT( SUCCEEDED( BreakConnect() ) );

    return hr;
}
// Decode one CIN frame: if the frame carries a new palette, install it on
// the output media type first, then Huffman-decode the frame data into an
// 8-bit paletted image.
HRESULT CCINVideoDecompressor::Transform(
    IMediaSample *pIn,
    IMediaSample *pOut
)
{
    // Check and validate the pointers
    CheckPointer(pIn, E_POINTER);
    ValidateReadPtr(pIn, sizeof(IMediaSample));
    CheckPointer(pOut, E_POINTER);
    ValidateReadPtr(pOut, sizeof(IMediaSample));

    // Get the input sample's buffer
    BYTE *pbInBuffer = NULL;
    HRESULT hr = pIn->GetPointer(&pbInBuffer);
    if (FAILED(hr))
        return hr;

    // FIX: validate the input length -- the frame must at least contain
    // the 4-byte command code, otherwise the reads below overrun the
    // buffer and lHuffmanCount could go negative
    LONG lInDataLength = pIn->GetActualDataLength();
    if (lInDataLength < 4)
        return E_FAIL;

    // Check if the frame contains new palette
    BYTE *pbHuffmanData = NULL;
    if (*((DWORD*)pbInBuffer) == CIN_COMMAND_PALETTE) {

        // A palette frame must additionally carry 256 RGB triplets
        if (lInDataLength < 4 + 256 * 3)
            return E_FAIL;

        // Get the palette pointer from the input data
        BYTE *pbPalette = pbInBuffer + 4;

        // Get the output media type format
        CMediaType mt((AM_MEDIA_TYPE)m_pOutput->CurrentMediaType());
        VIDEOINFO *pVideoInfo = (VIDEOINFO*)mt.Format();

        // Fill in the output media type format palette
        for (int i = 0; i < 256; i++) {
            pVideoInfo->bmiColors[i].rgbRed      = *pbPalette++;
            pVideoInfo->bmiColors[i].rgbGreen    = *pbPalette++;
            pVideoInfo->bmiColors[i].rgbBlue     = *pbPalette++;
            pVideoInfo->bmiColors[i].rgbReserved = 0;
        }

        // Set the changed media type for the output sample
        hr = pOut->SetMediaType(&mt);
        if (FAILED(hr))
            return hr;

        // Set up Huffman data pointer (right after the palette)
        pbHuffmanData = pbPalette;
    } else
        pbHuffmanData = pbInBuffer + 4;

    // Set up Huffman count: whatever input remains after the command
    // code (and optional palette)
    LONG lHuffmanCount = lInDataLength - (LONG)(pbHuffmanData - pbInBuffer);

    // Get the output sample's buffer
    BYTE *pbOutBuffer = NULL;
    hr = pOut->GetPointer(&pbOutBuffer);
    if (FAILED(hr))
        return hr;

    // Call the decoder function to decompress the frame
    if (!HuffmanDecode(pbHuffmanData, lHuffmanCount, pbOutBuffer))
        return E_FAIL;

    // Set the data length for the output sample.
    // The data length is the uncompressed frame size
    LONG lOutDataLength = m_pFormat->dwVideoWidth * m_pFormat->dwVideoHeight;
    hr = pOut->SetActualDataLength(lOutDataLength);
    if (FAILED(hr))
        return hr;

    // Each RGB frame is a sync point
    hr = pOut->SetSyncPoint(TRUE);
    if (FAILED(hr))
        return hr;

    // RGB sample should never be a preroll one
    hr = pOut->SetPreroll(FALSE);
    if (FAILED(hr))
        return hr;

    // We rely on the upstream filter (which is most likely
    // a parser or splitter) in the matter of stream and media
    // times setting. As to the discontinuity property, we should
    // not drop samples, so we just retain this property's value
    // set by the upstream filter
    return NOERROR;
}