HRESULT CFSTSplitterFilter::Shutdown(void)
{
	// Tear down the chunk parser first
	HRESULT hr = ShutdownParser();
	if (FAILED(hr))
		return hr;

	// Scope for the locking
	{
		// Serialize access to the filter data while resetting it
		CAutoLock datalock(&m_csData);

		// Clear all stream-specific state
		m_nFramesPerSecond	= 0;
		m_nSampleSize		= 0;
		m_nAvgBytesPerSec	= 0;
		m_nVideoFrames		= 0;

		// Release the frame table (if any)
		if (m_pFrameTable != NULL) {
			CoTaskMemFree(m_pFrameTable);
			m_pFrameTable = NULL;
		}
	}

	// Let the base class finish the shutdown
	return CBaseParserFilter::Shutdown();
}
// Runs a szbase search for parameter `p` over the range/direction given in
// `sd`, storing the result (or error state) back into `sd`.
//
// NOTE(review): `cancelHandle` is a shared member. It is written under
// m_mutex here and read by StopSearch(), but the szb_search() call itself
// runs OUTSIDE the lock, and for non-LUA_AV parameters `cancelHandle` is
// passed through without being (re)initialized here — confirm the intended
// synchronization/lifetime contract with StopSearch().
void QueryExecutor::ExecuteSearchQuery(szb_buffer_t* szb, TParam *p, DatabaseQuery::SearchData& sd) {
	// No buffer available: report success with a "nothing found" response
	if (szb == NULL) {
		sd.ok = true;
		sd.response = -1;
		return;
	}
#ifndef NO_LUA
	// LUA averaging parameters may search for a long time, so install a
	// cancellation handle with a 60-second timeout
	if (p->GetType() == TParam::P_LUA && p->GetFormulaType() == TParam::LUA_AV) {
		boost::mutex::scoped_lock datalock(m_mutex);
		cancelHandle = new SzbCancelHandle();
		cancelHandle->SetTimeout(60);
	}
#endif
	// Perform the actual search (runs outside the lock)
	sd.response = szb_search(szb, p, sd.start, sd.end, sd.direction, PeriodToProbeType(sd.period_type), cancelHandle, *sd.search_condition);
	if (szb->last_err != SZBE_OK) {
		// Propagate the buffer's error state into `sd`, then clear it.
		// NOTE(review): wcsdup() allocates with malloc(); whoever consumes
		// sd.error_str must free() it — verify the receiving side does.
		sd.ok = false;
		sd.error = szb->last_err;
		sd.error_str = wcsdup(szb->last_err_string.c_str());
		szb->last_err = SZBE_OK;
		szb->last_err_string = std::wstring();
	} else
		sd.ok = true;
#ifndef NO_LUA
	// Tear down the cancellation handle created above
	if (p->GetType() == TParam::P_LUA && p->GetFormulaType() == TParam::LUA_AV) {
		boost::mutex::scoped_lock datalock(m_mutex);
		delete cancelHandle;
		cancelHandle = NULL;
	}
#endif
}
HRESULT CFSTSplitterFilter::ResetParser(void)
{
	// Serialize access to the filter data
	CAutoLock datalock(&m_csData);

	// Nothing to do unless a chunk parser exists
	if (m_pParser == NULL)
		return NOERROR;

	// Put the parser back into its initial state
	HRESULT hr = m_pParser->ResetParser();
	return FAILED(hr) ? hr : NOERROR;
}
HRESULT CFSTSplitterFilter::InitializeParser(void)
{
	// Serialize access to the filter data
	CAutoLock datalock(&m_csData);

	HRESULT hr = NOERROR;

	// Lazily create the chunk parser on first use
	if (m_pParser == NULL) {
		m_pParser = new CFSTChunkParser(
			this,
			m_ppOutputPin,
			m_nVideoFrames,
			m_pFrameTable,
			m_nFramesPerSecond,
			m_nSampleSize,
			m_nAvgBytesPerSec,
			&hr
		);
		// Roll back on construction failure (either a failed HRESULT
		// reported by the constructor or an allocation failure)
		if (FAILED(hr) || (m_pParser == NULL)) {
			delete m_pParser;	// no-op when NULL
			m_pParser = NULL;
			return FAILED(hr) ? hr : E_OUTOFMEMORY;
		}
	}

	ASSERT(m_pParser);

	// Start the parser from a clean state
	hr = m_pParser->ResetParser();
	return FAILED(hr) ? hr : NOERROR;
}
HRESULT CFSTSplitterFilter::GetDuration(
	LPCWSTR pPinName,
	const GUID *pCurrentFormat,
	LONGLONG *pDuration
)
{
	// Serialize access to the filter data
	CAutoLock datalock(&m_csData);

	// The duration is kept as a video frame count, so express it in the
	// requested format through the video pin's conversion rules
	// (pPinName is deliberately not consulted here)
	return ConvertTimeFormat(
		wszFSTVideoOutputName,
		pDuration,
		pCurrentFormat,
		(LONGLONG)m_nVideoFrames,
		&TIME_FORMAT_FRAME
	);
}
HRESULT CFSTSplitterFilter::ShutdownParser(void)
{
	// Serialize access to the filter data
	CAutoLock datalock(&m_csData);

	// Nothing to tear down if no parser was ever created
	if (m_pParser == NULL)
		return NOERROR;

	// Reset the parser before destroying it
	HRESULT hr = m_pParser->ResetParser();
	if (FAILED(hr))
		return hr;

	delete m_pParser;
	m_pParser = NULL;

	return NOERROR;
}
// Converts llSource from *pSourceFormat into *pTargetFormat for the pin
// named pPinName and stores the result in *pTarget.
//   Video pin: FRAME == SAMPLE; FRAME/SAMPLE <-> MEDIA_TIME via
//              m_nFramesPerSecond.
//   Audio pin: SAMPLE/BYTE/MEDIA_TIME conversions via m_nSampleSize and
//              m_nAvgBytesPerSec.
// Returns E_UNEXPECTED when the needed stream parameters are not yet set,
// E_INVALIDARG for an unsupported format pair, E_NOTIMPL for an unknown pin.
HRESULT CFSTSplitterFilter::ConvertTimeFormat(
	LPCWSTR pPinName,
	LONGLONG *pTarget,
	const GUID *pTargetFormat,
	LONGLONG llSource,
	const GUID *pSourceFormat
)
{
	// If the formats coincide, no work to be done
	if (IsEqualGUID(*pTargetFormat, *pSourceFormat)) {
		*pTarget = llSource;
		return NOERROR;
	}

	if (!lstrcmpW(pPinName, wszFSTVideoOutputName)) {

		// Process the video output pin's request

		// Protect the filter data.
		// FIX: the audio branch below already locks m_csData before
		// reading m_nSampleSize/m_nAvgBytesPerSec, but the video branch
		// was reading m_nFramesPerSecond unprotected. CAutoLock/CCritSec
		// wrap a Win32 CRITICAL_SECTION, which is recursive, so this
		// remains safe when the caller (e.g. GetDuration()) already
		// holds m_csData.
		CAutoLock datalock(&m_csData);

		// Frames and samples are identical
		if (
			(
				(IsEqualGUID(*pSourceFormat, TIME_FORMAT_SAMPLE)) &&
				(IsEqualGUID(*pTargetFormat, TIME_FORMAT_FRAME))
			) ||
			(
				(IsEqualGUID(*pSourceFormat, TIME_FORMAT_FRAME)) &&
				(IsEqualGUID(*pTargetFormat, TIME_FORMAT_SAMPLE))
			)
		) {
			*pTarget = llSource;
			return NOERROR;
		}

		// Convert samples/frames to media time
		if (
			(
				(IsEqualGUID(*pSourceFormat, TIME_FORMAT_SAMPLE)) ||
				(IsEqualGUID(*pSourceFormat, TIME_FORMAT_FRAME))
			) &&
			(IsEqualGUID(*pTargetFormat, TIME_FORMAT_MEDIA_TIME))
		) {
			// Check if we have correct video format parameters
			if (m_nFramesPerSecond == 0)
				return E_UNEXPECTED;
			*pTarget = (llSource * UNITS) / m_nFramesPerSecond;
			return NOERROR;
		}

		// Convert media time to samples/frames
		if (
			(IsEqualGUID(*pSourceFormat, TIME_FORMAT_MEDIA_TIME)) &&
			(
				(IsEqualGUID(*pTargetFormat, TIME_FORMAT_SAMPLE)) ||
				(IsEqualGUID(*pTargetFormat, TIME_FORMAT_FRAME))
			)
		) {
			*pTarget = (llSource * m_nFramesPerSecond) / UNITS;
			return NOERROR;
		}

		// If the source/target formats pair does not match one of the
		// above, we cannot perform conversion
		return E_INVALIDARG;

	} else if (!lstrcmpW(pPinName, wszFSTAudioOutputName)) {

		// Process the audio output pin's request

		// Protect the filter data
		CAutoLock datalock(&m_csData);

		// Check if we have correct wave format parameters
		if (
			(m_nSampleSize		== 0) ||
			(m_nAvgBytesPerSec	== 0)
		)
			return E_UNEXPECTED;

		// Convert the source value to samples
		LONGLONG llSourceInSamples;
		if (IsEqualGUID(*pSourceFormat, TIME_FORMAT_SAMPLE))
			llSourceInSamples = llSource;
		else if (IsEqualGUID(*pSourceFormat, TIME_FORMAT_BYTE))
			llSourceInSamples = llSource / m_nSampleSize;
		else if (IsEqualGUID(*pSourceFormat, TIME_FORMAT_MEDIA_TIME))
			llSourceInSamples = (llSource * m_nAvgBytesPerSec) / (m_nSampleSize * UNITS);
		else
			return E_INVALIDARG;

		// Convert the source in samples to target format
		if (IsEqualGUID(*pTargetFormat, TIME_FORMAT_SAMPLE))
			*pTarget = llSourceInSamples;
		else if (IsEqualGUID(*pTargetFormat, TIME_FORMAT_BYTE))
			*pTarget = llSourceInSamples * m_nSampleSize;
		else if (IsEqualGUID(*pTargetFormat, TIME_FORMAT_MEDIA_TIME))
			*pTarget = (llSourceInSamples * m_nSampleSize * UNITS) / m_nAvgBytesPerSec;
		else
			return E_INVALIDARG;

		// At this point the conversion is successfully done
		return NOERROR;
	}

	// Unknown pin name
	return E_NOTIMPL;
}
// Parses the FST file header through pReader, creates the video (and, when
// a soundtrack is present, audio) output pins and fills in all stream
// parameters. On any failure the partially-initialized state is rolled
// back via Shutdown(). pPin is not referenced by this implementation.
HRESULT CFSTSplitterFilter::Initialize(IPin *pPin, IAsyncReader *pReader)
{
	// Check and validate the pointer
	CheckPointer(pReader, E_POINTER);
	ValidateReadPtr(pReader, sizeof(IAsyncReader));

	// Read file header
	FST_HEADER header;
	HRESULT hr = pReader->SyncRead(0, sizeof(header), (BYTE*)&header);
	if (hr != S_OK)
		return hr;

	// Verify file header
	if (header.dwID != FST_ID_2TSF)
		return VFW_E_INVALID_FILE_FORMAT;

	// Protect the filter data
	CAutoLock datalock(&m_csData);

	// Set video stream info
	m_nVideoFrames		= header.nVideoFrames;
	m_nFramesPerSecond	= header.nFramesPerSecond;

	// Allocate frame table.
	// NOTE(review): a file with nVideoFrames == 0 makes this a zero-byte
	// allocation, yet m_pFrameTable[0].cbSound is still read in the
	// soundtrack check below — confirm such files cannot reach here.
	m_pFrameTable = (FST_FRAME_ENTRY*)CoTaskMemAlloc(m_nVideoFrames * sizeof(FST_FRAME_ENTRY));
	if (m_pFrameTable == NULL) {
		Shutdown();
		return E_OUTOFMEMORY;
	}

	// Read in the frame table (stored immediately after the header)
	hr = pReader->SyncRead(sizeof(header), m_nVideoFrames * sizeof(FST_FRAME_ENTRY), (BYTE*)m_pFrameTable);
	if (hr != S_OK) {
		Shutdown();
		return hr;
	}

	// Walk the frame table and determine the maximum frame data sizes
	// (used to size the allocator buffers below)
	DWORD cbMaxImage = 0, cbMaxSound = 0;
	for (DWORD iFrame = 0; iFrame < m_nVideoFrames; iFrame++) {
		if (m_pFrameTable[iFrame].cbImage > cbMaxImage)
			cbMaxImage = m_pFrameTable[iFrame].cbImage;
		if (m_pFrameTable[iFrame].cbSound > cbMaxSound)
			cbMaxSound = m_pFrameTable[iFrame].cbSound;
	}

	// Set file positions
	m_llDefaultStart	= (LONGLONG)sizeof(FST_HEADER) + m_nVideoFrames * sizeof(FST_FRAME_ENTRY);	// Right after the header and frame table
	m_llDefaultStop		= MAXLONGLONG;	// Defaults to file end

	// Decide on the input pin properties
	m_cbInputAlign	= 1;
	m_cbInputBuffer	= cbMaxImage + cbMaxSound;

	// Protect the output pins state
	CAutoLock pinlock(&m_csPins);

	// Decide on the output pins count
	m_nOutputPins = 1;	// Video is always present

	// Check if we have soundtrack
	if (
		(header.dwAudioSampleRate != 0) &&
		(header.nAudioBits != 0) &&
		(m_pFrameTable[0].cbSound != 0)
	)
		m_nOutputPins++;

	// Create output pins array
	ASSERT(m_ppOutputPin == NULL);
	m_ppOutputPin = new CParserOutputPin*[m_nOutputPins];
	if (m_ppOutputPin == NULL) {
		m_nOutputPins = 0;
		Shutdown();
		return E_OUTOFMEMORY;
	}
	// Reset the output pin array elements to NULLs
	for (int i = 0; i < m_nOutputPins; i++)
		m_ppOutputPin[i] = NULL;

	// Allocate video media type
	CMediaType *pmtVideo = new CMediaType();
	if (pmtVideo == NULL) {
		Shutdown();
		return E_OUTOFMEMORY;
	}

	// Initialize the video media type (format block is the raw FST header)
	pmtVideo->InitMediaType();
	pmtVideo->SetType(&MEDIATYPE_Video);
	pmtVideo->SetSubtype(&MEDIASUBTYPE_FSTVideo);
	pmtVideo->SetSampleSize(0);
	pmtVideo->SetTemporalCompression(TRUE);
	pmtVideo->SetFormatType(&FORMAT_FSTVideo);
	if (!pmtVideo->SetFormat((BYTE*)&header, sizeof(header))) {
		delete pmtVideo;
		Shutdown();
		return E_FAIL;
	}

	// Allocate the video allocator properties
	ALLOCATOR_PROPERTIES *papVideo = (ALLOCATOR_PROPERTIES*)CoTaskMemAlloc(sizeof(ALLOCATOR_PROPERTIES));
	if (papVideo == NULL) {
		delete pmtVideo;
		Shutdown();
		return E_OUTOFMEMORY;
	}

	// Set the video allocator properties
	papVideo->cbAlign	= 0;	// No matter
	papVideo->cbPrefix	= 0;	// No matter
	papVideo->cBuffers	= 4;	// TEMP: No need to set larger value?
	papVideo->cbBuffer	= cbMaxImage;

	// Allocate time formats array. If we fail here, it's not an error,
	// we'll just set zero seeker parameters and may proceed
	DWORD dwVideoCapabilities = 0;
	int nVideoTimeFormats = 0;
	GUID *pVideoTimeFormats = (GUID*)CoTaskMemAlloc(3 * sizeof(GUID));
	if (pVideoTimeFormats) {
		nVideoTimeFormats = 3;
		// Fill in the time formats array
		pVideoTimeFormats[0] = TIME_FORMAT_MEDIA_TIME;
		pVideoTimeFormats[1] = TIME_FORMAT_FRAME;
		pVideoTimeFormats[2] = TIME_FORMAT_SAMPLE;
		dwVideoCapabilities =	AM_SEEKING_CanGetCurrentPos |
								AM_SEEKING_CanGetStopPos |
								AM_SEEKING_CanGetDuration;
	}

	// Create video output pin (always the first one!)
	hr = NOERROR;
	m_ppOutputPin[0] = new CParserOutputPin(
		NAME("FST Splitter Video Output Pin"),
		this,
		&m_csFilter,
		pmtVideo,
		papVideo,
		dwVideoCapabilities,
		nVideoTimeFormats,
		pVideoTimeFormats,
		&hr,
		wszFSTVideoOutputName
	);
	if (
		(FAILED(hr)) ||
		(m_ppOutputPin[0] == NULL)
	) {
		if (m_ppOutputPin[0]) {
			delete m_ppOutputPin[0];
			m_ppOutputPin[0] = NULL;
		} else {
			// The pin was never constructed: free what it would have owned
			delete pmtVideo;
			CoTaskMemFree(papVideo);
			if (pVideoTimeFormats)
				CoTaskMemFree(pVideoTimeFormats);
		}
		Shutdown();
		if (FAILED(hr))
			return hr;
		else
			return E_OUTOFMEMORY;
	}
	// Hold a reference on the video output pin
	m_ppOutputPin[0]->AddRef();

	// We've created a new pin -- so increment pin version
	IncrementPinVersion();

	if (m_nOutputPins > 1) {

		// Allocate audio media type
		CMediaType *pmtAudio = new CMediaType();
		if (pmtAudio == NULL) {
			Shutdown();
			return E_OUTOFMEMORY;
		}

		// Initialize the audio media type.
		// NOTE(review): the channel count is stored biased by one
		// (wAudioChannels + 1) -- see the TEMP remark below; confirm
		// against the FST format description.
		pmtAudio->InitMediaType();
		pmtAudio->SetType(&MEDIATYPE_Audio);
		pmtAudio->SetSubtype(&MEDIASUBTYPE_PCM);
		pmtAudio->SetSampleSize(header.nAudioBits * (header.wAudioChannels + 1) / 8);
		pmtAudio->SetTemporalCompression(FALSE);
		pmtAudio->SetFormatType(&FORMAT_WaveFormatEx);
		WAVEFORMATEX *pFormat = (WAVEFORMATEX*)pmtAudio->AllocFormatBuffer(sizeof(WAVEFORMATEX));
		if (pFormat == NULL) {
			delete pmtAudio;
			Shutdown();
			return E_OUTOFMEMORY;
		}

		// Fill in the audio format block
		pFormat->wFormatTag			= WAVE_FORMAT_PCM;
		pFormat->nChannels			= header.wAudioChannels + 1;	// TEMP: Is it really so?
		pFormat->nSamplesPerSec		= header.dwAudioSampleRate;
		pFormat->nAvgBytesPerSec	= pFormat->nChannels * header.nAudioBits * header.dwAudioSampleRate / 8;
		pFormat->nBlockAlign		= pFormat->nChannels * header.nAudioBits / 8;
		pFormat->wBitsPerSample		= header.nAudioBits;
		pFormat->cbSize				= 0;

		// Allocate the audio allocator properties
		ALLOCATOR_PROPERTIES *papAudio = (ALLOCATOR_PROPERTIES*)CoTaskMemAlloc(sizeof(ALLOCATOR_PROPERTIES));
		if (papAudio == NULL) {
			delete pmtAudio;
			Shutdown();
			return E_OUTOFMEMORY;
		}

		// Set the audio allocator properties
		papAudio->cbAlign	= 0;	// No matter
		papAudio->cbPrefix	= 0;	// No matter
		papAudio->cBuffers	= 4;	// No use to set different from video value
		papAudio->cbBuffer	= cbMaxSound;

		// Set the wave format parameters needed for the calculation
		// of sample stream and media duration
		m_nSampleSize		= pFormat->nBlockAlign;
		m_nAvgBytesPerSec	= pFormat->nAvgBytesPerSec;

		// Allocate time formats array. If we fail here, it's not an error,
		// we'll just set zero seeker parameters and may proceed
		DWORD dwAudioCapabilities = 0;
		int nAudioTimeFormats = 0;
		GUID *pAudioTimeFormats = (GUID*)CoTaskMemAlloc(3 * sizeof(GUID));
		if (pAudioTimeFormats) {
			nAudioTimeFormats = 3;
			// Fill in the time formats array
			pAudioTimeFormats[0] = TIME_FORMAT_MEDIA_TIME;
			pAudioTimeFormats[1] = TIME_FORMAT_SAMPLE;
			pAudioTimeFormats[2] = TIME_FORMAT_BYTE;
			dwAudioCapabilities =	AM_SEEKING_CanGetCurrentPos |
									AM_SEEKING_CanGetStopPos |
									AM_SEEKING_CanGetDuration;
		}

		// Create audio output pin
		hr = NOERROR;
		m_ppOutputPin[1] = new CParserOutputPin(
			NAME("FST Splitter Audio Output Pin"),
			this,
			&m_csFilter,
			pmtAudio,
			papAudio,
			dwAudioCapabilities,
			nAudioTimeFormats,
			pAudioTimeFormats,
			&hr,
			wszFSTAudioOutputName
		);
		if (
			(FAILED(hr)) ||
			(m_ppOutputPin[1] == NULL)
		) {
			if (m_ppOutputPin[1]) {
				delete m_ppOutputPin[1];
				m_ppOutputPin[1] = NULL;
			} else {
				// The pin was never constructed: free what it would have owned
				delete pmtAudio;
				CoTaskMemFree(papAudio);
				if (pAudioTimeFormats)
					CoTaskMemFree(pAudioTimeFormats);
			}
			Shutdown();
			if (FAILED(hr))
				return hr;
			else
				return E_OUTOFMEMORY;
		}
		// Hold a reference on the audio output pin
		m_ppOutputPin[1]->AddRef();

		// We've created a new pin -- so increment pin version
		IncrementPinVersion();
	}

	// Scope for the locking
	{
		// Protect media content information
		CAutoLock infolock(&m_csInfo);

		// Set the media content strings (allocation failure here is
		// tolerated -- the strings simply stay unset)
		m_wszAuthorName = (OLECHAR*)CoTaskMemAlloc(sizeof(OLECHAR) * (lstrlenW(wszFSTAuthorName) + 1));
		if (m_wszAuthorName)
			lstrcpyW(m_wszAuthorName, wszFSTAuthorName);
		m_wszDescription = (OLECHAR*)CoTaskMemAlloc(sizeof(OLECHAR) * (lstrlenW(wszFSTDescription) + 1));
		if (m_wszDescription)
			lstrcpyW(m_wszDescription, wszFSTDescription);
	}

	return NOERROR;
}
void QueryExecutor::StopSearch() {
	// Request cancellation of any search currently in progress.
	// No-op when no cancellation handle is installed.
	boost::mutex::scoped_lock guard(m_mutex);
	if (cancelHandle == NULL)
		return;
	cancelHandle->SetStopFlag();
}