// Set the maximum recording time for this pin.
// nMillSecs is given in milliseconds and is stored converted to the
// 100-nanosecond units used by DirectShow reference times.
STDMETHODIMP CScreenCaptureSourcePin::SetMaxRecordTime(IN INT nMillSecs)
{
    const REFERENCE_TIME rtLimit = MILLISECONDS_TO_100NS_UNITS(nMillSecs);
    m_rfMaxRecordTime = rtLimit;
    return S_OK;
}
// This is where we insert the DIB bits into the video stream. // FillBuffer is called once for every sample in the stream. HRESULT CPushPinAudio::FillBuffer(IMediaSample *pSample) { BYTE *pData; long cbData; CheckPointer(pSample, E_POINTER); CAutoLock cAutoLockShared(&m_cSharedState); // Access the sample's data buffer pSample->GetPointer(&pData); cbData = pSample->GetSize(); // Check that we're still using Audio ASSERT(m_mt.formattype == FORMAT_WaveFormatEx); WAVEFORMATEX *pwfx = (WAVEFORMATEX *)m_mt.Format(); // 一時停止処理 while(m_blPaused){ Sleep(10); //CPU負荷減少 FILTER_STATE fs; HRESULT hr = m_pFilter->GetState(100, &fs); if (hr != S_OK) { break; } if (fs != State_Running) { break; } } // 入力バッファ状態チェック if (m_pListOfBuffer == NULL) { Sleep(10); //CPU負荷減少 goto NODATA_SECTION; } long lngCnt = (long)m_pListOfBuffer->GetCount(); // 競合を防ぐため、バッファを1つ残して処理(バッファの生成待ち) if (lngCnt <= 1) { Sleep(10); //CPU負荷減少 goto NODATA_SECTION; } POSITION pos = m_pListOfBuffer->GetHeadPosition(); if (pos == NULL) goto NODATA_SECTION; CWaveBuffer *pBuf = (CWaveBuffer *)m_pListOfBuffer->GetAt(pos); if (!pBuf) goto NODATA_SECTION; if (pBuf->m_blActive) goto NODATA_SECTION; // バッファ書き込み中 // 入力バッファをストリームバッファにコピー long lngSz = pBuf->GetNumSamples() * pBuf->GetSampleSize(); // 録音終了時では小さくなる if (!pBuf->m_blDead) { BYTE *pDat = (BYTE *)pBuf->GetBuffer(); CopyMemory(pData, pBuf->GetBuffer(), lngSz); } // バッファの解放 if (pBuf) { delete pBuf; pBuf = NULL; } m_pListOfBuffer->RemoveHead(); // フレームの開始時間を計算 DWORD dwFrame = lngSz * 1000 / pwfx->nAvgBytesPerSec; // 1バッファのミリ秒 DWORD dwNow = timeGetTime(); REFERENCE_TIME rtStart = m_rtSampleTime; REFERENCE_TIME rtStop = rtStart + MILLISECONDS_TO_100NS_UNITS(dwFrame); m_rtSampleTime = rtStop; DbgLog((LOG_TRACE, 1, "rtStart:%ld", rtStart )); DbgLog((LOG_TRACE, 1, "rtStop:%ld", rtStop )); TRACE(_T("audio rtStart:%ld\n"), rtStart); TRACE(_T("audio rtStop:%ld\n"), rtStop); pSample->SetTime(&rtStart, &rtStop); pSample->SetDiscontinuity(FALSE); pSample->SetSyncPoint(TRUE); 
pSample->SetActualDataLength(lngSz); return S_OK; NODATA_SECTION: REFERENCE_TIME rtTime = m_rtSampleTime; pSample->SetTime(&rtTime, &rtTime); pSample->SetDiscontinuity(FALSE); pSample->SetSyncPoint(FALSE); pSample->SetActualDataLength(0); return S_OK; }
bool ParseCUESheet(CString cueData, CAtlList<Chapters> &ChaptersList, CString& Title, CString& Performer) { BOOL fAudioTrack; int track_no = -1, /*index, */index_cnt = 0; REFERENCE_TIME rt = _I64_MIN; CString TrackTitle; CString title, performer; Title.Empty(); Performer.Empty(); CAtlList<CString> cuelines; Explode(cueData, cuelines, '\n'); if (cuelines.GetCount() <= 1) { return false; } while (cuelines.GetCount()) { CString cueLine = cuelines.RemoveHead().Trim(); CString cmd = GetCUECommand(cueLine); if (cmd == _T("TRACK")) { if (rt != _I64_MIN && track_no != -1 && index_cnt) { MakeCUETitle(TrackTitle, title, performer, track_no); if (!TrackTitle.IsEmpty()) { ChaptersList.AddTail(Chapters(TrackTitle, rt)); } } rt = _I64_MIN; index_cnt = 0; TCHAR type[256]; swscanf_s(cueLine, _T("%d %s"), &track_no, type, _countof(type)-1); fAudioTrack = (wcscmp(type, _T("AUDIO")) == 0); TrackTitle.Format(_T("Track %02d"), track_no); } else if (cmd == _T("TITLE")) { cueLine.Trim(_T(" \"")); title = cueLine; if (track_no == -1) { Title = title; } } else if (cmd == _T("PERFORMER")) { cueLine.Trim(_T(" \"")); performer = cueLine; if (track_no == -1) { Performer = performer; } } else if (cmd == _T("INDEX")) { int idx, mm, ss, ff; swscanf_s(cueLine, _T("%d %d:%d:%d"), &idx, &mm, &ss, &ff); if (fAudioTrack) { index_cnt++; rt = MILLISECONDS_TO_100NS_UNITS((mm * 60 + ss) * 1000); } } } if (rt != _I64_MAX && track_no != -1 && index_cnt) { MakeCUETitle(TrackTitle, title, performer, track_no); if (!TrackTitle.IsEmpty()) { ChaptersList.AddTail(Chapters(TrackTitle, rt)); } } if (ChaptersList.GetCount()) { return true; } else { return false; } }