// Creates the looping DirectSound secondary buffer used for playback and
// obtains its IDirectSoundBuffer8 interface into mpDSBuffer.
// Returns false (with a debug trace) on any DirectSound failure.
bool VDAudioOutputDirectSoundW32::InitPlayback() {
	tWAVEFORMATEX *wf = &*mInitFormat;

	// Precompute ms-per-byte from the wave format so buffer positions can
	// later be converted into time.
	mMillisecsPerByte = 1000.0 * (double)wf->nBlockAlign / (double)wf->nAvgBytesPerSec;

	// create looping secondary buffer
	DSBUFFERDESC dsd={sizeof(DSBUFFERDESC)};
	dsd.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS;	// GLOBALFOCUS: keep playing when our window loses focus
	dsd.dwBufferBytes = mDSBufferSize;
	dsd.lpwfxFormat = (WAVEFORMATEX *)wf;
	dsd.guid3DAlgorithm = DS3DALG_DEFAULT;

	IDirectSoundBuffer *pDSB;
	HRESULT hr = mpDS8->CreateSoundBuffer(&dsd, &pDSB, NULL);
	if (FAILED(hr)) {
		VDDEBUG("VDAudioOutputDirectSound: Failed to create secondary buffer! hr=%08x\n", hr);
		return false;
	}

	// query to IDirectSoundBuffer8
	hr = pDSB->QueryInterface(IID_IDirectSoundBuffer8, (void **)&mpDSBuffer);

	// The intermediate interface is released regardless of whether the QI
	// succeeded; on success mpDSBuffer holds its own reference.
	pDSB->Release();

	if (FAILED(hr)) {
		VDDEBUG("VDAudioOutputDirectSound: Failed to obtain IDirectSoundBuffer8 interface! hr=%08x\n", hr);
		return false;
	}

	// all done!
	mDSWriteCursor = 0;
	return true;
}
void ActivateFrameServerDialog(HWND hwnd) { static wchar_t fileFilters[]= L"VirtualDub AVIFile signpost (*.vdr,*.avi)\0" L"*.vdr;*.avi\0" L"All files\0" L"*.*\0" ; char szServerName[128]; if (!InitServerDLL()) return; if (!DialogBoxParam(GetModuleHandle(NULL), MAKEINTRESOURCE(IDD_SERVER_SETUP), hwnd, FrameServerSetupDlgProc, (LPARAM)szServerName)) return; try { vdrefptr<Frameserver> fs(new Frameserver(inputVideoAVI, inputAudio, hwnd, &g_dubOpts, g_project->GetTimeline().GetSubset())); const VDStringW fname(VDGetSaveFileName(kFileDialog_Signpost, (VDGUIHandle)hwnd, L"Save .VDR signpost for AVIFile handler", fileFilters, g_prefs.main.fAttachExtension ? L"vdr" : NULL, 0, 0)); if (!fname.empty()) { long buf[5]; char sname[128]; int slen; ivdsl->GetComputerName(sname); strcat(sname,"/"); strcat(sname,szServerName); slen = strlen(sname); slen += slen&1; buf[0] = 'FFIR'; buf[1] = slen+12; buf[2] = 'MRDV'; buf[3] = 'HTAP'; buf[4] = slen; VDFile file(fname.c_str(), nsVDFile::kWrite | nsVDFile::kDenyRead | nsVDFile::kCreateAlways); file.write(buf, 20); file.write(sname, strlen(sname)); if (strlen(sname) & 1) file.write("", 1); file.close(); } VDDEBUG("Attempting to initialize frameserver...\n"); fs->Go(ivdsl, szServerName); VDDEBUG("Frameserver exit.\n"); } catch(const MyError& e) { e.post(hwnd, "Frameserver error"); } }
// Loads dsound.dll at runtime, creates the IDirectSound8 object, and sets
// the cooperative level. Returns false (with a debug trace) on failure;
// mhmodDS is left loaded for the destructor to release.
bool VDAudioOutputDirectSoundW32::InitDirectSound() {
	// The DirectSound ring buffer is double the caller-requested size; the
	// half size is kept separately for streaming bookkeeping.
	mDSBufferSize = mBufferSize * 2;
	mDSBufferSizeHalf = mBufferSize;

	// attempt to load DirectSound library
	mhmodDS = LoadLibraryA("dsound");
	if (!mhmodDS)
		return false;

	// Resolve DirectSoundCreate8 dynamically so we don't hard-link against
	// dsound.dll (keeps the app runnable when DirectSound 8 is absent).
	typedef HRESULT (WINAPI *tpDirectSoundCreate8)(LPCGUID, LPDIRECTSOUND8 *, LPUNKNOWN);
	tpDirectSoundCreate8 pDirectSoundCreate8 = (tpDirectSoundCreate8)GetProcAddress(mhmodDS, "DirectSoundCreate8");
	if (!pDirectSoundCreate8) {
		VDDEBUG("VDAudioOutputDirectSound: Cannot find DirectSoundCreate8 entry point!\n");
		return false;
	}

	// attempt to create DirectSound object
	HRESULT hr = pDirectSoundCreate8(NULL, &mpDS8, NULL);
	if (FAILED(hr)) {
		VDDEBUG("VDAudioOutputDirectSound: Failed to create DirectSound object! hr=%08x\n", hr);
		return false;
	}

	// Set cooperative level.
	//
	// From microsoft.public.win32.programmer.directx.audio, by an SDE on the Windows AV team:
	//
	// "I can't speak for all DirectX components but DirectSound does not
	// subclass the window procedure. It simply uses the window handle to
	// determine (every 1/2 second, in a seperate thread) if the window that
	// corresponds to the handle has the focus (Actually, it is slightly more
	// complicated than that, but that is close enough for this discussion).
	// You can feel free to use the desktop window or console window for the
	// window handle if you are going to create GLOBAL_FOCUS buffers."
	//
	// Alright, you guys said we could do it!
	//
	hr = mpDS8->SetCooperativeLevel(GetDesktopWindow(), DSSCL_PRIORITY);
	if (FAILED(hr)) {
		VDDEBUG("VDAudioOutputDirectSound: Failed to set cooperative level! hr=%08x\n", hr);
		return false;
	}

	return true;
}
// Reports this error to the user: traces it via VDDEBUG, records it in the
// application log, and raises a modal message box. Does nothing if the
// error buffer is null or empty.
//
// hWndParent - owner window for the message box (may be NULL).
// title      - caption for the message box and log prefix.
void MyError::post(HWND hWndParent, const char *title) const {
	if (!buf || !*buf)
		return;

	VDDEBUG("*** %s: %s\n", title, buf);
	VDLog(kVDLogError, VDswprintf(L"Error: %hs", 1, &buf));

	// MB_SETFOREGROUND so the error is visible even when we're backgrounded.
	MessageBox(hWndParent, buf, title, MB_OK | MB_ICONERROR | MB_SETFOREGROUND);
}
// Resolves a client session id to its FrameserverSession object.
// Returns NULL (after emitting a debug trace) when the id is unknown.
FrameserverSession *Frameserver::SessionLookup(LPARAM lParam) {
	const tSessions::const_iterator itSession = mSessions.find(lParam);

	if (itSession == mSessions.end()) {
		VDDEBUG("Session lookup failed on %08lx\n", lParam);
		return NULL;
	}

	return itSession->second;
}
// Constructs a frameserver over the given video/audio sources.
//
// video      - decoded video source to serve (required).
// audio      - audio source, or NULL for video-only serving.
// hwndParent - main application window; later usurped by Go().
// xopt       - dub options controlling offsets/subsets.
// subset     - timeline subset selecting which frames are served.
//
// Builds the video frame map and the audio subset translated from the
// video selection, then caches total video/audio sample counts.
Frameserver::Frameserver(VideoSource *video, AudioSource *audio, HWND hwndParent, DubOptions *xopt, const FrameSubset& subset) {
	opt					= xopt;
	hwnd				= hwndParent;

	aSrc				= audio;
	vSrc				= video;

	// Status counters shown by the server dialog.
	lFrameCount = lRequestCount = lAudioSegCount = 0;

	InitStreamValuesStatic(vInfo, aInfo, video, audio, opt, &subset);

	vdfastvector<IVDVideoSource *> vsrcs(1, video);
	mVideoFrameMap.Init(vsrcs, vInfo.start_src, vInfo.frameRateIn / vInfo.frameRate, &subset, vInfo.end_dst, false);

	// Convert the configured start/end offsets (ms) into sample counts.
	VDPosition lOffsetStart = video->msToSamples(opt->video.lStartOffsetMS);
	VDPosition lOffsetEnd = video->msToSamples(opt->video.lEndOffsetMS);

	FrameSubset videoset(subset);

	// Trim the working video subset per the audio start/end options before
	// deriving the audio subset from it.
	if (opt->audio.fEndAudio)
		videoset.deleteRange(videoset.getTotalFrames() - lOffsetEnd, videoset.getTotalFrames());

	if (opt->audio.fStartAudio)
		videoset.deleteRange(0, lOffsetStart);

	VDDEBUG("Video subset:\n");
	videoset.dump();

	if (audio)
		AudioTranslateVideoSubset(audioset, videoset, vInfo.frameRateIn, audio->getWaveFormat(),
			!opt->audio.fEndAudio && (videoset.empty() || videoset.back().end() == video->getEnd()) ? audio->getEnd() : 0,
			NULL);

	VDDEBUG("Audio subset:\n");
	audioset.dump();

	if (audio) {
		// Apply the audio skew (negated: a positive option delays audio).
		audioset.offset(audio->msToSamples(-opt->audio.offset));
		lAudioSamples = audioset.getTotalFrames();
	} else
		lAudioSamples = 0;

	lVideoSamples = mVideoFrameMap.size();
}
// Loads the frameserver link DLL (64-bit variant when built for AMD64) and
// resolves the IVDubServerLink interface into the global 'ivdsl'.
// Returns TRUE on success; FALSE if the DLL or its entry point is missing.
static BOOL InitServerDLL() {
#ifdef _M_AMD64
	hmodServer = LoadLibrary("vdsvrlnk64.dll");
#else
	hmodServer = LoadLibrary("vdsvrlnk.dll");
#endif

	VDDEBUG("VDSVRLNK handle: %p\n", hmodServer);

	if (!hmodServer)
		return FALSE;

	const FARPROC fp = GetProcAddress(hmodServer, "GetDubServerInterface");
	if (!fp)
		return FALSE;

	// The entry point returns the link interface directly.
	ivdsl = ((IVDubServerLink *(*)(void))fp)();
	return TRUE;
}
// Services a VDSRVM_REQ_AUDIO request: reads up to the requested number of
// audio samples beginning at sample 'lStart' into the session's shared arena.
//
// Arena layout on entry: [0] = requested sample count, [4] = dest buffer size.
// Arena layout on exit:  [0] = bytes written, [4] = samples written,
//                        data follows at offset 8.
//
// Returns a VDSRVERR_* code; reads past end-of-stream succeed with zero
// samples rather than failing (Panasonic compatibility, see below).
LRESULT Frameserver::SessionAudio(LPARAM lParam, WPARAM lStart) {
	FrameserverSession *fs = SessionLookup(lParam);
	if (!fs)
		return VDSRVERR_BADSESSION;

	LONG lCount = *(LONG *)fs->arena;
	LONG cbBuffer = *(LONG *)(fs->arena+4);

	// Never trust the client: clamp the destination size to the arena.
	if (cbBuffer > fs->arena_size - 8)
		cbBuffer = fs->arena_size - 8;

	// Fixed: the sample/count arguments were swapped in this trace.
	VDDEBUG("[session %08lx] VDSRVM_REQ_AUDIO(sample %ld, count %d, cbBuffer %ld)\n", lParam, lStart, lCount, cbBuffer);

	// Do not return an error on an attempt to read beyond the end of
	// the audio stream -- this causes Panasonic to error.
	if (lStart >= lAudioSamples) {
		memset(fs->arena, 0, 8);
		return VDSRVERR_OK;
	}

	// Fixed: clamp the count to the samples actually remaining; the old code
	// set lCount to the total stream length, overshooting whenever lStart > 0.
	if (lStart+lCount > lAudioSamples)
		lCount = (LONG)(lAudioSamples - lStart);

	// Read subsets.
	long lTotalBytes = 0, lTotalSamples = 0;
	uint32 lActualBytes, lActualSamples = 1;
	char *pDest = (char *)(fs->arena + 8);

	try {
		while(lCount>0 && lActualSamples>0) {
			sint64 start, len;

			// Translate range.
			start = audioset.lookupRange(lStart, len);
			if (len > lCount)
				len = lCount;

			// Clamp the translated position into the source's valid extent;
			// out-of-range positions degrade to a single-sample read.
			if (start < aSrc->getStart()) {
				start = aSrc->getStart();
				len = 1;
			}

			if (start >= aSrc->getEnd()) {
				start = aSrc->getEnd() - 1;
				len = 1;
			}

			// Attempt read.
			switch(aSrc->read(start, VDClampToSint32(len), pDest, cbBuffer, &lActualBytes, &lActualSamples)) {
			case AVIERR_OK:
				break;
			case AVIERR_BUFFERTOOSMALL:
				// A too-small buffer is only an error if nothing was read yet;
				// otherwise return the partial result.
				if (!lTotalSamples)
					return VDSRVERR_TOOBIG;
				goto out_of_space;
			default:
				return VDSRVERR_FAILED;
			}

			lCount -= lActualSamples;
			lStart += lActualSamples;
			cbBuffer -= lActualBytes;
			pDest += lActualBytes;
			lTotalSamples += lActualSamples;
			lTotalBytes += lActualBytes;
		}
out_of_space:
		;
	} catch(const MyError&) {
		return VDSRVERR_FAILED;
	}

	// Report actual totals back through the arena header.
	*(LONG *)(fs->arena + 0) = lTotalBytes;
	*(LONG *)(fs->arena + 4) = lTotalSamples;

	return VDSRVERR_OK;
}
// Window procedure for the frameserver's communication window. Client
// requests arrive as VDSRVM_* messages (session id in lParam, request
// argument in wParam) and are dispatched to the Session* handlers;
// request counters are bumped for the status dialog.
LRESULT Frameserver::WndProc( HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) {
	switch (message) {
	case WM_CLOSE:
		DestroyWindow(hWnd);
		break;

	case WM_DESTROY:		// message: window being destroyed
		// Signals Go()'s message loop to exit.
		mbExit = true;
		break;

	case VDSRVM_BIGGEST:
		{
			// Report the largest arena any single request might need:
			// stream info, a video format header, a full video frame,
			// the audio data rate, or the audio format -- floor of 64K.
			uint32 size = sizeof(AVISTREAMINFO);

			if (vSrc) {
				if (size < sizeof(BITMAPINFOHEADER))
					size = sizeof(BITMAPINFOHEADER);

				if (size < mFrameSize)
					size = mFrameSize;
			}

			if (aSrc) {
				uint32 dataRate = aSrc->getWaveFormat()->mDataRate;
				if (size < dataRate)
					size = dataRate;

				uint32 formatSize = aSrc->getFormatLen();
				if (size < formatSize)
					size = aSrc->getFormatLen();
			}

			if (size < 65536)
				size = 65536;

			VDDEBUG("VDSRVM_BIGGEST: allocate a frame of size %ld bytes\n", size);
			return size;
		}

	case VDSRVM_OPEN:
		++lRequestCount;
		VDDEBUG("VDSRVM_OPEN(arena size %ld, mmap ID %08lx)\n", wParam, lParam);
		return SessionOpen(lParam, wParam);

	case VDSRVM_CLOSE:
		++lRequestCount;
		VDDEBUG("[session %08lx] VDSRVM_CLOSE()\n", lParam);
		return SessionClose(lParam);

	case VDSRVM_REQ_STREAMINFO:
		++lRequestCount;
		VDDEBUG("[session %08lx] VDSRVM_REQ_STREAMINFO(stream %d)\n", lParam, wParam);
		return SessionStreamInfo(lParam, wParam);

	case VDSRVM_REQ_FORMAT:
		++lRequestCount;
		VDDEBUG("[session %08lx] VDSRVM_REQ_FORMAT(stream %d)\n", lParam, wParam);
		return SessionFormat(lParam, wParam);

	case VDSRVM_REQ_FRAME:
		++lFrameCount;
		VDDEBUG("[session %08lx] VDSRVM_REQ_FRAME(sample %ld)\n", lParam, wParam);
		return SessionFrame(lParam, wParam);

	case VDSRVM_REQ_AUDIO:
		++lAudioSegCount;
		return SessionAudio(lParam, wParam);

	case VDSRVM_REQ_AUDIOINFO:
		++lAudioSegCount;
		VDDEBUG("[session %08lx] VDSRVM_REQ_AUDIOINFO(sample %ld)\n", lParam, wParam);
		return SessionAudioInfo(lParam, wParam);

	default:
		// Everything else goes to the original frame's window procedure.
		return mpUIFrame->DefProc(hWnd, message, wParam, lParam);
	}
	return (0);
}
// Runs the frameserver: prepares the video decode path and filter chain,
// computes the served video/audio subsets, registers the server with the
// link DLL under 'name', and pumps messages until exit is requested.
//
// ivdsl - frameserver link interface (from InitServerDLL).
// name  - server name to register.
//
// Throws MyError if the source cannot decompress to an RGB format or if
// the frameserver cannot be created.
void Frameserver::Go(IVDubServerLink *ivdsl, char *name) {
	int server_index = -1;

	lpszFsname = name;

	// prepare the sources...
	if (!vSrc->setTargetFormat(g_dubOpts.video.mInputFormat))
		if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_XRGB8888))
			if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_RGB888))
				if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_XRGB1555))
					if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_Pal8))
						throw MyError("The decompression codec cannot decompress to an RGB format. This is very unusual. Check that any \"Force YUY2\" options are not enabled in the codec's properties.");

	IVDStreamSource *pVSS = vSrc->asStream();
	FrameSubset videoset(mSubset);

	// Convert the timeline selection into source frame numbers.
	const VDFraction frameRateTimeline(g_project->GetTimelineFrameRate());
	VDPosition startFrame;
	VDPosition endFrame;
	VDConvertSelectionTimesToFrames(*opt, mSubset, frameRateTimeline, startFrame, endFrame);

	InitVideoStreamValuesStatic(vInfo, vSrc, aSrc, opt, &mSubset, &startFrame, &endFrame);

	// Build and ready the video filter chain over the source format.
	const VDPixmap& px = vSrc->getTargetFormat();
	const VDFraction& srcFAR = vSrc->getPixelAspectRatio();

	filters.prepareLinearChain(&g_filterChain, px.w, px.h, px.format, vInfo.mFrameRatePreFilter, -1, srcFAR);

	mpVideoFrameSource = new VDFilterFrameVideoSource;
	mpVideoFrameSource->Init(vSrc, filters.GetInputLayout());

	filters.SetVisualAccelDebugEnabled(false);
	filters.SetAccelEnabled(VDPreferencesGetFilterAccelEnabled());
	filters.SetAsyncThreadCount(VDPreferencesGetFilterThreadCount());
	filters.initLinearChain(NULL, 0, &g_filterChain, mpVideoFrameSource, px.w, px.h, px.format, px.palette, vInfo.mFrameRatePreFilter, -1, srcFAR);
	filters.ReadyFilters();

	InitVideoStreamValuesStatic2(vInfo, opt, &filters, frameRateTimeline);
	InitAudioStreamValuesStatic(aInfo, aSrc, opt);

	vdfastvector<IVDVideoSource *> vsrcs(1, vSrc);
	mVideoFrameMap.Init(vsrcs, vInfo.start_src, vInfo.mFrameRateTimeline / vInfo.mFrameRate, &mSubset, vInfo.end_dst, opt->video.mbUseSmartRendering, opt->video.mode == DubVideoOptions::M_NONE, opt->video.mbPreserveEmptyFrames, &filters, false, false);

	// Trim the audio-relevant video subset per the start/end audio options.
	if (opt->audio.fEndAudio)
		videoset.deleteRange(endFrame, videoset.getTotalFrames());

	if (opt->audio.fStartAudio)
		videoset.deleteRange(0, startFrame);

	VDDEBUG("Video subset:\n");
	videoset.dump();

	if (aSrc)
		AudioTranslateVideoSubset(audioset, videoset, vInfo.mFrameRateTimeline, aSrc->getWaveFormat(),
			!opt->audio.fEndAudio && (videoset.empty() || videoset.back().end() == pVSS->getEnd()) ? aSrc->getEnd() : 0,
			NULL);

	VDDEBUG("Audio subset:\n");
	audioset.dump();

	if (aSrc) {
		// Apply the audio skew (negated: a positive option delays audio).
		audioset.offset(aSrc->msToSamples(-opt->audio.offset));
		lAudioSamples = VDClampToUint32(audioset.getTotalFrames());
	} else
		lAudioSamples = 0;

	lVideoSamples = VDClampToUint32(mVideoFrameMap.size());

	vSrc->streamBegin(true, false);

	// Served frames are 24-bit RGB, flipped vertically (bottom-up DIB).
	const VDPixmapLayout& outputLayout = filters.GetOutputLayout();
	mFrameSize = VDPixmapCreateLinearLayout(mFrameLayout, nsVDPixmap::kPixFormat_RGB888, outputLayout.w, outputLayout.h, 4);
	VDPixmapLayoutFlipV(mFrameLayout);

	if (aSrc)
		aSrc->streamBegin(true, false);

	// usurp the window
	VDUIFrame *pFrame = VDUIFrame::GetFrame(hwnd);
	mpUIFrame = pFrame;
	pFrame->Attach(this);

	guiSetTitle(hwnd, IDS_TITLE_FRAMESERVER);

	// create dialog box
	mbExit = false;

	if (hwndStatus = CreateDialogParam(g_hInst, MAKEINTRESOURCE(IDD_SERVER), hwnd, Frameserver::StatusDlgProc, (LPARAM)this)) {
		// hide the main window
		ShowWindow(hwnd, SW_HIDE);

		// create the frameserver
		server_index = ivdsl->CreateFrameServer(name, hwnd);

		if (server_index>=0) {
			// kick us into high priority
			SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);

			// enter window loop
			{
				MSG msg;

				while(!mbExit) {
					BOOL result = GetMessage(&msg, NULL, 0, 0);

					if (result == (BOOL)-1)
						break;

					if (!result) {
						PostQuitMessage(msg.wParam);
						break;
					}

					TranslateMessage(&msg);
					DispatchMessage(&msg);
				}
			}

			// Return to normal priority. (Fixed: this previously re-asserted
			// HIGH_PRIORITY_CLASS, leaving the process at high priority after
			// the server stopped.)
			SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);

			ivdsl->DestroyFrameServer(server_index);
		}

		if (IsWindow(hwndStatus))
			DestroyWindow(hwndStatus);

		// show the main window
		ShowWindow(hwnd, SW_SHOW);
	}

	// unsubclass
	pFrame->Detach();

	if (vSrc) {
		IVDStreamSource *pVSS = vSrc->asStream();
		pVSS->streamEnd();
	}

	if (server_index<0)
		throw MyError("Couldn't create frameserver");
}
// Merges another copy of this job (e.g. re-read from a shared job file)
// into this one. Returns true if the incoming copy was accepted and this
// job's fields were overwritten; false if the local copy was kept.
bool VDJob::Merge(const VDJob& src) {
	const int rev1 = mChangeRevision;
	const int rev2 = src.mChangeRevision;
	const bool mod1 = mbModified;
	// "Theirs modified" means it carries a newer change revision than ours.
	// Must be computed BEFORE mChangeRevision is bumped just below.
	const bool mod2 = src.mChangeRevision > mChangeRevision;

	if (mChangeRevision < src.mChangeRevision)
		mChangeRevision = src.mChangeRevision;

	// Identical jobs need no merge.
	if (operator==(src))
		return false;

	// Okay, the jobs aren't the same. Which ones are modified?
	bool acceptTheirs = false;

	do {
		// only yours modified -- accept yours
		if (mod1 && !mod2) {
			acceptTheirs = false;
			break;
		}

		// only theirs modified -- accept theirs
		if (!mod1 && mod2) {
			acceptTheirs = true;
			break;
		}

		// Two cases left:
		// - neither modified, but mismatch (bad, but should recover)
		// - both modified, but mismatch (the dreaded three-way merge)
		// We resolve with the following priority:
		//
		// Starting < Aborting < Aborted < Error < Waiting < Postponed < Completed < In Progress
		//
		// Ties are broken via Accept Theirs. This is critical for In Progress mode.
		static const int kPriority[]={ 4, 7, 6, 5, 2, 3, 1, 0 };
		VDASSERTCT(sizeof(kPriority)/sizeof(kPriority[0]) == VDJob::kStateCount);

		int pri1 = kPriority[mState];
		int pri2 = kPriority[src.mState];

		acceptTheirs = (pri1 <= pri2);
	} while(false);

	VDDEBUG("Yours[%3d%c]: %s\n", rev1, mod1 ? '*' : ' ', ToString().c_str());
	VDDEBUG("Theirs[%3d%c]: %s\n", rev2, mod2 ? '*' : ' ', src.ToString().c_str());

	// We should never accept a local copy when it corresponds to an In Progress from a
	// different machine.
	VDASSERT(acceptTheirs || mState != kStateInProgress || (uint32)mRunnerId == VDGetCurrentProcessId());

	if (acceptTheirs) {
		// Take every mergeable field from theirs.
		mName = src.mName;
		mError = src.mError;
		mRunnerName = src.mRunnerName;
		mState = src.mState;
		mRunnerId = src.mRunnerId;
		mDateStart = src.mDateStart;
		mDateEnd = src.mDateEnd;
		VDDEBUG(" Resolving accept theirs.\n");
		return true;
	}

	VDDEBUG(" Resolving accept yours.\n");
	return false;
}