Example #1
 BaseSource* MediaServer::loadVideo(string& path) {
   VideoSource* videoSource = NULL;
   // Check if this video is already loaded
   bool isVideoLoaded = false;
   if (loadedSources.count(path)) {
     videoSource = static_cast<VideoSource*>(loadedSources[path]);
     isVideoLoaded = true;
   }
   // If it is already loaded
   if (isVideoLoaded) {
     // Increase reference count of this source
     videoSource->referenceCount++;
     std::stringstream refss;
     refss << "Current reference count for " << path << " = " << videoSource->referenceCount;
     ofLogNotice("MediaServer") << refss.str();
     // Notify objects registered to the onVideoLoaded event
     std::stringstream ss;
     ss << "Video " << path << " already loaded";
     ofLogNotice("MediaServer") << ss.str();
     ofNotifyEvent(onVideoLoaded, path, this);
     return videoSource;
   }
   // Else load fresh
   videoSource = new VideoSource();
   videoSource->loadVideo(path);
   loadedSources[path] = videoSource;
   // Set reference count of this video path to 1
   //referenceCount[path] = 1;
   std::stringstream refss;
   refss << "Initialized reference count of " << path << " to " << videoSource->referenceCount;
   ofLogNotice("MediaServer") << refss.str();
   ofNotifyEvent(onVideoLoaded, path, this);
   return videoSource;
 }
Example #2
 void MediaServer::unloadVideo(string& path) {
   VideoSource* videoSource = static_cast<VideoSource*>(getSourceByPath(path));
   // Decrease reference count of the video
   //referenceCount[path]--;
   videoSource->referenceCount--;
   // Unload only if the reference count is 0 or less
   if (videoSource->referenceCount > 0) {
     ofLogNotice("MediaServer") << "Not unloading video as it is being referenced elsewhere";
     return;
   }
   // Reference count 0 or less, let's unload the video
   ofLogNotice("MediaServer") << "Removing video " << path;
   // Destroy video source
   if (loadedSources.count(path)) {
     ofLogNotice("MediaServer") << "Source count before video removal: " << loadedSources.size() << endl;
     videoSource->clear();
     std::map<std::string, BaseSource*>::iterator it = loadedSources.find(path);
     delete it->second;
     loadedSources.erase(it);
     ofLogNotice("MediaServer") << "Source count after video removal: " << loadedSources.size() << endl;
     ofNotifyEvent(onVideoUnloaded, path, this);
     return;
   }
   // Something wrong here, we should be out of the routine by now
   std::stringstream failss;
   failss << "Failed to remove video source: " << path;
   ofLogFatalError("MediaServer") << failss.str();
   std::exit(EXIT_FAILURE);
 }
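A minimal usage sketch of the reference-counting contract shown in Examples #1 and #2, assuming a MediaServer instance; the variable names and the path below are hypothetical:

 MediaServer mediaServer;
 string clip = "videos/clip.mov";  // hypothetical path
 mediaServer.loadVideo(clip);      // first load, reference count = 1
 mediaServer.loadVideo(clip);      // same source returned, count = 2
 mediaServer.unloadVideo(clip);    // count drops to 1, source is kept
 mediaServer.unloadVideo(clip);    // count reaches 0, source deleted and erased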
Example #3
int main(int argc, char *argv[])
{
    VideoSource videoSrc;
    videoSrc.init();

    QApplication a(argc, argv);
    CameraWindow w;
    w.show();

    CannyFilter cannyFilter;
    cannyFilter.setLowThreshold(50);
    cannyFilter.setHighThreshold(100);
    TextDetectElement textDetect;

    QObject::connect(&videoSrc, SIGNAL(pushRawFrame(Mat&)), &cannyFilter, SLOT(onPushRawFrame(Mat&)));
    QObject::connect(&videoSrc, SIGNAL(pushRawFrame(Mat&)), &textDetect, SLOT(onPushRawFrame(Mat&)));
    QObject::connect(&cannyFilter, SIGNAL(pushEdgeImage(Mat&)), &textDetect, SLOT(onPushEdgeImage(Mat&)));
    QObject::connect(&textDetect, SIGNAL(pushResultImage(Mat&)), &w, SLOT(onPushImage(Mat&)));

//    videoSrc.addPushListener(cannyFilter);
//    cannyFilter.addPushListener(textDetect);
//    textDetect.addPushListener(w);

    videoSrc.start();

    return a.exec();
}
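The connect() calls above assume each pipeline stage declares matching signals and slots. A rough sketch of what such a declaration could look like, illustrative only and not the project's actual header:

class CannyFilter : public QObject {
    Q_OBJECT
public:
    void setLowThreshold(int threshold);
    void setHighThreshold(int threshold);
signals:
    void pushEdgeImage(Mat &edges);    // consumed by TextDetectElement
public slots:
    void onPushRawFrame(Mat &frame);   // fed by VideoSource::pushRawFrame
};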
Example #4
SearchParams *MediaView::getSearchParams() {
    VideoSource *videoSource = playlistModel->getVideoSource();
    if (videoSource && videoSource->metaObject()->className() == QLatin1String("YTSearch")) {
        YTSearch *search = qobject_cast<YTSearch *>(videoSource);
        return search->getSearchParams();
    }
    return nullptr;
}
Example #5
void MediaView::stop() {
    stopped = true;

    while (!history.isEmpty()) {
        VideoSource *videoSource = history.takeFirst();
        if (!videoSource->parent()) delete videoSource;
    }

    playlistModel->abortSearch();
    videoAreaWidget->clear();
    videoAreaWidget->update();
    errorTimer->stop();
    playlistView->selectionModel()->clearSelection();
    if (downloadItem) {
        downloadItem->stop();
        delete downloadItem;
        downloadItem = 0;
        currentVideoSize = 0;
    }
    The::globalActions()->value("refine-search")->setChecked(false);
    updateSubscriptionAction(0, false);
#ifdef APP_ACTIVATION
    demoTimer->stop();
#endif

    foreach (QAction *action, currentVideoActions)
        action->setEnabled(false);

    QAction *a = The::globalActions()->value("download");
    a->setEnabled(false);
    a->setVisible(false);

#ifdef APP_PHONON
    mediaObject->stop();
#endif
    currentVideoId.clear();

#ifndef APP_PHONON_SEEK
    QSlider *slider = MainWindow::instance()->getSlider();
    slider->setEnabled(false);
    slider->setValue(0);
#else
    Phonon::SeekSlider *slider = MainWindow::instance()->getSeekSlider();
#endif

    if (snapshotSettings) {
        delete snapshotSettings;
        snapshotSettings = 0;
    }
}
Example #6
void VideoRate::setup(VideoSource & _source, float fps){
	source = &_source;
	ofAddListener(source->newFrameEvent,this,&VideoRate::newVideoFrame);
	setFps(fps);
	front = _source.getNextVideoFrame();
	//startThread(true,false);
	ofAddListener(ofEvents().update,this,&VideoRate::glThreadUpdate);
}
Example #7
void MediaView::setVideoSource(VideoSource *videoSource, bool addToHistory, bool back) {
    Q_UNUSED(back);
    stopped = false;
    errorTimer->stop();

    // qDebug() << "Adding VideoSource" << videoSource->getName() << videoSource;

    if (addToHistory) {
        int currentIndex = getHistoryIndex();
        if (currentIndex >= 0 && currentIndex < history.size() - 1) {
            while (history.size() > currentIndex + 1) {
                VideoSource *vs = history.takeLast();
                if (!vs->parent()) {
                    qDebug() << "Deleting VideoSource" << vs->getName() << vs;
                    vs->deleteLater();
                }
            }
        }
        history.append(videoSource);
    }

#ifdef APP_EXTRA
    if (history.size() > 1)
        Extra::slideTransition(playlistView->viewport(), playlistView->viewport(), back);
#endif

    playlistModel->setVideoSource(videoSource);

    if (media->state() == Media::StoppedState) {
        QSettings settings;
        if (settings.value("manualplay", false).toBool()) {
            videoAreaWidget->showPickMessage();
        }
    }

    sidebar->showPlaylist();
    sidebar->getRefineSearchWidget()->setSearchParams(getSearchParams());
    sidebar->hideSuggestions();
    sidebar->getHeader()->updateInfo();

    SearchParams *searchParams = getSearchParams();
    bool isChannel = searchParams && !searchParams->channelId().isEmpty();
    playlistView->setClickableAuthors(!isChannel);
}
Example #8
void MediaView::stop() {
    stopped = true;

    while (!history.isEmpty()) {
        VideoSource *videoSource = history.takeFirst();
        // Don't delete videoSource in the Browse view
        if (!videoSource->parent()) {
            videoSource->deleteLater();
        }
    }

    playlistModel->abortSearch();
    videoAreaWidget->clear();
    videoAreaWidget->update();
    errorTimer->stop();
    playlistView->selectionModel()->clearSelection();

    MainWindow::instance()->getAction("refineSearch")->setChecked(false);
    updateSubscriptionAction(nullptr, false);
#ifdef APP_ACTIVATION
    demoTimer->stop();
#endif

    for (QAction *action : currentVideoActions)
        action->setEnabled(false);

    QAction *a = MainWindow::instance()->getAction("download");
    a->setEnabled(false);
    a->setVisible(false);

    media->stop();
    media->clearQueue();
    currentVideoId.clear();

#ifdef APP_SNAPSHOT
    if (snapshotSettings) {
        delete snapshotSettings;
        snapshotSettings = nullptr;
    }
#endif
}
Example #9
void VideoBuffer::setup(VideoSource & _source, int size, bool allocateOnSetup){
	source=&_source;
	totalFrames=0;
	maxSize = size;
	
	VideoSource::width = _source.getWidth();
	VideoSource::height = _source.getHeight();

	if(allocateOnSetup){
		printf("VideoBuffer:: allocating on setup %d %d : ",VideoSource::getWidth(),VideoSource::getHeight());
		for(int i=0;i<size;i++){
			VideoFrame videoFrame = VideoFrame::newVideoFrame(source->getNextVideoFrame().getPixelsRef());
			//videoFrame.getTextureRef();
			newVideoFrame(videoFrame);
			printf("%d-",i);
		}
		printf("//\n");
	}
	resume();
	microsOneSec=-1;
}
Example #10
void VideoBuffer::setup(VideoSource & source, int size, bool allocateOnSetup){
	this->source=&source;
	totalFrames=0;
	maxSize = size;
	if(allocateOnSetup){
		for(int i=0;i<size;i++){
			VideoFrame videoFrame = VideoFrame::newVideoFrame(source.getNextVideoFrame().getPixelsRef());
			videoFrame.getTextureRef();
			newVideoFrame(videoFrame);
		}
	}
	resume();
	microsOneSec=-1;
}
Example #11
VideoInfoDialog::VideoInfoDialog( wxWindow* parent, RectangleBase* o )
    : wxDialog( parent, wxID_ANY, _("Video Info") ), obj( o )
{
    SetSize( wxSize( 250, 150 ) );
    wxStaticText* labelText = new wxStaticText( this, wxID_ANY, _("") );
    wxStaticText* infoText = new wxStaticText( this, wxID_ANY, _("") );
    std::string labelTextStd, infoTextStd;

    labelTextStd += "Name:\n";
    infoTextStd += obj->getName() + "\n";
    VideoSource* video = dynamic_cast<VideoSource*>( obj );
    if ( video )
    {
        labelTextStd += "RTP name:\n";
        infoTextStd += video->getMetadata( VPMSession::VPMSESSION_SDES_NAME ) +
                "\n";
        labelTextStd += "RTP cname:\n";
        infoTextStd += video->getMetadata( VPMSession::VPMSESSION_SDES_CNAME ) +
                "\n";
        labelTextStd += "Location:\n";
        infoTextStd += video->getMetadata( VPMSession::VPMSESSION_SDES_LOC ) +
                "\n";
        labelTextStd += "Codec:\n";
        infoTextStd += std::string( video->getPayloadDesc() ) + "\n";
        char width[10];
        char height[10];
        sprintf( width, "%u", video->getVideoWidth() );
        sprintf( height, "%u", video->getVideoHeight() );
        labelTextStd += "Resolution:\n";
        infoTextStd += std::string( width ) + " x " + std::string( height ) +
                "\n";
    }
    labelTextStd += "Grouped?";
    infoTextStd += std::string( obj->isGrouped() ? "Yes" : "No" );
    if ( obj->isGrouped() )
    {
        labelTextStd += "\nGroup:";
        infoTextStd += "\n" + obj->getGroup()->getName();
    }

    labelText->SetLabel( wxString( labelTextStd.c_str(), wxConvUTF8 ) );
    infoText->SetLabel( wxString( infoTextStd.c_str(), wxConvUTF8 ) );

    wxBoxSizer* textSizer = new wxBoxSizer( wxHORIZONTAL );
    textSizer->Add( labelText, wxSizerFlags(0).Align(0).Border( wxALL, 10 ) );
    textSizer->Add( infoText, wxSizerFlags(0).Align(0).Border( wxALL, 10 ) );

    SetSizer( textSizer );
    textSizer->SetSizeHints( this );
}
Example #12
LRESULT Frameserver::SessionFormat(LPARAM lParam, WPARAM stream) {
	FrameserverSession *fs = SessionLookup(lParam);
	DubSource *ds;
	long len;

	if (!fs) return VDSRVERR_BADSESSION;

	if (stream<0 || stream>2) return VDSRVERR_NOSTREAM;

	ds = stream ? (DubSource *)aSrc : (DubSource *)vSrc;

	if (!ds) return VDSRVERR_NOSTREAM;

	if (stream) {
		len = aSrc->getFormatLen();

		if (len > fs->arena_size) return VDSRVERR_TOOBIG;

		memcpy(fs->arena, aSrc->getFormat(), len);
	} else {
		BITMAPINFOHEADER *bmih;

		len = sizeof(BITMAPINFOHEADER);
		if (len > fs->arena_size) return VDSRVERR_TOOBIG;

		memcpy(fs->arena, vSrc->getDecompressedFormat(), len);

		bmih = (BITMAPINFOHEADER *)fs->arena;
//		bmih->biSize		= sizeof(BITMAPINFOHEADER);
		bmih->biWidth		= filters.LastBitmap()->w;
		bmih->biHeight		= filters.LastBitmap()->h;
		bmih->biPlanes		= 1;
		bmih->biCompression	= BI_RGB;
		bmih->biBitCount	= 24;
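		// ((biWidth*3+3)&-4) rounds the 24-bit row size up to a 4-byte (DWORD)
		// boundary, e.g. a width of 101 gives 303 pixel bytes padded to a 304-byte row.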
		bmih->biSizeImage	= ((bmih->biWidth*3+3)&-4)*abs(bmih->biHeight);
		bmih->biClrUsed		= 0;
		bmih->biClrImportant= 0;
	}

	return len;
}
Example #13
LRESULT Frameserver::SessionStreamInfo(LPARAM lParam, WPARAM stream) {
	FrameserverSession *fs = SessionLookup(lParam);

	if (!fs) return VDSRVERR_BADSESSION;

	if (stream<0 || stream>2) return VDSRVERR_NOSTREAM;

	if (stream==0) {
		AVISTREAMINFO *lpasi = (AVISTREAMINFO *)(fs->arena+8);

		if (!vSrc) return VDSRVERR_NOSTREAM;

		*(long *)(fs->arena+0) = 0;										//vSrc->lSampleFirst;
		*(long *)(fs->arena+4) = lVideoSamples;			//vSrc->lSampleLast;
		memcpy(lpasi, &vSrc->getStreamInfo(), sizeof(AVISTREAMINFO));

		lpasi->fccHandler	= ' BID';
		lpasi->dwLength		= *(long *)(fs->arena+4);
		lpasi->dwRate		= vInfo.frameRate.getHi();
		lpasi->dwScale		= vInfo.frameRate.getLo();

		SetRect(&lpasi->rcFrame, 0, 0, filters.OutputBitmap()->w, filters.OutputBitmap()->h);

		lpasi->dwSuggestedBufferSize = filters.OutputBitmap()->size;

	} else {
		if (!aSrc) return VDSRVERR_NOSTREAM;

		*(long *)(fs->arena+0) = 0;
		*(long *)(fs->arena+4) = lAudioSamples;
		memcpy(fs->arena+8, &aSrc->getStreamInfo(), sizeof(AVISTREAMINFO));

		((AVISTREAMINFO *)(fs->arena+8))->dwLength = audioset.getTotalFrames();
	}

	return VDSRVERR_OK;
}
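For reference, the pointer arithmetic above implies the following layout for the session arena; this is a descriptive sketch inferred from the offsets, not a struct the project defines:

#pragma pack(push, 1)
struct StreamInfoArena {
	long          firstSample;   // arena+0, always written as 0 here
	long          sampleCount;   // arena+4, lVideoSamples or lAudioSamples
	AVISTREAMINFO streamInfo;    // arena+8, the copied stream header
};
#pragma pack(pop)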
Example #14
void Frameserver::Go(IVDubServerLink *ivdsl, char *name) {
	int server_index = -1;

	lpszFsname = name;
	
	// prepare the sources...

	if (vSrc) {
		if (!vSrc->setTargetFormat(g_dubOpts.video.mInputFormat))
			if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_XRGB8888))
				if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_RGB888))
					if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_XRGB1555))
						if (!vSrc->setTargetFormat(nsVDPixmap::kPixFormat_Pal8))
							throw MyError("The decompression codec cannot decompress to an RGB format. This is very unusual. Check that any \"Force YUY2\" options are not enabled in the codec's properties.");

		vSrc->streamBegin(true, false);

		BITMAPINFOHEADER *bmih = vSrc->getDecompressedFormat();

		filters.initLinearChain(&g_listFA, (Pixel *)(bmih+1), bmih->biWidth, abs(bmih->biHeight), 24);

		if (filters.getFrameLag())
			MessageBox(g_hWnd,
			"One or more filters in the filter chain has a non-zero lag. This will cause the served "
			"video to lag behind the audio!"
			, "VirtualDub warning", MB_OK);

		fsi.lMicrosecsPerFrame		= vInfo.usPerFrame;
		fsi.lMicrosecsPerSrcFrame	= vInfo.usPerFrameIn;
		fsi.flags					= 0;

		if (filters.ReadyFilters(fsi))
			throw MyError("Error readying filters.");

		const VBitmap *pvb = filters.LastBitmap();

		VDPixmapCreateLinearLayout(mFrameLayout, nsVDPixmap::kPixFormat_RGB888, pvb->w, pvb->h, 4);
		VDPixmapLayoutFlipV(mFrameLayout);
	}

	if (aSrc)
		aSrc->streamBegin(true, false);

	// usurp the window

	VDUIFrame *pFrame = VDUIFrame::GetFrame(hwnd);
	mpUIFrame = pFrame;
	pFrame->Attach(this);

	guiSetTitle(hwnd, IDS_TITLE_FRAMESERVER);

	// create dialog box

	mbExit = false;

	if (hwndStatus = CreateDialogParam(g_hInst, MAKEINTRESOURCE(IDD_SERVER), hwnd, Frameserver::StatusDlgProc, (LPARAM)this)) {

		// hide the main window

		ShowWindow(hwnd, SW_HIDE);

		// create the frameserver

		server_index = ivdsl->CreateFrameServer(name, hwnd);

		if (server_index>=0) {

			// kick us into high priority

			SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);

			// enter window loop

			{
				MSG msg;

				while(!mbExit) {
					BOOL result = GetMessage(&msg, NULL, 0, 0);

					if (result == (BOOL)-1)
						break;

					if (!result) {
						PostQuitMessage(msg.wParam);
						break;
					}

					TranslateMessage(&msg);
					DispatchMessage(&msg);
				}
			}

			// return to normal priority

			SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);

			ivdsl->DestroyFrameServer(server_index);
		}

		if (IsWindow(hwndStatus)) DestroyWindow(hwndStatus);

		// show the main window

		ShowWindow(hwnd, SW_SHOW);
	}

	// unsubclass
	pFrame->Detach();

	if (vSrc) {
		vSrc->streamEnd();
	}

	if (server_index<0) throw MyError("Couldn't create frameserver");
}
Example #15
/**************************************************
Function that receives audio data
Parameters:
lpData : memory holding the input data
size: length of the input data
pts: timestamp of the input data
***************************************************/
void CDemandMediaAudio::PushAudio(const void *lpData, unsigned int size, int64_t pts, IBaseVideo *Video, bool bCanPlay)
{
	VideoSource *Source = dynamic_cast<VideoSource*>(Video);

	if (!m_uBlockSize || !Source)
		return;
	
	if (m_sAudioParam.iChannel <= 2)
	{
		if (fVolume != 1.0f)
		{
			short *Tem = (short*)lpData;
			for (int i = 0; i < size; i += 2)
			{
				long sVolume = Tem[i / 2];

				sVolume *= fVolume;

				if (sVolume > 0x7fff)
				{
					sVolume = 0x7fff;
				}
				else if (sVolume < -0x8000)
				{
					sVolume = -0x8000;
				}

				Tem[i / 2] = (short)sVolume;
			}
		}

		Source->PlayCallBackAudio((LPBYTE)lpData, size);
	}
	else
	{
		UINT totalSamples = size * 8 / m_sAudioParam.iBitPerSample;
		if (TemconvertBuffer.Num() < totalSamples)
			TemconvertBuffer.SetSize(totalSamples);

		OutputconvertBuffer.SetSize(totalSamples / m_sAudioParam.iChannel * 2);


		if (m_sAudioParam.iBitPerSample == 8)
		{
			float *tempConvert = TemconvertBuffer.Array();
			char *tempSByte = (char*)lpData;

			while (totalSamples--)
			{
				*(tempConvert++) = float(*(tempSByte++)) / 127.0f;
			}
		}
		else if (m_sAudioParam.iBitPerSample == 16)
		{
			float *tempConvert = TemconvertBuffer.Array();
			short *tempShort = (short*)lpData;

			while (totalSamples--)
			{
				*(tempConvert++) = float(*(tempShort++)) / 32767.0f;
			}
		}
		else if (m_sAudioParam.iBitPerSample == 24)
		{
			float *tempConvert = TemconvertBuffer.Array();
			BYTE *tempTriple = (BYTE*)lpData;
			TripleToLong valOut;

			while (totalSamples--)
			{
				TripleToLong &valIn = *(TripleToLong*)tempTriple;  // read the 3-byte sample the pointer refers to

				valOut.wVal = valIn.wVal;
				valOut.tripleVal = valIn.tripleVal;
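				// A 24-bit sample whose top byte is above 0x7F is negative, so the
				// unused fourth byte is forced to 0xFF to sign-extend the 32-bit value.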
				if (valOut.tripleVal > 0x7F)
					valOut.lastByte = 0xFF;

				*(tempConvert++) = float(double(valOut.val) / 8388607.0);
				tempTriple += 3;
			}
		}
		else if (m_sAudioParam.iBitPerSample == 32)
		{
			float *tempConvert = TemconvertBuffer.Array();
			long *tempShort = (long*)lpData;

			while (totalSamples--)
			{
				*(tempConvert++) = float(double(*(tempShort++)) / 2147483647.0);
			}
		}

		float *inputTemp = TemconvertBuffer.Array();
		float *outputTemp = OutputconvertBuffer.Array();

		UINT numFloats = size * 8 / m_sAudioParam.iBitPerSample;

		if (m_sAudioParam.iChannel == 3)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];

				// Drop LFE since we don't need it
				//float lfe       = inputTemp[2]*lowFreqMix;

				*(outputTemp++) = left;
				*(outputTemp++) = right;

				inputTemp += 3;
			}
		}
		else if (m_sAudioParam.iChannel == 4)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];
				float frontCenter = inputTemp[2];
				float lowFreq = inputTemp[3];

				*(outputTemp++) = left;
				*(outputTemp++) = right;

				inputTemp += 4;
			}
		}
		else if (m_sAudioParam.iChannel == 5)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];

				// Skip LFE , we don't really need it.
				//float lfe       = inputTemp[2];

				float rearLeft = inputTemp[3] * surroundMix4;
				float rearRight = inputTemp[4] * surroundMix4;

				// Same idea as with 5.1 downmix
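				// attn4dotX (defined elsewhere) scales each summed pair back towards
				// [-1, 1] so the two-channel downmix does not clip.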

				*(outputTemp++) = (left + rearLeft)  * attn4dotX;
				*(outputTemp++) = (right + rearRight) * attn4dotX;

				inputTemp += 5;
			}
		}
		else if (m_sAudioParam.iChannel == 6)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];
				float center = inputTemp[2] * centerMix;


				float rearLeft = inputTemp[4] * surroundMix;
				float rearRight = inputTemp[5] * surroundMix;


				*(outputTemp++) = (left + center + rearLeft) * attn5dot1;
				*(outputTemp++) = (right + center + rearRight) * attn5dot1;

				inputTemp += 6;
			}
		}

		else if (m_sAudioParam.iChannel == 8)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];

				float center = inputTemp[2] * centerMix;

				// Drop LFE since we don't need it
				//float lowFreq       = inputTemp[3]*lowFreqMix;

				float rearLeft = inputTemp[4] * surroundMix;
				float rearRight = inputTemp[5] * surroundMix;

				// Drop SPEAKER_FRONT_LEFT_OF_CENTER , SPEAKER_FRONT_RIGHT_OF_CENTER
				//float centerLeft    = inputTemp[6];
				//float centerRight   = inputTemp[7];

				// Downmix from 5.1 to stereo
				*(outputTemp++) = (left + center + rearLeft)  * attn5dot1;
				*(outputTemp++) = (right + center + rearRight) * attn5dot1;

				inputTemp += 8;
			}
		}

		if (fVolume != 1.0f)
			MultiplyAudioBuffer(OutputconvertBuffer.Array(), OutputconvertBuffer.Num(),fVolume);


		Source->PlayCallBackAudio((LPBYTE)OutputconvertBuffer.Array(), OutputconvertBuffer.Num() * 4);

		if (bCanPlay)
		{
			bool bPlayLive = false;
			if (bLiveInstance)
			{
				AudioTimestamp audioTimestamp;
				EnterCriticalSection(&sampleBufferLock);
				sampleBuffer.AppendArray((BYTE *)(OutputconvertBuffer.Array()), OutputconvertBuffer.Num() * 4);
				audioTimestamp.count = size / m_uBlockSize;
				audioTimestamp.pts = pts;
				//sampleBufferPts.push_back(audioTimestamp);
				LeaveCriticalSection(&sampleBufferLock);
				bPlayLive = m_bPlayPcmLive;
			}
			else
			{
				EnterCriticalSection(&sampleBufferLock);
				sampleBuffer.RemoveRange(0, sampleBuffer.Num());
				LeaveCriticalSection(&sampleBufferLock);
			}

			int Len = OutputconvertBuffer.Num();
			char *OutBuffer;
			CaculateVolume((LPVOID)OutputconvertBuffer.Array(), Len, (void**)&OutBuffer);

			EnterCriticalSection(&sampleBufferLock);
			
			if (m_pAudioWaveOut && (m_bPlayPcmLocal || bPlayLive))
			{
				m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);

				if (!bSameDevice && bProjector && m_pSecWaveOut)
					m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);

			}
			else if (bProjector)
			{
				if (bSameDevice && m_pAudioWaveOut)
				{
					m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);
				}
				else if (m_pSecWaveOut)
				{
					m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);
				}
			}
			LeaveCriticalSection(&sampleBufferLock);
		}
		else
		{
			int Len = OutputconvertBuffer.Num();
			char *OutBuffer;
			CaculateVolume((LPVOID)OutputconvertBuffer.Array(), Len, (void**)&OutBuffer, true);
		}
		
		
		return;
	}

	if (bCanPlay)
	{
		bool bPlayLive = false;
		size = size / m_uBlockSize;
		if (bLiveInstance)
		{
			AudioTimestamp audioTimestamp;
			EnterCriticalSection(&sampleBufferLock);
			sampleBuffer.AppendArray(static_cast<const BYTE *>(lpData), size * m_uBlockSize);
			audioTimestamp.count = size;
			audioTimestamp.pts = pts;
			//sampleBufferPts.push_back(audioTimestamp);
			LeaveCriticalSection(&sampleBufferLock);
			bPlayLive = m_bPlayPcmLive;
		}
		else
		{
			EnterCriticalSection(&sampleBufferLock);
			sampleBuffer.RemoveRange(0, sampleBuffer.Num());
			LeaveCriticalSection(&sampleBufferLock);
		}

		EnterCriticalSection(&sampleBufferLock);

		int Len = size  * m_uBlockSize;
		char *OutBuffer;
		CaculateVolume((LPVOID)lpData, Len, (void**)&OutBuffer);

		if (m_pAudioWaveOut && (m_bPlayPcmLocal || bPlayLive))
		{
			m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len);

			if (!bSameDevice && bProjector && m_pSecWaveOut)
				m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len);

		}
		else if (bProjector)
		{
			if (bSameDevice && m_pAudioWaveOut)
			{
				m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len);
			}
			else if (m_pSecWaveOut)
			{
				m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len);
			}
		}
		LeaveCriticalSection(&sampleBufferLock);
	}
	else
	{
		int Len = size;
		char *OutBuffer;
		CaculateVolume((LPVOID)lpData, Len, (void**)&OutBuffer,true);
	}
}
Example #16
int main( int argc, char* argv[])
{
    // Declare our VideoSource pointer
    VideoSource *src;
    CvVideoWriter *writer = 0;
    int isColor = 1;
    int fps     = 30;  // or 30
    int frameW  = 640; // 744 for firewire cameras
    int frameH  = 480; // 480 for firewire cameras
    writer=cvCreateVideoWriter("out.avi",CV_FOURCC('P','I','M','1'),
                                   fps,cvSize(frameW,frameH),isColor);


    if( argc > 1 ) {
        // Initialization: video file
        string path(argv[1]);
        src = new VideoFile( path, (argc > 2) );
    }
    else {
        // Initialization: webcam
        src = new Camera( 0 );
    }

    // Initialize the video stream
    try {
        src->open();
    }
    catch( Exception &e ) {
        // If an exception occurs, print it and exit.
        cout << e.what() << endl;
        delete src;
        return 10;
    }

    // If everything went well, display the video stream information.
    cout << src->getInfos() << endl;

    cvNamedWindow( "video", CV_WINDOW_AUTOSIZE );
    Image img;
    char key = 'a';

    // Start measuring the frame rate
    debut_mesure = getTimeMillis();

    while( key != 'q' ) {
        try {
            src->getFrame( img );
        }
        catch(Exception &e) {
            cout << "\n" << e.what() << endl;
            break;
        }
        /*CvScalar scalaire;
        scalaire.val[0] = 120;
        scalaire.val[1] = scalaire.val[2] = 0;
        img.colorFilter(scalaire);*/

        img.colorPaint2(top_left,bottom_right);
        if (bottom_right.x < 720) {
        	bottom_right.x++;
        }
        if (bottom_right.y < 576) {
        	bottom_right.y++;
        }
        if (top_left.x > 0) {
        	top_left.x--;
        }
        if (top_left.y > 0) {
        	top_left.y--;
        }

        //img.colorBlacknWhite();

        cvShowImage( "video", img );
        cvWriteFrame(writer,img);
        key = cvWaitKey( 10 );

        // Display the frame rate
        cout << "\rFrame Rate : " << setw(5);
        cout << left << setprecision(4);
        cout << calculFrameRate() << " FPS" << flush;
    }

    cout << endl;
    cvDestroyWindow( "video" );
    delete src;
    return 0;
}
Example #17
int main( int aCount, const char ** aArgs )
{
    // Creates default configuration, parse command line parameters
    Config lConfig;
    lConfig.ParseCommandLine( aCount, aArgs );

    // Create video source (pattern generator)
    VideoSource lSource;

    // Get video source properties
    PvUInt32 lWidth = lConfig.GetWidth();
    PvUInt32 lHeight = lConfig.GetHeight();
    PvPixelType lPixelFormat = PvPixelMono8;
    PvUInt32 lSize = lWidth * lHeight;

    // Allocate transmit buffers
    PvBufferList lBuffers;
    PvBufferList lFreeBuffers;
    for ( PvUInt32 i = 0; i < lConfig.GetBufferCount(); i++ )
    {
        // Alloc new buffer
        PvBuffer *lBuffer = new PvBuffer();
        lBuffer->GetImage()->Alloc( lWidth, lHeight, lPixelFormat );

        // Set to 0
        memset( lBuffer->GetDataPointer(), 0x00, lSize );

        // Add to both buffer list and free buffer list
        lBuffers.push_back( lBuffer );
        lFreeBuffers.push_back( lBuffer );
    }

    // Create transmitter, set packet size
    PvTransmitterRaw lTransmitter;
    lTransmitter.SetPacketSize( lConfig.GetPacketSize() );

    // Create virtual device (used for discovery)
    PvVirtualDevice lDevice;
    lDevice.StartListening( lConfig.GetSourceAddress() );

    cout << "Listening for device discovery requests on " << lConfig.GetSourceAddress() << endl;

    // Open transmitter - sets destination and source
    PvResult lResult = lTransmitter.Open( 
        lConfig.GetDestinationAddress(), lConfig.GetDestinationPort(), 
        lConfig.GetSourceAddress(), lConfig.GetSourcePort() );
    if ( !lResult.IsOK() )
    {
        cout << "Failed to open a connection to the transmitter." << endl;
        return 1;
    }

    cout << "Transmission stream opened:" << endl;
    cout << "Source: " << lTransmitter.GetSourceIPAddress().GetAscii() << " port " << lTransmitter.GetSourcePort() << endl;
    cout << "Destination: " << lConfig.GetDestinationAddress() << " port " << lConfig.GetDestinationPort() << endl; 

    if ( !lConfig.GetSilent() )
    {
        cout << "Press any key to begin transmitting.\r";
        PvWaitForKeyPress();
    }

    cout << "Press any key to stop transmitting." << endl;

    // Set maximum throughput (just to even out traffic, as we control throughput at the source)
    if ( lConfig.GetFPS() != 0 )
    {
        // Multiply image size (in bits) by FPS
        float lMax = static_cast<float>( lSize ) * 8;
        lMax *= lConfig.GetFPS();

        // Since we control throughput at the source, make sure maximum throughput is slightly
        // higher than what we need. We want to even out packet traffic, not slow down source frame rate
        lMax *= 1.1f;

        // Set max throughput
        lTransmitter.SetMaxPayloadThroughput( lMax );
    }

    char lDoodle[] = "|\\-|-/";
    int lDoodleIndex = 0;

    // Reset transmitter stats
    lTransmitter.ResetStats();

    // Used to transmit at a steady frame rate
    PvFPSStabilizer lStabilizer;

    // Acquisition/transmission loop
    while( !PvKbHit() )
    {
        // Step 1: If timing is right to meet desired FPS, generate pattern, transmit
        if ( ( lConfig.GetFPS() == 0 ) || lStabilizer.IsTimeToDisplay( (PvUInt32)lConfig.GetFPS() ) )
        {
            // Are there buffers available for transmission?
            if ( lFreeBuffers.size() > 0 )
            {
                // Retrieve buffer from list
                PvBuffer *lBuffer = lFreeBuffers.front();
                lFreeBuffers.pop_front();

                // Generate the test pattern (if needed)
                if ( !lConfig.GetNoPattern() )
                {
                    lSource.CopyPattern( lBuffer );
                }

                // Queue the buffer for transmission
                lTransmitter.QueueBuffer( lBuffer );
            }
        }

        // Step 2: Retrieve free buffer(s), display stats and requeue
        PvBuffer *lBuffer = NULL;
        while ( lTransmitter.RetrieveFreeBuffer( &lBuffer, 0 ).IsOK() )
        {
            // Queue buffers back in available buffer list
            lFreeBuffers.push_back( lBuffer );

            // Buffer transmission complete, display stats
            cout << fixed << setprecision( 1 );
            cout << lDoodle[ lDoodleIndex ] << " ";
            cout << "Transmitted " << lTransmitter.GetBlocksTransmitted() << " blocks ";
            cout << "at " << lTransmitter.GetAverageTransmissionRate() << " ";
            cout << "(" << lTransmitter.GetInstantaneousTransmissionRate() << ") FPS ";
            cout << lTransmitter.GetAveragePayloadThroughput() / 1000000.0f << " ";
            cout << "(" << lTransmitter.GetInstantaneousPayloadThroughput() / 1000000.0f << ") Mb/s  \r";
            ++lDoodleIndex %= 6;
        }
    }

    // Close transmitter (will also abort buffers)
    lTransmitter.Close();

    // Free buffers
    PvBufferList::iterator lIt = lBuffers.begin();
    while ( lIt != lBuffers.end() )
    {
        delete ( *lIt );
        lIt++;
    }

    // Stop virtual device
    lDevice.StopListening();
}
Example #18
void VideoListener::vpmsession_source_created( VPMSession &session,
        uint32_t ssrc, uint32_t pt, VPMPayload type,
        VPMPayloadDecoder* decoder )
{
    VPMVideoDecoder *d = dynamic_cast<VPMVideoDecoder*>( decoder );

    if ( d )
    {
        sourceCount++;
        VPMVideoFormat format = d->getOutputFormat();
        VPMVideoBufferSink *sink;

        // if we have shaders available, set the output format to YUV420P so
        // the videosource class will apply the YUV420P -> RGB conversion shader
        gravUtil::logVerbose( "VideoListener::vpmsession_source_created: "
                "creating source, have shaders? %i format? %i (yuv420p: %i)\n",
                GLUtil::getInstance()->areShadersAvailable(), format,
                VIDEO_FORMAT_YUV420 );
        if ( GLUtil::getInstance()->areShadersAvailable() &&
                format == VIDEO_FORMAT_YUV420 )
            sink = new VPMVideoBufferSink( format );
        else
            sink = new VPMVideoBufferSink( VIDEO_FORMAT_RGB24 );

        // note that the buffer sink will be deleted when the decoder for the
        // source is (inside VPMedia), so that's why it isn't deleted here or in
        // videosource

        if ( !sink->initialise() )
        {
            gravUtil::logError( "VideoListener::vpmsession_source_created: "
                    "Failed to initialise video sink\n" );
            return;
        }

        d->connectVideoProcessor( sink );

        // this is a bit clunky - VideoSource needs to have a reference to the
        // general SessionEntry but we only know the VPMSession pointer (not
        // even the address since that's only in VPMSession_net)
        // this should be thread-safe since this function will be called on the
        // second thread
        // TODO will change if sessions are on their own threads?
        SessionEntry* se = sessionMan->findSessionByVPMSession( &session );

        // if we're getting a new video from a VPMSession but it's not found in
        // the SessionManager something is seriously fubar
        if ( se == NULL )
        {
            gravUtil::logError( "VideoListener::vpmsession_source_created: "
                    "session not found in SessionManager. Something is "
                    "horribly wrong :(\n" );
            return;
        }

        VideoSource* source = new VideoSource( se, this, ssrc, sink, 0.0f,
                0.0f );
        source->setScale( 5.25f, 5.25f );
        source->move( x, y );
        objectMan->addNewSource( source );

        // new frame callback mostly just used for testing
        //sink->addNewFrameCallback( &newFrameCallbackTest, (void*)timer );

        // do some basic grid positions
        // TODO make this better, use layoutmanager somehow?
        // probably should be moved to objectManager regardless
        x += 8.8f;
        if ( x > 15.0f )
        {
            x = -7.5f;
            y -= 5.9f;
        }
        // reset to top
        if ( y < -11.0f )
        {
            x = initialX + ( 0.5f * ( sourceCount / 9 ) );
            y = initialY - ( 0.5f * ( sourceCount / 9 ) );
        }
    }
}
Example #19
void VideoPanel::draw(){

	if(!visible){
		return;
	}

	glActiveTextureARB(GL_TEXTURE1_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE2_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE3_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE4_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE5_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE6_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE7_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE8_ARB);
	glDisable(GL_TEXTURE_2D);

	glActiveTextureARB(GL_TEXTURE0_ARB);
	glDisable(GL_TEXTURE_2D);

	glColor4f(color.x,color.y,color.z,color.w);


	if(shaderId==SHADER_NONE){
		shader.reset();
	}else{
		shader.set(shaderId);

		for(int lol=0; lol<shaderParameters.size(); lol++){
			shader.localParameterFragment(lol,shaderParameters[lol]);
		}
	}

	VideoSource* vs;

	float px,py,pz,sx,sy;

	pz=pos.z;

	if(percent){
		px=pos.x*conf->sizeX;
		py=pos.y*conf->sizeY;
		sx=scale.x*conf->sizeX;
		sy=scale.y*conf->sizeY;
	}else{
		px=pos.x;
		py=pos.y;
		sx=scale.x;
		sy=scale.y;
	}

	for(int i=0; i<layers.size(); i++){

		if(layers[i].substr(0,8)=="picture:"){
			GLuint texid=remapPictures[layers[i]];

			if(i==0){
				glActiveTextureARB(GL_TEXTURE0_ARB);
			}else if(i==1){
				glActiveTextureARB(GL_TEXTURE1_ARB);
			}else if(i==2){
				glActiveTextureARB(GL_TEXTURE2_ARB);
			}else if(i==3){
				glActiveTextureARB(GL_TEXTURE3_ARB);
			}



			glEnable(GL_TEXTURE_2D);
			glBindTexture(GL_TEXTURE_2D,texid);

		}else{

			vs=remap[layers[i]];

			if(vs!=NULL){
				if(i==0){
					glActiveTextureARB(GL_TEXTURE0_ARB);
				}else if(i==1){
					glActiveTextureARB(GL_TEXTURE1_ARB);
				}else if(i==2){
					glActiveTextureARB(GL_TEXTURE2_ARB);
				}else if(i==3){
					glActiveTextureARB(GL_TEXTURE3_ARB);
				}

				glEnable(GL_TEXTURE_2D);
				glBindTexture(GL_TEXTURE_2D,vs->texid);

				vs->getFrame();

				#ifdef VID_SOFTWARE_RESIZE
				glTexSubImage2D (GL_TEXTURE_2D, 0, 0, 0, vs->widthp2, vs->heightp2, GL_BGR_EXT, GL_UNSIGNED_BYTE, vs->data);
				#else
				gluBuild2DMipmaps(GL_TEXTURE_2D, 3, vs->width, vs->height, GL_BGR_EXT , GL_UNSIGNED_BYTE, vs->data);
				#endif
			}
		}
	
	}

	float tx=1;

	glBegin(GL_QUADS);
		glTexCoord2d(0,0); glVertex3f(px		,py,pz);
		glTexCoord2d(tx,0); glVertex3f(px+sx	,py,pz);
		glTexCoord2d(tx,-tx); glVertex3f(px+sx	,py-sy,pz);
		glTexCoord2d(0,-tx); glVertex3f(px		,py-sy,pz);
	glEnd();

}
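Since the ARB texture-unit enums are consecutive values, the per-index if/else chains above can be collapsed; a simplification sketch, not the project's code:

	glActiveTextureARB(GL_TEXTURE0_ARB + i);  // GL_TEXTURE0_ARB..GL_TEXTURE31_ARB are sequential
	glEnable(GL_TEXTURE_2D);
	glBindTexture(GL_TEXTURE_2D, texid);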
Example #20
bool VideoPanel::load(int layer, String source){
	//VideoSource
	
	while(layer>layers.size()){
		layers.pushBack("");
	}

	if(source==""){
		layers[layer-1]="";
		return true;

	}

	layers[layer-1]=source;

	if(source.substr(0,7)=="camera:"){
		VideoSource* vs;

		if(!remap.find(source)){
			remap[source]=new VideoSource;
			vs=remap[source];
		}else{
			return true;
		}

		short driver=0;

		if(source.substr(7,-1)=="1"){
			driver=0;
		}else if(source.substr(7,-1)=="2"){
			driver=1;
		}


		#ifdef VID_SOFTWARE_RESIZE
		vs->powerOf2=true;
		#endif

		if(!vs->sourceCapture(driver)){

			console().write("error, could not access camera");
			return false;
		}else{
			//console().write("successfully connected to camera");
		}

		glGenTextures(1, (GLuint*)&vs->texid);
		glBindTexture(GL_TEXTURE_2D,vs->texid);
		
		vs->getFrame();


		#ifdef VID_SOFTWARE_RESIZE
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, vs->widthp2,vs->heightp2, 0,  GL_BGR_EXT, GL_UNSIGNED_BYTE, blank);
			glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);	// Linear Filtered
			glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		#else
			gluBuild2DMipmaps(GL_TEXTURE_2D, 3, vs->width,vs->height, GL_BGR_EXT , GL_UNSIGNED_BYTE, vs->data);
			glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
			glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		#endif


		
		glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_REPEAT);
		glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_REPEAT);

	}else if(source.substr(0,5)=="file:"){
		VideoSource* vs;

		if(!remap.find(source)){
			vs=new VideoSource;
			remap[source]=vs;
		}else{
			return true;
		}


		#ifdef VID_SOFTWARE_RESIZE
		vs->powerOf2=true;
		#endif

		String x=source.substr(5,-1);

		if(  !vs->sourceAvi(x.c_str())  ){
			console().write("error loading video");
			return false;
		}else{
			//console().write("video loaded successfully");
		}

		

		glGenTextures(1, (GLuint*)&vs->texid);
		glBindTexture(GL_TEXTURE_2D,vs->texid);
		
		vs->getFrame();

		#ifdef VID_SOFTWARE_RESIZE
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, vs->widthp2,vs->heightp2, 0,  GL_BGR_EXT, GL_UNSIGNED_BYTE, blank);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);	// Linear Filtered
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		#else
			gluBuild2DMipmaps(GL_TEXTURE_2D, 3, remap[source]->width, remap[source]->height, GL_BGR_EXT , GL_UNSIGNED_BYTE, remap[source]->data);
			glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
			glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		#endif

		glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_REPEAT);
		glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_REPEAT);

	}else if(source.substr(0,8)=="picture:"){
		String file=source.substr(8,-1);

		int g=texture.load(file);

		ImageData im=texture.info(g);

		if(g!=-1){

			remapPictures[source]=g;

			console().write("picture '"+file+"', loaded successfully");
		}else{

			console().write("Error loading file '"+file+"', "+im.error);
			return false;
		}
		
	}else{

		console().write("error, unknown video source type '"+source+"', should be file:, picture: or camera:");
		return false;
	}


	return true;

}
Example #21
LRESULT Frameserver::SessionFrame(LPARAM lParam, WPARAM original_frame) {
	FrameserverSession *fs = SessionLookup(lParam);

	if (!fs)
		return VDSRVERR_BADSESSION;

	try {
		const void *ptr = vSrc->getFrameBuffer();
		const BITMAPINFOHEADER *bmih = vSrc->getDecompressedFormat();
		VDPosition sample;
		bool is_preroll;

		if (fs->arena_size < ((filters.LastBitmap()->w*3+3)&-4)*filters.LastBitmap()->h)
			return VDSRVERR_TOOBIG;

		sample = mVideoFrameMap[original_frame].mDisplayFrame;

		if (sample < 0)
			return VDSRVERR_FAILED;

		vSrc->streamSetDesiredFrame(sample);

		VDPosition targetSample = vSrc->displayToStreamOrder(sample);
		VDPosition frame = vSrc->streamGetNextRequiredFrame(is_preroll);

		if (frame >= 0) {
			do {
				uint32 lSize;
				int hr;

	//			_RPT1(0,"feeding frame %ld\n", frame);

				hr = vSrc->read(frame, 1, NULL, 0x7FFFFFFF, &lSize, NULL);
				if (hr)
					return VDSRVERR_FAILED;

				uint32 bufSize = (lSize + 65535 + vSrc->streamGetDecodePadding()) & ~65535;
				if (mInputBuffer.size() < bufSize)
					mInputBuffer.resize(bufSize);

				hr = vSrc->read(frame, 1, mInputBuffer.data(), lSize, &lSize, NULL); 
				if (hr)
					return VDSRVERR_FAILED;

				vSrc->streamFillDecodePadding(mInputBuffer.data(), lSize);
				ptr = vSrc->streamGetFrame(mInputBuffer.data(), lSize, is_preroll, frame, targetSample);
			} while(-1 != (frame = vSrc->streamGetNextRequiredFrame(is_preroll)));

		} else
			ptr = vSrc->streamGetFrame(NULL, 0, FALSE, targetSample, targetSample);

		VDPixmap pxdst(VDPixmapFromLayout(mFrameLayout, fs->arena));

		if (!g_listFA.IsEmpty()) {
			VDPixmapBlt(VDAsPixmap(*filters.InputBitmap()), vSrc->getTargetFormat());

			fsi.lCurrentFrame				= original_frame;
			fsi.lCurrentSourceFrame			= sample;
			fsi.lSourceFrameMS				= MulDiv(fsi.lCurrentSourceFrame, fsi.lMicrosecsPerSrcFrame, 1000);
			fsi.lDestFrameMS				= MulDiv(fsi.lCurrentFrame, fsi.lMicrosecsPerFrame, 1000);

			filters.RunFilters(fsi);

			VDPixmapBlt(pxdst, VDAsPixmap(*filters.LastBitmap()));
		} else
			VDPixmapBlt(pxdst, vSrc->getTargetFormat());

	} catch(const MyError&) {
		return VDSRVERR_FAILED;
	}

	return VDSRVERR_OK;
}