Example no. 1
QVideoDecoder::QVideoDecoder(QVideo * video, QObject * parent) : QThread(parent),
	m_start_timestamp(0),
	m_initial_decode(true),
	m_previous_pts(0.0),
	m_killed(false),
	m_video_clock(0)
{
	m_video = video;

	m_time_base_rational.num = 1;
	m_time_base_rational.den = AV_TIME_BASE;

	m_sws_context = NULL;
// 	m_frame = NULL;

	m_video_buffer = new QVideoBuffer(this);
	connect(m_video_buffer, SIGNAL(nowEmpty()), this, SLOT(decode()));
	//connect(this, SIGNAL(decodeMe()), this, SLOT(decode()));
	connect(this, SIGNAL(newFrame(QFFMpegVideoFrame)), this, SLOT(addFrame(QFFMpegVideoFrame)));
	//connect(this, SIGNAL(newFrame(QFFMpegVideoFrame)), this, SLOT(setCurrentFrame(QFFMpegVideoFrame)));
}
Example no. 2
QSharedPointer<QmlVlcI420Frame> cloneFrame( const QSharedPointer<QmlVlcI420Frame>& from )
{
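    // Allocate a destination frame with the same buffer size and point its
    // Y/U/V planes into the new buffer; note that only the layout is set up
    // here, the pixel data itself is not copied.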
    QSharedPointer<QmlVlcI420Frame> newFrame( new QmlVlcI420Frame );

    newFrame->frameBuf.resize( from->frameBuf.size() );

    newFrame->width = from->width;
    newFrame->height = from->height;

    char* fb = newFrame->frameBuf.data();

    newFrame->yPlane = fb;
    newFrame->yPlaneSize = from->yPlaneSize;

    newFrame->uPlane = fb + newFrame->yPlaneSize;
    newFrame->uPlaneSize = from->uPlaneSize;

    newFrame->vPlane = fb + newFrame->yPlaneSize + newFrame->uPlaneSize;
    newFrame->vPlaneSize = from->vPlaneSize;

    return newFrame;
}
Example no. 3
			void Base::insert( FrameList::iterator at, double offset, double shift, Iterator::Base & otherFrames )
			{
				FrameList toInsert;
				try
				{
					// clone and shift by offset time
					for(;;)
					{
						auto_ptr< Frame::Base > newFrame( otherFrames.next().getClone() );
						newFrame->addTime( offset );
						toInsert.push_back( newFrame );
					}
				}
				catch( const KGD::Exception::OutOfBounds & )
				{
					// insert and adjust internal state
					Log::debug( "%s: media append: append %lu new frames", getLogName(), toInsert.size() );
					_frame.list.insert( at, toInsert.begin(), toInsert.end() );
					_frame.count += toInsert.size();
					_duration += otherFrames.duration();
					_frame.timeShift += shift;
				}
			}
Example no. 4
void TearScreenCapture::capture()
{
    //Remember to delete the frame!
    QImage vp = target->grabWindow(0,0,0,1920,1200).toImage();

    QByteArray frame_f;

//    QDataStream ser(&frame_f,QIODevice::WriteOnly);
//    ser << vp;
    QBuffer buff;
    buff.setBuffer(&frame_f);
    buff.open(QIODevice::WriteOnly);
    vp.save(&buff,"JPEG");

    QByteArray *frame = new QByteArray(qCompress(frame_f,9));

    //When the timer is stopped, some frames are redundant.
    //We delete these when there are no receivers for the newFrame() signal.
    //In early releases this was a blatant memory leak, since packets are
    //normally only disposed of *if they are sent*.
    if( receivers(SIGNAL(newFrame(QByteArray*))) > 0)
        emit newFrame(frame);
    else
        delete frame;
}
Example no. 5
int main(int argc, char ** argv) {
   pthread_t t;
   ThreadValues threadValues;
   int ret;
   int try;
   char * string;
   long startTime;

   /* acquire the semaphore: wait until the lock file has been removed */
   while(!access(SEMAPHORE, F_OK)) {
      usleep(50000);
   }

   FILE *fp = fopen(SEMAPHORE, "ab+");



   /* We open the pi serial port */
   threadValues.p = SerialLib_open("/dev/ttyAMA0"); /* The serial port of the raspberry pi */

   if(threadValues.p<0) {  
      fclose(fp);
      unlink(SEMAPHORE);
      return -1; 
   }

   threadValues.frame = newFrame(0x01,0,NULL); 
   threadValues.status=1;

   /* Creation of the thread for the communication */
   ret = pthread_create(&t, NULL,fn_process, &threadValues); 
   startTime = time(NULL);
   if(!ret) {
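      /* poll for up to 50 seconds, waiting for the communication thread
         to set status to 0 before extracting and printing the response */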
      do {
         if(0 == threadValues.status) {
            /* We extract the most relevant part of the string */
            unlink(SEMAPHORE);
            int lenght = threadValues.frame->lenght * 8;
            unsigned char * extraction = extractTrueFrame((unsigned char *)threadValues.frame->content, (int * )&lenght);

            if(NULL == extraction || 1 == lenght) {
               freeFrame(threadValues.frame);
               close(threadValues.p);
               fclose(fp);
               return -1;
            }

            string = hexToString(extraction,lenght);
            free(extraction);
            printf("%s",string);
            free(string);
            freeFrame(threadValues.frame);
            close(threadValues.p);
            fclose(fp);
            return 0;
         }
      } while((time(NULL) - startTime) < 50);
      pthread_cancel(t);
      freeFrame(threadValues.frame);
      close(threadValues.p);
      fclose(fp);
      unlink(SEMAPHORE);
      return -1;
   } 
   freeFrame(threadValues.frame);
   close(threadValues.p);
   fclose(fp);
   unlink(SEMAPHORE);
   return -1;
}
Example no. 6
void QVideoDecoder::decodeVideoFrame()
{
	//get next video frame from global queue
	//decode it and add to video buffer
	//freepacket
	//global_video_pkt_pts = packet.pts;

	int frame_finished = 0;
	long double pts;

	while(!frame_finished && !m_killed)
	{
		AVPacket packet;
		int num_packets = g_packet_queue.video_packets.count();
		if(num_packets > 0)
			packet = g_packet_queue.video_packets.dequeue();
		else
			return;

		avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet.data, packet.size);
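		// Pick the best timestamp available: the pts stashed in the frame's
		// opaque field if the packet has no dts, otherwise the packet dts, else 0.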

		if(packet.dts == AV_NOPTS_VALUE && m_av_frame->opaque && *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
		{
			pts = *(uint64_t *)m_av_frame->opaque;
		}
		else if(packet.dts != AV_NOPTS_VALUE)
		{
			pts = packet.dts;
		}
		else
		{
			pts = 0;
		}

		pts *= av_q2d(m_timebase);

		// Did we get a video frame?
		if(frame_finished)
		{
			// Convert the image from its native format to RGB, then copy the image data to a QImage
			if(m_sws_context == NULL)
			{
				m_sws_context = sws_getContext(m_video_codec_context->width, m_video_codec_context->height,
					m_video_codec_context->pix_fmt, m_video_codec_context->width, m_video_codec_context->height,
					PIX_FMT_RGB32, SWS_PRINT_INFO, NULL, NULL, NULL);
				//printf("decodeVideoFrame(): created m_sws_context\n");
			}
			printf("decodeVideoFrame(): got frame\n");

			sws_scale(m_sws_context, m_av_frame->data, m_av_frame->linesize, 0,
				m_video_codec_context->height, m_av_rgb_frame->data, m_av_rgb_frame->linesize);

			size_t num_bytes = m_av_rgb_frame->linesize[0] * m_video_codec_context->height;

			QImage * frame = new QImage(m_video_codec_context->width, m_video_codec_context->height, QImage::Format_RGB32);

			memcpy(frame->bits(), m_av_rgb_frame->data[0], num_bytes);

			av_free_packet(&packet);

			QFFMpegVideoFrame video_frame;
			video_frame.frame = frame;
			video_frame.pts = pts;
			video_frame.previous_pts = m_previous_pts;

			emit newFrame(video_frame);

			m_previous_pts = pts;
		}
	}
}
Example no. 7
void DataController::registerCamera(GenericCameraInterface *camera)
{

    cameraList.append(camera);
    numCameras++;

    if (numCameras == 1) {
        connect(camera, SIGNAL(newFrame(QVideoFrame)), videoWidget, SLOT(newFrame(QVideoFrame)));
        connect(camera, SIGNAL(newFrame(QVideoFrame)), videoWriter, SLOT(newFrame(QVideoFrame)));
    } else if (numCameras == 2) {
        qDebug() << "Deregistering";
        disconnect(cameraList.at(0), SIGNAL(newFrame(QVideoFrame)), videoWidget, SLOT(newFrame(QVideoFrame)));
        disconnect(cameraList.at(0), SIGNAL(newFrame(QVideoFrame)), videoWriter, SLOT(newFrame(QVideoFrame)));
        qDebug() << "Re-registering";
        connect(this, SIGNAL(newFrame(QVideoFrame)), videoWidget, SLOT(newFrame(QVideoFrame)));
        connect(this, SIGNAL(newFrame(QVideoFrame)), videoWriter, SLOT(newFrame(QVideoFrame)));
        connect(cameraList.at(0), SIGNAL(newFrame(QVideoFrame)), this, SLOT(newLeftFrame(QVideoFrame)));
        connect(cameraList.at(1), SIGNAL(newFrame(QVideoFrame)), this, SLOT(newRightFrame(QVideoFrame)));
    }

    qDebug() << "Camera registered " << camera->cameraName << " #" << numCameras;
}
Example no. 8
void MainUI::timerEvent(QTimerEvent *)
{
    updateGL();
    emit newFrame();
}
Example no. 9
void FrameGrabber::grab()
{
    incFrames();

    if (fakeCam) {
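        // No real capture device: synthesize a simple RGB test pattern
        // and emit it as a frame.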
        ++t;
        const int MAX_X = 400;
        const int MAX_Y = 300;
        std::vector<char> frame(MAX_X * MAX_Y * 3);
        for (int y = 0; y < MAX_Y; ++y) {
            for (int x = 0; x < MAX_X; ++x) {
                int r = y > (MAX_Y / 2) ? y - MAX_Y / 2 : MAX_Y / 2 - y;
                int g = x > (MAX_X / 2) ? x - MAX_X / 2 : MAX_X / 2 - x;
                int b = 0;

                r = 255.0 * r * (1.0 / (MAX_Y / 2));
                g = 255.0 * g * (1.0 / (MAX_X / 2));
                b = 0;

                if (t < 2 || t > 5) {
                    r = 255;
                    g = 255;
                    b = 255;
                }

                frame[(y * MAX_X + x) * 3 + 0] = r;
                frame[(y * MAX_X + x) * 3 + 1] = g;
                frame[(y * MAX_X + x) * 3 + 2] = b;
            }
        }

        emit newFrame(&frame[0], MAX_X, MAX_Y);
        return;
    }

    IplImage *frame = cvQueryFrame((CvCapture*)capture);
    if (!frame)
        throw std::runtime_error("could not capture frame");

    if (frame->depth != 8)
        throw std::runtime_error("unexpected color depth");

    if (frame->nChannels != 3)
        throw std::runtime_error("unexpected number of channels");

    if (simParamsHost.debug)
        std::cerr << "Capture:\n"
                  << "  nChannels: " << frame->nChannels << "\n"
                  << "  depth: " << frame->depth << "\n"
                  << "  IPL_DEPTH_8U:  " << IPL_DEPTH_8U  << "\n"
                  << "  IPL_DEPTH_8S:  " << IPL_DEPTH_8S << "\n"
                  << "  IPL_DEPTH_16U: " << IPL_DEPTH_16U  << "\n"
                  << "  IPL_DEPTH_16S: " << IPL_DEPTH_16S << "\n"
                  << "  IPL_DEPTH_32S: " << IPL_DEPTH_32S << "\n"
                  << "  IPL_DEPTH_32F: " << IPL_DEPTH_32F  << "\n"
                  << "  IPL_DEPTH_64F: " << IPL_DEPTH_64F << "\n"
                  << "  dataOrder: " << frame->dataOrder << "\n"
                  << "  width: " << frame->width << "\n"
                  << "  height: " << frame->height << "\n"
                  << "  imageSize: " << frame->imageSize << "\n"
                  << "  widthStep: " << frame->widthStep << "\n\n";

    emit newFrame(frame->imageData, frame->width, frame->height);
}
Example no. 10
Gui::Gui() : 
  io( IOManager::getInstance() ),
   gdata( Gamedata::getInstance() ),
//  parser( "xmlSpec/menu.xml" ),
  buttons(), 
  clicks(),
  nextIcon(0),
  click(0) 
{
  float x = gdata->getXmlInt("newX");
  float y = gdata->getXmlInt("newY");

  string filename = gdata->getXmlStr("newFile");
  SDL_Surface* surfaceNew(io->loadAndSet(filename, true));
  Frame newFrame(surfaceNew, 
            gdata->getXmlInt("newWidth"), 
            gdata->getXmlInt("newHeight"), 
            gdata->getXmlInt("pterSrcX"), 
            gdata->getXmlInt("pterSrcX")); 
  buttons.push_back( Button("New", newFrame, Vector2f(x, y)) );

  x = gdata->getXmlInt("openX");
  y = gdata->getXmlInt("openY");
  filename = gdata->getXmlStr("openFile");
  SDL_Surface* surfaceOpen(io->loadAndSet(filename, true));
  Frame openFrame(surfaceOpen, 
            gdata->getXmlInt("openWidth"), 
            gdata->getXmlInt("openHeight"), 
            gdata->getXmlInt("openSrcX"), 
            gdata->getXmlInt("openSrcY")); 
  //buttons.push_back( Button("Open", openFrame, Vector2f(x, y)) );
 
 
  
  x = gdata->getXmlInt("helpX"); 
  y = gdata->getXmlInt("helpY"); 
  filename = gdata->getXmlStr("helpFile");
  SDL_Surface* surfaceHelp(io->loadAndSet(filename, true));
  Frame helpFrame(surfaceHelp, 
            gdata->getXmlInt("helpWidth"), 
            gdata->getXmlInt("helpHeight"), 
            gdata->getXmlInt("helpSrcX"), 
            gdata->getXmlInt("helpSrcY")); 
  buttons.push_back( Button("Help", helpFrame, Vector2f(x, y)) );

  filename = gdata->getXmlStr("clickoffFile");
  SDL_Surface* surfaceOff(io->loadAndSet(filename, true));
  Frame clickoff(surfaceOff, 
              gdata->getXmlInt("clickoffWidth"), 
              gdata->getXmlInt("clickoffHeight"), 
              gdata->getXmlInt("clickoffSrcX"), 
              gdata->getXmlInt("clickoffSrcX")); 
  clicks.push_back( clickoff );

  filename = gdata->getXmlStr("clickonFile");
  SDL_Surface* surfaceOn(io->loadAndSet(filename, true));
  Frame clickon(surfaceOn, 
              gdata->getXmlInt("clickonWidth"), 
              gdata->getXmlInt("clickonHeight"), 
              gdata->getXmlInt("clickonSrcX"), 
              gdata->getXmlInt("clickonSrcX")); 
  clicks.push_back( clickon );
}
Example no. 11
void DeviceAcquisitionDemo::run()
{
    int step = 10;

    int minX = 200;
    int minY = 200;
    int maxX = 500;
    int maxY = 500;

    mTouchX = minX;
    mTouchY = minY;

    //mIsTouchPressed = true;
    emit touchPress(mTouchX, mTouchY);

    while(1){
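        // Drive the simulated touch point around a rectangle:
        // right along the top, down, left along the bottom, then back up.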

        switch (mMode) {
            case 0:
                if (mTouchX < maxX)
                {
                    mTouchX += step;
                }else{
                    mMode = 1;
                }
                break;

             case 1:
                 if (mTouchY < maxY){
                     mTouchY += step;
                 }else{
                     mMode = 2;
                 }
                 break;

            case 2:
                if (mTouchX > minX)
                {
                    mTouchX -= step;
                }else{
                    mMode = 3;
                }
                break;

            case 3:
                if (mTouchY > minY){
                    mTouchY -= step;
                }else{
                    mMode = 0;
                }
                break;
        }


        objects[0].x = mTouchX;
        objects[0].y = mTouchY;

        objects[1].x = (mTouchX + 300) / 4.0f;
        objects[1].y = (mTouchY) / 3.33f;

        objects[2].x = (mTouchX + 600) / 4.0f;
        objects[2].y = (mTouchY) / 3.33f;

        emit touchMove(mTouchX, mTouchY);
        emit newFrame(&objects);

        msleep(10);
    }

}
Example no. 12
// return not null transform if odometry is correctly computed
Transform OdometryF2F::computeTransform(
		SensorData & data,
		const Transform & guess,
		OdometryInfo * info)
{
	UTimer timer;
	Transform output;
	if(!data.rightRaw().empty() && !data.stereoCameraModel().isValidForProjection())
	{
		UERROR("Calibrated stereo camera required");
		return output;
	}
	if(!data.depthRaw().empty() &&
		(data.cameraModels().size() != 1 || !data.cameraModels()[0].isValidForProjection()))
	{
		UERROR("Calibrated camera required (multi-cameras not supported).");
		return output;
	}

	RegistrationInfo regInfo;

	UASSERT(!this->getPose().isNull());
	if(lastKeyFramePose_.isNull())
	{
		lastKeyFramePose_ = this->getPose(); // reset to current pose
	}
	Transform motionSinceLastKeyFrame = lastKeyFramePose_.inverse()*this->getPose();

	Signature newFrame(data);
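	// Wrap the sensor data in a signature and register it against the reference key frame, if one exists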
	if(refFrame_.sensorData().isValid())
	{
		Signature tmpRefFrame = refFrame_;
		output = registrationPipeline_->computeTransformationMod(
				tmpRefFrame,
				newFrame,
				!guess.isNull()?motionSinceLastKeyFrame*guess:Transform(),
				&regInfo);

		if(info && this->isInfoDataFilled())
		{
			std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > > pairs;
			EpipolarGeometry::findPairsUnique(tmpRefFrame.getWords(), newFrame.getWords(), pairs);
			info->refCorners.resize(pairs.size());
			info->newCorners.resize(pairs.size());
			std::map<int, int> idToIndex;
			int i=0;
			for(std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > >::iterator iter=pairs.begin();
				iter!=pairs.end();
				++iter)
			{
				info->refCorners[i] = iter->second.first.pt;
				info->newCorners[i] = iter->second.second.pt;
				idToIndex.insert(std::make_pair(iter->first, i));
				++i;
			}
			info->cornerInliers.resize(regInfo.inliersIDs.size(), 1);
			i=0;
			for(; i<(int)regInfo.inliersIDs.size(); ++i)
			{
				info->cornerInliers[i] = idToIndex.at(regInfo.inliersIDs[i]);
			}

			Transform t = this->getPose()*motionSinceLastKeyFrame.inverse();
			for(std::multimap<int, cv::Point3f>::const_iterator iter=tmpRefFrame.getWords3().begin(); iter!=tmpRefFrame.getWords3().end(); ++iter)
			{
				info->localMap.insert(std::make_pair(iter->first, util3d::transformPoint(iter->second, t)));
			}
			info->words = newFrame.getWords();
		}
	}
	else
	{
		//return Identity
		output = Transform::getIdentity();
		// a very high variance tells that the new pose is not linked with the previous one
		regInfo.variance = 9999;
	}

	if(!output.isNull())
	{
		output = motionSinceLastKeyFrame.inverse() * output;

		// new key-frame?
		if( (registrationPipeline_->isImageRequired() && (keyFrameThr_ == 0 || float(regInfo.inliers) <= keyFrameThr_*float(refFrame_.sensorData().keypoints().size()))) ||
			(registrationPipeline_->isScanRequired() && (scanKeyFrameThr_ == 0 || regInfo.icpInliersRatio <= scanKeyFrameThr_)))
		{
			UDEBUG("Update key frame");
			int features = newFrame.getWordsDescriptors().size();
			if(features == 0)
			{
				newFrame = Signature(data);
				// this will generate features only for the first frame or if optical flow was used (no 3d words)
				Signature dummy;
				registrationPipeline_->computeTransformationMod(
						newFrame,
						dummy);
				features = (int)newFrame.sensorData().keypoints().size();
			}

			if((features >= registrationPipeline_->getMinVisualCorrespondences()) &&
			   (registrationPipeline_->getMinGeometryCorrespondencesRatio()==0.0f ||
					   (newFrame.sensorData().laserScanRaw().cols &&
					   (newFrame.sensorData().laserScanMaxPts() == 0 || float(newFrame.sensorData().laserScanRaw().cols)/float(newFrame.sensorData().laserScanMaxPts())>=registrationPipeline_->getMinGeometryCorrespondencesRatio()))))
			{
				refFrame_ = newFrame;

				refFrame_.setWords(std::multimap<int, cv::KeyPoint>());
				refFrame_.setWords3(std::multimap<int, cv::Point3f>());
				refFrame_.setWordsDescriptors(std::multimap<int, cv::Mat>());

				//reset motion
				lastKeyFramePose_.setNull();
			}
			else
			{
				if (!refFrame_.sensorData().isValid())
				{
					// Don't send odometry if we don't have a keyframe yet
					output.setNull();
				}

				if(features < registrationPipeline_->getMinVisualCorrespondences())
				{
					UWARN("Too low 2D features (%d), keeping last key frame...", features);
				}

				if(registrationPipeline_->getMinGeometryCorrespondencesRatio()>0.0f && newFrame.sensorData().laserScanRaw().cols==0)
				{
					UWARN("Too low scan points (%d), keeping last key frame...", newFrame.sensorData().laserScanRaw().cols);
				}
				else if(registrationPipeline_->getMinGeometryCorrespondencesRatio()>0.0f && newFrame.sensorData().laserScanMaxPts() != 0 && float(newFrame.sensorData().laserScanRaw().cols)/float(newFrame.sensorData().laserScanMaxPts())<registrationPipeline_->getMinGeometryCorrespondencesRatio())
				{
					UWARN("Too low scan points ratio (%f < %f), keeping last key frame...", float(newFrame.sensorData().laserScanRaw().cols)/float(newFrame.sensorData().laserScanMaxPts()), registrationPipeline_->getMinGeometryCorrespondencesRatio());
				}
			}
		}
	}
	else if(!regInfo.rejectedMsg.empty())
	{
		UWARN("Registration failed: \"%s\"", regInfo.rejectedMsg.c_str());
	}

	data.setFeatures(newFrame.sensorData().keypoints(), newFrame.sensorData().descriptors());

	if(info)
	{
		info->type = 1;
		info->variance = regInfo.variance;
		info->inliers = regInfo.inliers;
		info->icpInliersRatio = regInfo.icpInliersRatio;
		info->matches = regInfo.matches;
		info->features = newFrame.sensorData().keypoints().size();
	}

	UINFO("Odom update time = %fs lost=%s inliers=%d, ref frame corners=%d, transform accepted=%s",
			timer.elapsed(),
			output.isNull()?"true":"false",
			(int)regInfo.inliers,
			(int)newFrame.sensorData().keypoints().size(),
			!output.isNull()?"true":"false");

	return output;
}
Example no. 13
void ProcessingThread::run()
{
    while(1)
    {
        /////////////////////////////////
        // Stop thread if stopped=TRUE //
        /////////////////////////////////
        stoppedMutex.lock();
        if (stopped)
        {
            stopped=false;
            stoppedMutex.unlock();
            break;
        }
        stoppedMutex.unlock();
        /////////////////////////////////
        /////////////////////////////////

        // Save processing time
        processingTime=t.elapsed();
        // Start timer (used to calculate processing rate)
        t.start();

        // Get frame from queue, store in currentFrame, set ROI
        currentFrame=Mat(imageBuffer->getFrame(),currentROI);

        updateMembersMutex.lock();
        ///////////////////
        // PERFORM TASKS //
        ///////////////////
        // Note: ROI changes will take effect on next frame
        if(resetROIFlag)
            resetROI();
        else if(setROIFlag)
            setROI();
        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING BELOW //
        ////////////////////////////////////
        // Grayscale conversion
        if(grayscaleOn)
            cvtColor(currentFrame,currentFrameGrayscale,CV_BGR2GRAY);
        // Smooth (in-place operations)
        if(smoothOn)
        {
            if(grayscaleOn)
            {
                switch(smoothType)
                {
                    // BLUR
                    case 0:
                        blur(currentFrameGrayscale,currentFrameGrayscale,Size(smoothParam1,smoothParam2));
                        break;
                    // GAUSSIAN
                    case 1:
                        GaussianBlur(currentFrameGrayscale,currentFrameGrayscale,Size(smoothParam1,smoothParam2),smoothParam3,smoothParam4);
                        break;
                    // MEDIAN
                    case 2:
                        medianBlur(currentFrameGrayscale,currentFrameGrayscale,smoothParam1);
                        break;
                }
            }
            else
            {
                switch(smoothType)
                {
                    // BLUR
                    case 0:
                        blur(currentFrame,currentFrame,Size(smoothParam1,smoothParam2));
                        break;
                    // GAUSSIAN
                    case 1:
                        GaussianBlur(currentFrame,currentFrame,Size(smoothParam1,smoothParam2),smoothParam3,smoothParam4);
                        break;
                    // MEDIAN
                    case 2:
                        medianBlur(currentFrame,currentFrame,smoothParam1);
                        break;
                }
            }
        }
        // Dilate
        if(dilateOn)
        {
            if(grayscaleOn)
                dilate(currentFrameGrayscale,currentFrameGrayscale,Mat(),Point(-1,-1),dilateNumberOfIterations);
            else
                dilate(currentFrame,currentFrame,Mat(),Point(-1,-1),dilateNumberOfIterations);
        }
        // Erode
        if(erodeOn)
        {
            if(grayscaleOn)
                erode(currentFrameGrayscale,currentFrameGrayscale,Mat(),Point(-1,-1),erodeNumberOfIterations);
            else
                erode(currentFrame,currentFrame,Mat(),Point(-1,-1),erodeNumberOfIterations);
        }
        // Flip
        if(flipOn)
        {
            if(grayscaleOn)
                flip(currentFrameGrayscale,currentFrameGrayscale,flipCode);
            else
                flip(currentFrame,currentFrame,flipCode);
        }
        // Canny edge detection
        if(cannyOn)
        {
            // Frame must be converted to grayscale first if grayscale conversion is OFF
            if(!grayscaleOn)
                cvtColor(currentFrame,currentFrameGrayscale,CV_BGR2GRAY);

            Canny(currentFrameGrayscale,currentFrameGrayscale,
                  cannyThreshold1,cannyThreshold2,
                  cannyApertureSize,cannyL2gradient);
        }
        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING ABOVE //
        ////////////////////////////////////

        // Convert Mat to QImage: Show grayscale frame [if either Grayscale or Canny processing modes are ON]
        if(grayscaleOn||cannyOn)
            frame=MatToQImage(currentFrameGrayscale);
        // Convert Mat to QImage: Show BGR frame
        else
            frame=MatToQImage(currentFrame);
        updateMembersMutex.unlock();

        // Update statistics
        updateFPS(processingTime);
        currentSizeOfBuffer=imageBuffer->getSizeOfImageBuffer();
        // Inform GUI thread of new frame (QImage)
        emit newFrame(frame);
    }
    qDebug() << "Stopping processing thread...";
}
Example no. 14
std::unique_ptr<Map> SceneGenerator::generateFromPoses(const std::vector<Eigen::Matrix3fr> &posesR, const std::vector<Eigen::Vector3f> &posesCenter)
{
	std::unique_ptr<Map> newMap(new Map());

	newMap->mGroundTruthCamera.reset(new CameraModel(*mCamera));

	cv::Mat3b nullImg3(mCamera->getImageSize()[1], mCamera->getImageSize()[0]);
	cv::Mat1b nullImg1(mCamera->getImageSize()[1], mCamera->getImageSize()[0]);
	Eigen::Matrix<uchar, 1, 32> nullDescr;
	nullDescr.setZero();

	float expectedPixelNoiseStd = std::max(0.3f, mNoiseStd);

	//Features
	const int kFeatureCount = 1000;
	std::uniform_real_distribution<float> distributionx(0, mCamera->getImageSize()[0]);
	std::uniform_real_distribution<float> distributiony(0, mCamera->getImageSize()[1]);
	for (int i = 0; i < kFeatureCount; i++)
	{
		std::unique_ptr<Feature> newFeature(new Feature());

		//Get position
		Eigen::Vector2f imagePos(distributionx(mRandomDevice), distributiony(mRandomDevice));
		Eigen::Vector2f xn = mCamera->unprojectToWorld(imagePos).hnormalized();

		newFeature->mGroundTruthPosition3D = Eigen::Vector3f(xn[0], xn[1], 0);
		newFeature->mPosition3D = newFeature->mGroundTruthPosition3D;
		//newFeature->setPosition(imagePos);

		newMap->addFeature(std::move(newFeature));
	}

	//Matlab log of poses
	if (mLogScene)
	{
		//Eigen::MatrixX3f points(newMap->getFeatures().size(), 3);
		//for (int i = 0; i < points.rows(); i++)
		//{
		//	points.row(i) = newMap->getFeatures()[i]->mPosition3D.transpose();
		//}
		//MatlabDataLog::Instance().AddValue("K", mCamera->getK());
		//MatlabDataLog::Instance().AddValue("X", points.transpose());
		//for (int i = 0; i < posesR.size(); i++)
		//{
		//	MatlabDataLog::Instance().AddCell("R", posesR[i]);
		//	MatlabDataLog::Instance().AddCell("center", posesCenter[i]);
		//}
		//return std::unique_ptr<Map>();
	}

	//Frames
	std::normal_distribution<float> error_distribution(0, mNoiseStd);
	for (int i = 0; i < (int)posesR.size(); i++)
	{
		std::unique_ptr<Keyframe> newFrame(new Keyframe());

		newFrame->init(nullImg3, nullImg1);
		
		Eigen::Vector3f poseT = -posesR[i] * posesCenter[i];

		newFrame->mGroundTruthPose3DR = posesR[i];
		newFrame->mGroundTruthPose3DT = poseT;

		//Stats
		std::vector<float> distortionError;
		std::vector<float> noiseError;

		//Points
		std::vector<Eigen::Vector2f> refPoints, imgPoints;
		for (int j = 0; j< (int)newMap->getFeatures().size(); j++)
		{
			auto &feature = *newMap->getFeatures()[j];

			//Project
			Eigen::Vector3f xn = posesR[i] * feature.mPosition3D + poseT;
			Eigen::Vector2f imagePosClean = mCamera->projectFromWorld(xn);
			Eigen::Vector2f noise(error_distribution(mRandomDevice), error_distribution(mRandomDevice));
			Eigen::Vector2f imagePos = imagePosClean + noise;

			//Position
			if (i == 0)
				feature.setPosition(imagePos);

			//Skip if outside of image
			if (imagePos[0] < 0 || imagePos[1] < 0 || imagePos[0] > mCamera->getImageSize()[0] || imagePos[1] > mCamera->getImageSize()[1])
				continue;

			//Save distortion and noise errors
			if (mVerbose)
			{
				//Eigen::Vector2f imagePosNoDistortion = mCamera->projectFromDistorted(xn.hnormalized());
				//distortionError.push_back((imagePosClean - imagePosNoDistortion).norm());
				//noiseError.push_back(noise.norm());
			}

			//Measurement
			std::unique_ptr<FeatureMeasurement> m(new FeatureMeasurement(&feature, newFrame.get(), imagePos, 0, nullDescr.data()));
			newFrame->getMeasurements().push_back(m.get());
			feature.getMeasurements().push_back(std::move(m));

			//Save match
			refPoints.push_back(feature.getPosition());
			imgPoints.push_back(imagePos);
		}

		//Write stats
		if (mVerbose)
		{
			MYAPP_LOG << "Frame " << i << "\n";
			if (newFrame->getMeasurements().empty())
				MYAPP_LOG << "AAAAHHHH NO MEASUREMENTS!\n";
			Eigen::Map<Eigen::ArrayXf> _distortionError(distortionError.data(), distortionError.size());
			Eigen::Map<Eigen::ArrayXf> _noiseError(noiseError.data(), noiseError.size());
			MYAPP_LOG << "  Measurement count: " << newFrame->getMeasurements().size() << "\n";
			MYAPP_LOG << "  Max distortion error: " << _distortionError.maxCoeff() << "\n";
			MYAPP_LOG << "  Max noise error: " << _noiseError.maxCoeff() << "\n";
		}


		//Get homography
		Eigen::Matrix<uchar, Eigen::Dynamic, 1> mask(refPoints.size());
		cv::Mat1b mask_cv(refPoints.size(), 1, mask.data());

		HomographyRansac ransac;
		ransac.setParams(3 * expectedPixelNoiseStd, 10, 100, (int)(0.9f * newFrame->getMeasurements().size()));
		ransac.setData(newFrame->getMeasurements());
		ransac.doRansac();

		Eigen::Matrix3fr H = ransac.getBestModel().cast<float>();

		//Refine
		HomographyEstimation hest;
		std::vector<bool> inliersVec;
		std::vector<float> scales(imgPoints.size(), 1);
		H = hest.estimateCeres(H, imgPoints, refPoints, scales, 3 * expectedPixelNoiseStd, inliersVec);

		//Set final
		newFrame->setPose(H);

		//Add
		newMap->addKeyframe(std::move(newFrame));
	}

	return newMap;
}
Example no. 15
void Composer::run()
{
	int ret;
	Frame *f = NULL;
	ItcMsg lastMsg( ItcMsg::RENDERSTOP );

	hiddenContext->makeCurrent();

	while ( running ) {
		itcMutex.lock();
		if ( !itcMsgList.isEmpty() )
			lastMsg = itcMsgList.takeFirst();
		itcMutex.unlock();
		
		switch ( lastMsg.msgType ) {
			case ItcMsg::RENDERPLAY: {
				ret = process( &f );
				if ( ret == PROCESSEND ) {
					while ( !sampler->getMetronom()->videoFrames.queueEmpty() ) {
						usleep( 5000 );
					}
					lastMsg.msgType = ItcMsg::RENDERSTOP;
					break;
				}
				else if ( ret == PROCESSWAITINPUT )
					usleep( 1000 );
				break;
			}
			case ItcMsg::RENDERSETPLAYBACKBUFFER: {
				double pts = sampler->fromComposerSetPlaybackBuffer( lastMsg.backward );
				sampler->fromComposerSeekTo( pts, lastMsg.backward, false );
				sampler->getMetronom()->play( true, lastMsg.backward );
				playing = true;
				skipFrame = 0;
				audioSampleDelta = 0;
				lastMsg.msgType = ItcMsg::RENDERPLAY;
				break;
			}
			case ItcMsg::RENDERFRAMEBYFRAMESETPLAYBACKBUFFER: {
				double pts = sampler->fromComposerSetPlaybackBuffer( lastMsg.backward );
				sampler->fromComposerSeekTo( pts, lastMsg.backward, false );
				runOneShot( f );
				lastMsg.msgType = ItcMsg::RENDERSTOP;
				break;
			}
			case ItcMsg::RENDERFRAMEBYFRAME: {
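				// Dequeue queued video frames until one backed by a GL texture
				// can be emitted; if none is available, render one shot.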
				bool shown = false;
				Metronom *m = sampler->getMetronom();
				if ( !m->videoFrames.isEmpty() ) {
					do {
						f = m->videoFrames.dequeue();
						if ( f->type() == Frame::GLTEXTURE ) {
							emit newFrame( f );
							shown = true;
						}
						else
							sampler->fromComposerReleaseVideoFrame( f );
					} while ( !m->videoFrames.isEmpty() && !shown );
				}
				if ( !shown ) {
					runOneShot( f );
				}
				lastMsg.msgType = ItcMsg::RENDERSTOP;
				break;
			}
			case ItcMsg::RENDERSKIPBY: {
				f = sampler->getMetronom()->getLastFrame();
				if ( f ) {
					sampler->fromComposerSeekTo( f->pts() + (f->profile.getVideoFrameDuration() * lastMsg.step) );
					runOneShot( f );
				}
				lastMsg.msgType = ItcMsg::RENDERSTOP;
				break;
			}
			case ItcMsg::RENDERSEEK: {
				sampler->fromComposerSeekTo( lastMsg.pts, lastMsg.backward, lastMsg.seek );
				runOneShot( f );
				lastMsg.msgType = ItcMsg::RENDERSTOP;
				break;
			}
			case ItcMsg::RENDERUPDATE: {
				f = sampler->getMetronom()->getAndLockLastFrame();
				Frame *dst = sampler->getMetronom()->freeVideoFrames.dequeue();
				if ( f && dst ) {
					dst->sample = new ProjectSample();
					dst->sample->copyVideoSample( f->sample );
					dst->setPts( f->pts() );
					if ( sampler->fromComposerUpdateFrame( dst ) ) {
						sampler->getMetronom()->unlockLastFrame();
						movitRender( dst, true );
						if ( dst->fence() )
							glClientWaitSync( dst->fence()->fence(), 0, GL_TIMEOUT_IGNORED );
						dst->isDuplicate = true;
						emit newFrame( dst );
					}
					else {
						sampler->getMetronom()->unlockLastFrame();
						dst->release();
						sampler->fromComposerSeekTo( f->pts() );
						f = NULL;
						runOneShot( f );
					}
				}
				else {
					sampler->getMetronom()->unlockLastFrame();
					if ( dst )
						dst->release();
				}
				lastMsg.msgType = ItcMsg::RENDERSTOP;
				break;
			}
			default: {
				if ( playing ) {
					sampler->getMetronom()->play( false );
					playing = false;
					emit paused( true );
					skipFrame = 0;
					audioSampleDelta = 0;
				}
				usleep( 1000 );
			}
		}
	}

	hiddenContext->doneCurrent();
#if QT_VERSION >= 0x050000
	hiddenContext->context()->moveToThread( qApp->thread() );
#endif	
}
Example no. 16
AP_App::AP_App (HINSTANCE hInstance, const char * szAppName)
  : XAP_App_BaseClass ( hInstance, szAppName )
#else
AP_App::AP_App (const char * szAppName)
  : XAP_App_BaseClass ( szAppName )
#endif
{
}

AP_App::~AP_App ()
{
}

/*!
 *  Open windows requested on commandline.
 * 
 * \return False if an unknown command line option was used, true
 * otherwise.  
 */
bool AP_App::openCmdLineFiles(const AP_Args * args)
{
	int kWindowsOpened = 0;
	const char *file = NULL;

	if (AP_Args::m_sFiles == NULL) {
		// no files to open, this is ok
		XAP_Frame * pFrame = newFrame();
		pFrame->loadDocument((const char *)NULL, IEFT_Unknown);
		return true;
	}

	int i = 0;
	while ((file = AP_Args::m_sFiles[i++]) != NULL) {
		char * uri = NULL;

		uri = UT_go_shell_arg_to_uri (file);

		XAP_Frame * pFrame = newFrame();

		UT_Error error = pFrame->loadDocument (uri, IEFT_Unknown, true);
		g_free (uri);

		if (UT_IS_IE_SUCCESS(error))
		{
			kWindowsOpened++;
			if (error == UT_IE_TRY_RECOVER) {
				pFrame->showMessageBox(AP_STRING_ID_MSG_OpenRecovered,
                           XAP_Dialog_MessageBox::b_O,
                           XAP_Dialog_MessageBox::a_OK);
			}
		}
		else
		{
			// TODO we crash if we just delete this without putting something
			// TODO in it, so let's go ahead and open an untitled document
			// TODO for now.  this would cause us to get 2 untitled documents
			// TODO if the user gave us 2 bogus pathnames....

			// Because of the incremental loader, we should not crash anymore;
			// I've got other things to do now though.
			kWindowsOpened++;
			pFrame->loadDocument((const char *)NULL, IEFT_Unknown);
			pFrame->raise();

			errorMsgBadFile (pFrame, file, error);
		}

		if (args->m_sMerge) {
			PD_Document * pDoc = static_cast<PD_Document*>(pFrame->getCurrentDoc());
			pDoc->setMailMergeLink(args->m_sMerge);
		}
	}

	if (kWindowsOpened == 0)
	{
		// no documents specified or openable, open an untitled one
		
		XAP_Frame * pFrame = newFrame();
		pFrame->loadDocument((const char *)NULL, IEFT_Unknown);
		if (args->m_sMerge) {
			PD_Document * pDoc = static_cast<PD_Document*>(pFrame->getCurrentDoc());
			pDoc->setMailMergeLink(args->m_sMerge);
		}
	}

	return true;
}
Example no. 17
void DetectorObserver::display(IplImage *frame)
{
    if (frame)
        emit newFrame(frame);
}
Example no. 18
void ProcessingThread::run()
{
    qDebug() << "Starting processing thread...";
    while(1)
    {
        /////////////////////////////////
        // Stop thread if doStop=TRUE //
        /////////////////////////////////
        doStopMutex.lock();
        if(doStop)
        {
            doStop=false;
            doStopMutex.unlock();
            break;
        }
        doStopMutex.unlock();
        //////////////////////////////////
        //////////////////////////////////

        // Save processing time
        processingTime=t.elapsed();
        // Start timer (used to calculate processing rate)
        t.start();

        processingMutex.lock();
        // Get frame from queue, store in currentFrame, set ROI
        currentFrame=Mat(sharedImageBuffer->getByDeviceNumber(deviceNumber)->get().clone(), currentROI);

        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING BELOW //
        ////////////////////////////////////

        // Grayscale conversion (in-place operation)
       if(imgProcFlags.grayscaleOn && (currentFrame.channels() == 3 || currentFrame.channels() == 4)) {
           cvtColor(currentFrame, currentFrame, CV_BGR2GRAY, 1);
       }

       // Save the original frame after grayscale conversion so the VideoWriter works correctly
       if(emitOriginal || captureOriginal)
           originalFrame = currentFrame.clone();

       // Fill Buffer that is processed by Magnificator
       fillProcessingBuffer();

       if (processingBufferFilled()) {
           if(imgProcFlags.colorMagnifyOn)
           {
               magnificator.colorMagnify();
               currentFrame = magnificator.getFrameLast();
           }
           else if(imgProcFlags.laplaceMagnifyOn)
           {
               magnificator.laplaceMagnify();
               currentFrame = magnificator.getFrameLast();
           }
           else if(imgProcFlags.waveletMagnifyOn)
           {
               magnificator.waveletMagnify();
               currentFrame = magnificator.getFrameLast();
           }
           else
               processingBuffer.erase(processingBuffer.begin());
       }

        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING ABOVE //
        ////////////////////////////////////

        // Convert Mat to QImage
        frame=MatToQImage(currentFrame);

        processingMutex.unlock();

        // Save the Stream
        if(doRecord) { 
            if(output.isOpened()) { 
                if(captureOriginal) {

                    processingMutex.lock();
                    // Combine original and processed frame
                    combinedFrame = combineFrames(currentFrame,originalFrame);
                    processingMutex.unlock();

                    output.write(combinedFrame);
                }
                else {
                    output.write(currentFrame);
                }

                framesWritten++;
                emit frameWritten(framesWritten);
            }
        }

        // Emit the original image before converting to grayscale
       if(emitOriginal)
           emit origFrame(MatToQImage(originalFrame));
        // Inform GUI thread of new frame (QImage)
        // emit newFrame(frame);
        emit newFrame(MatToQImage(currentFrame));

        // Update statistics
        updateFPS(processingTime);
        statsData.nFramesProcessed++;
        // Inform GUI of updated statistics
        emit updateStatisticsInGUI(statsData);
    }
    qDebug() << "Stopping processing thread...";
}
Example no. 19
void QVideoDecoder::decode()
{
	if(m_video->m_status == QVideo::NotRunning)
		return;

	emit ready(false);

	AVPacket pkt1, *packet = &pkt1;
	double pts;

	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet->stream_index == m_video_stream)
			{
				global_video_pkt_pts = packet->pts;

// 				mutex.lock();
				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet->data, packet->size);
// 				mutex.unlock();

				if(packet->dts == AV_NOPTS_VALUE &&
					      m_av_frame->opaque &&
				  *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
				{
					pts = *(uint64_t *)m_av_frame->opaque;
				}
				else if(packet->dts != AV_NOPTS_VALUE)
				{
					pts = packet->dts;
				}
				else
				{
					pts = 0;
				}

				pts *= av_q2d(m_timebase);

				// Did we get a video frame?
				if(frame_finished)
				{
// 					size_t num_native_bytes = m_av_frame->linesize[0]     * m_video_codec_context->height;
// 					size_t num_rgb_bytes    = m_av_rgb_frame->linesize[0] * m_video_codec_context->height;

					// Convert the image from its native format to RGB, then copy the image data to a QImage
					if(m_sws_context == NULL)
					{
						mutex.lock();
						m_sws_context = sws_getContext(
							m_video_codec_context->width, m_video_codec_context->height,
							m_video_codec_context->pix_fmt,
							m_video_codec_context->width, m_video_codec_context->height,
							//PIX_FMT_RGB32,SWS_BICUBIC,
							RAW_PIX_FMT, SWS_FAST_BILINEAR,
							NULL, NULL, NULL); //SWS_PRINT_INFO
						mutex.unlock();
						//printf("decode(): created m_sws_context\n");
					}
					//printf("decode(): got frame\n");

// 					mutex.lock();
					sws_scale(m_sws_context,
						  m_av_frame->data,
						  m_av_frame->linesize, 0,
						  m_video_codec_context->height,
						  m_av_rgb_frame->data,
						  m_av_rgb_frame->linesize);
// 					mutex.unlock();

// 					size_t num_bytes = m_av_rgb_frame->linesize[0] * m_video_codec_context->height;

// 					if(m_frame)
// 						delete m_frame;

					m_frame = QImage(m_av_rgb_frame->data[0],
								m_video_codec_context->width,
								m_video_codec_context->height,
								QImage::Format_RGB16);

					av_free_packet(packet);

					// This block is adapted from the synchronize_video() function
					// given at: http://www.dranger.com/ffmpeg/tutorial05.html
					{
						// update the frame pts
						double frame_delay;

						if(pts != 0)
						{
							/* if we have pts, set video clock to it */
							m_video_clock = pts;
						} else {
							/* if we aren't given a pts, set it to the clock */
							pts = m_video_clock;
						}
						/* update the video clock */
						frame_delay = av_q2d(m_timebase);
						/* if we are repeating a frame, adjust clock accordingly */
						frame_delay += m_av_frame->repeat_pict * (frame_delay * 0.5);
						m_video_clock += frame_delay;
						//qDebug() << "Frame Delay: "<<frame_delay;
					}


					QFFMpegVideoFrame video_frame;
					video_frame.frame = &m_frame;
					video_frame.pts = pts;
					video_frame.previous_pts = m_previous_pts;

					m_current_frame = video_frame;

					emit newFrame(video_frame);

					m_previous_pts = pts;

                                        //QTimer::singleShot(5, this, SLOT(decode()));
				}
			}
			else if(packet->stream_index == m_audio_stream)
			{
// 				mutex.lock();
				//decode audio packet, store in queue
				av_free_packet(packet);
// 				mutex.unlock();

			}
			else
			{
// 				mutex.lock();
				av_free_packet(packet);
// 				mutex.unlock();

			}
		}
		else
		{
			emit reachedEnd();
		}
	}
}
Example no. 20
bool XAP_App::retrieveState()
{
	XAP_StateData sd;

	bool bRet = true;
	
	if(!_retrieveState(sd))
		return false;

	UT_return_val_if_fail(sd.iFileCount <= XAP_SD_MAX_FILES, false);
		
	// now do our thing with it:
	//  * open the files stored in the data
	//  * move carets and scrollbars to the saved positions
	//  * make the first saved frame to be the current frame

	// we should only be restoring state with no docs already
	// opened
	UT_return_val_if_fail(m_vecFrames.getItemCount() <= 1, false);
	XAP_Frame * pFrame = NULL;

	if(m_vecFrames.getItemCount())
		pFrame = m_vecFrames.getNthItem(0);

	// if there is a frame, it should be one with unmodified untitled document
	UT_return_val_if_fail( !pFrame || (!pFrame->getFilename() && !pFrame->isDirty()), false );
		
	UT_Error errorCode = UT_IE_IMPORTERROR;
	
	for(UT_uint32 i = 0; i < sd.iFileCount; ++i)
	{
		if(!pFrame)
			pFrame = newFrame();
		
		if (!pFrame)
			return false;
		
		// Open a complete but blank frame, then load the document into it
		errorCode = pFrame->loadDocument((const char *)NULL, 0 /*IEFT_Unknown*/);

		bRet &= (errorCode == UT_OK);
		
		if (errorCode == UT_OK)
			pFrame->show();
	    else
			continue;

		errorCode = pFrame->loadDocument(sd.filenames[i], 0 /*IEFT_Unknown*/);

		bRet &= (errorCode == UT_OK);
		
		if (errorCode != UT_OK)
			continue;

		pFrame->show();

		AV_View* pView = pFrame->getCurrentView();
		if(!pView)
		{
			UT_ASSERT_HARMLESS( UT_SHOULD_NOT_HAPPEN );
			bRet = false;
			continue;
		}
		
		pView->setPoint(sd.iDocPos[i]);
		pView->setXScrollOffset(sd.iXScroll[i]);
		pView->setYScrollOffset(sd.iYScroll[i]);

		// now we check if this doc was autosaved Untitled* doc at hibernation
		char * p = strstr(sd.filenames[i], HIBERNATED_EXT);
		if(p)
		{
			// remove extension
			*p = 0;
			AD_Document * pDoc = pFrame->getCurrentDoc();

			if(pDoc)
			{
				pDoc->clearFilename();
				pDoc->forceDirty();
				pFrame->updateTitle();
			}
		}
		
		
		// frame used -- next doc needs a new one
		pFrame = NULL;
	}

	// set focus to the first frame
	pFrame = m_vecFrames.getNthItem(0);
	UT_return_val_if_fail( pFrame, false );

	AV_View* pView = pFrame->getCurrentView();
	UT_return_val_if_fail( pView, false );

	pView->focusChange(AV_FOCUS_HERE);

	return bRet;
}
Example no. 21
void QVideoDecoder::readFrame()
{
	emit ready(false);

	AVPacket packet;
	double pts;

	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, &packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet.stream_index == m_video_stream)
			{
				//global_video_pkt_pts = packet.pts;

				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet.data, packet.size);

				if(packet.dts == AV_NOPTS_VALUE && m_av_frame->opaque && *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
				{
					pts = *(uint64_t *)m_av_frame->opaque;
				}
				else if(packet.dts != AV_NOPTS_VALUE)
				{
					pts = packet.dts;
				}
				else
				{
					pts = 0;
				}

				pts *= av_q2d(m_timebase);

				// Did we get a video frame?
				if(frame_finished)
				{
					// Convert the image from its native format to RGB, then copy the image data to a QImage
					if(m_sws_context == NULL)
					{
						m_sws_context = sws_getContext(m_video_codec_context->width, m_video_codec_context->height,
							m_video_codec_context->pix_fmt, m_video_codec_context->width, m_video_codec_context->height,
							PIX_FMT_RGB32, SWS_PRINT_INFO, NULL, NULL, NULL);
						//printf("readFrame(): created m_sws_context\n");
					}
					printf("readFrame(): got frame\n");

					sws_scale(m_sws_context, m_av_frame->data, m_av_frame->linesize, 0,
						m_video_codec_context->height, m_av_rgb_frame->data, m_av_rgb_frame->linesize);

					size_t num_bytes = m_av_rgb_frame->linesize[0] * m_video_codec_context->height;

					QImage * frame = new QImage(m_video_codec_context->width, m_video_codec_context->height, QImage::Format_RGB32);

					memcpy(frame->bits(), m_av_rgb_frame->data[0], num_bytes);

					av_free_packet(&packet);

					QFFMpegVideoFrame video_frame;
					video_frame.frame = frame;
					video_frame.pts = pts;
					video_frame.previous_pts = m_previous_pts;

					emit newFrame(video_frame);

					m_previous_pts = pts;
				}
			}
			else if(packet.stream_index == m_audio_stream)
			{
				//decode audio packet, store in queue
				av_free_packet(&packet);
			}
			else
			{
				av_free_packet(&packet);
			}
		}
		else
		{
			emit reachedEnd();
		}
	}
}
Example no. 22
ofRGBPacketProcessor::ofRGBPacketProcessor(){
    newFrame();
}
Example no. 23
	double getDelta() {
		__frame_time = __timer_delta;
		newFrame();
		prevTime.tv_usec = curTime.tv_usec;
		return __timer_delta;
	}