Example #1
void processVideo(CameraCalibration& calibration, cv::VideoCapture& capture)
{
    // Grab first frame to get the frame dimensions
    cv::Mat currentFrame, srcFrame;
    capture >> srcFrame;

    // Check the capture succeeded before resizing (cv::resize on an
    // empty Mat would throw):
    if (srcFrame.empty())
    {
        std::cout << "Cannot open video capture device" << std::endl;
        return;
    }
    cv::resize(srcFrame, currentFrame, cv::Size(653, 368));

    cv::Size frameSize(currentFrame.cols, currentFrame.rows);

    // NB: std::auto_ptr is deprecated (removed in C++17); modern code
    // would use std::unique_ptr here.
    std::auto_ptr<MarkerDetectionFacade> detector = createMarkerDetection(calibration);

    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        capture >> srcFrame;
        if (srcFrame.empty())
        {
            shouldQuit = true;
            continue;
        }
        cv::resize(srcFrame, currentFrame, cv::Size(653, 368));
        shouldQuit = processFrame(currentFrame, detector, drawingCtx);
    } while (!shouldQuit);
}
Example #2
QDomDocument *XmlWriter::toXml()
{
    QDomImplementation implementation;
    QDomDocumentType docType = implementation.createDocumentType(
        "scribe-document", "scribe", "qt.nokia.com/scribe");

    document = new QDomDocument(docType);

    // ### This processing instruction is required to ensure that the
    // encoding is specified when the document is written.
    QDomProcessingInstruction process = document->createProcessingInstruction(
        "xml", "version=\"1.0\" encoding=\"utf-8\"");
    document->appendChild(process);

    QDomElement documentElement = document->createElement("document");
    document->appendChild(documentElement);

    QTextFrame *root = textDocument->rootFrame();
    
    if (root)
        processFrame(documentElement, root);

    return document;
}
Example #3
void FrameHandlerMono::addImage(const cv::Mat& img, const double timestamp)
{
  if(!startFrameProcessingCommon(timestamp))
    return;

  // some cleanup from last iteration, can't do before because of visualization
  core_kfs_.clear();
  overlap_kfs_.clear();

  // create new frame
  SVO_START_TIMER("pyramid_creation");
  new_frame_.reset(new Frame(cam_, img.clone(), timestamp));
  SVO_STOP_TIMER("pyramid_creation");

  // process frame
  UpdateResult res = RESULT_FAILURE;
  if(stage_ == STAGE_DEFAULT_FRAME)
    res = processFrame();
  else if(stage_ == STAGE_SECOND_FRAME)
    res = processSecondFrame();
  else if(stage_ == STAGE_FIRST_FRAME)
    res = processFirstFrame();
  else if(stage_ == STAGE_RELOCALIZING)
    res = relocalizeFrame(SE3d(Matrix3d::Identity(), Vector3d::Zero()),
                          map_.getClosestKeyframe(last_frame_));

  // set last frame
  last_frame_ = new_frame_;
  new_frame_.reset();

  // finish processing
  finishFrameProcessingCommon(last_frame_->id_, res, last_frame_->nObs());
}
Example #4
/**
 * Processes a recorded video or a live webcam view, and lets you adjust the
 * homography refinement and reprojection threshold at runtime.
 */
void processVideo(const cv::Mat& patternImage, CameraCalibration& calibration, cv::VideoCapture& capture)
{
    // Grab first frame to get the frame dimensions
    cv::Mat currentFrame;  
    capture >> currentFrame;

    // Check the capture succeeded:
    if (currentFrame.empty())
    {
        std::cout << "Cannot open video capture device" << std::endl;
        return;
    }

    cv::Size frameSize(currentFrame.cols, currentFrame.rows);

    ARPipeline pipeline(patternImage, calibration);
    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        capture >> currentFrame;
        if (currentFrame.empty())
        {
            shouldQuit = true;
            continue;
        }

        shouldQuit = processFrame(currentFrame, pipeline, drawingCtx);
    } while (!shouldQuit);
}
Example #5
FrameHandlerMono::UpdateResult FrameHandlerMono::relocalizeFrame(
    const SE3d& T_cur_ref,
    FramePtr ref_keyframe)
{
  SVO_WARN_STREAM_THROTTLE(1.0, "Relocalizing frame");
  if(ref_keyframe == nullptr)
  {
    SVO_INFO_STREAM("No reference keyframe.");
    return RESULT_FAILURE;
  }
  SparseImgAlign img_align(Config::kltMaxLevel(), Config::kltMinLevel(),
                           30, SparseImgAlign::GaussNewton, false, false);
  size_t img_align_n_tracked = img_align.run(ref_keyframe, new_frame_);
  if(img_align_n_tracked > 30)
  {
    SE3d T_f_w_last = last_frame_->T_f_w_;
    last_frame_ = ref_keyframe;
    FrameHandlerMono::UpdateResult res = processFrame();
    if(res != RESULT_FAILURE)
    {
      stage_ = STAGE_DEFAULT_FRAME;
      SVO_INFO_STREAM("Relocalization successful.");
    }
    else
      new_frame_->T_f_w_ = T_f_w_last; // reset to last well localized pose
    return res;
  }
  return RESULT_FAILURE;
}
Example #6
void useFrame(cv::Mat& mRgba){

	int64 now = cv::getTickCount();
	int64 then;

	time_queue.push(now);

	// Process frame
	if(mRgba.cols != 0) {

		processFrame(mRgba);

		char buffer[256];
		snprintf(buffer, sizeof(buffer), "Display performance: %dx%d @ %.3f", mRgba.cols, mRgba.rows, fps);
		cv::putText(mRgba, std::string(buffer), cv::Point(8,64),
				cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(0,255,255,255));
	}

	if (time_queue.size() >= 2)
		then = time_queue.front();
	else
		then = 0;

	if (time_queue.size() >= 25)
		time_queue.pop();

	fps = time_queue.size() * (float)cv::getTickFrequency() / (now-then);
}
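The example above derives FPS from a sliding window of tick counts: with N timestamps queued, fps = N * tickFrequency / (newest - oldest). A self-contained sketch of the same estimator follows; the TickWindow name is ours, not from the original code:

#include <cstdint>
#include <queue>
#include <opencv2/core.hpp>

// Sliding-window FPS estimator, mirroring the time_queue logic above.
class TickWindow {
public:
    // Record one frame timestamp and return the estimated frames per second.
    double tick() {
        int64_t now = cv::getTickCount();
        ticks_.push(now);
        if (ticks_.size() > 25)   // cap the window at 25 samples, as above
            ticks_.pop();
        if (ticks_.size() < 2)
            return 0.0;           // not enough samples yet
        int64_t then = ticks_.front();
        return ticks_.size() * cv::getTickFrequency() / double(now - then);
    }
private:
    std::queue<int64_t> ticks_;
};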
Example #7
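// NB: qt_metacall() bodies like this are generated by Qt's meta-object
// compiler (moc); each case dispatches one slot by index, and the final
// _id -= 15 subtracts this class's slot count so a negative return tells
// the caller the call was handled here.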
int Viewfinder::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
    _id = QWidget::qt_metacall(_c, _id, _a);
    if (_id < 0)
        return _id;
    if (_c == QMetaObject::InvokeMetaMethod) {
        switch (_id) {
        case 0: processFrames((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 1: processFrame(); break;
        case 2: toggleCube(); break;
        case 3: toggleGourd(); break;
        case 4: paintCube(); break;
        case 5: paintGourd(); break;
        case 6: changeX(); break;
        case 7: changeY(); break;
        case 8: changeZ(); break;
        case 9: rotateX(); break;
        case 10: rotateY(); break;
        case 11: rotateZ(); break;
        case 12: plus(); break;
        case 13: minus(); break;
        case 14: openDirectory(); break;
        default: ;
        }
        _id -= 15;
    }
    return _id;
}
Example #8
void XmlWriter::processFrame(QDomElement &parent, QTextFrame *frame)
{
    QDomElement frameElement = document->createElement("frame");
    frameElement.setAttribute("begin", frame->firstPosition());
    frameElement.setAttribute("end", frame->lastPosition());
    parent.appendChild(frameElement);

    QTextFrame::iterator it;
    for (it = frame->begin(); !(it.atEnd()); ++it) {

        QTextFrame *childFrame = it.currentFrame();
        QTextBlock childBlock = it.currentBlock();

        if (childFrame) {
            QTextTable *childTable = qobject_cast<QTextTable*>(childFrame);

            if (childTable)
                processTable(frameElement, childTable);
            else
                processFrame(frameElement, childFrame);

        } else if (childBlock.isValid())
            processBlock(frameElement, childBlock);
    }
}
Example #9
void
liveScanner (dsd_opts * opts, dsd_state * state)
{
  if (opts->audio_in_fd == -1)
    {
      if (pthread_mutex_lock(&state->input_mutex))
        {
          printf("liveScanner -> Unable to lock mutex\n");
        }
    }
  while (1)
    {
      noCarrier (opts, state);
      state->synctype = getFrameSync (opts, state);
      // recalibrate center/umid/lmid
      state->center = ((state->max) + (state->min)) / 2;
      state->umid = (((state->max) - state->center) * 5 / 8) + state->center;
      state->lmid = (((state->min) - state->center) * 5 / 8) + state->center;
      while (state->synctype != -1)
        {
          processFrame (opts, state);
          state->synctype = getFrameSync (opts, state);
          // recalibrate center/umid/lmid
          state->center = ((state->max) + (state->min)) / 2;
          state->umid = (((state->max) - state->center) * 5 / 8) + state->center;
          state->lmid = (((state->min) - state->center) * 5 / 8) + state->center;
        }
    }
}
Example #10
AlgorithmStatus OverlapAdd::process() {
  EXEC_DEBUG("process()");
  AlgorithmStatus status = acquireData();
  EXEC_DEBUG("data acquired");

  if (status != OK) {
    if (!shouldStop()) return status;

    int available = input("frame").available();
    if (available == 0) {
      return FINISHED;
    }
    // otherwise, there are still some frames
    return CONTINUE;
  }

  const vector<vector<Real> >& frames = _frames.tokens();
  vector<Real>& output = _output.tokens();

  assert(frames.size() == 1 && (int) output.size() == _hopSize);
  const vector<Real> & windowedFrame = frames[0];

  if (windowedFrame.empty()) throw EssentiaException("OverlapAdd: the input frame is empty");

  processFrame(_tmpFrame, windowedFrame, output, _frameHistory, _frameSize,
               _hopSize, _normalizationGain);

  EXEC_DEBUG("releasing");
  releaseData();
  EXEC_DEBUG("released");

  return OK;
}
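The processFrame() call above performs the classic overlap-add step: each windowed frame is accumulated onto the overlapping tail of its predecessors, and one hop of completed samples is emitted per frame. A minimal sketch of that step, assuming plain float buffers rather than Essentia's types (this is an illustration, not Essentia's actual implementation):

#include <vector>

// Illustrative overlap-add step: accumulate one windowed frame (frameSize
// samples) into a history buffer, emit the first hopSize samples, shift.
void overlapAddStep(const std::vector<float>& windowedFrame,
                    std::vector<float>& history,   // size == frameSize
                    std::vector<float>& output,    // size == hopSize
                    int frameSize, int hopSize, float gain)
{
    // Add the new frame on top of the overlapping tail of previous frames.
    for (int i = 0; i < frameSize; ++i)
        history[i] += windowedFrame[i];

    // The first hopSize samples are now fully summed; emit them.
    for (int i = 0; i < hopSize; ++i)
        output[i] = gain * history[i];

    // Shift the history left by hopSize and zero the freed tail.
    for (int i = 0; i < frameSize - hopSize; ++i)
        history[i] = history[i + hopSize];
    for (int i = frameSize - hopSize; i < frameSize; ++i)
        history[i] = 0.0f;
}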
Example #11
  void pollCallback(polled_camera::GetPolledImage::Request& req,
                    polled_camera::GetPolledImage::Response& rsp,
                    sensor_msgs::Image& image, sensor_msgs::CameraInfo& info)
  {
    if (trigger_mode_ != prosilica::Software) {
      rsp.success = false;
      rsp.status_message = "Camera is not in software triggered mode";
      return;
    }

    tPvFrame* frame = NULL;

    try {
      cam_->setBinning(req.binning_x, req.binning_y);

      if (req.roi.x_offset || req.roi.y_offset || req.roi.width || req.roi.height) {
        // GigE cameras use ROI in binned coordinates, so scale the values down.
        // The ROI is expanded if necessary to land on binned coordinates.
        unsigned int left_x = req.roi.x_offset / req.binning_x;
        unsigned int top_y  = req.roi.y_offset / req.binning_y;
        unsigned int right_x  = (req.roi.x_offset + req.roi.width  + req.binning_x - 1) / req.binning_x;
        unsigned int bottom_y = (req.roi.y_offset + req.roi.height + req.binning_y - 1) / req.binning_y;
        unsigned int width = right_x - left_x;
        unsigned int height = bottom_y - top_y;
        cam_->setRoi(left_x, top_y, width, height);
      } else {
        cam_->setRoiToWholeFrame();
      }

      // Zero duration means "no timeout"
      unsigned long timeout = req.timeout.isZero() ? PVINFINITE : req.timeout.toNSec() / 1e6;
      frame = cam_->grab(timeout);
    }
    catch (prosilica::ProsilicaException &e) {
      if (e.error_code == ePvErrBadSequence)
        throw; // not easily recoverable

      rsp.success = false;
      rsp.status_message = (boost::format("Prosilica exception: %s") % e.what()).str();
      return;
    }

    if (!frame) {
      /// @todo Would be nice if grab() gave more info
      rsp.success = false;
      rsp.status_message = "Failed to capture frame, may have timed out";
      return;
    }

    info = cam_info_;
    image.header.frame_id = img_.header.frame_id;
    if (!processFrame(frame, image, info)) {
      rsp.success = false;
      rsp.status_message = "Captured frame but failed to process it";
      return;
    }
    info.roi.do_rectify = req.roi.do_rectify; // do_rectify should be preserved from request

    rsp.success = true;
  }
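The ROI conversion above rounds the left and top edges down and the right and bottom edges up, so the binned ROI always covers every requested pixel; (a + b - 1) / b is integer ceiling division. A worked example with hypothetical request values:

// With binning_x = 4, x_offset = 10, width = 7 (pixels 10..16 requested):
//   left_x  = 10 / 4               = 2   (round down)
//   right_x = (10 + 7 + 4 - 1) / 4 = 5   (round up: ceil(17 / 4))
//   width   = right_x - left_x     = 3 binned columns, covering pixels 8..19
unsigned int ceilDiv(unsigned int a, unsigned int b) { return (a + b - 1) / b; }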
Example #12
int VBoxNetLwipNAT::processGSO(PCPDMNETWORKGSO pGso, size_t cbFrame)
{
    if (!PDMNetGsoIsValid(pGso, cbFrame,
                          cbFrame - sizeof(PDMNETWORKGSO)))
        return VERR_INVALID_PARAMETER;

    cbFrame -= sizeof(PDMNETWORKGSO);
    uint8_t         abHdrScratch[256];
    uint32_t const  cSegs = PDMNetGsoCalcSegmentCount(pGso,
                                                      cbFrame);
    for (size_t iSeg = 0; iSeg < cSegs; iSeg++)
    {
        uint32_t cbSegFrame;
        void    *pvSegFrame =
          PDMNetGsoCarveSegmentQD(pGso,
                                  (uint8_t *)(pGso + 1),
                                  cbFrame,
                                  abHdrScratch,
                                  iSeg,
                                  cSegs,
                                  &cbSegFrame);

        int rc = processFrame(pvSegFrame, cbSegFrame);
        if (RT_FAILURE(rc))
        {
            return rc;
        }
    }

    return VINF_SUCCESS;
}
Example #13
void AudioProcessor::updateCB(const ros::TimerEvent& timer_event)
{
  result_ = channel_->getSpectrum(spectrum_, spectrum_size_, 0, fft_method_);
  if (result_ == FMOD_OK)
  {
    now_time_ = ros::Time::now(); // try to grab as close to getting message as possible
    freq_status_.tick();
    if (received_first_frame_)
    {
      max_period_between_updates_ = std::max(max_period_between_updates_, (timer_event.current_real - timer_event.last_real).toSec());
      last_callback_duration_ = timer_event.profile.last_duration.toSec();
    }
    received_first_frame_ = true;
    consequtively_dropped_frames_ = 0;
    processFrame();
  }
  else
  {
    ROS_ERROR("Could not get spectrum. (%d) : %s.", result_, FMOD_ErrorString(result_));
    consequtively_dropped_frames_++;
    if(consequtively_dropped_frames_ > max_dropped_frames_)
    {
      shutdown();
    }
  }

  frame_count_++;
  system_->update();
  diagnostic_updater_.update();
}
Example #14
void LeapMotionPlugin::pluginUpdate(float deltaTime, const controller::InputCalibrationData& inputCalibrationData) {
    if (!_enabled) {
        return;
    }

    const auto frame = _controller.frame();
    const auto frameID = frame.id();
    if (_lastFrameID >= frameID) {
        // Leap Motion not connected or duplicate frame.
        return;
    }

    if (!_hasLeapMotionBeenConnected) {
        emit deviceConnected(getName());
        _hasLeapMotionBeenConnected = true;
    }

    processFrame(frame);  // Updates _joints.

    auto joints = _joints;

    auto userInputMapper = DependencyManager::get<controller::UserInputMapper>();
    userInputMapper->withLock([&, this]() {
        _inputDevice->update(deltaTime, inputCalibrationData, joints, _prevJoints);
    });

    _prevJoints = joints;

    _lastFrameID = frameID;
}
Example #15
QString TextDocumentSerializer::toSimpleHtml(QTextDocument *document)
{
    this->document = document;

    QTextFrame *root = document->rootFrame();
    return processFrame(root);
}
Example #16
/*
 * QoS implementation using eight priority queues (one per priority level),
 * served by a weighted round-robin algorithm.
 */
void Switch::switchFrames()
{
	while(1)
	{
		for (int i = 7; i >= 0; --i)
		{
			for (int j = 0; j <= i; ++j)
			{
				r.lock();
				if (!queues[i].empty())
				{
					Frame* frame = queues[i].front();
					queues[i].pop();

					if(prt==true)
						std::cout << frame->toString() << std::endl;

					r.unlock();

					processFrame(frame, frame->sourcePort);
				}
				else
				{
					r.unlock();
					break;
				}
			}
		}
	}
}
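The nested loops give priority queue i up to i + 1 dequeues per pass, so the highest priority can send eight frames for each one from the lowest, and an empty queue immediately yields its remaining slots to the next pass. The same schedule in isolation, sketched with plain int "frames" and no locking:

#include <queue>
#include <iostream>

// Illustrative weighted round-robin over 8 priority queues:
// queue i is offered i + 1 dequeues per full pass.
void roundRobinPass(std::queue<int> (&queues)[8])
{
    for (int i = 7; i >= 0; --i) {
        for (int j = 0; j <= i; ++j) {
            if (queues[i].empty())
                break;                       // yield unused slots immediately
            int frame = queues[i].front();
            queues[i].pop();
            std::cout << "sending frame " << frame
                      << " from priority " << i << "\n";
        }
    }
}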
Example #17
void KeyframeMapper::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  tf::StampedTransform transform;

  const ros::Time& time = rgb_msg->header.stamp;

  try{
    tf_listener_.waitForTransform(
     fixed_frame_, rgb_msg->header.frame_id, time, ros::Duration(0.1));
    tf_listener_.lookupTransform(
      fixed_frame_, rgb_msg->header.frame_id, time, transform);  
  }
  catch(...)
  {
    return;
  }
  
  // create a new frame and increment the counter
  rgbdtools::RGBDFrame frame;
  createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame); 
  frame.index = rgbd_frame_index_;
  rgbd_frame_index_++;
  
  bool result = processFrame(frame, eigenAffineFromTf(transform));
  if (result) publishKeyframeData(keyframes_.size() - 1);
  
  publishPath();
}
Example #18
void Application::loop()
{
	// Check for key input
	int key = cv::waitKey(20);

	switch (key)
	{
		case 's':
			makeScreenshots();
			break;

		case 'c':
			clearOutputImage();
			break;

		case 'q':
			m_isFinished = true;
	}

	// Grab new images from the Kinect's cameras
	m_depthCamera->frameFromCamera(m_rgbImage, m_depthImage, CV_16UC1);

	// Process the current frame
	processFrame();

	// Display the images
	cv::imshow("raw", m_rgbImage);
	cv::imshow("depth", m_depthImage);
	cv::imshow("output", m_outputImage);
}
Example #19
void UT612ByteStreamParser::processByte(uint8_t byte)
{
	_byteCount++;

	bool currentByteIsLinefeed = (byte == 0x0a);
	bool frameIsEnding = _lastByteWasCarriageReturn && currentByteIsLinefeed;

	_buffer.push_back(byte);

	if (frameIsEnding && _buffer.size() >= frame_size)
	{
		//
		// Warn on frame alignment errors
		//
		int bytesSinceLastFrame = _byteCount - _lastFrameByteCount;
		if (bytesSinceLastFrame != frame_size)
		{
			std::cerr << "\nERROR - FRAME ALIGNMENT ERROR (diff=" << bytesSinceLastFrame << ")" << std::endl;
		}
		_lastFrameByteCount = _byteCount;


		//
		// Process the frame
		//
		std::cout << _frameCount++ << "\t";

		if (_doTimestamp)
		{
			std::cout << getCurrentTime() << "\t";
		}

		try {
			std::string values = processFrame(_buffer, _buffer.size() - frame_size);
			std::cout << values;
		}
		catch (std::runtime_error & e)
		{
			std::cerr << "Exception: \""<< e.what() << "\"\n";

			const uint8_t* d = &_buffer[_buffer.size() - frame_size];
			std::cerr << "Offending bytes: " << convert2HexString(d, frame_size) << std::endl;
		}

		bool showHex = false;
		if (showHex)
		{
			const uint8_t* d = &_buffer[_buffer.size() - frame_size];
			std::cout << "\t" << convert2HexString(d, frame_size);
		}

		std::cout << std::endl;

		_buffer.clear();
	}

	_lastByteWasCarriageReturn = (byte == 0x0d);
}
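The parser above delimits frames on a CR/LF pair and cross-checks alignment by counting bytes since the previous frame. A stripped-down sketch of just the delimiting logic; FRAME_SIZE is a stand-in constant for illustration, not the device's documented frame length:

#include <cstddef>
#include <cstdint>
#include <vector>

static const std::size_t FRAME_SIZE = 22;   // stand-in value for illustration

// Feed one byte; returns true when a CR/LF-terminated frame is complete.
bool feedByte(uint8_t byte, std::vector<uint8_t>& buffer, bool& lastWasCR)
{
    bool frameEnds = lastWasCR && byte == 0x0a;   // LF right after CR
    buffer.push_back(byte);
    lastWasCR = (byte == 0x0d);

    if (frameEnds && buffer.size() >= FRAME_SIZE) {
        // The completed frame is the last FRAME_SIZE bytes of the buffer;
        // a real parser would hand it off before clearing.
        buffer.clear();
        return true;
    }
    return false;
}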
Example #20
  void publishImage (FlyCapture2::Image * frame)
  {
    //sensor_msgs::CameraInfoPtr cam_info(new sensor_msgs::CameraInfo(cam_info_->getCameraInfo()));
    // or with a member variable:
    cam_info_ptr_ = sensor_msgs::CameraInfoPtr(new sensor_msgs::CameraInfo(cam_info_mgr_->getCameraInfo()));

    if (processFrame (frame, img_, cam_info_ptr_))
      streaming_pub_.publish (img_, *cam_info_ptr_);
  }
Example #21
void BackgroundSuppressionShell::processNextStreamingFrame(Subject &subject, const std::string &signal, const boost::any &data)
{
   AnimationFrame *pFrame = boost::any_cast<AnimationFrame*>(data);
   if(pFrame != NULL)
   {
      mCurrentFrame = pFrame->mFrameNumber;
      processFrame() && displayResults();
   }
}
Example #22
bool addFrame(NUI_Controller* mNui) // Data collection for temporal optimization
{
	if (processedFrameCount>29)
		return true;
	else if (processedFrameCount==29)
	{
		for (int i=0;i<29;i++) //last frame data is already in the actual registers
		{
			for (int j=0;j<16;j++)
				sphereRadii[j]+=radiiBuffer[i][j];
			estimatedShoulderWidth+=bodySizeBuffer[i][0];
			estimatedTorsoHeight+=bodySizeBuffer[i][1];
			estimatedBodyHeight+=bodySizeBuffer[i][2];
		}
		for (int i=0;i<16;i++)
				sphereRadii[i]/=30;
		// Correct spheres measured from the same bone, which can cause errors:
		for (int i=10;i<12;i++)
				sphereRadii[i]*=1.4;
		for (int i=8;i<10;i++)
				sphereRadii[i]*=1.2;
		for (int i=0;i<4;i++)
				sphereRadii[i]*=0.75;
		estimatedShoulderWidth/=30;
		estimatedTorsoHeight/=30;
		estimatedBodyHeight/=30;
		outputDataToCSV();
		for (int i=0;i<29;i++) //free the buffers
		{
			delete[] radiiBuffer[i];
			delete[] bodySizeBuffer[i];
		}

		// Do not forget to release images to prevent memory leaks
		cvReleaseImage(&dImage);
		cvReleaseImage(&uImage);
		processedFrameCount++;
		return true;
	}
	else
	{
		if (processFrame(mNui))
		{	
			radiiBuffer[processedFrameCount]=new float[16];
			memcpy(radiiBuffer[processedFrameCount],sphereRadii,16*sizeof(float));
			bodySizeBuffer[processedFrameCount]=new float[3];
			bodySizeBuffer[processedFrameCount][0]=estimatedShoulderWidth;
			bodySizeBuffer[processedFrameCount][1]=estimatedTorsoHeight;
			bodySizeBuffer[processedFrameCount][2]=estimatedBodyHeight;
			processedFrameCount++;
		}
		return false;
	}
}
Example #23
void ofxGifEncoder::doSave() {
	// create a multipage bitmap
	FIMULTIBITMAP *multi = FreeImage_OpenMultiBitmap(FIF_GIF, ofToDataPath(fileName).c_str(), TRUE, FALSE);
	for (size_t i = 0; i < frames.size(); i++) {
		ofxGifFrame * currentFrame = frames[i];
		processFrame(currentFrame, multi);
	}
	FreeImage_CloseMultiBitmap(multi);
}
Example #24
void VideoExtractor::previous(void)
{
    m_mutex.lock();

    m_autoPlay = false;
    m_videoStream[0]->r_grab();
    m_videoStream[1]->r_grab();

    processFrame();

    m_mutex.unlock();
}
Example #25
/**
 * Processes a single image in a loop, letting you control the detection
 * process by adjusting the homography refinement switch and the
 * reprojection threshold at runtime.
 */
void processSingleImage(const cv::Mat& patternImage, CameraCalibration& calibration, const cv::Mat& image)
{
    cv::Size frameSize(image.cols, image.rows);
    ARPipeline pipeline(patternImage, calibration);
    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        shouldQuit = processFrame(image, pipeline, drawingCtx);
    } while (!shouldQuit);
}
Example #26
void processSingleImage(CameraCalibration& calibration, const cv::Mat& image)
{
    cv::Size frameSize(image.cols, image.rows);
    // NB: std::auto_ptr is deprecated (removed in C++17); prefer std::unique_ptr.
    std::auto_ptr<MarkerDetectionFacade> detector = createMarkerDetection(calibration);
    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        shouldQuit = processFrame(image, detector, drawingCtx);
    } while (!shouldQuit);
}
Example #27
void VideoExtractor::slid(int value)
{
    m_mutex.lock();

    m_autoPlay = false;

    m_videoStream[0]->slid(value);
    m_videoStream[1]->slid(value);

    processFrame();

    m_mutex.unlock();
}
Example #28
VideoFilter::VideoFilter(QObject *parent)
	: VideoSource(parent)
	, VideoConsumer()
	, m_frameDirty(false)
	, m_threadFps(30)
	, m_fpsLimit(-1)
	, m_isThreaded(false)
{
	connect(&m_processTimer, SIGNAL(timeout()), this, SLOT(processFrame()));
	m_processTimer.setSingleShot(true);
	m_processTimer.setInterval(0);
}
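Wiring timeout() to processFrame() through a single-shot, zero-interval timer is a common Qt idiom for deferred, coalesced work: start() schedules one processFrame() on the next event-loop pass, and repeated requests before it fires collapse into a single call. A sketch of how such a filter might trigger it; requestProcessing is our illustrative name, not a method from the original class:

// Hypothetical trigger: defer processFrame() to the next event-loop pass;
// multiple requests before the timer fires coalesce into a single call.
void VideoFilter::requestProcessing()
{
	m_frameDirty = true;
	if (!m_processTimer.isActive())
		m_processTimer.start();   // emits timeout() -> processFrame() once
}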
Example #29
bool MainThreadWebSocketChannel::processOneItemFromBuffer()
{
    ASSERT(!m_suspended);
    ASSERT(m_client);
    ASSERT(!m_buffer.isEmpty());
    WTF_LOG(Network, "MainThreadWebSocketChannel %p processBuffer() Receive buffer has %lu bytes", this, static_cast<unsigned long>(m_buffer.size()));

    if (m_shouldDiscardReceivedData)
        return false;

    if (m_receivedClosingHandshake) {
        skipBuffer(m_buffer.size());
        return false;
    }

    if (m_handshake->mode() == WebSocketHandshake::Incomplete) {
        int headerLength = m_handshake->readServerHandshake(m_buffer.data(), m_buffer.size());
        if (headerLength <= 0)
            return false;
        if (m_handshake->mode() == WebSocketHandshake::Connected) {
            if (m_identifier) {
                TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "WebSocketReceiveHandshakeResponse", "data", InspectorWebSocketEvent::data(m_document, m_identifier));
                // FIXME(361045): remove InspectorInstrumentation calls once DevTools Timeline migrates to tracing.
                InspectorInstrumentation::didReceiveWebSocketHandshakeResponse(m_document, m_identifier, 0, &m_handshake->serverHandshakeResponse());
            }

            if (m_deflateFramer.enabled() && m_document) {
                const String message = "WebSocket extension \"x-webkit-deflate-frame\" is deprecated";
                m_document->addConsoleMessage(ConsoleMessage::create(JSMessageSource, WarningMessageLevel, message, m_sourceURLAtConstruction, m_lineNumberAtConstruction));
            }

            WTF_LOG(Network, "MainThreadWebSocketChannel %p Connected", this);
            skipBuffer(headerLength);
            String subprotocol = m_handshake->serverWebSocketProtocol();
            String extensions = m_handshake->acceptedExtensions();
            m_client->didConnect(subprotocol.isNull() ? "" : subprotocol, extensions.isNull() ? "" : extensions);
            WTF_LOG(Network, "MainThreadWebSocketChannel %p %lu bytes remaining in m_buffer", this, static_cast<unsigned long>(m_buffer.size()));
            return !m_buffer.isEmpty();
        }
        ASSERT(m_handshake->mode() == WebSocketHandshake::Failed);
        WTF_LOG(Network, "MainThreadWebSocketChannel %p Connection failed", this);
        skipBuffer(headerLength);
        m_shouldDiscardReceivedData = true;
        failAsError(m_handshake->failureReason());
        return false;
    }
    if (m_handshake->mode() != WebSocketHandshake::Connected)
        return false;

    return processFrame();
}
Example #30
void VideoFilter::run()
{
	while(!m_killed)
	{
 		if(m_frameDirty)
 		{
 			m_frameAccessMutex.lock();
			processFrame();
			m_frameDirty = false;
			m_frameAccessMutex.unlock();
		}
		msleep(1000/m_threadFps);
	}
}