Пример #1
0
void AVISubsessionIOState::afterGettingFrame(unsigned packetDataSize,
					  struct timeval presentationTime) {
  // Begin by checking whether there was a gap in the RTP stream.
  // If so, try to compensate for this (if desired):
  unsigned short rtpSeqNum
    = fOurSubsession.rtpSource()->curPacketRTPSeqNum();
  if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
    short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
    for (short i = 1; i < seqNumGap; ++i) {
      // Insert a copy of the previous frame, to compensate for the loss:
      useFrame(*fPrevBuffer);
    }
  }
  fLastPacketRTPSeqNum = rtpSeqNum;

  // Now, continue working with the frame that we just got
  if (fBuffer->bytesInUse() == 0) {
    fBuffer->setPresentationTime(presentationTime);
  }
  fBuffer->addBytes(packetDataSize);

  useFrame(*fBuffer);
  if (fOurSink.fPacketLossCompensate) {
    // Save this frame, in case we need it for recovery:
    SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL
    fPrevBuffer = fBuffer;
    fBuffer = tmp;
  }
  fBuffer->reset(); // for the next input

  // Now, try getting more frames:
  fOurSink.continuePlaying();
}
Пример #2
0
void AligningReader::alignFramesImpl()
{
   int n_frames = getNumIntensityFrames();
   par_for(0, n_frames, [this](int i, int thread)
   {
      if (terminate) return;

      try
      {
         frame_aligner->addFrame(i, getIntensityFrame(i));
      }
      catch (cv::Exception e)
      {
         std::cout << "Error during realignment: " << e.what();
      }

      auto result = frame_aligner->getRealignmentResult(i);
      if (result.useFrame(realign_params))
         intensity_normalisation += result.mask->get();

      realign_cv.notify_all();
   });

   frame_aligner->clearTemp();

   realignment_complete = true;
   realign_cv.notify_all();

   int64 duration_ticks = cv::getTickCount() - tick_count_start;
   double duration_s = duration_ticks / cv::getTickFrequency();

   std::cout << "Realignment took: " << duration_s << "s\n";

}
Пример #3
0
/**
 * JNI entry point: convert one NV21 camera frame to a 4-channel image in
 * outPixels and hand it to useFrame() for processing.
 *
 * @param NV21FrameData  input frame, NV21 layout (Y plane + interleaved VU)
 * @param outPixels      output pixel buffer, width*height ints (4 bytes/px)
 */
JNIEXPORT void JNICALL Java_com_jbboin_myopencvskeleton_CameraPreview_ImageProcessing(
		JNIEnv* env, jobject obj,
		jint width, jint height,
		jbyteArray NV21FrameData, jintArray outPixels){

	// Pin (or copy) the Java arrays so cv::Mat headers can wrap them directly.
	jbyte * pNV21FrameData = env->GetByteArrayElements(NV21FrameData, 0);
	jint * poutPixels = env->GetIntArrayElements(outPixels, 0);

	// NV21 stores a full-resolution Y plane followed by a half-height
	// interleaved VU plane, hence height + height/2 rows of single bytes.
	cv::Mat mYuv(height + height/2, width, CV_8UC1, (uchar*)pNV21FrameData);
	cv::Mat mRgba(height, width, CV_8UC4, (uchar *)poutPixels);
	// NOTE(review): CV_YUV2BGRA_NV21 yields BGRA channel order despite the
	// "mRgba" name — confirm the Java side expects this byte order.
	cv::cvtColor(mYuv, mRgba, CV_YUV2BGRA_NV21);

	useFrame(mRgba);

	// The input frame is only read, never modified through pNV21FrameData:
	// release with JNI_ABORT to skip an unnecessary copy-back into the Java
	// array when GetByteArrayElements returned a copy.
	env->ReleaseByteArrayElements(NV21FrameData, pNV21FrameData, JNI_ABORT);
	// Output pixels must be committed back to Java: mode 0 = copy back + free.
	env->ReleaseIntArrayElements(outPixels, poutPixels, 0);
}
Пример #4
0
/**
 * Incoming point-cloud callback.
 *
 * Converts the ROS message into the internal point type (XYZ or XYZRGB),
 * transforms it into the preset cloud frame (m_pcFrameId) when the incoming
 * frame id differs, optionally z-filters it in BASE_FRAME_ID, rewrites the
 * header, and invalidates the plugin so consumers pick up the new data.
 *
 * @param cloud  incoming point cloud message
 */
void srs_env_model::CPointCloudPlugin::insertCloudCallback( const  tIncommingPointCloud::ConstPtr& cloud)
{
//	PERROR("Try lock");
	// Serialize all access to m_data for the duration of the callback.
	boost::mutex::scoped_lock lock(m_lockData);
//	PERROR("Lock");

	// Frame-skipping policy: useFrame() decides whether this cloud is processed.
	if( ! useFrame() )
	{
		PERROR("Skipping frame: " << cloud->header.stamp);

		return;
	}

//	std::cerr << "PCP.iccb start. Time: " << ros::Time::now() << std::endl;


	// NOTE(review): startTime is never read afterwards — dead timing probe?
	ros::WallTime startTime = ros::WallTime::now();

	m_bAsInput = true;

	// Convert input pointcloud to the internal point type (tPclPoint),
	// choosing the intermediate PCL type by whether RGB data is present.
	m_data->clear();
	if( ! isRGBCloud( cloud ) )
	{
		pcl::PointCloud< pcl::PointXYZ >::Ptr bufferCloud( new pcl::PointCloud< pcl::PointXYZ> );

		pcl::fromROSMsg(*cloud, *bufferCloud );
		pcl::copyPointCloud< pcl::PointXYZ, tPclPoint >( *bufferCloud, *m_data );

	}
	else
	{
		pcl::PointCloud< pcl::PointXYZRGB >::Ptr bufferCloud( new pcl::PointCloud< pcl::PointXYZRGB > );

		pcl::fromROSMsg(*cloud, *bufferCloud);
		pcl::copyPointCloud<pcl::PointXYZRGB, tPclPoint>( *bufferCloud, *m_data );
	}


	//*/

	// Transform the cloud into the preset frame if it arrived in another one.
	if( cloud->header.frame_id != m_pcFrameId )
	{
//		PERROR( "Wait for input transform" );

	    // Some transforms
		tf::StampedTransform sensorToPcTf;

		// Get transforms
		try {
			// Transformation - from, to, time, waiting time
			m_tfListener.waitForTransform(m_pcFrameId, cloud->header.frame_id,
					cloud->header.stamp, ros::Duration(5));

			m_tfListener.lookupTransform(m_pcFrameId, cloud->header.frame_id,
					cloud->header.stamp, sensorToPcTf);

		} catch (tf::TransformException& ex) {
			// No usable transform within 5 s — abandon this cloud entirely.
			ROS_ERROR_STREAM("Transform error: " << ex.what() << ", quitting callback");
			PERROR("Transform error");

			return;
		}

		Eigen::Matrix4f sensorToPcTM;

		// Convert TF transform to a 4x4 matrix (sensor frame -> preset cloud frame).
		pcl_ros::transformAsMatrix(sensorToPcTf, sensorToPcTM);


		// transform pointcloud from sensor frame to the preset frame
		pcl::transformPointCloud< tPclPoint >(*m_data, *m_data, sensorToPcTM);
	}

	// Optional z-range filtering, performed in BASE_FRAME_ID coordinates.
	if( m_bFilterPC )		// TODO: Optimize this by removing redundant transforms
	{
//		PERROR( "Wait for filtering transform");

		// Need both directions: into the base frame to filter, back out afterwards.
		tf::StampedTransform pcToBaseTf, baseToPcTf;
		try {
			// Transformation - to, from, time, waiting time
			m_tfListener.waitForTransform(BASE_FRAME_ID, m_pcFrameId,
					cloud->header.stamp, ros::Duration(5));

			m_tfListener.lookupTransform(BASE_FRAME_ID, m_pcFrameId,
					cloud->header.stamp, pcToBaseTf);

			m_tfListener.lookupTransform(m_pcFrameId, BASE_FRAME_ID,
					cloud->header.stamp, baseToPcTf );

		} catch (tf::TransformException& ex) {
			ROS_ERROR_STREAM("Transform error: " << ex.what() << ", quitting callback");
			PERROR( "Transform error.");
			return;
		}

		Eigen::Matrix4f pcToBaseTM, baseToPcTM;

		// Convert both TF transforms to 4x4 matrices.
		pcl_ros::transformAsMatrix(pcToBaseTf, pcToBaseTM);	// cloud frame -> base frame
		pcl_ros::transformAsMatrix(baseToPcTf, baseToPcTM);	// base frame -> cloud frame


		// transform pointcloud from pc frame to the base frame
		pcl::transformPointCloud< tPclPoint >(*m_data, *m_data, pcToBaseTM);

		// filter height and range, also removes NANs:
		pcl::PassThrough<tPclPoint> pass;
		pass.setFilterFieldName("z");
		pass.setFilterLimits(m_pointcloudMinZ, m_pointcloudMaxZ);
		pass.setInputCloud(m_data->makeShared());
		pass.filter(*m_data);

		// transform pointcloud back to pc frame from the base frame
		pcl::transformPointCloud< tPclPoint >(*m_data, *m_data, baseToPcTM);
	}

	// Stamp the result with the input header, but force the preset frame id.
	m_data->header = cloud->header;
    m_data->header.frame_id = m_pcFrameId;

//    PERROR("Insert cloud CB. Size: " << m_data->size() );

	// NOTE(review): invalidate() is called while m_lockData is still held
	// (the other overload unlocks first) — confirm this cannot deadlock.
 	invalidate();

 //	std::cerr << "PCP.iccb end. Time: " << ros::Time::now() << std::endl;

// 	PERROR("Unlock");

}
/**
 * Incoming point-cloud callback.
 *
 * Converts the ROS message into the internal point type (XYZ or XYZRGB),
 * recolors the cloud with the plugin's default color when input color is
 * absent or unused, transforms it into the input cloud frame
 * (m_inputPcFrameId) when needed, optionally z-filters it in BASE_FRAME_ID,
 * stores the timestamp, and invalidates the plugin (after unlocking).
 *
 * @param cloud  incoming point cloud message
 */
void srs_env_model::CPointCloudPlugin::insertCloudCallback( const  tIncommingPointCloud::ConstPtr& cloud)
{
//	PERROR("insertCloud: Try lock");

	// Serialize all access to m_data; explicitly unlocked before invalidate().
	boost::mutex::scoped_lock lock(m_lockData);

//	PERROR("insertCloud: Locked");

	// Frame-skipping policy: useFrame() decides whether this cloud is processed.
	if( ! useFrame() )
	{
//		std::cerr << "Frame skipping" << std::endl;
		return;
	}

//	std::cerr << "PCP.iccb start. Time: " << ros::Time::now() << std::endl;

	m_bAsInput = true;

	// Convert input pointcloud to the internal point type (tPclPoint),
	// choosing the intermediate PCL type by whether RGB data is present.
	m_data->clear();
	bool bIsRgbCloud(isRGBCloud( cloud ));

	if( !bIsRgbCloud  )
	{
		pcl::PointCloud< pcl::PointXYZ >::Ptr bufferCloud( new pcl::PointCloud< pcl::PointXYZ> );

		pcl::fromROSMsg(*cloud, *bufferCloud );
		pcl::copyPointCloud< pcl::PointXYZ, tPclPoint >( *bufferCloud, *m_data );
	}
	else
	{
		pcl::PointCloud< pcl::PointXYZRGB >::Ptr bufferCloud( new pcl::PointCloud< pcl::PointXYZRGB > );

		pcl::fromROSMsg(*cloud, *bufferCloud);
		pcl::copyPointCloud<pcl::PointXYZRGB, tPclPoint>( *bufferCloud, *m_data );
	}

	// Recolor every point with the plugin default (m_r/m_g/m_b) unless the
	// cloud carried RGB data AND input color is configured to be used.
	if( !(bIsRgbCloud && m_bUseInputColor) )
	{
		// Use our default color to colorize cloud
		tPointCloud::iterator it, itEnd(m_data->points.end());
		for( it = m_data->points.begin(); it != itEnd; ++it)
		{
			it->r = m_r; it->g = m_g; it->b = m_b;
		}
	}

	//*/
//	std::cerr << "Input cloud frame id: " << cloud->header.frame_id << std::endl;

	// Transform the cloud into the input cloud frame if it arrived in another one.
//	if( cloud->header.frame_id != m_frame_id )
	if( cloud->header.frame_id != m_inputPcFrameId )
	{
//		PERROR( "Wait for input transform" );

	    // Some transforms
		tf::StampedTransform sensorToPcTf;

		// Get transforms
		try {
			// Transformation - from, to, time, waiting time
//			m_tfListener.waitForTransform(m_frame_id, cloud->header.frame_id,
			m_tfListener.waitForTransform(m_inputPcFrameId, cloud->header.frame_id,
					cloud->header.stamp, ros::Duration(5));

//			m_tfListener.lookupTransform(m_frame_id, cloud->header.frame_id,
			m_tfListener.lookupTransform(m_inputPcFrameId, cloud->header.frame_id,
					cloud->header.stamp, sensorToPcTf);

		} catch (tf::TransformException& ex) {
			// No usable transform within 5 s — abandon this cloud entirely.
			ROS_ERROR_STREAM("Transform error: " << ex.what() << ", quitting callback");
			PERROR("Transform error");

			return;
		}

		Eigen::Matrix4f sensorToPcTM;

		// Convert TF transform to a 4x4 matrix (sensor frame -> input cloud frame).
		pcl_ros::transformAsMatrix(sensorToPcTf, sensorToPcTM);

		// transform pointcloud from sensor frame to the preset frame
		pcl::transformPointCloud< tPclPoint >(*m_data, *m_data, sensorToPcTM);
		m_data->header = cloud->header;
//		m_data->header.frame_id = m_frame_id;
		m_data->header.frame_id = m_inputPcFrameId;
	}

//	PERROR("1");

//	PERROR("2");

	// Optional z-range filtering, performed in BASE_FRAME_ID coordinates.
	if( m_bFilterPC )		// TODO: Optimize this by removing redundant transforms
	{
//		PERROR( "Wait for filtering transform");

		// Need both directions: into the base frame to filter, back out afterwards.
		tf::StampedTransform pcToBaseTf, baseToPcTf;
		try {
			// Transformation - to, from, time, waiting time
//			m_tfListener.waitForTransform(BASE_FRAME_ID, m_frame_id,
			m_tfListener.waitForTransform(BASE_FRAME_ID, m_inputPcFrameId,
					cloud->header.stamp, ros::Duration(5));

//			m_tfListener.lookupTransform(BASE_FRAME_ID, m_frame_id,
			m_tfListener.lookupTransform(BASE_FRAME_ID, m_inputPcFrameId,
					cloud->header.stamp, pcToBaseTf);

//			m_tfListener.lookupTransform(m_frame_id, BASE_FRAME_ID,
			m_tfListener.lookupTransform(m_inputPcFrameId, BASE_FRAME_ID,
					cloud->header.stamp, baseToPcTf );

		} catch (tf::TransformException& ex) {
			ROS_ERROR_STREAM("Transform error: " << ex.what() << ", quitting callback");
			PERROR( "Transform error.");
			return;
		}

		Eigen::Matrix4f pcToBaseTM, baseToPcTM;

		// Convert both TF transforms to 4x4 matrices.
		pcl_ros::transformAsMatrix(pcToBaseTf, pcToBaseTM);	// cloud frame -> base frame
		pcl_ros::transformAsMatrix(baseToPcTf, baseToPcTM);	// base frame -> cloud frame

		// transform pointcloud from pc frame to the base frame
		pcl::transformPointCloud< tPclPoint >(*m_data, *m_data, pcToBaseTM);

		// filter height and range, also removes NANs:
		pcl::PassThrough<tPclPoint> pass;
		pass.setFilterFieldName("z");
		pass.setFilterLimits(m_pointcloudMinZ, m_pointcloudMaxZ);
		pass.setInputCloud(m_data->makeShared());
		pass.filter(*m_data);

		// transform pointcloud back to pc frame from the base frame
		pcl::transformPointCloud< tPclPoint >(*m_data, *m_data, baseToPcTM);
	}

	// Stamp the result with the input header, but force the input cloud frame id.
	m_data->header = cloud->header;
//	m_data->header.frame_id = m_frame_id;
	m_data->header.frame_id = m_inputPcFrameId;

    // Store timestamp
    m_DataTimeStamp = cloud->header.stamp;

//    ROS_DEBUG("CPointCloudPlugin::insertCloudCallback(): stamp = %f", cloud->header.stamp.toSec());

 //   PERROR("Insert cloud CB. Size: " << m_data->size() );

    // Unlock for invalidation (it has it's own lock)
    lock.unlock();
//    PERROR("insertCloud: Unlocked");

 	invalidate();

// 	PERROR("Unlock");
}