Example #1
void drawReprojectedPoints(const ImgG& img, const double* K, const double* kc,
		const double* R, const double* T, int nPts, const double* pts3d,
		const double* pts2d, ImgRGB& outImg) {

	outImg.resize(img.w, img.h);
	cv::Mat cvOutImg(img.rows, img.cols, CV_8UC3, outImg.data);
	cv::Mat cvImg(img.rows, img.cols, CV_8U, img.data);

	cv::cvtColor(cvImg, cvOutImg, CV_GRAY2RGB);

	Mat_d tmpPts2d(nPts, 2);
	project(K, kc, R, T, nPts, pts3d, tmpPts2d);

	for (int i = 0; i < nPts; i++) {
		double x1 = tmpPts2d[2 * i];
		double y1 = tmpPts2d[2 * i + 1];

		double x0 = pts2d[2 * i];
		double y0 = pts2d[2 * i + 1];

		cv::circle(cvOutImg, cv::Point2f(x1, y1), 3, cv::Scalar(0, 255, 0), 1,
				CV_AA);
		cv::circle(cvOutImg, cv::Point2f(x0, y0), 1, cv::Scalar(0, 0, 255), 1,
				CV_AA);
		cv::line(cvOutImg, cv::Point2f(x1, y1), cv::Point2f(x0, y0),
				cv::Scalar(255, 0, 0), 1, CV_AA);
	}
}
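In this example the projections of the 3D points (tmpPts2d) are drawn as larger circles, the measured 2D features (pts2d) as smaller ones, and a line joins each pair, so the line length is the reprojection error in pixels. The overlay makes calibration or pose errors visible at a glance.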
Example #2
void drawPoint(const ImgG& img, const double x, const double y, ImgRGB& outImg,
		int radius, uchar r, uchar g, uchar b, double thickness) {
	outImg.resize(img.w, img.h);
	cv::Mat cvImg(img.h, img.w, CV_8U, img.data);
	cv::Mat cvOutImg(img.h, img.w, CV_8UC3, outImg.data);
	cv::cvtColor(cvImg, cvOutImg, CV_GRAY2RGB);
	cv::circle(cvOutImg, cv::Point2f(x, y), radius, cv::Scalar(r, g, b),
			thickness, CV_AA);
}
Example #3
int detectSURFPoints(const ImgG& img, Mat_d& surfPts,
                     std::vector<float>& surfDesc, double hessianThreshold) {

    KpVec surfPtsVec;
    cv::SURF surf(hessianThreshold, 4, 2, false);
    cv::Mat cvImg(img.rows, img.cols, CV_8UC1, img.data);
    surf(cvImg, cv::Mat(), surfPtsVec, surfDesc);
    KpVec2Mat(surfPtsVec, surfPts);
    return surf.descriptorSize();
}
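With extended set to false, descriptorSize() is 64, and the descriptors of all keypoints are packed back to back in surfDesc. A minimal usage sketch (the variable names and the threshold below are illustrative, not from the original source):

int descSize = detectSURFPoints(img, surfPts, surfDesc, 400.0); // 400.0: arbitrary example threshold
int i = 0;                                     // e.g. the first keypoint
const float* desc_i = &surfDesc[i * descSize]; // descSize consecutive floats per keypoint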
Example #4
void SingleSLAM::grabReadFrame() {
	videoReader->grabFrame();
	//videoReader->getCurGrayImage(m_img.data);
	videoReader->readCurFrame(m_rgb.data, m_img.data);

	cv::Mat cvImg(m_img.rows, m_img.cols, CV_8UC1, m_img.data);
	cv::Mat cvSmallImg(m_smallImg.rows, m_smallImg.cols, CV_8UC1,
			m_smallImg.data);
	cv::resize(cvImg, cvSmallImg, cv::Size(m_smallImg.cols, m_smallImg.rows));
	//scaleDownAvg(m_img, m_smallImg, m_smallScale);
}
Example #5
CameraStream::CameraStream( osg::Geometry* geom ) : QObject(), osg::Image()
{
	mGeom	= geom;
	mWidth	= 0;
	mHeight = 0;

#ifdef WIN32
	cv::Mat cvImg( 480,640, CV_8UC3, CV_RGB( 0,0,0 ) ); // 640x480 black image on Windows
#else
	cv::Mat cvImg;			// empty image on Linux
#endif
	updateBackgroundImage( cvImg );

}
Example #6
void getNCCBlock(const ImgG& img, double x, double y, NCCBlock& blk) {
	cv::Mat cvImg(img.rows, img.cols, CV_8UC1, img.data);
	cv::Size patchSize(2 * SL_NCCBLK_HW + 1, 2 * SL_NCCBLK_HW + 1);
	cv::Mat patch;
	cv::getRectSubPix(cvImg, patchSize, cv::Point2d(x, y), patch);

	memcpy(blk.I, patch.data, SL_NCCBLK_LEN);

	/* compute NCC parameters */
	double a = 0, b = 0;
	for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
		a += blk.I[j];
		b += double(blk.I[j]) * blk.I[j];
	}
	blk.A = a;
	blk.B = b;
	blk.C = 1 / sqrt(SL_NCCBLK_LEN * b - a * a);
	blk.avgI = a / SL_NCCBLK_LEN;
}
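Since A = ΣI, B = ΣI² and C = 1/sqrt(N·B − A²) with N = SL_NCCBLK_LEN, the normalized cross-correlation of two prepared blocks p and q collapses to a single dot product: NCC(p, q) = (N·Σ p_j q_j − A_p·A_q)·C_p·C_q. A minimal sketch of such a comparison, using the fields filled above (nccScore itself is a hypothetical helper, not part of the original source):

double nccScore(const NCCBlock& p, const NCCBlock& q) {
	double dot = 0;
	for (int j = 0; j < SL_NCCBLK_LEN; ++j)
		dot += double(p.I[j]) * q.I[j];
	// (N*Σpq - A_p*A_q) * C_p * C_q is the standard NCC formula rearranged.
	return (SL_NCCBLK_LEN * dot - p.A * q.A) * p.C * q.C;
}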
Example #7
void drawKeyPoints(const ImgG& img, const Mat_d& corners, ImgRGB& rgb, uchar r,
		uchar g, uchar b) {
	int nPts = corners.rows;
	if (nPts == 0)
		return;

	cv::Mat cvImg(img.rows, img.cols, CV_8UC1, img.data);
	cv::Mat tmpImg(img.rows, img.cols, CV_8UC3);

	cv::cvtColor(cvImg, tmpImg, CV_GRAY2RGB);

	for (int i = 0; i < nPts; i++) {
		double x = corners.data[2 * i];
		double y = corners.data[2 * i + 1];
		cv::circle(tmpImg, cv::Point(x, y), 3, cv::Scalar(r, g, b), 1, CV_AA);
	}

	rgb.resize(img.cols, img.rows);
	memcpy(rgb.data, tmpImg.data, 3 * img.rows * img.cols);
}
Example #8
bool ROSMain_features() {
	CoSLAM& coSLAM = MyApp::coSLAM;
	/////////////////////////1. GPU initialization/////////////////////////
	// initialization for Cg
	glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE);
	glutCreateWindow(" ");
	glutHideWindow();

	glewInit();

	V3D_GPU::Cg_ProgramBase::initializeCg();

	//////////////////////////2.read video information//////////////////
	try {
//		for(int c = 0; c < coSLAM.numCams; c++){
//			coSLAM.slam[c].videoReader = &MyApp::aviReader[c];
//		}

		coSLAM.init(false);
		MyApp::bInitSucc = true;
		logInfo("Loading video sequences.. OK!\n");
	} catch (SL_Exception& e) {
		logInfo(e.what());
#ifdef WIN32
		wxMessageBox(e.what());
#endif
		return false;
	}

	//notify the GUI thread to create GUIs
	MyApp::broadcastCreateGUI();

	// wait until the GUIs have been created
	MyApp::waitCreateGUI();

	for (int i = 0; i < coSLAM.numCams; i++){
		MyApp::videoWnd[i]->setSLAMData(i, &coSLAM);
		vector<float> reprojErrStatic, reprojErrDynamic;
		vector<int> frameNumber;
		MyApp::s_reprojErrStatic.push_back(reprojErrStatic);
		MyApp::s_reprojErrDynamic.push_back(reprojErrDynamic);
		MyApp::s_frameNumber.push_back(frameNumber);
	}


	MyApp::modelWnd1->setSLAMData(&coSLAM);
	MyApp::modelWnd2->setSLAMData(&coSLAM);
	MyApp::broadcastCreateGUI();

	//for measuring the timings
	Timings timingsPerStep;
	Timings timingsReadFrame;
	Timings timingsNewMapPoints;

	/* start the SLAM process*/
	try {
		coSLAM.readFrame();
		//copy the data to buffers for display
		updateDisplayData();
		//initialise the map points
		for (int i = 0; i < coSLAM.numCams; i++) {
			printf("slam[%d].m_camPos.size(): %d\n", i, coSLAM.slam[i].m_camPos.size());
		}
//		tic();
//		coSLAM.initMap();
//		toc();
		cout <<"debug\n";

//		redis->setPose(1, 2, 3.6);

		while (!MyApp::bExit){
			pthread_mutex_lock(&MyApp::_mutexImg);
			if (!coSLAM.grabReadFrame())
				pthread_cond_wait(&MyApp::_condImg, &MyApp::_mutexImg);
			pthread_mutex_unlock(&MyApp::_mutexImg);

//			//copy the data to buffers for display
			updateDisplayData();
			//printf("current frame: %d\n", coSLAM.curFrame);
			if (MyApp::bStartInit){
				MyApp::publishMapInit();
				printf("Start initializing map...\n");
				coSLAM.curFrame = 0;
				//initialise the map points
				if (coSLAM.initMap()){

					if (MyApp::doCalibration) {
						// Need this in real test
						coSLAM.calibGlobal2Cam();
					}

					for (int i = 0; i < coSLAM.numCams; i++)
						coSLAM.state[i] = SLAM_STATE_NORMAL;
					printf("Init map success!\n");
					break;
				}
				else{
					MyApp::bStartInit = false;
					MyApp::publishMapInit();
				}
			}
		}
		updateDisplayData();
//		coSLAM.pause();

		for (int i = 0; i < coSLAM.numCams; i++) {
			printf("slam[%d].m_camPos.size(): %d\n", i, coSLAM.slam[i].m_camPos.size());
			coSLAM.state[i] = SLAM_STATE_NORMAL;
		}

//		return 0;
//		coSLAM.pause();

		int endFrame = SLAMParam::nTotalFrame - SLAMParam::nSkipFrame
				- SLAMParam::nInitFrame - 10;
//		int endFrame = 500;

//		endFrame = 1500;

		int i = 0;
		// init estimation flag
		bool bEstPose[SLAM_MAX_NUM];
		for (int i = 0; i < SLAM_MAX_NUM; i++){
			bEstPose[i] = false;
		}

		vector<double> tmStepVec;


		vector<double> poseSent[SLAM_MAX_NUM];
//		vector<double> rosTime;

		while (!MyApp::bExit) {
//			while (MyApp::bStop) {/*stop*/
//			}

//			if (MyApp::_mergeable){
//				if (MyApp::_imgReady[0] && MyApp::_imgReady[1]){
//					coSLAM.grabReadFrame();
//					MyApp::_imgAvailableForMerge = true;
//				}
//				else
//					continue;
//			}

			i++;
			TimeMeasurer tmPerStep;
			tmPerStep.tic();


			if (!receiveFeatures){
				coSLAM.grabReadFrame();
				coSLAM.featureTracking();
			}
			else{
				coSLAM.featureReceiving();
				coSLAM.virtualReadFrame();
			}
			for (int i = 0; i < coSLAM.numCams; i++){
				cv::Mat cvImg(coSLAM.slam[i].m_img.rows, coSLAM.slam[i].m_img.cols,
						CV_8UC1, coSLAM.slam[i].m_img.data);
				MyApp::s_camFrames[i].push_back(cvImg.clone());
				CamPoseItem* cam = coSLAM.slam[i].m_camPos.current();
				double ts = cam->ts;
				MyApp::s_camFramesTS[i].push_back(ts);
			}

			if(!coSLAM.poseUpdate(bEstPose))
				break;
			//Use redis to send over the poses


//			for (int i = 0; i < coSLAM.numCams; i++){
//				double org[3];
////				getCamCenter(coSLAM.slam[i].m_camPos.current(), org);
//				coSLAM.transformCamPose2Global(coSLAM.slam[i].m_camPos.current(), org);
//
//				MyApp::redis[i]->setPose(org[0], org[1], org[2]);
//
//				double ts = coSLAM.slam[i].m_camPos.current()->ts;
//				poseSent[i].push_back(ts);
//				poseSent[i].push_back(org[0]);
//				poseSent[i].push_back(org[1]);
//				poseSent[i].push_back(org[2]);
//				rosTime.push_back(ts);
//			}
//			if (MyApp::bStartMove){
//				MyApp::redis_start->setCommand("go");
//				MyApp::bStartMove = false;
//			}

			//coSLAM.pause();
			coSLAM.cameraGrouping();
			// robustly register existing 3D map points to 2D feature points
			coSLAM.activeMapPointsRegister(Const::PIXEL_ERR_VAR);

			TimeMeasurer tmNewMapPoints;
			tmNewMapPoints.tic();

			if (receiveFeatures){
				coSLAM.genNewMapPoints_new();
				if (MyApp::_mergeable)
					MyApp::triggerClients();
			}
			else
			{
				bool merge = false;
				coSLAM.genNewMapPoints(merge);
			}
//
//			coSLAM.m_tmNewMapPoints = tmNewMapPoints.toc();
//			cout << "coSLAM.m_tmNewMapPoints" << coSLAM.m_tmNewMapPoints << endl;

			//point registration
			coSLAM.currentMapPointsRegister(Const::PIXEL_ERR_VAR, i % 50 == 0);

			coSLAM.storeDynamicPoints();

			updateDisplayData();
			redrawAllViews();

//			Sleep(50);

			if (i % 500 == 0) {
				//coSLAM.releaseFeatPts(coSLAM.curFrame - 500);
				coSLAM.releaseKeyPoseImgs(coSLAM.curFrame - 500);
				coSLAM.m_lastReleaseFrm = coSLAM.curFrame;
			}

			coSLAM.m_tmPerStep = tmPerStep.toc();
			tmStepVec.push_back(coSLAM.m_tmPerStep);

			//Send pose
			for (int i = 0; i < coSLAM.numCams; i++)
			{
				if (coSLAM.slam[i].m_camPos.size() > 0){

					double org[3], rpy[3];
					CamPoseItem* cam = coSLAM.slam[i].m_camPos.current();
					double ts = cam->ts;
					getCamCenter(cam, org);
					coSLAM.transformCamPose2Global(cam, org, rpy);

					double targetPos[3];
					targetPos[0] = targetPos[1] = targetPos[2] = 0;
					double theta = 0;
					if(coSLAM.dynObjPresent){
						coSLAM.transformTargetPos2Global(cam->currDynPos, targetPos);
						//printf("targetPos: %lf %lf %lf\n", targetPos[0], targetPos[1], targetPos[2]);
						coSLAM.slam[i].projectTargetToCam(cam, cam->currDynPos);
						theta = atan2(coSLAM.slam[i]._targetPosInCam[0], coSLAM.slam[i]._targetPosInCam[2]);
//							double H = targetPos[2] * 2;
//							double Z = coSLAM.slam[i]._targetPosInCam[2];
						
						// Publish target pose
						MyApp::transformStampedMsg.transform.translation.x = targetPos[0];
						MyApp::transformStampedMsg.transform.translation.y = targetPos[1];
						//MyApp::transformStampedMsg.transform.translation.z = H;
						MyApp::transformStampedMsg.transform.rotation = tf::createQuaternionMsgFromRollPitchYaw(0, 0, theta);
						MyApp::transformStampedMsg.header.stamp = ros::Time(ts);
						MyApp::targetPosePub.publish(MyApp::transformStampedMsg);
						//MyApp::redis[i]->setPoseTarget(ts, 1, theta, org[0], org[1], rpy[2], targetPos[0], targetPos[1], 0.9);
//						MyApp::redis_dynObj->setDynObj(ts, targetPos[0], targetPos[1], 0.9);
//						printf("currDynPos: %lf %lf %lf\n", cam->currDynPos[0], cam->currDynPos[1], cam->currDynPos[2]);
					}

					// Publish camera pose
					MyApp::transformStampedMsg.transform.translation.x = org[0];
					MyApp::transformStampedMsg.transform.translation.y = org[1];
					//MyApp::transformStampedMsg.transform.translation.z = org[2];
					MyApp::transformStampedMsg.transform.rotation = tf::createQuaternionMsgFromRollPitchYaw(rpy[0], rpy[1], rpy[2]);
					MyApp::transformStampedMsg.header.stamp = ros::Time(ts);
					MyApp::posePubs[i].publish(MyApp::transformStampedMsg);
					
					// else {
					// 	MyApp::redis[i]->setPoseTarget(ts, 0, theta, org[0], org[1], rpy[2], targetPos[0], targetPos[1], 0.9);
					// }

                    // (Jacob) Huh?
					//MyApp::redis_dynObj->setDynObj(ts, 0.5, 2, 0.9);

					printf("targetPos: %lf %lf %lf\n", targetPos[0], targetPos[1], targetPos[2]);
					printf("org[2]: %lf %lf %lf\n", org[0], org[1], rpy[2]);
				}
			}
		}

		cout << " the result is saved at " << MyApp::timeStr << endl;
		coSLAM.exportResults(MyApp::timeStr);

		FILE* fid = fopen("/home/rui/coslamTime.txt","w");
		for (int i = 0; i < tmStepVec.size(); i++){
			fprintf(fid, "%lf\n", tmStepVec[i]);
		}
		fclose(fid);

//		FILE* fid = fopen("slam_timing.txt","w");
//		for (int i = 0; i < tmStepVec.size(); i++)
//			fprintf(fid, "%f\n", tmStepVec[i]);
//		fclose(fid);
//
//		fid = fopen("poseSent0.txt","w");
//		for (int i = 0; i < poseSent[0].size(); i = i + 4)
//			fprintf(fid, "%lf %lf %lf %lf\n", poseSent[0][i],
//					poseSent[0][i+1], poseSent[0][i+2], poseSent[0][i+3]);
//		fclose(fid);
//
//		fid = fopen("poseSent1.txt","w");
//		for (int i = 0; i < poseSent[1].size(); i = i + 4)
//			fprintf(fid, "%lf %lf %lf %lf\n", poseSent[1][i],
//					poseSent[1][i+1], poseSent[1][i+2], poseSent[1][i+3]);
//		fclose(fid);
//
//		FILE* fid = fopen("rosTime.txt","w");
//		for (int i = 0; i < rosTime.size(); i = i + coSLAM.numCams + 1){
//			for (int j = 0; j <= coSLAM.numCams; j++){
//				fprintf(fid, "%lf ", rosTime[i+j]);
//			}
//			fprintf(fid, "\n");
//		}
//		fclose(fid);

		logInfo("slam finished\n");
	} catch (SL_Exception& e) {
		logInfo(e.what());
	} catch (std::exception& e) {
#ifdef WIN32
		wxMessageBox(e.what());
#endif
		logInfo("%s\n", e.what());
		logInfo("slam failed!\n");
	}

	logInfo("\nslam stopped!\n");
	return false;
}
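For orientation: after the interactive map-initialization phase, each iteration of the main loop above grabs or receives a frame, tracks features, updates the camera poses (poseUpdate), regroups the cameras, registers existing and freshly generated map points, stores dynamic points, refreshes the display, and finally publishes the camera pose (and the target pose, when a dynamic object is present) over ROS.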
Example #9
void Simulation::Step() {
	// dispatch the new frame; this wraps up the following Viewer operations:
	//   advance() to the new frame
	//   eventTraversal() that collects events and passes them on to the event handlers and event callbacks
	//   updateTraversal() that calls the update callbacks
	//   renderingTraversals() that synchronizes all the rendering threads (if any) and dispatches cull, draw and swap-buffer operations

	int64 tmptime = cvGetTickCount();
	int64 difftime = (tmptime-loop_time_)/cvGetTickFrequency();
//	if(difftime < loop_target_time_){
//		//std::cout<<"sleeping: "<<loop_target_time_-difftime<<std::endl;
//		usleep(loop_target_time_-difftime);
//		return;
//	}
	loop_time_ = tmptime;



	viewer_.advance();
	viewer_.eventTraversal();
	viewer_.updateTraversal();
	viewer_.renderingTraversals();

//	osg::Matrix testmat = viewer_.getView(2)->getCamera()->getProjectionMatrix();
//	testmat = viewer_.getView(2)->getCamera()->getProjectionMatrix();
//	std::cout<<testmat(0,0)<<"  "<<testmat(0,1)<<"  "<<testmat(0,2)<<"  "<<testmat(0,3)<<std::endl;
//	std::cout<<testmat(1,0)<<"  "<<testmat(1,1)<<"  "<<testmat(1,2)<<"  "<<testmat(1,3)<<std::endl;
//	std::cout<<testmat(2,0)<<"  "<<testmat(2,1)<<"  "<<testmat(2,2)<<"  "<<testmat(2,3)<<std::endl;
//	std::cout<<testmat(3,0)<<"  "<<testmat(3,1)<<"  "<<testmat(3,2)<<"  "<<testmat(3,3)<<std::endl;
//	std::cout<<std::endl;




	viewer_.getView(0)->getCamera()->getViewMatrixAsLookAt(view_matrix_eye_, view_matrix_center_, view_matrix_up_, view_matrix_distance_);
	view_matrix_ = viewer_.getView(0)->getCamera()->getViewMatrix();

//	osg::Matrix windowMatrix = viewer_.getView(2)->getCamera()->getViewport()->computeWindowMatrix();
//	osg::Matrix projectionMatrix = viewer_.getView(2)->getCamera()->getProjectionMatrix();
//	osg::Matrix mat = projectionMatrix*windowMatrix;
//	std::cout<<"mat: "<<mat(0,0)<<" "<<mat(1,1)<<" "<<mat(2,0)<<" "<<mat(2,1)<<std::endl;
//	std::cout<<"eye: "<<view_matrix_eye_.x()<<" "<<view_matrix_eye_.y()<<" "<<view_matrix_eye_.z()<<std::endl;

	if(particles_on_){
		// The dynamic update of the particle filter.
		localisation_->dynamic(robotdata_->incremente_left_,robotdata_->incremente_right_);

		// Update of simulations view of particles.
		particle_view_->Update(localisation_->getParticles(), particle_visibility_ratio_);
	}

	//Picture handling
	if(screen_shot_callback_->isPicTaken() && !picture_processed_){
		osg::ref_ptr<osg::Image> osgImage = screen_shot_callback_->getImage();
		// Wrap the OSG buffer directly instead of allocating a Mat and re-pointing its data.
		cv::Mat cvImg(osgImage->t(), osgImage->s(), CV_8UC3, (uchar*)osgImage->data());
		cv::Mat cvCopyImg(osgImage->t(), osgImage->s(), CV_8UC3);
		cv::flip(cvImg, cvCopyImg, 0); // Flipping because of different origins
		observedImg_ = &cvCopyImg; // NOTE: points to a local Mat; only valid within this block

		// Write position, orientation and image to log file.
		data_to_file_writer_.WriteData(robotdata_->incremente_left_, robotdata_->incremente_right_,
												robotdata_->x_pos_, view_matrix_eye_[0], localisation_->getPosition().at(0),
												robotdata_->y_pos_, view_matrix_eye_[1], localisation_->getPosition().at(2),
												robotdata_->psi_, view_matrix_(0,0), localisation_->getOrientation().at(1), cvCopyImg);
		Observe();
		//		old version
		//		data_to_file_writer_.WritePlotData(	robotdata_->x_pos_, robotdata_->y_pos_, robotdata_->psi_,
		//											localisation_->getPosition().at(0), localisation_->getPosition().at(2),
		//											localisation_->getPosition().at(1)*3, localisation_->getPosition().at(3)*3, true);
		es_ = localisation_->getEstimatedRobotPose();
		data_to_file_writer_.WritePlotData(robotdata_->x_pos_, robotdata_->y_pos_,
				es_->x, es_->y,
				es_->sigmaXYLarge, es_->sigmaXYSmall,
				es_->sigmaXYAngle/M_PI*180, true);
		// observe for particle filter is done here.
		picture_processed_ = true;
	}
	else {
		// writes the current position and orientation of the robot to file.
		data_to_file_writer_.WriteData(robotdata_->incremente_left_, robotdata_->incremente_right_,
														robotdata_->x_pos_, view_matrix_eye_[0], localisation_->getPosition().at(0),
														robotdata_->y_pos_, view_matrix_eye_[1], localisation_->getPosition().at(2),
														robotdata_->psi_, view_matrix_(0,0), localisation_->getOrientation().at(1));

		es_ = localisation_->getEstimatedRobotPose();
		data_to_file_writer_.WritePlotData(	robotdata_->x_pos_, robotdata_->y_pos_,
											es_->x, es_->y,
											es_->sigmaXYLarge, es_->sigmaXYSmall,
											es_->sigmaXYAngle/M_PI*180);
	}

	UpdateHUD();

	// without pad_control pictures are taken every takepicture_intervall_ ms
//	if( (!pad_control_on_) && (0.001*(cvGetTickCount()-take_picture_timer_)/cvGetTickFrequency()>takepicture_intervall_) ){
//		std::cout<<"Sim: 'I queued a Shot!'"<<std::endl;
//		screen_shot_callback_->queueShot();
//		picture_processed_ = false;
//		take_picture_timer_ = cvGetTickCount();
//	}

	if(step_counter_ % takepicture_intervall_ == 0){
		screen_shot_callback_->queueShot();
		picture_processed_ = false;
		take_picture_timer_ = cvGetTickCount();
	}

	if(pad_control_on_){
		if(picture_processed_ && !take_picture_button_pressed_ && sixaxes_->buttonPressed(BUTTON_CIRCLE)){
				// Queue a shot to get a picture from the scene.
				screen_shot_callback_->queueShot();
				take_picture_button_pressed_ = true;
				picture_processed_ = false;
				take_picture_timer_ = cvGetTickCount();
		}

		if(!sixaxes_->buttonPressed(BUTTON_CIRCLE))
			take_picture_button_pressed_ = false;

		// If Padcontrol is enabled, the inputs from the pad are written to the robotData.
		// That way the callback will use the inputs to move the robot.
		if(sixaxes_->buttonPressed(BUTTON_L1) && sixaxes_->buttonPressed(BUTTON_R1)){
			robotdata_->speed_ = sixaxes_->axis(AXIS_LY)*-0.2;
			robotdata_->psi_speed_ = sixaxes_->axis(AXIS_RX)*-0.05;
		}
		else{
			robotdata_->speed_ = 0.0;
			robotdata_->psi_speed_ = 0.0;
		}
		if(sixaxes_->buttonPressed(BUTTON_TRIANGLE)){
			while(viewer_.areThreadsRunning()){
				viewer_.stopThreading();
			}
			viewer_.setDone(true);
		}
	}

	if(!trajectory_from_file_.empty()){
		// Replay a recorded trajectory: each step consumes four values
		// (picture flag, done flag, speed_, psi_speed_).
		double temp = trajectory_from_file_.front();
		if(temp<1){
			//screen_shot_callback_->queueShot();
			//picture_processed_ = false;
		}
		trajectory_from_file_.erase(trajectory_from_file_.begin());

		temp = trajectory_from_file_.front();
		if(temp>0){
			while(viewer_.areThreadsRunning()){
				viewer_.stopThreading();
			}
			viewer_.setDone(true);
		}
		trajectory_from_file_.erase(trajectory_from_file_.begin());

		temp = trajectory_from_file_.front();
		robotdata_->speed_ = temp;
		trajectory_from_file_.erase(trajectory_from_file_.begin());

		temp = trajectory_from_file_.front();
		robotdata_->psi_speed_ = temp;
		trajectory_from_file_.erase(trajectory_from_file_.begin());
	}
	else{
		trajectory_buffer_ << screen_shot_callback_->isPicTaken() << std::endl;
		trajectory_buffer_ << viewer_.done() << std::endl;
		trajectory_buffer_ << robotdata_->speed_ << std::endl;
		trajectory_buffer_ << robotdata_->psi_speed_ << std::endl;
	}
	//trajectory_buffer_ << "------------------" << std::endl;
	step_counter_++;
}
Example #10
void getNCCBlocks(const ImgG& scaledImg, Mat_d& pts, PtrVec<NCCBlock>& nccBlks,
		double scale) {
	if (pts.empty())
		return;

	nccBlks.clear();
	nccBlks.reserve(pts.rows * 2);

	int bw = 2 * SL_NCCBLK_HW + 1;

	if (scale == 1.0) {
		cv::Mat cvImg(scaledImg.rows, scaledImg.cols, CV_8UC1, scaledImg.data);
		cv::Size patchSize(bw, bw);
		cv::Mat patch;

		for (int i = 0; i < pts.rows; i++) {
			double x = pts.data[2 * i];
			double y = pts.data[2 * i + 1];
			cv::getRectSubPix(cvImg, patchSize, cv::Point2d(x, y), patch);
			NCCBlock* blk = new NCCBlock(0);
			/* copy image block */
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				blk->I[j] = patch.data[j];
			}
			/* compute NCC parameters */
			double a = 0, b = 0;
			blk->avgI = 0;
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				a += blk->I[j];
				b += double(blk->I[j]) * blk->I[j];
				blk->avgI += blk->I[j];
			}
			blk->A = a;
			blk->B = b;
			blk->C = 1 / sqrt(SL_NCCBLK_LEN * b - a * a);
			blk->avgI /= SL_NCCBLK_LEN;
			nccBlks.push_back(blk);
		}
	} else {
		cv::Mat cvOrgImg(scaledImg.rows, scaledImg.cols, CV_8UC1,
				scaledImg.data);
		cv::Mat cvImg;
		cv::resize(cvOrgImg, cvImg, cv::Size(), scale, scale);
		cv::Size patchSize(bw, bw);
		cv::Mat patch;

		for (int i = 0; i < pts.rows; i++) {
			double x = pts.data[2 * i] * scale;
			double y = pts.data[2 * i + 1] * scale;
			cv::getRectSubPix(cvImg, patchSize, cv::Point2d(x, y), patch);
			NCCBlock* blk = new NCCBlock(0);

			/* copy image block */
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				blk->I[j] = patch.data[j];
			}
			/* compute NCC parameters */
			double a = 0, b = 0;
			blk->avgI = 0;
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				a += blk->I[j];
				b += double(blk->I[j]) * blk->I[j];
				blk->avgI += blk->I[j];
			}
			blk->A = a;
			blk->B = b;
			blk->C = 1 / sqrt(SL_NCCBLK_LEN * b - a * a);
			blk->avgI /= SL_NCCBLK_LEN;
			nccBlks.push_back(blk);
		}
	}
}
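The patch-to-block conversion above is duplicated across both scale branches, and again in getNCCBlock (Example #6) and the overload in Example #11. A possible shared helper under the same NCCBlock layout (fillNCCBlock is hypothetical, not in the original source):

static void fillNCCBlock(const cv::Mat& patch, NCCBlock* blk) {
	/* copy the image block and accumulate the NCC sums in one pass */
	double a = 0, b = 0;
	for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
		blk->I[j] = patch.data[j];
		a += blk->I[j];
		b += double(blk->I[j]) * blk->I[j];
	}
	blk->A = a;
	blk->B = b;
	blk->C = 1 / sqrt(SL_NCCBLK_LEN * b - a * a);
	blk->avgI = a / SL_NCCBLK_LEN;
}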
Example #11
void getNCCBlocks(const ImgG& scaledImg,
		const std::vector<FeaturePoint*>& vecFeatPts, PtrVec<NCCBlock>& nccBlks,
		double scale) {
	if (vecFeatPts.empty())
		return;

	nccBlks.clear();
	nccBlks.reserve(vecFeatPts.size() * 2);
	int bw = 2 * SL_NCCBLK_HW + 1;

	if (scale == 1.0) {
		cv::Mat cvImg(scaledImg.rows, scaledImg.cols, CV_8UC1, scaledImg.data);
		cv::Size patchSize(bw, bw);
		cv::Mat patch;

		for (size_t i = 0; i < vecFeatPts.size(); i++) {
			FeaturePoint* pFt = vecFeatPts[i];
			cv::getRectSubPix(cvImg, patchSize, cv::Point2d(pFt->xo, pFt->yo),
					patch);
			NCCBlock* blk = new NCCBlock(pFt);
			/* copy image block */
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				blk->I[j] = patch.data[j];
			}
			/* compute NCC parameters */
			double a = 0, b = 0;
			blk->avgI = 0;
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				a += blk->I[j];
				b += double(blk->I[j]) * blk->I[j];
				blk->avgI += blk->I[j];
			}
			blk->A = a;
			blk->B = b;
			blk->C = 1 / sqrt(SL_NCCBLK_LEN * b - a * a);
			blk->avgI /= SL_NCCBLK_LEN;
			nccBlks.push_back(blk);
		}
	} else {
		cv::Mat cvOrgImg(scaledImg.rows, scaledImg.cols, CV_8UC1,
				scaledImg.data);
		cv::Mat cvImg;
		cv::resize(cvOrgImg, cvImg, cv::Size(), scale, scale);

		cv::Size patchSize(bw, bw);
		cv::Mat patch;

		for (size_t i = 0; i < vecFeatPts.size(); i++) {
			FeaturePoint* pFt = vecFeatPts[i];
			cv::getRectSubPix(cvImg, patchSize,
					cv::Point2d(pFt->xo * scale, pFt->yo * scale), patch);
			NCCBlock* blk = new NCCBlock(pFt);
			/* copy image block */
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				blk->I[j] = patch.data[j];
			}
			/* compute NCC parameters */
			double a = 0, b = 0;
			blk->avgI = 0;
			for (int j = 0; j < SL_NCCBLK_LEN; ++j) {
				a += blk->I[j];
				b += double(blk->I[j]) * blk->I[j];
				blk->avgI += blk->I[j];
			}
			blk->A = a;
			blk->B = b;
			blk->C = 1 / sqrt(SL_NCCBLK_LEN * b - a * a);
			blk->avgI /= SL_NCCBLK_LEN;
			nccBlks.push_back(blk);
		}
	}
}
Example #12
void drawLine(ImgRGB& img, double x0, double y0, double x1, double y1, uchar r,
		uchar g, uchar b) {

	cv::Mat cvImg(img.h, img.w, CV_8UC3, img.data);
	cv::line(cvImg, cv::Point2d(x0, y0), cv::Point2d(x1, y1), cv::Scalar(r, g, b));
}