Example No. 1
void PartFilter::computeDistance(int partition)
{
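	// For every model, accumulate the squared (identity-covariance) Mahalanobis
	// distance between the model's joint positions and the observed joints that
	// belong to the requested partition; the result is stored in mCurrentDistances.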
	std::multimap<int, std::string>::iterator it;
	for (int i=0 ; i<mModels.size() ; i++)
	{
		double distance=0;
		// Compute the range of joint names mapped to this partition once, instead of
		// calling equal_range on every loop iteration.
		std::pair<std::multimap<int, std::string>::iterator, std::multimap<int, std::string>::iterator> range = mOffsetPartToName[i].equal_range(partition);
		for (it = range.first ; it != range.second ; ++it)
		{
			if (mJointNameToPos[(*it).second] != -1)
			{
				int pos = mJointNameToPos[(*it).second];
				double distTemp=0;
				// Mahalanobis distance
				//cout << (*it).second << "=>" << mPosNames[pos] << endl;
				Eigen::Vector3d jtPos = mModels[i]->getJoint((*it).second)->getXYZVect();
				Eigen::Vector3d jtObs(mCurrentFrame[pos][1], mCurrentFrame[pos][2], mCurrentFrame[pos][3]);
				Eigen::Vector3d diff = jtPos - jtObs;
				Eigen::Matrix3d cov;
				cov.setIdentity();
				distTemp = diff.transpose()*(cov*diff);
				distance += distTemp;
			}
		}
		mCurrentDistances[i] = distance;
	}
}
Example No. 2
void sdh_moveto_cb(boost::shared_ptr<std::string> data){
    Eigen::Vector3d p,o;
    p.setZero();
    o.setZero();
    p(0) = right_newP_x;
    p(1) = right_newP_y;
    p(2) = right_newP_z;

    Eigen::Vector3d desired_euler;
    Eigen::Matrix3d Ident;
    desired_euler.setZero();
    Ident.setIdentity();
    desired_euler(0) = 0;
    desired_euler(1) = -1*M_PI;
    desired_euler(2) = 0.5*M_PI;

    o = euler2axisangle(desired_euler,Ident);

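    // Swap in a new controller and task for the right arm; the actuation mutex
    // protects these vectors against concurrent access.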
    mutex_act.lock();
    right_ac_vec.clear();
    right_task_vec.clear();
    right_ac_vec.push_back(new ProActController(*right_pm));
    right_task_vec.push_back(new KukaSelfCtrlTask(RP_NOCONTROL));
    right_task_vec.back()->mt = JOINTS;
    right_task_vec.back()->mft = GLOBAL;
    right_task_vec.back()->set_desired_p_eigen(p);
    right_task_vec.back()->set_desired_o_ax(o);
    mutex_act.unlock();
    std::cout<<"kuka sdh self movement and move to new pose"<<std::endl;

}
Example No. 3
void PartFilter::computeDistance()
{
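	// Variant over all tracked joints (no partition argument): accumulate the
	// squared distance per model and store its square root in mCurrentDistances.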
	std::map<std::string, int>::iterator it;
	for (int i=0 ; i<mModels.size() ; i++)
	{
		double distance=0;
		for (it = mJointNameToPos.begin() ; it != mJointNameToPos.end() ; it++)
		{
			if ((*it).second != -1)
			{
				// Mahalanobis distance
				Eigen::Vector3d jtPos = mModels[i]->getJoint((*it).first)->getXYZVect();
				Eigen::Vector3d jtObs(mCurrentFrame[(*it).second][1], mCurrentFrame[(*it).second][2], mCurrentFrame[(*it).second][3]);
				Eigen::Vector3d diff = jtPos - jtObs;
				Eigen::Matrix3d cov;
				cov.setIdentity();
				distance += diff.transpose()*(cov*diff);
			}
		}
		mCurrentDistances[i] = sqrt(distance);
	}
}
Example No. 4
void EKFOA::process(const double delta_t, cv::Mat & frame, Eigen::Vector3d & rW, Eigen::Vector4d & qWR, Eigen::Matrix3d & axes_orientation_and_confidence, std::vector<Point3d> (& XYZs)[3], Delaunay & triangulation, Point3d & closest_point){
	double time_total;
	std::vector<cv::Point2f> features_to_add;
	std::vector<Features_extra> features_extra;

	/*
	 * EKF prediction (state and measurement prediction)
	 */
	time_total = (double)cv::getTickCount();
	double time_prediction = (double)cv::getTickCount();
	filter.predict_state_and_covariance(delta_t);
	filter.compute_features_h(cam, features_extra);
	time_prediction = (double)cv::getTickCount() - time_prediction;
//	std::cout << "predict = " << time_prediction/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	/*
	 * Sense and map management (delete features from EKF)
	 */
	double time_tracker = (double)cv::getTickCount();
	motion_tracker.process(frame, features_extra, features_to_add);
	//TODO: Why is optical flow returning points outside the image???
	time_tracker = (double)cv::getTickCount() - time_tracker;

	time_total = time_total + time_tracker; //do not count the time spent by the tracker

	//Delete no longer seen features from the state, covariance matrix and the features_extra:
	double time_del = (double)cv::getTickCount();
	filter.delete_features(features_extra);
	time_del = (double)cv::getTickCount() - time_del;
//	std::cout << "delete  = " << time_del/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	/*
	 * EKF Update step and map management (add new features to EKF)
	 */
	double time_update = (double)cv::getTickCount();
	filter.update(cam, features_extra);
	time_update = (double)cv::getTickCount() - time_update;
//	std::cout << "update  = " << time_update/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;


	//Add new features
	double time_add = (double)cv::getTickCount();
	filter.add_features_inverse_depth(cam, features_to_add);
	time_add = (double)cv::getTickCount() - time_add;
//	std::cout << "add_fea = " << time_add/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;


	/*
	 * Triangulation, surface and GUI data setting:
	 */
	double time_triangulation = (double)cv::getTickCount();

	std::vector< std::pair<Point2d, size_t> > triangle_list;
	std::list<Triangle> triangles_list_3d;

	const Eigen::VectorXd & x_k_k = filter.x_k_k();
	const Eigen::MatrixXd & p_k_k = filter.p_k_k();

	//Set the position, so the GUI can draw it:
	rW = x_k_k.segment<3>(0);//current position

	//Set the axes orientation and confidence:
	axes_orientation_and_confidence.setIdentity();//axes_orientation_and_confidence stores in each column one axis (X, Y, Z)
	axes_orientation_and_confidence *= 5; //make the lines larger, so they are actually informative
	//Apply rotation matrix:
	Eigen::Matrix3d qWR_R;//Rotation matrix of current orientation quaternion
	qWR = x_k_k.segment<4>(3);
	MotionModel::quaternion_matrix(qWR, qWR_R);
	axes_orientation_and_confidence.applyOnTheLeft(qWR_R); // == R * axes_orientation_and_confidence
	for (int axis=0 ; axis<axes_orientation_and_confidence.cols() ; axis++){
		//Set the length to be 3*sigma:
		axes_orientation_and_confidence.col(axis) *= 3*std::sqrt(p_k_k(axis, axis)); //the first 3 positions of the cov matrix define the confidence for the position
		//Translate origin:
		axes_orientation_and_confidence.col(axis) += rW;
	}

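	// The filter state packs a 13-entry camera block first (rW at index 0, qWR at
	// index 3), followed by 6 entries per inverse-depth feature.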
	int num_features = (x_k_k.rows()-13)/6;
	XYZs[0].resize(num_features);
	XYZs[1].resize(num_features);
	XYZs[2].resize(num_features);


	//Compute the 3d positions and inverse depth variances of all the points in the state
	int i=0; //Feature counter
	for (int start_feature=13 ; start_feature<x_k_k.rows() ; start_feature+=6){
		const int feature_inv_depth_index = start_feature + 5;

		//As with any normal distribution, nearly all (99.73%) of the possible depths lie within three standard deviations of the mean!
		const double sigma_3 = std::sqrt(p_k_k(feature_inv_depth_index, feature_inv_depth_index)); //sqrt(depth_variance)

		const Eigen::VectorXd & yi = x_k_k.segment(start_feature, 6);
		Eigen::VectorXd point_close(x_k_k.segment(start_feature, 6));
		Eigen::VectorXd point_far(x_k_k.segment(start_feature, 6));

		//Change the depth of the feature copy, so that it is possible to represent the range between -3*sigma and 3*sigma:
		point_close(5) += sigma_3;
		point_far(5) -= sigma_3;

		Eigen::Vector3d XYZ_mu = (Feature::compute_cartesian(yi)); //mu (mean)
		Eigen::Vector3d XYZ_close = (Feature::compute_cartesian(point_close)); //mean + 3*sigma in inverse depth, i.e. the closest plausible point (the sign flips because depth is parametrized inversely)
		Eigen::Vector3d XYZ_far = (Feature::compute_cartesian(point_far)); //mean - 3*sigma

		//The center of the model is ALWAYS the current position of the camera/robot, so have to 'cancel' the current orientation (R_inv) and translation (rWC = x_k_k.head(3)):
		//Note: It is nicer to do this in the GUI class, as it is only a presentation/perspective change. But due to the structure, it was easier to do it here.
		XYZs[0][i] = Point3d(XYZ_mu(0), XYZ_mu(1), XYZ_mu(2)); //mu (mean)
		XYZs[1][i] = Point3d(XYZ_close(0), XYZ_close(1), XYZ_close(2)); //mean + 3*sigma in inverse depth (closest plausible point)
		XYZs[2][i] = Point3d(XYZ_far(0), XYZ_far(1), XYZ_far(2)); //mean - 3*sigma

		//If the size that contains the 99.73% of the inverse depth distribution is smaller than the current inverse depth, add it to the surface:
		const double size_sigma_3 = std::abs(1.0/(x_k_k(feature_inv_depth_index)-sigma_3) - 1.0/(x_k_k(feature_inv_depth_index)+sigma_3));
		if (size_sigma_3 < 1/x_k_k(feature_inv_depth_index)){
			triangle_list.push_back(std::make_pair(Point2d(features_extra[i].z(0), features_extra[i].z(1)), i));
		}

		if (x_k_k(feature_inv_depth_index) < 0 ){
			std::cout << "feature behind the camera!!! : idx=" << i << ", value=" << x_k_k(feature_inv_depth_index) << std::endl;
		}
		i++;
	}

	triangulation.insert(triangle_list.begin(), triangle_list.end());

	cv::Scalar delaunay_color = cv::Scalar(255, 0, 0); //blue
	for(Delaunay::Finite_faces_iterator fit = triangulation.finite_faces_begin(); fit != triangulation.finite_faces_end(); ++fit) {
		const Delaunay::Face_handle & face = fit;
		//face->vertex(i)->info() = index of the point in the observation list.
		line(frame, features_extra[face->vertex(0)->info()].z_cv, features_extra[face->vertex(1)->info()].z_cv, delaunay_color, 1);
		line(frame, features_extra[face->vertex(1)->info()].z_cv, features_extra[face->vertex(2)->info()].z_cv, delaunay_color, 1);
		line(frame, features_extra[face->vertex(2)->info()].z_cv, features_extra[face->vertex(0)->info()].z_cv, delaunay_color, 1);

		//Add the face of the linked 3d points of this 2d triangle:
		triangles_list_3d.push_back(Triangle(XYZs[1][face->vertex(0)->info()], XYZs[1][face->vertex(1)->info()], XYZs[1][face->vertex(2)->info()])); //XYZs[1] == close
	}

	// constructs AABB tree
	Tree tree(triangles_list_3d.begin(), triangles_list_3d.end());

	if (tree.size()>0){
		// compute closest point and squared distance
		Point3d point_query(rW[0], rW[1], rW[2]);
		closest_point = tree.closest_point(point_query);
//		FT sqd = tree.squared_distance(point_query);

		Eigen::Vector3d last_displacement_vector = last_position - rW;

//		double repealing_force = 0;
//		if (std::sqrt(sqd) < 0.2){
//			std::cout << "can crash! " << std::endl;
//			repealing_force = 1/std::sqrt(sqd);
//		}
//		std::cout << "distance = [distance, " << std::sqrt(sqd) << "];" << std::endl;
//		std::cout << "repealing_force = [repealing_force, " << repealing_force << "];" << std::endl;
	}


	//remember this position
	last_position = rW;
//	std::cout << "certaint= " << p_k_k.diagonal().sum() << std::endl;

	time_triangulation = (double)cv::getTickCount() - time_triangulation;
//	std::cout << "Triang  = " << time_triangulation/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

	time_total = (double)cv::getTickCount() - time_total;
//	std::cout << "EKF     = " << time_total/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

//	std::cout << "tracker = " << time_tracker/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;
}
Example No. 5
int Hsh::loadData(FILE* file, int width, int height, int basisTerm, bool urti, CallBackPos * cb,const QString& text)
{
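	// Load the HSH coefficient planes from file, dequantize them with the per-term
	// gmin/gmax ranges, build the mip-map pyramid and estimate per-pixel normals.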
	type = "HSH";
	w = width;
	h = height;

	ordlen = basisTerm;
	bands = 3;
	fread(gmin, sizeof(float), basisTerm, file);
	fread(gmax, sizeof(float), basisTerm, file);

	if (feof(file))
		return -1;

	int offset = 0;

	int size = w * h * basisTerm;
	float* redPtr = new float[size];
	float* greenPtr = new float[size];
	float* bluePtr = new float[size];
	
	unsigned char c;

	if (!urti)
	{
		for(int j = 0; j < h; j++)
		{
			if (cb != NULL && j % 50 == 0)(*cb)(j * 50.0 / h, text);
			for(int i = 0; i < w; i++)
			{				
				offset = j * w + i;
				for (int k = 0; k < basisTerm; k++)
				{
					if (feof(file))
						return -1;
					fread(&c, sizeof(unsigned char), 1, file);
					redPtr[offset*basisTerm + k] = (((float)c) / 255.0) * (gmax[k] - gmin[k]) + gmin[k];
				}
				for (int k = 0; k < basisTerm; k++)
				{
					if (feof(file))
						return -1;
					fread(&c, sizeof(unsigned char), 1, file);
					greenPtr[offset*basisTerm + k] = (((float)c) / 255.0) * (gmax[k] - gmin[k]) + gmin[k];
				}
				for (int k = 0; k < basisTerm; k++)
				{
					if (feof(file))
						return -1;
					fread(&c, sizeof(unsigned char), 1, file);
					bluePtr[offset*basisTerm + k] = (((float)c) / 255.0) * (gmax[k] - gmin[k]) + gmin[k];
				}
			}
		}
	}
	else
	{
		for(int j = 0; j < h; j++)
		{
			if (cb != NULL && j % 50 == 0)(*cb)(j * 50.0 / h, text);
			for(int i = 0; i < w; i++)
			{				
				offset = j * w + i;
				for (int k = 0; k < basisTerm; k++)
				{
					if (feof(file))
						return -1;
					fread(&c, sizeof(unsigned char), 1, file);
					redPtr[offset*basisTerm + k] = (((float)c) / 255.0) * gmin[k] + gmax[k];
				}
				for (int k = 0; k < basisTerm; k++)
				{
					if (feof(file))
						return -1;
					fread(&c, sizeof(unsigned char), 1, file);
					greenPtr[offset*basisTerm + k] = (((float)c) / 255.0) * gmin[k] + gmax[k];
				}
				for (int k = 0; k < basisTerm; k++)
				{
					if (feof(file))
						return -1;
					fread(&c, sizeof(unsigned char), 1, file);
					bluePtr[offset*basisTerm + k] = (((float)c) / 255.0) * gmin[k] + gmax[k];
				}
			}
		}
	}
	
	fclose(file);

	mipMapSize[0] = QSize(w, h);

	redCoefficients.setLevel(redPtr, size, 0);
	greenCoefficients.setLevel(greenPtr, size, 0);
	blueCoefficients.setLevel(bluePtr, size, 0);
	
	// Computes mip-mapping.
	if (cb != NULL)	(*cb)(50, "Mip mapping generation...");
	
	for (int level = 1; level < MIP_MAPPING_LEVELS; level++)
	{
		int width = mipMapSize[level - 1].width();
		int height = mipMapSize[level - 1].height();
		int width2 = ceil(width / 2.0);
		int height2 = ceil(height / 2.0);
		size = width2*height2*basisTerm;
		redCoefficients.setLevel(new float[size], size, level);
		greenCoefficients.setLevel(new float[size], size, level);
		blueCoefficients.setLevel(new float[size], size, level);
		#pragma omp parallel for
		for (int i = 0; i < height - 1; i+=2)
		{
			// Thread-local id: only thread 0 reports progress through the callback.
			int th_id = omp_get_thread_num();
			if (th_id == 0)
			{
				if (cb != NULL && i % 50 == 0)	(*cb)(50 + (level-1)*8 + i*8.0/height, "Mip mapping generation...");
			}
			for (int j = 0; j < width - 1; j+=2)
			{
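				// Collapse the 2x2 block of source texels (index1..index4) into one
				// texel of the next mip level, one basis term at a time.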
				int index1 = (i * width + j)*ordlen;
				int index2 = (i * width + j + 1)*ordlen;
				int index3 = ((i + 1) * width + j)*ordlen;
				int index4 = ((i + 1) * width + j + 1)*ordlen;
				int offset = (i/2 * width2 + j/2)*ordlen;
				for (int k = 0; k < basisTerm; k++)
				{
					redCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k, index3 + k , index4 + k);
					greenCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k, index3 + k , index4 + k);
					blueCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k, index3 + k , index4 + k);
				}
			}
		}
		// If the source width is odd, the last column has no horizontal neighbour and
		// is reduced vertically only.
		if (width % 2 != 0)
		{
			for (int i = 0; i < height - 1; i+=2)
			{
				int index1 = ((i + 1) * width - 1)*ordlen;
				int index2 = ((i + 2) * width - 1)*ordlen;
				int offset = ((i/2 + 1) * width2 - 1)*ordlen;
				for (int k = 0; k < basisTerm; k++)
				{
					redCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
					greenCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
					blueCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
				}
			}
		}
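		// If the source height is odd, the last row has no vertical neighbour and is
		// reduced horizontally only.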
		if (height % 2 != 0)
		{
			for (int i = 0; i < width - 1; i+=2)
			{
				int index1 = ((height - 1) * width + i)*ordlen;
				int index2 = ((height - 1) * width + i + 1)*ordlen;
				int offset = ((height2 - 1) * width2 + i/2)*ordlen;
				for (int k = 0; k < basisTerm; k++)
				{
					redCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
					greenCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
					blueCoefficients.calcMipMapping(level, offset + k, index1 + k, index2 + k);
				}
			}
		}
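		// If both dimensions are odd, the bottom-right texel maps from a single
		// source texel.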
		if (height % 2 != 0 && width % 2 != 0)
		{
			int index1 = (height*width - 1)*ordlen;
			int offset = (height2*width2 - 1)*ordlen;
			for (int k = 0; k < basisTerm; k++)
			{
				redCoefficients.calcMipMapping(level, offset + k, index1 + k);
				greenCoefficients.calcMipMapping(level, offset + k, index1 + k);
				blueCoefficients.calcMipMapping(level, offset + k, index1 + k);
			}
		}
		mipMapSize[level] = QSize(width2, height2);
	}

	//Compute normals.
	if (cb != NULL) (*cb)(75 , "Normals generation...");
	Eigen::Vector3d l0(sin(M_PI/4)*cos(M_PI/6), sin(M_PI/4)*sin(M_PI/6), cos(M_PI/4));
	Eigen::Vector3d l1(sin(M_PI/4)*cos(5*M_PI / 6), sin(M_PI/4)*sin(5*M_PI / 6), cos(M_PI/4));
	Eigen::Vector3d l2(sin(M_PI/4)*cos(3*M_PI / 2), sin(M_PI/4)*sin(3*M_PI / 2), cos(M_PI/4));
	float hweights0[16], hweights1[16], hweights2[16];
	getHSH(M_PI / 4, M_PI / 6, hweights0, ordlen);
	getHSH(M_PI / 4, 5*M_PI / 6, hweights1, ordlen);
	getHSH(M_PI / 4, 3*M_PI / 2, hweights2, ordlen);
	
	
	Eigen::Matrix3d L;
	L.setIdentity();
	L.row(0) = l0;
	L.row(1) = l1;
	L.row(2) = l2;
	Eigen::Matrix3d LInverse = L.inverse();
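	// LInverse maps the intensities rendered under the three lights back to a
	// surface normal (photometric-stereo style: solve L * n = f per pixel).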
	
	for (int level = 0; level < MIP_MAPPING_LEVELS; level++)
	{
		const float* rPtr = redCoefficients.getLevel(level);
		const float* gPtr = greenCoefficients.getLevel(level);
		const float* bPtr = blueCoefficients.getLevel(level);
		vcg::Point3f* temp = new vcg::Point3f[mipMapSize[level].width()*mipMapSize[level].height()];
		if (cb != NULL) (*cb)(75 + level*6, "Normal generation...");

		#pragma omp parallel for
		for (int y = 0; y < mipMapSize[level].height(); y++)
		{
			for (int x = 0; x < mipMapSize[level].width(); x++)
			{
				int offset= y * mipMapSize[level].width() + x;
				Eigen::Vector3d f(0, 0, 0);
				for (int k = 0; k < ordlen; k++)
				{
					f(0) += rPtr[offset*ordlen + k] * hweights0[k];
					f(1) += rPtr[offset*ordlen + k] * hweights1[k];
					f(2) += rPtr[offset*ordlen + k] * hweights2[k];
				}
				for (int k = 0; k < ordlen; k++)
				{
					f(0) += gPtr[offset*ordlen + k] * hweights0[k];
					f(1) += gPtr[offset*ordlen + k] * hweights1[k];
					f(2) += gPtr[offset*ordlen + k] * hweights2[k];
				}
				for (int k = 0; k < ordlen; k++)
				{
					f(0) += bPtr[offset*ordlen + k] * hweights0[k];
					f(1) += bPtr[offset*ordlen + k] * hweights1[k];
					f(2) += bPtr[offset*ordlen + k] * hweights2[k];
				}
				f /= 3.0;
				Eigen::Vector3d normal = LInverse * f;
				temp[offset] = vcg::Point3f(normal(0), normal(1), normal(2));
				temp[offset].Normalize();
			}
		}
		normals.setLevel(temp, mipMapSize[level].width()*mipMapSize[level].height(), level);
	}
	

	return 0;

}