virtual void calc()
{
    if( fn_bind() )  // avoid GPF!
    {
        if( fn_calc() )  // check or recalc input
        {
            glm::mat4 camera_world_position(1.f);  // camera world position
            glm::mat4 camera_view(1.f);            // camera view transformation

            // load camera world position from gdom "wpos"
            set_glm_mat4_from_m4(camera_world_position, mp_wpos);  // init glm-variable from gx-gdom-field

            // GetViewMatrix()
            const float* cwp = glm::value_ptr(camera_world_position);  // used as float[16]
            glm::vec3 camera_position(cwp[12], cwp[13], cwp[14]);      // cwp translation
            glm::vec3 z(cwp[ 2], cwp[ 6], cwp[10]);                    // cwp z-vector
            glm::vec3 look_at_point = camera_position + z;
            glm::vec3 up_vector(cwp[ 1], cwp[ 5], cwp[ 9]);            // cwp y-vector
            camera_view = glm::lookAt(camera_position, look_at_point, up_vector);

            // save camera_view to gdom "view"
            set_m4_from_glm_mat4(mp_view, camera_view);

            // reset state to GOOD
            return gx::fn::calc_success();
        }
        else
        {
            return gx::fn::fn_calc_failed();
        }
    }
    else
    {
        return gx::fn::fn_bind_failed();
    }
}
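The core of the snippet above is turning a camera world matrix into a view matrix by extracting eye, target, and up vectors and passing them to glm::lookAt. Below is a minimal, self-contained sketch of just that step, assuming a column-major glm::mat4 whose columns hold the camera's basis vectors; set_glm_mat4_from_m4, mp_wpos, and the other gx/gdom pieces are specific to the original codebase and are omitted here.

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Build a view matrix from a camera world matrix (column-major, as GLM stores it).
glm::mat4 view_from_world(const glm::mat4& camera_world)
{
    glm::vec3 position = glm::vec3(camera_world[3]); // translation column
    glm::vec3 forward  = glm::vec3(camera_world[2]); // local +Z axis in world space
    glm::vec3 up       = glm::vec3(camera_world[1]); // local +Y axis in world space
    return glm::lookAt(position, position + forward, up);
}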
void scan_callback(const sensor_msgs::PointCloud2ConstPtr &cloud_msg)
{
  // local variables
  double likelihood = 0.0;
  double height = 0.0;
  int id = 1;
  Fuzzy::FuzzyInstance instance;
  std::vector<pcl::PointCloud<PointT> >::iterator cluster_iter;
  visualization_msgs::MarkerArray marker_array;
  impera_shape_msgs::CylinderShapeBag cylinder_shape_bag;
  pcl::ModelCoefficients coefficients;
  pcl::PointCloud<PointT> input_cloud;  // the raw input point cloud
  std::vector<pcl::PointCloud<PointT> > clusters;
  Fuzzy::FuzzyDLProducer producer;

  // take the same frame id as the original cloud
  if (cloud_msg->header.frame_id.empty())
    frame_id_ = "/base_link";
  else
    frame_id_ = cloud_msg->header.frame_id;

  pcl::fromROSMsg(*cloud_msg, input_cloud);
  ROS_INFO("Cylinder segmentation received %d points", (int)input_cloud.size());

  // clusterize point cloud before applying RANSAC
  this->segment_clusters(boost::make_shared<pcl::PointCloud<PointT> >(input_cloud), clusters);

  data_file_.open("cluster_features.txt");
  for (cluster_iter = clusters.begin(); cluster_iter != clusters.end(); cluster_iter++)
  {
    impera_shape_msgs::CylinderShape cylinder_shape;
    PointT maxPointOnAxis;
    PointT minPointOnAxis;

    likelihood = get_cylinder_likelihood(boost::make_shared<pcl::PointCloud<PointT> >(*cluster_iter), coefficients);
    if (coefficients.values.size() != 7)
    {
      // check if model is found, otherwise continue
      continue;
    }
    // Debug coefficients
    //std::cout << "Coefficients: " << coefficients << std::endl;

    PointT pointOnAxis(coefficients.values[0], coefficients.values[1], coefficients.values[2]);
    tf::Vector3 axis_vector(coefficients.values[3], coefficients.values[4], coefficients.values[5]);
    height = get_cylinder_height(boost::make_shared<pcl::PointCloud<PointT> >(*cluster_iter),
                                 &maxPointOnAxis, &minPointOnAxis, coefficients);

    // calculate the orientation of the cylinder in quaternions
    tf::Vector3 up_vector(0.0, 0.0, 1.0);
    tf::Vector3 right_vector = axis_vector.cross(up_vector);
    right_vector.normalize();
    tf::Quaternion q(right_vector, -1.0*acos(axis_vector.dot(up_vector)));
    q.normalize();

    geometry_msgs::Quaternion cylinder_orientation;
    geometry_msgs::Point cylinder_position;
    geometry_msgs::Pose label_pose;
    label_pose.orientation.w = 1.0;
    tf::quaternionTFToMsg(q, cylinder_orientation);

    cylinder_position.x = (maxPointOnAxis.x + minPointOnAxis.x) / 2.0;
    cylinder_position.y = (maxPointOnAxis.y + minPointOnAxis.y) / 2.0;
    cylinder_position.z = (maxPointOnAxis.z + minPointOnAxis.z) / 2.0;
    label_pose.position.x = maxPointOnAxis.x;
    label_pose.position.y = maxPointOnAxis.y;
    label_pose.position.z = maxPointOnAxis.z + 0.30;

    if (likelihood >= min_likelihood_)
    {
      visualization_msgs::Marker marker;
      visualization_msgs::Marker label;
      marker.ns = std::string("cylinders");
      marker.header.frame_id = frame_id_;
      marker.id = id;
      marker.type = visualization_msgs::Marker::CYLINDER;
      marker.action = visualization_msgs::Marker::ADD;
      marker.scale.x = coefficients.values[6]*2.0;
      marker.scale.y = coefficients.values[6]*2.0;
      marker.scale.z = height;
      marker.color.g = 0.5;
      marker.color.r = 0.5;
      marker.color.b = 0.0;
      marker.color.a = 0.8;
      marker.pose.orientation = cylinder_orientation;
      marker.pose.position = cylinder_position;
      marker.lifetime = ros::Duration(10);
      marker.header.stamp = ros::Time::now();

      label.ns = std::string("labels");
      label.header.frame_id = frame_id_;
      label.id = id;
      label.type = visualization_msgs::Marker::TEXT_VIEW_FACING;
      label.action = visualization_msgs::Marker::ADD;
      label.text = std::string("Object " + boost::lexical_cast<std::string>(id));
      label.scale.x = 0.2;
      label.scale.y = 0.2;
      label.scale.z = 0.2;
      label.color.g = 1.0;
      label.color.r = 1.0;
      label.color.b = 0.0;
      label.color.a = 1.0;
      label.pose = label_pose;
      label.lifetime = ros::Duration(10);
      label.header.stamp = ros::Time::now();
      marker_array.markers.push_back(marker);
      marker_array.markers.push_back(label);

      //TODO send correct features as a shape
      cylinder_shape.height = height;
      // Take the base of the object as the position for the reasoner. It is important
      // where the object is standing.
      cylinder_shape.position.x = cylinder_position.x;
      cylinder_shape.position.y = cylinder_position.y;
      cylinder_shape.position.z = cylinder_position.z;
      cylinder_shape.orientation.x = 1;
      cylinder_shape.orientation.y = 0;
      cylinder_shape.orientation.z = 0;
      cylinder_shape.likelihood = likelihood;
      cylinder_shape.radius = coefficients.values[6];
      cylinder_shape_bag.cylinder_shape_bag.push_back(cylinder_shape);

      instance.name = std::string("cylinder" + boost::lexical_cast<std::string>(id));
      instance.hasCylinderType = likelihood;
      instance.hasCylinderHeight = cylinder_shape.height;
      instance.hasCylinderRadius = cylinder_shape.radius;
      instance.hasX = cylinder_shape.position.x;
      instance.hasY = cylinder_shape.position.y;
      instance.hasZ = cylinder_shape.position.z;
      instance.isOthogonal = (1.0 - std::max(fabs(tf::Vector3(1.0, 0.0, 0).dot(axis_vector)),
                                             fabs(tf::Vector3(0.0, 1.0, 0).dot(axis_vector))));
      instance.isParallel = std::max(fabs(tf::Vector3(1.0, 0.0, 0).dot(axis_vector)),
                                     fabs(tf::Vector3(0.0, 1.0, 0).dot(axis_vector)));
      //instance.isOthogonal = 1.0 - tf::Vector3(1.0, 1.0, 0).dot(axis_vector);
      //instance.isParallel = tf::Vector3(1.0, 1.0, 0).dot(axis_vector);
      producer.AddInstance(instance);

      // write extracted features
      data_file_ << "Object Nr: " << id << std::endl;
      data_file_ << "Height: " << cylinder_shape.height << std::endl;
      data_file_ << "Radius: " << cylinder_shape.radius << std::endl;
      data_file_ << "Position: " << cylinder_shape.position.x << "," << cylinder_shape.position.y << "," << cylinder_shape.position.z << std::endl;
      data_file_ << "Orientation: " << axis_vector.getX() << "," << axis_vector.getY() << "," << axis_vector.getZ() << std::endl;
      data_file_ << "##############################" << std::endl;
      id++;
      pub_cylinder_marker_.publish(marker_array);
    }
    pub_cylinder_marker_.publish(marker_array);
    //_pub_cylinder_bag.publish(cylinder_shape_bag);
  }
  data_file_.close();

  // Save the instances to a file for later reasoning;
  // the file has to be saved under the same name in any case.
  if (std::getenv("ROS_HOME") == NULL)
  {
    producer.GenerateKB(std::string(std::getenv("HOME")) + std::string("/.ros/") + std::string(Fuzzy::CYLINDER_KB_FILENAME));
  }
  else
    producer.GenerateKB(std::string(std::getenv("ROS_HOME")) + std::string("/") + std::string(Fuzzy::CYLINDER_KB_FILENAME));

  /*
  // Message call to get the likelihood of the cylinders
  reasoner_msgs::GetCylinders getCylinders;
  reasoner_client_.call(getCylinders);
  std::vector<reasoner_msgs::Reasoner>::iterator iter = getCylinders.response.result.begin();
  while (iter != getCylinders.response.result.end())
  {
    std::cout << (*iter).name << ": " << (*iter).likelihood << std::endl;
    iter++;
  }
  */
}
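The orientation step above rotates the RViz cylinder marker's default +Z axis onto the RANSAC cylinder axis, using the cross product as the rotation axis and the arccosine of the dot product as the angle. The following is a hedged sketch of just that step, assuming the ROS tf LinearMath types used in the snippet; unlike the original, it also guards the degenerate case where the axis is already (anti)parallel to +Z.

#include <tf/LinearMath/Vector3.h>
#include <tf/LinearMath/Quaternion.h>
#include <cmath>

// Quaternion that rotates the marker's default +Z axis onto the given cylinder axis.
tf::Quaternion orientationFromAxis(tf::Vector3 axis)
{
    axis.normalize();
    const tf::Vector3 up(0.0, 0.0, 1.0);           // RViz CYLINDER markers point along +Z
    tf::Vector3 rotation_axis = axis.cross(up);
    if (rotation_axis.length() < 1e-6)             // axis (anti)parallel to +Z: any perpendicular axis works
        rotation_axis = tf::Vector3(1.0, 0.0, 0.0);
    rotation_axis.normalize();
    // Negative angle because the rotation axis is axis x up rather than up x axis.
    tf::Quaternion q(rotation_axis, -std::acos(axis.dot(up)));
    q.normalize();
    return q;
}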
KFbxXMatrix FilmboxNode::GetWorldTransform( int frame ) const
{
    KFbxXMatrix value;
    value.SetIdentity();

    if( m_node )
    {
        KFbxScene* scene = m_node->GetScene();
        KFbxAnimEvaluator* evaluator = scene->GetEvaluator();
        KTime frame_time;
        if( frame == 0xFFFFFFFF )
        {
            frame_time = KTIME_INFINITE;
        }
        else
        {
            frame_time.SetTime( 0, 0, 0, frame );
        }
        value = evaluator->GetNodeGlobalTransform( m_node, frame_time );

        if( m_type == MCE::FBX::SceneObjectType::CAMERA )
        {
            KFbxNode* fbx_target_node = m_node->GetTarget();
            if( fbx_target_node )
            {
                // For target cameras we need to manually replace the rotation to point towards the target
                KFbxVector4 camera_position = value.GetT();
                MCE::FBX::FilmboxNode mce_target_node( fbx_target_node );
                KFbxXMatrix target_transform = mce_target_node.GetWorldTransform( frame );
                KFbxVector4 target_position = target_transform.GetT();

                KFbxVector4 up_vector( 0.0, 1.0, 0.0, 0.0 );
                KFbxVector4 look_axis = -( target_position - camera_position );
                look_axis.Normalize();
                KFbxVector4 right_axis = up_vector.CrossProduct( look_axis );
                KFbxVector4 up_axis = look_axis.CrossProduct( right_axis );

                KFbxMatrix rotation_matrix;
                rotation_matrix.SetRow( 0, right_axis );
                rotation_matrix.SetRow( 1, up_axis );
                rotation_matrix.SetRow( 2, look_axis );
                rotation_matrix.SetRow( 3, KFbxVector4( 0.0, 0.0, 0.0, 1.0 ) );

                KFbxQuaternion camera_rotation;
                double determinant;
                rotation_matrix.GetElements( KFbxVector4(), camera_rotation, KFbxVector4(), KFbxVector4(), determinant );

                KFbxVector4 camera_scale( 1.0, 1.0, 1.0, 0.0 );
                value.SetTQS( camera_position, camera_rotation, camera_scale );
            }
            else
            {
                // Even though the SDK docs say that GetNodeGlobalTransform takes all transforms into account,
                // for cameras it appears that you have to manually apply post-rotation
                KFbxVector4 post_rotation = m_node->GetPostRotation( KFbxNode::eSOURCE_SET );
                KFbxXMatrix fbx_to_mce_camera;
                fbx_to_mce_camera.SetR( post_rotation );
                value *= fbx_to_mce_camera;
            }
        }
    }

    return value;
}
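The target-camera branch rebuilds the rotation from three axes: a look axis from camera to target (negated), a right axis from the world up, and a recomputed up axis, which then become the rows of the rotation matrix. Below is a minimal sketch of that construction in plain C++ with the FBX SDK types left out; Vec3 and lookAtBasis are illustrative names, not part of the SDK, and unlike the snippet this version normalizes all three axes so the rows form an orthonormal basis.

#include <array>
#include <cmath>

struct Vec3 { double x, y, z; };

static Vec3 sub(Vec3 a, Vec3 b)   { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static Vec3 cross(Vec3 a, Vec3 b) { return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x }; }
static Vec3 normalize(Vec3 v)
{
    double len = std::sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
    return { v.x/len, v.y/len, v.z/len };
}

// Rows 0, 1, 2 of the camera rotation: right, up, and look axes.
std::array<Vec3, 3> lookAtBasis(Vec3 camera_position, Vec3 target_position, Vec3 world_up)
{
    Vec3 look  = normalize(sub(camera_position, target_position)); // -(target - camera), as above
    Vec3 right = normalize(cross(world_up, look));
    Vec3 up    = cross(look, right);                               // already unit length
    return { { right, up, look } };
}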
void findCameraPositions(drawing_3d_t &drawing, int case_index)
{
    vector_t v_min = drawing.getWcsMin();
    vector_t v_max = drawing.getWcsMax();

    double x_min = v_min.getX();
    double y_min = v_min.getY();
    double z_min = v_min.getZ();
    double x_max = v_max.getX();
    double y_max = v_max.getY();
    double z_max = v_max.getZ();

    double e_x, e_y, e_z, a_x, a_y, a_z, up_x, up_y, up_z, L, R, T, B, N, F;

    if (case_index == 4 || case_index == 7) {
        // dcs, clipped dcs
        e_x = (x_max + x_min)/2;
        e_y = (y_max + y_min)/2;
        e_z = 10;
        a_x = e_x;
        a_y = e_y;
        a_z = 0;
        up_x = 0;
        up_y = 1;
        up_z = 0;
        L = (x_min - x_max);
        R = (x_max - x_min);
        T = (y_max - y_min);
        B = (y_min - y_max);
        if (R > T) {
            T = R;
            B = -T;
        }
        else {
            R = T;
            L = -R;
        }
        N = 5;
        F = 15;
    }
    else {
        e_x = (x_max + x_min)/2;
        e_y = 2*y_max - y_min;
        e_z = (z_max + z_min)/2;
        a_x = e_x;
        a_y = (y_max + y_min)/2;
        a_z = e_z;
        up_x = 1;
        up_y = 0;
        up_z = 0;
        L = (z_min - z_max);
        R = (z_max - z_min);
        T = (x_max - x_min);
        B = (x_min - x_max);
        if (R > T) {
            T = R;
            B = -T;
        }
        else {
            R = T;
            L = -R;
        }
        N = (y_max - y_min)/2;
        F = N*5;
    }

    vector_t eye(e_x, e_y, e_z);
    vector_t lookat(a_x, a_y, a_z);
    vector_t up_vector(up_x, up_y, up_z);
    setCameraParameters(drawing, eye, lookat, up_vector, L, R, T, B, N, F);
}
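Both branches of findCameraPositions fit a symmetric view window around the bounding box by taking the larger of the two in-plane extents and mirroring it to the other sides; the second branch then derives the near and far planes from the remaining axis. A small standalone sketch of that second ("view from +y") branch follows, with the project-specific drawing_3d_t and setCameraParameters omitted; Camera and cameraFromAbove are hypothetical names used only for illustration.

#include <algorithm>

struct Camera
{
    double eye[3], lookat[3], up[3];  // eye position, look-at point, up vector
    double L, R, T, B, N, F;          // window extents and near/far planes
};

// View of an axis-aligned box from the +y side, matching the else branch above.
Camera cameraFromAbove(const double mn[3], const double mx[3])
{
    Camera c = {};
    // Eye sits one box height beyond y_max, centred in x and z.
    c.eye[0] = (mx[0] + mn[0]) / 2.0;
    c.eye[1] = 2.0 * mx[1] - mn[1];
    c.eye[2] = (mx[2] + mn[2]) / 2.0;
    // Look at the box centre; up points along +x.
    c.lookat[0] = c.eye[0];
    c.lookat[1] = (mx[1] + mn[1]) / 2.0;
    c.lookat[2] = c.eye[2];
    c.up[0] = 1.0; c.up[1] = 0.0; c.up[2] = 0.0;
    // Square window: the larger of the z and x extents on every side.
    double w = std::max(mx[2] - mn[2], mx[0] - mn[0]);
    c.L = -w; c.R = w; c.B = -w; c.T = w;
    // Near plane halfway to the box, far plane well beyond it.
    c.N = (mx[1] - mn[1]) / 2.0;
    c.F = 5.0 * c.N;
    return c;
}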