Example #1
/**
 * Tests that the state machine requests the cruise trajectory after a timeout
 */
TEST_F(StateMachineControlTest, TrajectoryTimeout) {

    StateMachineControl *fsm_control = new StateMachineControl(lcm_, "../TrajectoryLibrary/trajtest/full", "tvlqr-action-out", "state-machine-state", "altitude-reset", false);
    //fsm_control->GetFsmContext()->setDebugFlag(true);

    float altitude = 100;

    SubscribeLcmChannels(fsm_control);

    // force the state machine into autonomous mode
    ForceAutonomousMode();

    // send an obstacle to get it to transition to a new trajectory

    float point[3] = { 17, 11, 3+altitude };
    SendStereoPointTriple(point);

    float point2[3] = { 24, 0, 0+altitude };
    SendStereoPointTriple(point2);

    mav::pose_t msg = GetDefaultPoseMsg();

    lcm_->publish(pose_channel_, &msg);
    ProcessAllLcmMessages(fsm_control);


    // ensure that we have changed trajectories
    EXPECT_EQ_ARM(fsm_control->GetCurrentTrajectory().GetTrajectoryNumber(), 3); // from matlab

    // wait for that trajectory to time out
    int64_t t_start = GetTimestampNow();
    double t = 0;
    while (t < 4.1) {
        usleep(7142); // 1/140 of a second

        msg.utime = GetTimestampNow();

        t = (msg.utime - t_start) / 1000000.0;

        msg.pos[0] = t * 10;

        lcm_->publish(pose_channel_, &msg);
        ProcessAllLcmMessages(fsm_control);
    }

    EXPECT_EQ_ARM(fsm_control->GetCurrentTrajectory().GetTrajectoryNumber(), 0);


    delete fsm_control;

    UnsubscribeLcmChannels();
}
Example #2
        mav::pose_t GetDefaultPoseMsg() {
            mav::pose_t msg;

            msg.utime = GetTimestampNow();

            msg.pos[0] = 0;
            msg.pos[1] = 0;
            msg.pos[2] = altitude_;

            msg.vel[0] = 0;
            msg.vel[1] = 0;
            msg.vel[2] = 0;

            msg.orientation[0] = 1;
            msg.orientation[1] = 0;
            msg.orientation[2] = 0;
            msg.orientation[3] = 0;

            msg.rotation_rate[0] = 0;
            msg.rotation_rate[1] = 0;
            msg.rotation_rate[2] = 0;

            msg.accel[0] = 0;
            msg.accel[1] = 0;
            msg.accel[2] = 0;

            return msg;
        }
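
A minimal usage sketch, assuming the lcm_ handle, pose_channel_, fsm_control pointer, and ProcessAllLcmMessages() helper from the surrounding test fixture (as in Example #1):

    // publish a hover pose at altitude_ and let the state machine consume it
    mav::pose_t msg = GetDefaultPoseMsg();
    lcm_->publish(pose_channel_, &msg);
    ProcessAllLcmMessages(fsm_control);
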
Example #3
        void ForceAutonomousMode() {
            // step 1: ask for a single trajectory
            lcmt::tvlqr_controller_action trajectory_msg;
            trajectory_msg.timestamp = GetTimestampNow();
            trajectory_msg.trajectory_number = 0;
            lcm_->publish("rc-trajectory-commands", &trajectory_msg);

            ProcessAllLcmMessagesNoDelayedUpdate();

            // step 2: send an autonomous mode signal
            lcmt::timestamp autonomous_msg;
            autonomous_msg.timestamp = GetTimestampNow();
            lcm_->publish("state-machine-go-autonomous", &autonomous_msg);

            ProcessAllLcmMessagesNoDelayedUpdate();
        }
Example #4
    void AddManyPointsToOctree(StereoOctomap *octomap, float x_in[], float y_in[], float z_in[], int number_of_points, double altitude_offset) {
        lcmt::stereo msg;

        msg.timestamp = GetTimestampNow();

        vector<float> x, y, z;

        for (int i = 0; i < number_of_points; i++) {

            double this_point[3];

            this_point[0] = x_in[i];
            this_point[1] = y_in[i];
            this_point[2] = z_in[i] + altitude_offset;

            double point_transformed[3];
            GlobalToCameraFrame(this_point, point_transformed);

            //std::cout << "Point: (" << point_transformed[0] << ", " << point_transformed[1] << ", " << point_transformed[2] << ")" << std::endl;

            x.push_back(point_transformed[0]);
            y.push_back(point_transformed[1]);
            z.push_back(point_transformed[2]);
        }

        msg.x = x;
        msg.y = y;
        msg.z = z;

        msg.number_of_points = number_of_points;
        msg.video_number = 0;
        msg.frame_number = 0;

        octomap->ProcessStereoMessage(&msg);
    }
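
A hedged example of how this helper might be called; the coordinates mirror the obstacle points from Example #1, the 100 m altitude offset is the value the tests use, and bot_frames_ comes from the test fixture:

    StereoOctomap octomap(bot_frames_);
    float x[2] = { 24, 17 };
    float y[2] = {  0, 11 };
    float z[2] = {  0,  3 };
    AddManyPointsToOctree(&octomap, x, y, z, 2, 100.0);  // 100 is the altitude offset
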
Example #5
TEST(StereoFilterTest, OnePoint) {
    StereoFilter filter(0.01);

    lcmt::stereo msg;

    msg.timestamp = GetTimestampNow();

    msg.video_number = 3;
    msg.frame_number = 452;

    msg.x.push_back(0);
    msg.y.push_back(0);
    msg.z.push_back(0);

    msg.number_of_points = 1;

    const lcmt::stereo *msg2;
    msg2 = filter.ProcessMessage(msg);

    EXPECT_EQ_ARM(msg2->number_of_points, 0);
    delete msg2;

    msg.frame_number = 453;
    msg2 = filter.ProcessMessage(msg);

    ASSERT_TRUE(msg2->number_of_points == 1);

    EXPECT_EQ_ARM(msg2->x[0], 0);
    EXPECT_EQ_ARM(msg2->y[0], 0);
    EXPECT_EQ_ARM(msg2->z[0], 0);
}
Example #6
double TvlqrControl::GetTNow() {

    int64_t delta_t = GetTimestampNow() - t0_;

    // convert to seconds
    return double(delta_t) / 1000000.0;

}
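
A sketch of the intended call pattern, assuming GetTimestampNow() returns microseconds and t0_ was captured by InitializeState() (Example #8); the Trajectory lookup mirrors Example #12 and current_traj is an illustrative name:

    // inside the controller, after InitializeState() has run:
    double t = GetTNow();                                 // seconds elapsed since t0_
    Eigen::VectorXd state_t = current_traj.GetState(t);   // desired state at time t
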
Example #7
        void SendStereoManyPointsTriple(vector<float> x_in, vector<float> y_in, vector<float> z_in) {
            lcmt::stereo msg;

            msg.timestamp = GetTimestampNow();

            vector<float> x, y, z;
            vector<unsigned char> grey;

            for (int i = 0; i < (int)x_in.size(); i++) {

                double this_point[3];

                this_point[0] = x_in[i];
                this_point[1] = y_in[i];
                this_point[2] = z_in[i];

                double point_transformed[3];
                GlobalToCameraFrame(this_point, point_transformed);

                //std::cout << "Point: (" << point_transformed[0] << ", " << point_transformed[1] << ", " << point_transformed[2] << ")" << std::endl;

                x.push_back(point_transformed[0]);
                y.push_back(point_transformed[1]);
                z.push_back(point_transformed[2]);
                grey.push_back(0);

                x.push_back(point_transformed[0]);
                y.push_back(point_transformed[1]);
                z.push_back(point_transformed[2]);
                grey.push_back(0);

                x.push_back(point_transformed[0]);
                y.push_back(point_transformed[1]);
                z.push_back(point_transformed[2]);
                grey.push_back(0);
            }

            msg.x = x;
            msg.y = y;
            msg.z = z;
            msg.grey = grey;

            msg.number_of_points = x.size();
            msg.video_number = 0;
            msg.frame_number = 0;

            lcm_->publish("stereo", &msg);

        }
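
A hypothetical call, mirroring the single-point SendStereoPointTriple() used in Examples #1 and #12 (the coordinates and altitude value are illustrative):

    float altitude = 100;
    vector<float> x = { 24, 17 };
    vector<float> y = {  0, 11 };
    vector<float> z = {  0 + altitude, 3 + altitude };
    SendStereoManyPointsTriple(x, y, z);   // each point is published three times on "stereo"
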
Example #8
void TvlqrControl::InitializeState(const mav_pose_t *msg) {

    initial_state_ = PoseMsgToStateEstimatorVector(msg);

    // get the yaw from the initial state

    double rpy[3];

    bot_quat_to_roll_pitch_yaw(msg->orientation, rpy);

    Mz_ = rotz(-rpy[2]);

    t0_ = GetTimestampNow();

    state_initialized_ = true;

}
Example #9
TEST(StereoFilterTest, NoHitPoints) {
    StereoFilter filter(0.01);

    lcmt::stereo msg;

    msg.timestamp = GetTimestampNow();

    msg.video_number = 1;
    msg.frame_number = 0;

    msg.x.push_back(3.232);
    msg.y.push_back(10);
    msg.z.push_back(12.13);

    msg.number_of_points = 1;

    const lcmt::stereo *msg2;
    msg2 = filter.ProcessMessage(msg);
    delete msg2;

    msg2 = filter.ProcessMessage(msg);

    ASSERT_TRUE(msg2->number_of_points == 1);
    delete msg2;

    msg.x[0] = 1;

    msg.x.push_back(3.232);
    msg.y.push_back(10);
    msg.z.push_back(12.13);

    msg.number_of_points = 2;

    msg2 = filter.ProcessMessage(msg);

    EXPECT_EQ_ARM(msg2->number_of_points, 1);
    delete msg2;
}
Example #10
 double toc() {
     double outval = GetTimestampNow() / 1000000.0 - tictoc_wall_stack.top();
     tictoc_wall_stack.pop();
     return outval;
 }
Example #11
 void tic() {
     tictoc_wall_stack.push(GetTimestampNow() / 1000000.0);
 }
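
A minimal sketch of how tic()/toc() pair up, assuming tictoc_wall_stack is a std::stack<double> of wall-clock times in seconds; the timed call mirrors Example #14:

    #include <stack>

    static std::stack<double> tictoc_wall_stack;   // assumed declaration backing tic()/toc()

    // ... later, timing a block of work:
    tic();                                         // push the current wall time in seconds
    double linear_search_dist = NearestNeighborLinear(x, y, z, search_point);
    double time_linear = toc();                    // seconds elapsed since the matching tic()
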
Example #12
/**
 * Tests an obstacle appearing during a trajectory execution
 */
TEST_F(StateMachineControlTest, TrajectoryInterrupt) {

    StateMachineControl *fsm_control = new StateMachineControl(lcm_, "../TrajectoryLibrary/trajtest/full", "tvlqr-action-out", "state-machine-state", "altitude-reset", false);
    //fsm_control->GetFsmContext()->setDebugFlag(true);

    SubscribeLcmChannels(fsm_control);

    ForceAutonomousMode();

    float altitude = 100;

    // send an obstacle to get it to transition to a new trajectory

    mav::pose_t msg = GetDefaultPoseMsg();

    lcm_->publish(pose_channel_, &msg);
    ProcessAllLcmMessages(fsm_control);

    float point[3] = { 24, 0, 0+altitude };
    SendStereoPointTriple(point);
    ProcessAllLcmMessages(fsm_control);

    lcm_->publish(pose_channel_, &msg);
    ProcessAllLcmMessages(fsm_control);


    // ensure that we have changed trajectories
    EXPECT_EQ_ARM(fsm_control->GetCurrentTrajectory().GetTrajectoryNumber(), 2); // from matlab

    Trajectory running_traj = fsm_control->GetCurrentTrajectory();

    // wait for that trajectory to time out
    int64_t t_start = GetTimestampNow();
    double t = 0;
    while (t < 1.0) {
        usleep(7142); // 1/140 of a second

        msg.utime = GetTimestampNow();

        t = (msg.utime - t_start) / 1000000.0;

        Eigen::VectorXd state_t = running_traj.GetState(t);

        msg.pos[0] = state_t(0);
        msg.pos[1] = state_t(1);
        msg.pos[2] = state_t(2) + altitude;

        msg.vel[0] = state_t(6);
        msg.vel[1] = state_t(7);
        msg.vel[2] = state_t(8);

        double rpy[3];
        rpy[0] = state_t(3);
        rpy[1] = state_t(4);
        rpy[2] = state_t(5);

        double quat[4];
        bot_roll_pitch_yaw_to_quat(rpy, quat);

        msg.orientation[0] = quat[0];
        msg.orientation[1] = quat[1];
        msg.orientation[2] = quat[2];
        msg.orientation[3] = quat[3];

        msg.rotation_rate[0] = state_t(9);
        msg.rotation_rate[1] = state_t(10);
        msg.rotation_rate[2] = state_t(11);


        lcm_->publish(pose_channel_, &msg);
        ProcessAllLcmMessages(fsm_control);
    }

    // now add a new obstacle right in front!
    std::cout << "NEW POINT" << std::endl;
    lcm_->publish(pose_channel_, &msg);
    ProcessAllLcmMessages(fsm_control);

    float point2[3] = { 18, 12, 0+altitude };
    SendStereoPointTriple(point2);
    ProcessAllLcmMessages(fsm_control);

    lcm_->publish(pose_channel_, &msg);
    ProcessAllLcmMessages(fsm_control);

    fsm_control->GetOctomap()->Draw(lcm_->getUnderlyingLCM());
    BotTrans body_to_local;
    bot_frames_get_trans(bot_frames_, "body", "local", &body_to_local);
    fsm_control->GetTrajectoryLibrary()->Draw(lcm_->getUnderlyingLCM(), &body_to_local);

    fsm_control->GetTrajectoryLibrary()->Draw(lcm_->getUnderlyingLCM(), &body_to_local);

    bot_lcmgl_t *lcmgl = bot_lcmgl_init(lcm_->getUnderlyingLCM(), "Trajectory");
    bot_lcmgl_color3f(lcmgl, 0, 1, 0);
    fsm_control->GetCurrentTrajectory().Draw(lcmgl, &body_to_local);
    bot_lcmgl_switch_buffer(lcmgl);
    bot_lcmgl_destroy(lcmgl);


    EXPECT_EQ_ARM(fsm_control->GetCurrentTrajectory().GetTrajectoryNumber(), 3); // from matlab

    delete fsm_control;

    UnsubscribeLcmChannels();
}
Example #13
File: tests.cpp  Project: 1ee7/flight
TEST_F(StereoOctomapTest, SimpleNearestNeighbor) {

    StereoOctomap *stereo_octomap = new StereoOctomap(bot_frames_);

    // first test when no points are there

    double origin[3];
    origin[0] = 0;
    origin[1] = 0;
    origin[2] = 0;

    EXPECT_TRUE(stereo_octomap->NearestNeighbor(origin) == -1) << "No points in octomap failed." << std::endl;

    // add a point
    double point[3], trans_point[3];
    point[0] = 1;
    point[1] = 0;
    point[2] = 0;


    GlobalToCameraFrame(point, trans_point);

    lcmt::stereo msg;

    msg.timestamp = GetTimestampNow();

    vector<float> x, y, z;

    x.push_back(trans_point[0]);
    y.push_back(trans_point[1]);
    z.push_back(trans_point[2]);

    msg.x = x;
    msg.y = y;
    msg.z = z;

    msg.number_of_points = 1;
    msg.frame_number = 0;
    msg.video_number = 0;

    stereo_octomap->ProcessStereoMessage(&msg);


    EXPECT_EQ_ARM(stereo_octomap->NearestNeighbor(origin), 1);

    EXPECT_EQ_ARM(stereo_octomap->NearestNeighbor(point), 0);

    double point2[3] = {0, 0, 1};

    EXPECT_NEAR(stereo_octomap->NearestNeighbor(point2), sqrt(2), TOLERANCE);


    lcmt::stereo msg2;
    msg2.timestamp = GetTimestampNow();

    vector<float> x_2, y_2, z_2;

    double trans_point2[3];

    point[0] = 0;
    point[1] = 2;
    point[2] = 0;
    GlobalToCameraFrame(point, trans_point2);

    x_2.push_back(trans_point2[0]);
    y_2.push_back(trans_point2[1]);
    z_2.push_back(trans_point2[2]);

    msg2.x = x_2;
    msg2.y = y_2;
    msg2.z = z_2;

    msg2.number_of_points = 1;
    msg2.frame_number = 0;
    msg2.video_number = 0;

    // add the points to the octomap
    stereo_octomap->ProcessStereoMessage(&msg2);

    // check

    EXPECT_NEAR(stereo_octomap->NearestNeighbor(origin), 1, TOLERANCE);

    double query[3] = { 1.5, 0, 0 };

    EXPECT_NEAR(stereo_octomap->NearestNeighbor(query), 0.5, TOLERANCE);

    query[0] = 1;
    query[1] = 0;
    query[2] = 1;

    // check z
    EXPECT_NEAR(stereo_octomap->NearestNeighbor(query), 1, TOLERANCE);

    query[0] = 0;
    query[1] = 2.01;
    query[2] = 0;

    EXPECT_NEAR(stereo_octomap->NearestNeighbor(query), 0.01, TOLERANCE);

    query[0] = 0;
    query[1] = 2.1;
    query[2] = 0.5;

    EXPECT_NEAR(stereo_octomap->NearestNeighbor(query), sqrt(0.1*0.1 + 0.5*0.5), TOLERANCE);

    query[0] = -1;
    query[1] = 2.5;
    query[2] = 1.5;

    EXPECT_NEAR(stereo_octomap->NearestNeighbor(query), sqrt(1*1 + .5*.5 + 1.5*1.5), TOLERANCE);

    delete stereo_octomap;

}
Example #14
File: tests.cpp  Project: 1ee7/flight
TEST_F(StereoOctomapTest, CheckAgainstLinearSearch) {

    int num_points = 10000;

    vector<float> x;
    vector<float> y;
    vector<float> z;

    StereoOctomap *stereo_octomap = new StereoOctomap(bot_frames_);

    // create a random point cloud

    std::uniform_real_distribution<double> uniform_dist(-1000, 1000);
    std::random_device rd;
    std::default_random_engine rand_engine(rd());

    for (int i = 0; i < num_points; i++) {

        double this_point[3];

        // generate a random point
        this_point[0] = uniform_dist(rand_engine);
        this_point[1] = uniform_dist(rand_engine);
        this_point[2] = uniform_dist(rand_engine);

        //std::cout << "Point: (" << this_point[0] << ", " << this_point[1] << ", " << this_point[2] << ")" << std::endl;

        double translated_point[3];

        GlobalToCameraFrame(this_point, translated_point);

        x.push_back(translated_point[0]);
        y.push_back(translated_point[1]);
        z.push_back(translated_point[2]);
    }

    lcmt::stereo msg;

    msg.timestamp = GetTimestampNow();

    msg.x = x;
    msg.y = y;
    msg.z = z;

    msg.number_of_points = num_points;

    msg.frame_number = 0;
    msg.video_number = 0;

    stereo_octomap->ProcessStereoMessage(&msg);

    // now perform a bunch of searches

    double time_linear = 0, time_octomap = 0;

    int num_searches = 1000;

    for (int i = 0; i < num_searches; i++) {

        double search_point[3];

        // generate a random point
        search_point[0] = uniform_dist(rand_engine);
        search_point[1] = uniform_dist(rand_engine);
        search_point[2] = uniform_dist(rand_engine);

        //std::cout << "search_point: (" << search_point[0] << ", " << search_point[1] << ", " << search_point[2] << ")" << std::endl;


        tic();
        double linear_search_dist = NearestNeighborLinear(x, y, z, search_point);
        time_linear += toc();

        tic();
        double octomap_dist = stereo_octomap->NearestNeighbor(search_point);
        time_octomap += toc();

        EXPECT_NEAR(octomap_dist, linear_search_dist, TOLERANCE);

    }

    std::cout << "\tFor " << num_searches << " searches over " << num_points << " points: " << std:: endl << "\t\tlinear time = " << time_linear << ", octree time = " << time_octomap << ", for a speedup of: " << time_linear / time_octomap << "x" << std::endl;


    delete stereo_octomap;

}