int main(int argc, char* argv[])
{
// query the team size from inside a parallel region, otherwise omp_get_num_threads() reports 1
#pragma omp parallel
#pragma omp master
	{
#ifdef _OPENMP
		int nthreads = omp_get_num_threads();
		std::cout << "Using OpenMP - There are " << nthreads << " threads" << std::endl;
#else
		std::cout << "Not using OpenMP" << '\n';
#endif
	}

	// -------------------------------------------------------------------------------------
	// Create "tiy_log/" subdirectory (win) or "/home/<username>/tiy_log/" (linux)
	// -------------------------------------------------------------------------------------
	std::string log_file_directory = "tiy_log/";
#ifndef WIN32
	log_file_directory = std::string(getpwuid(getuid())->pw_dir) + "/" + log_file_directory;
#endif

	boost::filesystem::path dir_path(log_file_directory);
	if (!boost::filesystem::is_directory(dir_path) && !boost::filesystem::create_directory(dir_path))
	{
		std::cerr << "Could not create log subdirectory." << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	// -------------------------------------------------------------------------------------
	// Input ARG
	// -------------------------------------------------------------------------------------
	char *arg_camera_config_file = (char *)"config_camera.xml";
	char *arg_object_config_file = (char *)"config_object.xml";
	char *arg_run_parameter_config_file = (char *)"config_run_parameters.xml";

	if (argc == 1)
	{
		std::cerr << "USING DEFAULT CONFIG FILES: config_camera.xml config_object.xml config_run_parameters.xml" << std::endl;
	}
	else if (argc != 4)
	{
		std::cerr << "Usage: server <camera_config_file> <object_config_file> <run_parameters_config_file>" << std::endl;
		std::cerr << "default: server config_camera.xml config_object.xml config_run_parameters.xml" << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}
	else
	{
		// argv[0] is the executable name, so the config files start at argv[1]
		arg_camera_config_file = argv[1];
		arg_object_config_file = argv[2];
		arg_run_parameter_config_file = argv[3];
	}

	// -------------------------------------------------------------------------------------
	// Get Run Parameters from XML Config File
	// -------------------------------------------------------------------------------------
	cv::FileStorage input_file_storage;
	if (!input_file_storage.open(arg_run_parameter_config_file, cv::FileStorage::READ))
	{
		std::cerr << "could NOT open " << arg_run_parameter_config_file << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	int do_use_kalman_filter=-1, do_interactive_mode=-1, multicast_port=-1, do_show_graphics=-1,
	    do_output_debug=-1, do_output_2D=-1, do_output_3D=-1, do_output_object=-1, do_output_virt_point=-1,
	    do_log_2D=-1, do_log_3D=-1, do_log_object=-1, do_log_virt_point=-1, do_log_video=-1, do_log_frame=-1,
	    do_send_object_pose=-1, do_send_virt_point_pose=-1;

	do_use_kalman_filter = (int)input_file_storage["do_use_kalman_filter"];
	do_interactive_mode = (int)input_file_storage["do_interactive_mode"];
	multicast_port = (int)input_file_storage["multicast_port"];
	do_show_graphics = (int)input_file_storage["do_show_graphics"];
	do_output_debug = (int)input_file_storage["do_output_debug"];
	do_output_2D = (int)input_file_storage["do_output_2D"];
	do_output_3D = (int)input_file_storage["do_output_3D"];
	do_output_object = (int)input_file_storage["do_output_object"];
	do_output_virt_point = (int)input_file_storage["do_output_virt_point"];
	do_log_2D = (int)input_file_storage["do_log_2D"];
	do_log_3D = (int)input_file_storage["do_log_3D"];
	do_log_object = (int)input_file_storage["do_log_object"];
	do_log_virt_point = (int)input_file_storage["do_log_virt_point"];
	do_log_video = (int)input_file_storage["do_log_video"];
	do_log_frame = (int)input_file_storage["do_log_frame"];
	do_send_object_pose = (int)input_file_storage["do_send_object_pose"];
	do_send_virt_point_pose = (int)input_file_storage["do_send_virt_point_pose"];

	std::string multicast_adress = (std::string)input_file_storage["multicast_adress"];
	std::string input_device_src = (std::string)input_file_storage["input_device_src"]; // (m: Mouse, k: Keyboard)
	std::string mouse_device_id = (std::string)input_file_storage["mouse_device_id"];
	std::string keyboard_device_id = (std::string)input_file_storage["keyboard_device_id"];
	std::string input_src = (std::string)input_file_storage["input_src"]; // (b: Basler Camera, o: OpenCV Camera, v: Video files, t: 2D point files)
	std::string video_left = (std::string)input_file_storage["video_left"];
	std::string video_right = (std::string)input_file_storage["video_right"];
	std::string points_2D_left = (std::string)input_file_storage["points_2D_left"];
	std::string points_2D_right = (std::string)input_file_storage["points_2D_right"];
	std::string log_points_2D_left = log_file_directory + (std::string)input_file_storage["log_points_2D_left"];
	std::string log_points_2D_right = log_file_directory + (std::string)input_file_storage["log_points_2D_right"];
	std::string log_points_3D = log_file_directory + (std::string)input_file_storage["log_points_3D"];
	std::string log_object_pose = log_file_directory + (std::string)input_file_storage["log_object_pose"];
	std::string log_virt_point_pose = log_file_directory + (std::string)input_file_storage["log_virt_point_pose"];
	std::string log_video_left = log_file_directory + (std::string)input_file_storage["log_video_left"];
	std::string log_video_right = log_file_directory + (std::string)input_file_storage["log_video_right"];
	std::string log_frame_left_prefix = log_file_directory + (std::string)input_file_storage["log_frame_left_prefix"];
	std::string log_frame_right_prefix = log_file_directory + (std::string)input_file_storage["log_frame_right_prefix"];

	input_file_storage.release();

	if (do_use_kalman_filter==-1 || do_interactive_mode==-1 || multicast_port==-1 || do_show_graphics==-1 ||
	    do_output_debug==-1 || do_output_2D==-1 || do_output_3D==-1 || do_output_object==-1 ||
	    do_output_virt_point==-1 || do_log_2D==-1 || do_log_3D==-1 || do_log_object==-1 ||
	    do_log_virt_point==-1 || do_log_video==-1 || do_log_frame==-1 ||
	    do_send_object_pose==-1 || do_send_virt_point_pose==-1 ||
	    multicast_adress.empty() || input_device_src.empty() || mouse_device_id.empty() ||
	    keyboard_device_id.empty() || input_src.empty() || video_left.empty() || video_right.empty() ||
	    points_2D_left.empty() || points_2D_right.empty() || log_points_2D_left.empty() ||
	    log_points_2D_right.empty() || log_points_3D.empty() || log_object_pose.empty() ||
	    log_virt_point_pose.empty() || log_video_left.empty() || log_video_right.empty() ||
	    log_frame_left_prefix.empty() || log_frame_right_prefix.empty())
	{
		std::cerr << "Reading all run parameters from " << arg_run_parameter_config_file << " failed" << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	if (do_log_video && (input_src == "v"))
	{
		std::cerr << "Cannot read video files and record to files at the same time." << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	bool do_debugging = (do_output_debug != 0);

	// -------------------------------------------------------------------------------------
	// Initialize Motion Capturing (segmentation/marker extraction, marker template fitting)
	// -------------------------------------------------------------------------------------
	tiy::MarkerTracking m_track(do_debugging);

	if (!m_track.readConfigFiles(arg_camera_config_file, arg_object_config_file))
	{
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	// -------------------------------------------------------------------------------------
	// Input device
	// -------------------------------------------------------------------------------------
	boost::scoped_ptr<tiy::MouseDevice> mouse_device;
	boost::scoped_ptr<tiy::KeyboardDevice> keyboard_device;
#ifdef WIN32
	mouse_device.reset(new tiy::WindowsMouse(do_debugging));
	keyboard_device.reset(new tiy::WindowsKeyboard(do_debugging));
#else
	mouse_device.reset(new tiy::LinuxMouse(do_debugging));
	keyboard_device.reset(new tiy::LinuxKeyboard(do_debugging));
#endif

	int read_intervall_ms = 1;
	if ((input_device_src == "m") && (!mouse_device->openAndReadMouse(mouse_device_id, read_intervall_ms)))
	{
		std::cerr << "MouseDevice::openAndReadMouse() failed" << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}
	if (!keyboard_device->openAndReadKeyboard(keyboard_device_id, read_intervall_ms))
	{
		std::cerr << "KeyboardDevice::openAndReadKeyboard() failed" << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	// -------------------------------------------------------------------------------------
	// Stereo camera
	// -------------------------------------------------------------------------------------
	boost::scoped_ptr<tiy::StereoCamera> stereo_camera;
	std::string camera_id_left = m_track.left_camera_id;
	std::string camera_id_right = m_track.right_camera_id;

	if (input_src == "b")
	{
#ifdef USE_aravis
		stereo_camera.reset(new tiy::BaslerGigEStereoCamera(do_debugging, camera_id_left, camera_id_right,
				m_track.frame_width, m_track.frame_height, m_track.camera_exposure, m_track.camera_gain, m_track.frame_rate));
#else
		std::cerr << "BaslerGigEStereoCamera not available, as aravis NOT found/used." << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
#endif
	}
	else if (input_src == "o")
		stereo_camera.reset(new tiy::OpenCVStereoCamera(do_debugging, camera_id_left, camera_id_right,
				m_track.frame_width, m_track.frame_height, m_track.camera_exposure, m_track.camera_gain, m_track.frame_rate));
	else if (input_src == "v")
		stereo_camera.reset(new tiy::OpenCVStereoCamera(do_debugging, camera_id_left, camera_id_right,
				m_track.frame_width, m_track.frame_height, m_track.camera_exposure, m_track.camera_gain, m_track.frame_rate,
				video_left, video_right));
	else
	{
		std::cerr << "No input source \"input_src\" specified in the configuration file \"" << arg_run_parameter_config_file << "\"" << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	if (stereo_camera->openCam())
		stereo_camera->startCam();
	else
	{
		std::cerr << "MarkerTracking::connectStereoCamera() failed" << std::endl;
		std::cerr << "PRESS A KEY TO EXIT";
		cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
		return 0;
	}

	cv::Mat image_left = stereo_camera->createImage();
	cv::Mat image_right = stereo_camera->createImage();
	long long int frame_timestamp;

	// -------------------------------------------------------------------------------------
	// BOOST ASIO MULTICAST SERVER
	// -------------------------------------------------------------------------------------
	boost::asio::io_service server_io_service;
	tiy::MulticastServer multicast_server(server_io_service,
			boost::asio::ip::address::from_string(multicast_adress), multicast_port, do_debugging);

	boost::system::error_code error_c;
	boost::thread server_io_service_thread(boost::bind(&boost::asio::io_service::run, &server_io_service, error_c));

	// -------------------------------------------------------------------------------------
	// Logging
	// -------------------------------------------------------------------------------------
	std::ofstream log_2D_left, log_2D_right, log_3D, log_object, log_virt_point;
	if (do_log_2D)
	{
		log_2D_left.open(log_points_2D_left.c_str());
		log_2D_right.open(log_points_2D_right.c_str());
	}
	if (do_log_3D)
		log_3D.open(log_points_3D.c_str());
	if (do_log_object)
		log_object.open(log_object_pose.c_str());
	if (do_log_virt_point)
		log_virt_point.open(log_virt_point_pose.c_str());
	if (do_log_video)
		stereo_camera->startRecording(log_video_left, log_video_right);

	// -------------------------------------------------------------------------------------
	// MAIN LOOP
	// -------------------------------------------------------------------------------------
	int capture_counter = 1;
	bool is_base_temp = false;
	int test_points_counter = 0;

	// time measurement
	boost::posix_time::ptime start_time, end_time;
	start_time = boost::posix_time::microsec_clock::universal_time();

	for (int i = 0; true; i++)
	{
		// ---------------------------------------------------------------------------------
		// Grab stereo frame
		// ---------------------------------------------------------------------------------
		if (!stereo_camera->grabFrame(image_left, image_right, frame_timestamp))
		{
			if (input_src == "v")
				std::cout << "Video file finished." << std::endl;
			else
				std::cerr << "Grabbing failed" << std::endl;
			std::cerr << "PRESS A KEY TO EXIT";
			cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
			return 0;
		}

		if (do_log_video)
			stereo_camera->recordFrame();

		// ---------------------------------------------------------------------------------
		// Extract (or read from file) 2D points
		// ---------------------------------------------------------------------------------
		cv::vector<cv::Point2f> points_2D_left, points_2D_right;
#pragma omp parallel sections
		{
#pragma omp section
			{
				if (input_src == "t")
					m_track.get2DPointsFromFile("testpoints_left", &points_2D_left, test_points_counter);
				else
					m_track.get2DPointsFromImage(image_left, &points_2D_left);
			}
#pragma omp section
			{
				if (input_src == "t")
					m_track.get2DPointsFromFile("testpoints_right", &points_2D_right, test_points_counter);
				else
					m_track.get2DPointsFromImage(image_right, &points_2D_right);
			}
		}
		test_points_counter++;

		// ---------------------------------------------------------------------------------
		// Compute 3D points from 2D points
		// ---------------------------------------------------------------------------------
		cv::Mat points_3D = m_track.get3DPointsFrom2DPoints(points_2D_left, points_2D_right);

		// ---------------------------------------------------------------------------------
		// Search for marker objects (templates)
		// ---------------------------------------------------------------------------------
		std::vector<cv::Mat> RT_template_leftcam;
		std::vector<float> avg_dev;
		for (int t = 0; t < m_track.num_templates; t++)
		{
			RT_template_leftcam.push_back(cv::Mat::zeros(4,4,CV_32F));
			avg_dev.push_back(0);
		}
#pragma omp parallel for
		for (int r = 0; r < m_track.num_templates; r++)
			m_track.fit3DPointsToObjectTemplate(points_3D, r, RT_template_leftcam[r], &avg_dev[r]);

		// ---------------------------------------------------------------------------------
		// Update mouse and keyboard status
		// ---------------------------------------------------------------------------------
		bool was_SPACE_pressed=false, was_ESC_pressed=false;
		keyboard_device->getStatusSinceLastReset(was_SPACE_pressed, was_ESC_pressed);
		if (was_ESC_pressed)
		{
			std::cerr << "PRESS A KEY TO EXIT";
			cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
			return 0;
		}
		keyboard_device->resetStatus();

		bool was_left_button_pressed=false, was_left_button_released=false, is_left_button_pressed=false,
		     was_right_button_pressed=false, was_right_button_released=false, is_right_button_pressed=false,
		     has_mouse_wheel_changed=false;
		static int mouse_wheel_position=0;
		if (input_device_src == "m")
		{
			mouse_device->getStatusSinceLastReset(was_left_button_pressed, was_left_button_released,
					is_left_button_pressed, was_right_button_pressed, was_right_button_released,
					is_right_button_pressed, has_mouse_wheel_changed, mouse_wheel_position);
			mouse_device->resetStatus();
		}

		// ---------------------------------------------------------------------------------
		// OUTPUT (Send/Display/Log) the selected data
		// ---------------------------------------------------------------------------------
		if (!do_interactive_mode || ((input_device_src == "m") && was_left_button_pressed)
		                         || ((input_device_src == "k") && was_SPACE_pressed))
		{
			// -----------------------------------------------------------------------------
			// Send (publish the object/virtual point pose over multicast)
			// -----------------------------------------------------------------------------
			if (do_send_object_pose)
			{
				std::string send_string;
				for (int r = 0; r < m_track.num_templates; r++)
				{
					cv::Mat rodrigues_orientation = cv::Mat::zeros(3, 1, CV_32F);
					if (countNonZero(RT_template_leftcam[r]))
						Rodrigues(RT_template_leftcam[r](cv::Range(0,3),cv::Range(0,3)), rodrigues_orientation);

					int last_col = RT_template_leftcam[r].size.p[0] - 1;

					std::stringstream frame_timestamp_ss; // as boost::format not compatible with long long int
					frame_timestamp_ss << frame_timestamp;

					std::string send_buffer = (boost::format("%s\t%d\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t")
							% frame_timestamp_ss.str() % r
							% RT_template_leftcam[r].at<float>(0,last_col)
							% RT_template_leftcam[r].at<float>(1,last_col)
							% RT_template_leftcam[r].at<float>(2,last_col)
							% rodrigues_orientation.at<float>(0,0)
							% rodrigues_orientation.at<float>(1,0)
							% rodrigues_orientation.at<float>(2,0)).str();
					send_string += send_buffer;
				}
				multicast_server.sendString(send_string);

				if (do_debugging)
					std::cout << "-------------" << std::endl << "SENDING :" << send_string
					          << std::endl << "----------------" << std::endl;
			}

			if (do_send_virt_point_pose)
			{
				std::string send_string;
				for (int r = 0; r < m_track.num_templates; r++)
				{
					cv::Mat RT_virt_point_to_leftcam = cv::Mat::zeros(4, 4, CV_32F);
					cv::Mat rodrigues_orientation = cv::Mat::zeros(3, 1, CV_32F);
					if (countNonZero(RT_template_leftcam[r]) &&
					    countNonZero(m_track.RT_virt_point_to_template[r] - cv::Mat::eye(4, 4, CV_32F)))
					{
						RT_virt_point_to_leftcam = RT_template_leftcam[r] * m_track.RT_virt_point_to_template[r];
						Rodrigues(RT_virt_point_to_leftcam(cv::Range(0,3),cv::Range(0,3)), rodrigues_orientation);
					}

					int last_col = RT_virt_point_to_leftcam.size.p[0] - 1;

					std::stringstream frame_timestamp_ss; // as boost::format not compatible with long long int
					frame_timestamp_ss << frame_timestamp;

					std::string send_buffer = (boost::format("%s\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t")
							% frame_timestamp_ss.str()
							% RT_virt_point_to_leftcam.at<float>(0,last_col)
							% RT_virt_point_to_leftcam.at<float>(1,last_col)
							% RT_virt_point_to_leftcam.at<float>(2,last_col)
							% rodrigues_orientation.at<float>(0,0)
							% rodrigues_orientation.at<float>(1,0)
							% rodrigues_orientation.at<float>(2,0)).str();
					send_string += send_buffer;
				}
				multicast_server.sendString(send_string);

				if (do_debugging)
					std::cout << "-------------" << std::endl << "SENDING :" << send_string
					          << std::endl << "----------------" << std::endl;
			}

			// -----------------------------------------------------------------------------
			// Display
			// -----------------------------------------------------------------------------
			if (do_debugging)
			{
				if (was_left_button_pressed) std::cout << "LEFT" << std::endl;
				if (was_left_button_released) std::cout << "LEFT RELEASED" << std::endl;
				if (was_right_button_pressed) std::cout << "RIGHT" << std::endl;
				if (was_right_button_released) std::cout << "RIGHT RELEASED" << std::endl;
				if (has_mouse_wheel_changed) std::cout << "WHEEL: " << mouse_wheel_position << std::endl;
				if (is_left_button_pressed) std::cout << "LEFT STILL" << std::endl;
				if (is_right_button_pressed) std::cout << "RIGHT STILL" << std::endl;
				if (was_SPACE_pressed) std::cout << "SPACE" << std::endl;
				if (was_ESC_pressed) std::cout << "ESC" << std::endl;
			}

			if (do_output_2D)
			{
				std::cout << frame_timestamp;
				for (unsigned int p = 0; p < points_2D_left.size(); p++)
					std::cout << "\t" << points_2D_left[p].x << "\t" << points_2D_left[p].y;
				std::cout << std::endl;

				std::cout << frame_timestamp;
				for (unsigned int p = 0; p < points_2D_right.size(); p++)
					std::cout << "\t" << points_2D_right[p].x << "\t" << points_2D_right[p].y;
				std::cout << std::endl;
			}

			if (do_output_3D)
			{
				std::cout << frame_timestamp;
				for (int p = 0; p < points_3D.cols; p++)
					std::cout << "\t" << points_3D.at<float>(0,p) << "\t" << points_3D.at<float>(1,p)
					          << "\t" << points_3D.at<float>(2,p);
				std::cout << std::endl;
			}

			if (do_output_object)
			{
				std::cout << frame_timestamp;
				for (int r = 0; r < m_track.num_templates; r++)
				{
					cv::Mat rodrigues_orientation = cv::Mat::zeros(3, 1, CV_32F);
					if (countNonZero(RT_template_leftcam[r]))
						Rodrigues(RT_template_leftcam[r](cv::Range(0,3),cv::Range(0,3)), rodrigues_orientation);

					int last_col = RT_template_leftcam[r].size.p[0] - 1;
					std::cout << "\t" << RT_template_leftcam[r].at<float>(0,last_col)
					          << "\t" << RT_template_leftcam[r].at<float>(1,last_col)
					          << "\t" << RT_template_leftcam[r].at<float>(2,last_col)
					          << "\t" << rodrigues_orientation.at<float>(0,0)
					          << "\t" << rodrigues_orientation.at<float>(1,0)
					          << "\t" << rodrigues_orientation.at<float>(2,0);
					//std::cout << std::endl << "avg_dev = " << avg_dev[r];
				}
				std::cout << std::endl;
			}

			if (do_output_virt_point)
			{
				std::cout << frame_timestamp;
				for (int r = 0; r < m_track.num_templates; r++)
				{
					cv::Mat RT_virt_point_to_leftcam = cv::Mat::zeros(4, 4, CV_32F);
					cv::Mat rodrigues_orientation = cv::Mat::zeros(3, 1, CV_32F);
					if (countNonZero(RT_template_leftcam[r]) &&
					    countNonZero(m_track.RT_virt_point_to_template[r] - cv::Mat::eye(4, 4, CV_32F)))
					{
						RT_virt_point_to_leftcam = RT_template_leftcam[r] * m_track.RT_virt_point_to_template[r];
						Rodrigues(RT_virt_point_to_leftcam(cv::Range(0,3),cv::Range(0,3)), rodrigues_orientation);
					}

					int last_col = RT_virt_point_to_leftcam.size.p[0] - 1;
					std::cout << "\t" << RT_virt_point_to_leftcam.at<float>(0,last_col)
					          << "\t" << RT_virt_point_to_leftcam.at<float>(1,last_col)
					          << "\t" << RT_virt_point_to_leftcam.at<float>(2,last_col)
					          << "\t" << rodrigues_orientation.at<float>(0,0)
					          << "\t" << rodrigues_orientation.at<float>(1,0)
					          << "\t" << rodrigues_orientation.at<float>(2,0);
				}
				std::cout << std::endl;
			}

			// -----------------------------------------------------------------------------
			// Log
			// -----------------------------------------------------------------------------
			if (do_log_2D)
			{
				log_2D_left << frame_timestamp;
				for (unsigned int p = 0; p < points_2D_left.size(); p++)
					log_2D_left << "\t" << points_2D_left[p].x << "\t" << points_2D_left[p].y;
				log_2D_left << std::endl;

				log_2D_right << frame_timestamp;
				for (unsigned int p = 0; p < points_2D_right.size(); p++)
					log_2D_right << "\t" << points_2D_right[p].x << "\t" << points_2D_right[p].y;
				log_2D_right << std::endl;
			}

			if (do_log_3D)
			{
				log_3D << frame_timestamp;
				for (int p = 0; p < points_3D.cols; p++)
					log_3D << "\t" << points_3D.at<float>(0,p) << "\t" << points_3D.at<float>(1,p)
					       << "\t" << points_3D.at<float>(2,p);
				log_3D << std::endl;
			}

			if (do_log_object)
			{
				log_object << frame_timestamp;
				for (int r = 0; r < m_track.num_templates; r++)
				{
					cv::Mat rodrigues_orientation = cv::Mat::zeros(3, 1, CV_32F);
					if (countNonZero(RT_template_leftcam[r]))
						Rodrigues(RT_template_leftcam[r](cv::Range(0,3),cv::Range(0,3)), rodrigues_orientation);

					int last_col = RT_template_leftcam[r].size.p[0] - 1;
					log_object << "\t" << RT_template_leftcam[r].at<float>(0,last_col)
					           << "\t" << RT_template_leftcam[r].at<float>(1,last_col)
					           << "\t" << RT_template_leftcam[r].at<float>(2,last_col)
					           << "\t" << rodrigues_orientation.at<float>(0,0)
					           << "\t" << rodrigues_orientation.at<float>(1,0)
					           << "\t" << rodrigues_orientation.at<float>(2,0);
					//log_object << std::endl << "avg_dev = " << avg_dev[r];
				}
				log_object << std::endl;
			}

			if (do_log_virt_point)
			{
				log_virt_point << frame_timestamp;
				for (int r = 0; r < m_track.num_templates; r++)
				{
					cv::Mat RT_virt_point_to_leftcam = cv::Mat::zeros(4, 4, CV_32F);
					cv::Mat rodrigues_orientation = cv::Mat::zeros(3, 1, CV_32F);
					if (countNonZero(RT_template_leftcam[r]) &&
					    countNonZero(m_track.RT_virt_point_to_template[r] - cv::Mat::eye(4, 4, CV_32F)))
					{
						RT_virt_point_to_leftcam = RT_template_leftcam[r] * m_track.RT_virt_point_to_template[r];
						Rodrigues(RT_virt_point_to_leftcam(cv::Range(0,3),cv::Range(0,3)), rodrigues_orientation);
					}

					int last_col = RT_virt_point_to_leftcam.size.p[0] - 1;
					log_virt_point << "\t" << RT_virt_point_to_leftcam.at<float>(0,last_col)
					               << "\t" << RT_virt_point_to_leftcam.at<float>(1,last_col)
					               << "\t" << RT_virt_point_to_leftcam.at<float>(2,last_col)
					               << "\t" << rodrigues_orientation.at<float>(0,0)
					               << "\t" << rodrigues_orientation.at<float>(1,0)
					               << "\t" << rodrigues_orientation.at<float>(2,0);
				}
				log_virt_point << std::endl;
			}

			if (do_log_video)
				stereo_camera->recordFrame();
		}

		// ---------------------------------------------------------------------------------
		// Capture stereo frame
		// ---------------------------------------------------------------------------------
		if (do_log_frame && (((input_device_src == "m") && was_left_button_pressed)
		                  || ((input_device_src == "k") && was_SPACE_pressed)))
		{
			std::string save_file;
			save_file = (boost::format("%s%03i.jpg") % log_frame_left_prefix % capture_counter).str();
			cv::imwrite(save_file, image_left);
			save_file = (boost::format("%s%03i.jpg") % log_frame_right_prefix % capture_counter).str();
			cv::imwrite(save_file, image_right);

			if (do_debugging)
				std::cout << frame_timestamp << "\t" << "Frame captured." << std::endl;

			capture_counter++;
		}

		// ---------------------------------------------------------------------------------
		// Visualize stereo frame with detected points
		// ---------------------------------------------------------------------------------
		if (do_show_graphics && !(input_src == "t"))
		{
			// needed, as changing image content (costs 0.5-1.5 [ms])
			cv::Mat image_left_cpy, image_right_cpy;
			image_left.copyTo(image_left_cpy);
			image_right.copyTo(image_right_cpy);

			for (unsigned int p=0; p < points_2D_left.size(); p++)
				cv::circle(image_left_cpy, points_2D_left[p], 2, cv::Scalar(0), 1, CV_AA, 0);
			for (unsigned int p=0; p < points_2D_right.size(); p++)
				cv::circle(image_right_cpy, points_2D_right[p], 2, cv::Scalar(0), 1, CV_AA, 0);

			cv::Mat object_rotation(3, 1, CV_32F);
			cv::Mat object_translation(3, 1, CV_32F);
			cv::vector<cv::Point2f> object_2D;
			for (int r = 0; r < m_track.num_templates; r++)
			{
				if (avg_dev[r] < std::numeric_limits<float>::infinity())
				{
					Rodrigues(RT_template_leftcam[r](cv::Range(0,3),cv::Range(0,3)), object_rotation);
					object_translation = RT_template_leftcam[r](cv::Range(0,3),cv::Range(3,4)).t();

					cv::vector<cv::Point3f> object_points;
					object_points.push_back(cv::Point3f(RT_template_leftcam[r].at<float>(0,3),
							RT_template_leftcam[r].at<float>(1,3), RT_template_leftcam[r].at<float>(2,3)));

					projectPoints(cv::Mat(object_points), cv::Mat::zeros(3,1,CV_32F), cv::Mat::zeros(3,1,CV_32F),
							m_track.KK_left, m_track.kc_left, object_2D);
					cv::circle(image_left_cpy, object_2D[0], 4, cv::Scalar(255,255,255), 1, CV_AA, 0);
					cv::circle(image_left_cpy, object_2D[0], 3, cv::Scalar(0,0,150), 1, CV_AA, 0);

					projectPoints(cv::Mat(object_points), m_track.om_leftcam_to_rightcam, m_track.T_leftcam_to_rightcam,
							m_track.KK_right, m_track.kc_right, object_2D);
					cv::circle(image_right_cpy, object_2D[0], 4, cv::Scalar(255,255,255), 1, CV_AA, 0);
					cv::circle(image_right_cpy, object_2D[0], 3, cv::Scalar(0,0,150), 1, CV_AA, 0);
				}
			}

			imshow("Image Left", image_left_cpy);
			imshow("Image Right", image_right_cpy);
			cv::waitKey(1);
		}

		// ---------------------------------------------------------------------------------
		// END MEASURE of the computation time (of one cycle)
		// ---------------------------------------------------------------------------------
		if (do_debugging)
		{
			end_time = boost::posix_time::microsec_clock::universal_time();
			boost::posix_time::time_duration time_diff = end_time - start_time;
			std::cout << "comp_time = " << time_diff.total_microseconds() << " [us]" << std::endl;
			start_time = boost::posix_time::microsec_clock::universal_time();
		}
	} //end MAIN LOOP

	if (log_2D_left.is_open()) log_2D_left.close();
	if (log_2D_right.is_open()) log_2D_right.close();
	if (log_3D.is_open()) log_3D.close();
	if (log_object.is_open()) log_object.close();
	if (log_virt_point.is_open()) log_virt_point.close();

	stereo_camera->closeCam();

	std::cerr << "PRESS A KEY TO EXIT";
	cv::destroyAllWindows(); cv::waitKey(1); std::cin.get();
	return 0;
}
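cv::FileStorage reads the run parameters above from an OpenCV XML/YAML file. A minimal sketch (not part of the original program) that writes a valid skeleton for config_run_parameters.xml; the values are illustrative assumptions, and the real file must define every key the validation block above checks:

#include <opencv2/core/core.hpp>

int writeExampleRunParameters()
{
    cv::FileStorage fs("config_run_parameters.xml", cv::FileStorage::WRITE);
    if (!fs.isOpened())
        return -1;
    fs << "do_use_kalman_filter" << 1;
    fs << "do_interactive_mode" << 0;
    fs << "multicast_port" << 30000;            // illustrative value
    fs << "multicast_adress" << "239.255.0.1";  // key spelled exactly as in the code
    fs << "input_device_src" << "k";            // m: Mouse, k: Keyboard
    fs << "input_src" << "o";                   // b: Basler, o: OpenCV camera, v: video files, t: 2D point files
    // ... the remaining do_* flags, device ids and log file names go here ...
    fs.release();
    return 0;
}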
void DrawAxis::openWebcam()
{
    webcam.open(0);
    webcam.set(CV_CAP_PROP_FRAME_WIDTH, frameWidth);
    webcam.set(CV_CAP_PROP_FRAME_HEIGHT, frameHeight);

    rvec = Mat(Size(3, 1), CV_64F);
    tvec = Mat(Size(3, 1), CV_64F);

    cout << "intrinsic = " << intrinsic_params << endl;
    cout << "dist = " << distortion_params << endl;
    cout << "intrinsic.size = " << intrinsic_params.size() << endl;

    while (true) {
        webcam.read(webcamImage);
        bool findCorners = findChessboardCorners(webcamImage, boardSize, corners, CALIB_CB_FAST_CHECK);
        if (findCorners) {
            solvePnP(Mat(boardPoints), Mat(corners), intrinsic_params, distortion_params, rvec, tvec, false);
            projectPoints(cubePoints, rvec, tvec, intrinsic_params, distortion_params, cubeFramePoints);
            projectPoints(framePoints, rvec, tvec, intrinsic_params, distortion_params, imageFramePoints);
            drawAxis(webcamImage, color, 3);
            drawCube(webcamImage, cubeFramePoints, Scalar(255, 0, 255), 2);
        }
        namedWindow("OpenCV Webcam", 0);
        imshow("OpenCV Webcam", webcamImage);
        waitKey(10);
    }
}
ProjectPoints::ProjectPoints(cv::Mat image, std::vector<cv::Point2f> originalpoints,
                             std::vector<cv::Point3f> points3d, cv::Mat rotation, cv::Mat translation,
                             cv::Mat intrinsic, cv::Mat distor_coeff)
{
    inputimage = image.clone();
    originpoints = originalpoints;
    projectPoints(points3d, rotation, translation, intrinsic, distor_coeff, projectpoints);
}
void ofxRGBDRenderer::generateTextureCoordinates(){
	if(!calibrationSetup){
		ofLogError("ofxRGBDRenderer::generateTextureCoordinates -- no calibration set up");
		return;
	}

	// make sure one texture coordinate exists per depth pixel (640x480)
	if(!simpleMesh.hasTexCoords()){
		for (int i = 0; i < 640*480; i++){
			simpleMesh.addTexCoord(ofVec2f(0,0));
		}
	}

	Mat pcMat = Mat(toCv(simpleMesh));
	imagePoints.clear();
	projectPoints(pcMat,
	              rotationDepthToRGB, translationDepthToRGB,
	              rgbCalibration.getDistortedIntrinsics().getCameraMatrix(),
	              rgbCalibration.getDistCoeffs(),
	              imagePoints);

	for(int i = 0; i < imagePoints.size(); i++) {
		//TODO account for fudge factor that the shader listens to
		simpleMesh.setTexCoord(i, ofVec2f(imagePoints[i].x, imagePoints[i].y));
	}
}
void CvDrawingUtils::draw3dAxis(cv::Mat &Image, Board &B, const CameraParameters &CP)
{
    Mat objectPoints(4, 3, CV_32FC1);
    objectPoints.at<float>(0,0)=0;            objectPoints.at<float>(0,1)=0;            objectPoints.at<float>(0,2)=0;
    objectPoints.at<float>(1,0)=2*B[0].ssize; objectPoints.at<float>(1,1)=0;            objectPoints.at<float>(1,2)=0;
    objectPoints.at<float>(2,0)=0;            objectPoints.at<float>(2,1)=2*B[0].ssize; objectPoints.at<float>(2,2)=0;
    objectPoints.at<float>(3,0)=0;            objectPoints.at<float>(3,1)=0;            objectPoints.at<float>(3,2)=2*B[0].ssize;

    vector<Point2f> imagePoints;
    projectPoints(objectPoints, B.Rvec, B.Tvec, CP.CameraMatrix, CP.Distorsion, imagePoints);

    //draw lines of different colours
    cv::line(Image, imagePoints[0], imagePoints[1], Scalar(0,0,255,255), 2, CV_AA);
    cv::line(Image, imagePoints[0], imagePoints[2], Scalar(0,255,0,255), 2, CV_AA);
    cv::line(Image, imagePoints[0], imagePoints[3], Scalar(255,0,0,255), 2, CV_AA);
    putText(Image, "X", imagePoints[1], FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,255,255), 2);
    putText(Image, "Y", imagePoints[2], FONT_HERSHEY_SIMPLEX, 1, Scalar(0,255,0,255), 2);
    putText(Image, "Z", imagePoints[3], FONT_HERSHEY_SIMPLEX, 1, Scalar(255,0,0,255), 2);
}
static double computeReprojectionErrors(
        const vector<vector<Point3f> >& objectPoints,
        const vector<vector<Point2f> >& imagePoints,
        const vector<Mat>& rvecs, const vector<Mat>& tvecs,
        const Mat& cameraMatrix, const Mat& distCoeffs,
        vector<float>& perViewErrors)
{
    vector<Point2f> imagePoints2;
    int i, totalPoints = 0;
    double totalErr = 0, err;
    perViewErrors.resize(objectPoints.size());

    for (i = 0; i < (int)objectPoints.size(); i++)
    {
        projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2);
        err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
        int n = (int)objectPoints[i].size();
        perViewErrors[i] = (float)std::sqrt(err*err/n);
        totalErr += err*err;
        totalPoints += n;
    }

    return std::sqrt(totalErr/totalPoints);
}
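The routine above is the standard OpenCV-sample way of scoring a calibration: re-project each view's object points with the estimated pose and intrinsics, then compare against the detected image points. A minimal, self-contained sketch of feeding it (same translation unit; the grid, intrinsics, and pose below are illustrative assumptions, so the residual is zero by construction):

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <cstdio>
using namespace cv;
using namespace std;

static void demoReprojectionError()
{
    // one synthetic view of a 3x3 planar grid (assumed data, not from the original source)
    vector<vector<Point3f> > objectPoints(1);
    for (int y = 0; y < 3; y++)
        for (int x = 0; x < 3; x++)
            objectPoints[0].push_back(Point3f((float)x, (float)y, 0.f));

    Mat cameraMatrix = (Mat_<double>(3,3) << 500, 0, 320, 0, 500, 240, 0, 0, 1);
    Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
    Mat tvec = (Mat_<double>(3,1) << 0, 0, 10);
    vector<Mat> rvecs(1, Mat::zeros(3, 1, CV_64F));
    vector<Mat> tvecs(1, tvec);

    // "detected" image points generated by the same model -> zero residual
    vector<vector<Point2f> > imagePoints(1);
    projectPoints(objectPoints[0], rvecs[0], tvecs[0], cameraMatrix, distCoeffs, imagePoints[0]);

    vector<float> perViewErrors;
    double rms = computeReprojectionErrors(objectPoints, imagePoints, rvecs, tvecs,
                                           cameraMatrix, distCoeffs, perViewErrors);
    printf("RMS reprojection error: %f\n", rms);
}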
void testApp::updateColors()
{
    imagePoints.clear();

    // rotate, translate the points to fit the colorCalibration perspective
    // and project them onto the colorCalibration image space
    // and undistort them
    projectPoints(Mat(pointCloud),
                  rotationKinectToColor, translationKinectToColor,
                  colorCalibration.getDistortedIntrinsics().getCameraMatrix(),
                  colorCalibration.getDistCoeffs(),
                  imagePoints);

    // get the color at each of the projectedPoints inside curColor
    // add them into pointCloudColors
    pointCloudColors.clear();
    int w = curColor.getWidth();
    int h = curColor.getHeight();
    int n = w * h;
    unsigned char* pixels = curColor.getPixels();
    for (int i = 0; i < imagePoints.size(); i++) {
        int j = (int) imagePoints[i].y * w + (int) imagePoints[i].x;
        if (j < 0 || j >= n) {
            pointCloudColors.push_back(Point3f(1, 1, 1));
        } else {
            j *= 3;
            pointCloudColors.push_back(Point3f(pixels[j + 0] / 255.f,
                                               pixels[j + 1] / 255.f,
                                               pixels[j + 2] / 255.f));
        }
    }
}
void CvDrawingUtils::draw3dCube(cv::Mat &Image, Board &B, const CameraParameters &CP)
{
    float cubeSize = B[0].ssize;
    float txz = -cubeSize/2;

    Mat objectPoints(8, 3, CV_32FC1);
    objectPoints.at<float>(0,0)=txz;          objectPoints.at<float>(0,1)=0;        objectPoints.at<float>(0,2)=txz;
    objectPoints.at<float>(1,0)=txz+cubeSize; objectPoints.at<float>(1,1)=0;        objectPoints.at<float>(1,2)=txz;
    objectPoints.at<float>(2,0)=txz+cubeSize; objectPoints.at<float>(2,1)=cubeSize; objectPoints.at<float>(2,2)=txz;
    objectPoints.at<float>(3,0)=txz;          objectPoints.at<float>(3,1)=cubeSize; objectPoints.at<float>(3,2)=txz;
    objectPoints.at<float>(4,0)=txz;          objectPoints.at<float>(4,1)=0;        objectPoints.at<float>(4,2)=txz+cubeSize;
    objectPoints.at<float>(5,0)=txz+cubeSize; objectPoints.at<float>(5,1)=0;        objectPoints.at<float>(5,2)=txz+cubeSize;
    objectPoints.at<float>(6,0)=txz+cubeSize; objectPoints.at<float>(6,1)=cubeSize; objectPoints.at<float>(6,2)=txz+cubeSize;
    objectPoints.at<float>(7,0)=txz;          objectPoints.at<float>(7,1)=cubeSize; objectPoints.at<float>(7,2)=txz+cubeSize;

    vector<Point2f> imagePoints;
    projectPoints(objectPoints, B.Rvec, B.Tvec, CP.CameraMatrix, CP.Distorsion, imagePoints);

    //draw lines of different colours
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[(i+1)%4], Scalar(0,0,255,255), 1, CV_AA);
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i+4], imagePoints[4+(i+1)%4], Scalar(0,0,255,255), 1, CV_AA);
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[i+4], Scalar(0,0,255,255), 1, CV_AA);
}
double computeReprojectionErrors(InputArray points3D, InputArray points2D,
                                 InputArray cameraMatrix, InputArray distCoeffs,
                                 InputArray rvec, InputArray tvec,
                                 OutputArray _proj_points2D, vector<double> *individual_error)
{
    // set proper type for the output
    Mat x = points2D.getMat();
    Mat proj_points2D = _proj_points2D.getMat();
    proj_points2D.create(x.rows, x.cols, x.type());

    // project points
    projectPoints(points3D, rvec, tvec, cameraMatrix, distCoeffs, proj_points2D);

    // save output if it is needed (no default parameter)
    if (_proj_points2D.needed()) {
        proj_points2D.copyTo(_proj_points2D);
    }

    // return error
    return calib::norm(x, proj_points2D, individual_error);
}
Point2f project3d2d(Point2f pt, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec)
{
    vector<Point3f> p3d;
    vector<Point2f> p2d;
    p3d.push_back(Point3f(pt.x, pt.y, BASE_HEIGHT));
    projectPoints(p3d, rvec, tvec, cameraMatrix, distCoeffs, p2d);
    return p2d[0];
}
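A hypothetical call site for the helper above; the intrinsics, the pose, and the reading of BASE_HEIGHT as the ground-plane height are assumptions for illustration, not values from the original source:

// assumes project3d2d() and BASE_HEIGHT from the snippet above are in scope
static Point2f demoProject3d2d()
{
    Mat cameraMatrix = (Mat_<double>(3, 3) << 700, 0, 320, 0, 700, 240, 0, 0, 1); // illustrative intrinsics
    Mat distCoeffs = Mat::zeros(5, 1, CV_64F);     // no lens distortion
    Mat rvec = Mat::zeros(3, 1, CV_64F);           // camera looking straight down the z axis
    Mat tvec = (Mat_<double>(3, 1) << 0, 0, 1000); // plane 1000 units in front of the camera
    // maps the planar coordinate (25, 40) at z = BASE_HEIGHT into pixel coordinates
    return project3d2d(Point2f(25.f, 40.f), cameraMatrix, distCoeffs, rvec, tvec);
}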
void ConvexHull::run()
{
    // Perform the monotone chain convex hull algorithm.
    // Information on this can be found here: http://www.algorithmist.com/index.php/Monotone_Chain_Convex_Hull
    cout << "ConvexHull::run(). Starting." << endl;
    boost::posix_time::ptime start = boost::posix_time::microsec_clock::local_time();

    // Clear all the previously stored convex
    // hull points, if there are any.
    hull_indices.indices.erase(hull_indices.indices.begin(), hull_indices.indices.end());

    vector<PointNXYZIJ> hull_points;
    projectPoints();

    // Now for some convex hull specific things.
    int n = (int)projected_cloud.size();
    int k = 0;
    hull_points.resize(2*n);

    // Sort the points lexicographically.
    sort(projected_cloud.begin(), projected_cloud.end());

    // Build the lower hull.
    for (int i = 0; i < n; i++) {
        while (k >= 2 && cross(hull_points[k-2], hull_points[k-1], projected_cloud[i]) <= 0) {
            k--;
        }
        hull_points[k++] = projected_cloud[i];
    }

    // Build the upper hull.
    for (int i = n-2, t = k+1; i >= 0; i--) {
        while (k >= t && cross(hull_points[k-2], hull_points[k-1], projected_cloud[i]) <= 0) {
            k--;
        }
        hull_points[k++] = projected_cloud[i];
    }
    hull_points.resize(k);
    //sort(hull_points.begin(), hull_points.end());

    // Now we have all the hull points! So let's
    // fill the indices with the relevant positions.
    for (size_t i = 0; i < hull_points.size(); i++) {
        hull_indices.indices.emplace_back(hull_points[i].index);
    }

    boost::posix_time::ptime stop = boost::posix_time::microsec_clock::local_time();
    boost::posix_time::time_duration diff = stop - start;
    cout << "ConvexHull::run(). Ended with " << hull_indices.indices.size() << " hull points."
         << " Taken " << diff.total_microseconds() << " us." << endl;
}
bool CameraProjectorCalibration::setDynamicProjectorImagePoints(cv::Mat img)
{
    vector<cv::Point2f> chessImgPts;
    bool bPrintedPatternFound = calibrationCamera.findBoard(img, chessImgPts, true);

    if (bPrintedPatternFound) {
        cv::Mat boardRot;
        cv::Mat boardTrans;
        calibrationCamera.computeCandidateBoardPose(chessImgPts, boardRot, boardTrans);

        const auto & camCandObjPts = calibrationCamera.getCandidateObjectPoints();
        Point3f axisX = camCandObjPts[1] - camCandObjPts[0];
        Point3f axisY = camCandObjPts[calibrationCamera.getPatternSize().width] - camCandObjPts[0];
        Point3f pos = camCandObjPts[0] - axisY * (calibrationCamera.getPatternSize().width - 2);

        vector<Point3f> auxObjectPoints;
        for (int i = 0; i < calibrationProjector.getPatternSize().height; i++) {
            for (int j = 0; j < calibrationProjector.getPatternSize().width; j++) {
                auxObjectPoints.push_back(pos + axisX * float((2 * j) + (i % 2)) + axisY * i);
            }
        }

        Mat Rc1, Tc1, Rc1inv, Tc1inv, Rc2, Tc2, Rp1, Tp1, Rp2, Tp2;
        Rp1 = calibrationProjector.getBoardRotations().back();
        Tp1 = calibrationProjector.getBoardTranslations().back();
        Rc1 = calibrationCamera.getBoardRotations().back();
        Tc1 = calibrationCamera.getBoardTranslations().back();
        Rc2 = boardRot;
        Tc2 = boardTrans;

        Mat auxRinv = Mat::eye(3, 3, CV_32F);
        Rodrigues(Rc1, auxRinv);
        auxRinv = auxRinv.inv();
        Rodrigues(auxRinv, Rc1inv);
        Tc1inv = -auxRinv * Tc1;

        Mat Raux, Taux;
        composeRT(Rc2, Tc2, Rc1inv, Tc1inv, Raux, Taux);
        composeRT(Raux, Taux, Rp1, Tp1, Rp2, Tp2);

        vector<Point2f> followingPatternImagePoints;
        projectPoints(Mat(auxObjectPoints),
                      Rp2, Tp2,
                      calibrationProjector.getDistortedIntrinsics().getCameraMatrix(),
                      calibrationProjector.getDistCoeffs(),
                      followingPatternImagePoints);

        calibrationProjector.setCandidateImagePoints(followingPatternImagePoints);
    }
    return bPrintedPatternFound;
}
std::vector<cv::Point2f> Pattern::getPositions(Mat& frame, const Mat& camMatrix, const Mat& distMatrix)
{
    Mat modelPts = (Mat_<float>(4,3) <<
            0,    0,    0,
            size, 0,    0,
            size, size, 0,
            0,    size, 0);

    std::vector<cv::Point2f> model2ImagePts;
    /* project model 3D points to the image. Points through the transformation matrix
    (defined by rotVec and transVec) "are transfered" from the pattern CS to the camera CS,
    and then, points are projected using camera parameters (camera matrix, distortion matrix)
    from the camera 3D CS to its image plane */
    projectPoints(modelPts, rotVec, transVec, camMatrix, distMatrix, model2ImagePts);
    return model2ImagePts;
}
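For intuition, this is essentially what projectPoints computes for each model point when lens distortion is ignored - a sketch (not from the original source) that assumes rvec, tvec and the camera matrix K are CV_64F:

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>

static cv::Point2f projectManually(const cv::Point3f& X, const cv::Mat& rvec,
                                   const cv::Mat& tvec, const cv::Mat& K)
{
    cv::Mat R;
    cv::Rodrigues(rvec, R);                                           // 3x1 rotation vector -> 3x3 matrix
    cv::Mat Xc = R * (cv::Mat_<double>(3,1) << X.x, X.y, X.z) + tvec; // pattern CS -> camera CS
    double x = Xc.at<double>(0) / Xc.at<double>(2);                   // perspective divide
    double y = Xc.at<double>(1) / Xc.at<double>(2);
    double u = K.at<double>(0,0) * x + K.at<double>(0,2);             // fx * x + cx
    double v = K.at<double>(1,1) * y + K.at<double>(1,2);             // fy * y + cy
    return cv::Point2f((float)u, (float)v);
}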
void Pattern::draw(Mat& frame, const Mat& camMatrix, const Mat& distMatrix)
{
    CvScalar color = cvScalar(255, 255, 255);
    switch (id) {
        case 1: color = cvScalar(255, 0, 255); break;
        case 2: color = cvScalar(255, 255, 0); break;
        case 3: color = cvScalar(0, 255, 255); break;
    }

    //model 3D points: they must be projected to the image plane
    Mat modelPts = (Mat_<float>(8,3) <<
            0,    0,    0,
            size, 0,    0,
            size, size, 0,
            0,    size, 0,
            0,    0,    -size,
            size, 0,    -size,
            size, size, -size,
            0,    size, -size);

    std::vector<cv::Point2f> model2ImagePts;
    /* project model 3D points to the image. Points through the transformation matrix
    (defined by rotVec and transVec) "are transfered" from the pattern CS to the camera CS,
    and then, points are projected using camera parameters (camera matrix, distortion matrix)
    from the camera 3D CS to its image plane */
    projectPoints(modelPts, rotVec, transVec, camMatrix, distMatrix, model2ImagePts);

    //draw cube, or whatever
    int i;
    for (i = 0; i < 4; i++) {
        cv::line(frame, model2ImagePts.at(i%4), model2ImagePts.at((i+1)%4), color, 3);
    }
    for (i = 4; i < 7; i++) {
        cv::line(frame, model2ImagePts.at(i%8), model2ImagePts.at((i+1)%8), color, 3);
    }
    cv::line(frame, model2ImagePts.at(7), model2ImagePts.at(4), color, 3);
    for (i = 0; i < 4; i++) {
        cv::line(frame, model2ImagePts.at(i), model2ImagePts.at(i+4), color, 3);
    }
    //draw the line that reflects the orientation. It indicates the bottom side of the pattern
    cv::line(frame, model2ImagePts.at(2), model2ImagePts.at(3), cvScalar(80, 255, 80), 3);

    model2ImagePts.clear();
}
void projectPoints(const image_geometry::PinholeCameraModel &cam_model,
                   const vector<cv::Point3d> &xyz, vector<cv::Point2d> *points2D)
{
    size_t n = xyz.size();
    points2D->clear();
    points2D->reserve(n);
    for (size_t i = 0; i < n; i++) {
        cv::Point2d current_pt;
        projectPoints(cam_model, xyz[i], &current_pt);
        points2D->push_back(current_pt);
    }
}
vector<Point2f> CameraProjectorCalibration::getProjected(const vector<Point3f> & pts,
                                                         const cv::Mat & rotObjToCam,
                                                         const cv::Mat & transObjToCam)
{
    cv::Mat rotObjToProj, transObjToProj;
    cv::composeRT(rotObjToCam, transObjToCam,
                  rotCamToProj, transCamToProj,
                  rotObjToProj, transObjToProj);

    vector<Point2f> out;
    projectPoints(Mat(pts),
                  rotObjToProj, transObjToProj,
                  calibrationProjector.getDistortedIntrinsics().getCameraMatrix(),
                  calibrationProjector.getDistCoeffs(),
                  out);
    return out;
}
double Camera::reprojectionError(const std::vector< std::vector<cv::Point3f> >& objectPoints,
                                 const std::vector< std::vector<cv::Point2f> >& imagePoints,
                                 const std::vector<cv::Mat>& rvecs, const std::vector<cv::Mat>& tvecs,
                                 cv::OutputArray _perViewErrors) const
{
    int imageCount = objectPoints.size();
    size_t pointsSoFar = 0;
    double totalErr = 0.0;

    bool computePerViewErrors = _perViewErrors.needed();
    cv::Mat perViewErrors;
    if (computePerViewErrors) {
        _perViewErrors.create(imageCount, 1, CV_64F);
        perViewErrors = _perViewErrors.getMat();
    }

    for (int i = 0; i < imageCount; ++i) {
        size_t pointCount = imagePoints.at(i).size();
        pointsSoFar += pointCount;

        std::vector<cv::Point2f> estImagePoints;
        projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i), estImagePoints);

        double err = 0.0;
        for (size_t j = 0; j < imagePoints.at(i).size(); ++j) {
            err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j));
        }
        if (computePerViewErrors) {
            perViewErrors.at<double>(i) = err / pointCount;
        }
        totalErr += err;
    }

    return totalErr / pointsSoFar;
}
void CvDrawingUtils::draw3dCube(cv::Mat &Image, Marker &m, const CameraParameters &CP)
{
    Mat objectPoints(8, 3, CV_32FC1);
    double halfSize = m.ssize/2;
    objectPoints.at<float>(0,0)=-halfSize; objectPoints.at<float>(0,1)=0;       objectPoints.at<float>(0,2)=-halfSize;
    objectPoints.at<float>(1,0)=halfSize;  objectPoints.at<float>(1,1)=0;       objectPoints.at<float>(1,2)=-halfSize;
    objectPoints.at<float>(2,0)=halfSize;  objectPoints.at<float>(2,1)=0;       objectPoints.at<float>(2,2)=halfSize;
    objectPoints.at<float>(3,0)=-halfSize; objectPoints.at<float>(3,1)=0;       objectPoints.at<float>(3,2)=halfSize;
    objectPoints.at<float>(4,0)=-halfSize; objectPoints.at<float>(4,1)=m.ssize; objectPoints.at<float>(4,2)=-halfSize;
    objectPoints.at<float>(5,0)=halfSize;  objectPoints.at<float>(5,1)=m.ssize; objectPoints.at<float>(5,2)=-halfSize;
    objectPoints.at<float>(6,0)=halfSize;  objectPoints.at<float>(6,1)=m.ssize; objectPoints.at<float>(6,2)=halfSize;
    objectPoints.at<float>(7,0)=-halfSize; objectPoints.at<float>(7,1)=m.ssize; objectPoints.at<float>(7,2)=halfSize;

    vector<Point2f> imagePoints;
    projectPoints(objectPoints, m.Rvec, m.Tvec, CP.CameraMatrix, CP.Distorsion, imagePoints);

    //draw lines of different colours
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[(i+1)%4], Scalar(0,0,255,255), 1, CV_AA);
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i+4], imagePoints[4+(i+1)%4], Scalar(0,0,255,255), 1, CV_AA);
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[i+4], Scalar(0,0,255,255), 1, CV_AA);
}
// Function that projects the RGB orientation vectors back onto the image
void Visualization::projectOrientationVectorsOnImage(cv::Mat &image,
        const std::vector<cv::Point3f> points_to_project,
        const cv::Mat camera_matrix_K,
        const std::vector<double> camera_distortion_coeffs)
{
    std::vector<cv::Point2f> projected_points;
    // 0 rotation
    cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64F);
    // 0 translation
    cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64F);

    projectPoints(points_to_project, rvec, tvec, camera_matrix_K, camera_distortion_coeffs, projected_points);

    cv::line(image, projected_points[0], projected_points[1], CV_RGB(255, 0, 0), 2);
    cv::line(image, projected_points[0], projected_points[2], CV_RGB(0, 255, 0), 2);
    cv::line(image, projected_points[0], projected_points[3], CV_RGB(0, 0, 255), 2);
}
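A sketch of the input the helper above expects, inferred from how the projected points are used (the concrete coordinates are assumptions): element 0 is the base of the pose and elements 1-3 are the x/y/z vector tips, all already expressed in the camera frame, which is why rvec and tvec are zero.

std::vector<cv::Point3f> points_to_project;
points_to_project.push_back(cv::Point3f(0.0f, 0.0f, 1.0f)); // origin, 1 unit in front of the camera
points_to_project.push_back(cv::Point3f(0.1f, 0.0f, 1.0f)); // tip of the x orientation vector
points_to_project.push_back(cv::Point3f(0.0f, 0.1f, 1.0f)); // tip of the y orientation vector
points_to_project.push_back(cv::Point3f(0.0f, 0.0f, 1.1f)); // tip of the z orientation vector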
void Calibration::updateReprojectionError()
{
    vector<Point2f> imagePoints2;
    int totalPoints = 0;
    double totalErr = 0;

    perViewErrors.clear();
    perViewErrors.resize(objectPoints.size());

    for (int i = 0; i < (int)objectPoints.size(); i++) {
        projectPoints(Mat(objectPoints[i]), boardRotations[i], boardTranslations[i],
                      distortedIntrinsics.getCameraMatrix(), distCoeffs, imagePoints2);
        double err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
        int n = objectPoints[i].size();
        perViewErrors[i] = sqrt(err * err / n);
        totalErr += err * err;
        totalPoints += n;
        ofLog(OF_LOG_VERBOSE, "view " + ofToString(i) + " has error of " + ofToString(perViewErrors[i]));
    }

    reprojectionError = sqrt(totalErr / totalPoints);
    ofLog(OF_LOG_VERBOSE, "all views have error of " + ofToString(reprojectionError));
}
void CustomPattern::drawOrientation(InputOutputArray image, InputArray tvec, InputArray rvec,
                                    InputArray cameraMatrix, InputArray distCoeffs,
                                    double axis_length, double axis_width)
{
    Point3f ptrCtr3d = Point3f((img_roi.cols * pxSize)/2, (img_roi.rows * pxSize)/2, 0);

    vector<Point3f> axis(4);
    axis[0] = ptrCtr3d;
    axis[1] = Point3f(axis_length * pxSize, 0, 0) + ptrCtr3d;
    axis[2] = Point3f(0, axis_length * pxSize, 0) + ptrCtr3d;
    axis[3] = Point3f(0, 0, -axis_length * pxSize) + ptrCtr3d;

    vector<Point2f> proj_axis;
    projectPoints(axis, rvec, tvec, cameraMatrix, distCoeffs, proj_axis);

    Mat img = image.getMat();
    line(img, proj_axis[0], proj_axis[1], CV_RGB(255, 0, 0), axis_width);
    line(img, proj_axis[0], proj_axis[2], CV_RGB(0, 255, 0), axis_width);
    line(img, proj_axis[0], proj_axis[3], CV_RGB(0, 0, 255), axis_width);
    img.copyTo(image);
}
RotatedRect createArenaMask(float x_rad, float y_rad, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec)
{
    Point2f center(0, 0);
    Point3f ellipsePath;
    vector<Point3f> c3d;
    vector<Point2f> c2d;
    RotatedRect ellipseMask;

    // The loop runs in radians, so the quadrant checks must use CV_PI/2 and 3*CV_PI/2
    // (the original thresholds of 90 and 270 were degree values) and the increment stays very small.
    for (double angle = 0; angle <= 2 * CV_PI; angle += 0.001)
    {
        if ((angle >= 0 && angle < CV_PI / 2) || (angle > 3 * CV_PI / 2 && angle <= 2 * CV_PI))
        {
            ellipsePath.x = (x_rad*y_rad) / (sqrt((y_rad*y_rad) + x_rad*x_rad*tan(angle)*tan(angle)));
            ellipsePath.y = (x_rad*y_rad*tan(angle)) / (sqrt((y_rad*y_rad) + x_rad*x_rad*tan(angle)*tan(angle)));
            ellipsePath.z = BASE_HEIGHT;
            c3d.push_back(ellipsePath);
        }
        if (angle > CV_PI / 2 && angle < 3 * CV_PI / 2)
        {
            ellipsePath.x = -(x_rad*y_rad) / (sqrt((y_rad*y_rad) + x_rad*x_rad*tan(angle)*tan(angle)));
            ellipsePath.y = -(x_rad*y_rad*tan(angle)) / (sqrt((y_rad*y_rad) + x_rad*x_rad*tan(angle)*tan(angle)));
            ellipsePath.z = BASE_HEIGHT;
            c3d.push_back(ellipsePath);
        }
    }

    projectPoints(c3d, rvec, tvec, cameraMatrix, distCoeffs, c2d);
    ellipseMask = fitEllipse(c2d);
    return ellipseMask;
}
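The tan()-based quadrant logic above reconstructs x = a*cos(t), y = b*sin(t) the hard way. An equivalent, simpler sampling of the same ellipse (a sketch, not the original code) avoids the branches and the singularities near t = pi/2 and 3*pi/2 entirely:

std::vector<cv::Point3f> c3d;
for (double t = 0; t <= 2 * CV_PI; t += 0.001)
    c3d.push_back(cv::Point3f((float)(x_rad * cos(t)), (float)(y_rad * sin(t)), BASE_HEIGHT));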
void OrganizedPlaneSegmentor::operator()(const PointCloudTypePtr &input, std::vector<PlaneType> &planes)
{
    OrganizedPlaneSegmentResult segment_result;
    segment(input, segment_result);
//    cout << BOLDWHITE << "OMPS planes = " << BOLDCYAN << segment_result.regions.size() << RESET << endl;

    // convert format
    for (int i = 0; i < segment_result.regions.size(); i++) {
        pcl::ModelCoefficients &coef = segment_result.model_coeffs[i];
        pcl::PointIndices &indices = segment_result.inlier_indices[i];
        pcl::PlanarRegion<PointType> &pr = segment_result.regions[i];
        pcl::PointIndices &boundary = segment_result.boundary_indices[i];
        //
        PlaneType plane;
        Eigen::Vector3f centroid = pr.getCentroid();
        plane.centroid.x = centroid[0];
        plane.centroid.y = centroid[1];
        plane.centroid.z = centroid[2];
        plane.coefficients[0] = coef.values[0];
        plane.coefficients[1] = coef.values[1];
        plane.coefficients[2] = coef.values[2];
        plane.coefficients[3] = coef.values[3];
        plane.sigmas[0] = 0.008;
        plane.sigmas[1] = 0.008;
        plane.sigmas[2] = 0.008;
        plane.inlier = indices.indices;
        plane.boundary_inlier = boundary.indices;
        plane.hull_inlier = boundary.indices;
        projectPoints(*input, plane.inlier, plane.coefficients, *(plane.cloud));
//        getPointCloudFromIndices( input, plane.inlier, plane.cloud );
//        getPointCloudFromIndices( input, plane.boundary_inlier, plane.cloud_boundary );
//        getPointCloudFromIndices( input, plane.hull_inlier, plane.cloud_hull );
        //
        planes.push_back(plane);
    }
}
/* Post: fill _err with projection errors */
void computeError(InputArray _m1, InputArray _m2, InputArray _model, OutputArray _err) const
{
    Mat opoints = _m1.getMat(), ipoints = _m2.getMat(), model = _model.getMat();

    int i, count = opoints.checkVector(3);
    Mat _rvec = model.col(0);
    Mat _tvec = model.col(1);

    Mat projpoints(count, 2, CV_32FC1);
    projectPoints(opoints, _rvec, _tvec, cameraMatrix, distCoeffs, projpoints);

    const Point2f* ipoints_ptr = ipoints.ptr<Point2f>();
    const Point2f* projpoints_ptr = projpoints.ptr<Point2f>();

    _err.create(count, 1, CV_32FC1);
    float* err = _err.getMat().ptr<float>();

    for (i = 0; i < count; ++i)
        err[i] = (float)norm(Matx21f(ipoints_ptr[i] - projpoints_ptr[i]), NORM_L2SQR);
}
void CameraProjectorCalibration::stereoCalibrate()
{
    const auto & objectPoints = calibrationProjector.getObjectPoints();

    vector<vector<cv::Point2f> > auxImagePointsCamera;
    for (int i = 0; i < objectPoints.size(); i++) {
        vector<cv::Point2f> auxImagePoints;
        projectPoints(cv::Mat(objectPoints[i]),
                      calibrationCamera.getBoardRotations()[i],
                      calibrationCamera.getBoardTranslations()[i],
                      calibrationCamera.getDistortedIntrinsics().getCameraMatrix(),
                      calibrationCamera.getDistCoeffs(),
                      auxImagePoints);
        auxImagePointsCamera.push_back(auxImagePoints);
    }

    cv::Mat projectorMatrix = calibrationProjector.getDistortedIntrinsics().getCameraMatrix();
    cv::Mat projectorDistCoeffs = calibrationProjector.getDistCoeffs();
    cv::Mat cameraMatrix = calibrationCamera.getDistortedIntrinsics().getCameraMatrix();
    cv::Mat cameraDistCoeffs = calibrationCamera.getDistCoeffs();

    cv::Mat fundamentalMatrix, essentialMatrix;
    cv::Mat rotation3x3;
    cv::stereoCalibrate(objectPoints,
                        auxImagePointsCamera,
                        calibrationProjector.imagePoints,
                        cameraMatrix, cameraDistCoeffs,
                        projectorMatrix, projectorDistCoeffs,
                        calibrationCamera.getDistortedIntrinsics().getImageSize(),
                        rotation3x3, transCamToProj,
                        essentialMatrix, fundamentalMatrix);
    cv::Rodrigues(rotation3x3, rotCamToProj);
}
PERF_TEST_P(DevInfo, projectPoints, testing::ValuesIn(devices()))
{
    DeviceInfo devInfo = GetParam();
    setDevice(devInfo.deviceID());

    Mat src_host(1, 10000, CV_32FC3);
    declare.in(src_host, WARMUP_RNG);

    GpuMat src(src_host);
    GpuMat dst;

    declare.time(0.5).iterations(100);

    SIMPLE_TEST_CYCLE()
    {
        projectPoints(src, Mat::ones(1, 3, CV_32FC1), Mat::ones(1, 3, CV_32FC1), Mat::ones(3, 3, CV_32FC1), Mat(), dst);
    }

    Mat dst_host(dst);
    SANITY_CHECK(dst_host);
}
void Projector::reproject(bool gpuView)
{
    libfreenect2::Registration* registration =
            new libfreenect2::Registration(_dev->getIrCameraParams(), _dev->getColorCameraParams());
    libfreenect2::Frame undistorted(512, 424, 4), registered(512, 424, 4);
    libfreenect2::FrameMap frames;

    SimpleViewer viewer;
    bool shutdown = false;
    cv::Mat board(480, 640, CV_8UC4, cv::Scalar::all(255));

    if (!gpuView) {
        cv::namedWindow("reprojection", CV_WINDOW_NORMAL);
        cv::moveWindow("reprojection", 1200, 0);
        cv::setWindowProperty("reprojection", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    } else {
        viewer.setSize(480, 640); // TO-DO change resolution
        viewer.initialize();
        libfreenect2::Frame b(640, 480, 4);
        b.data = board.data;
        viewer.addFrame("RGB", &b);
        shutdown = shutdown || viewer.render();
    }

    while (!shutdown) {
        board = cv::Mat(480, 640, CV_8UC4, cv::Scalar::all(255));
        std::vector<cv::Point3f> wrldSrc;

        if (!gpuView)
            cv::imshow("reprojection", board);

        (_listener)->waitForNewFrame(frames);
        libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
        libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];
        registration->apply(rgb, depth, &undistorted, &registered, true, NULL, NULL);

        for (int i = 0; i < 512; i++) {
            for (int j = 0; j < 424; j++) {
                float x = 0, y = 0, z = 0, color = 0;
                registration->getPointXYZRGB(&undistorted, &registered, i, j, x, y, z, color);
                if (z > 0.5 && z < 1.7) {
                    x = static_cast<float>(x + right / ((double)640.0)); //////////TO-DO fix that
                    y = static_cast<float>(y + up / ((double)480.0));
                    x -= 0.5;
                    y -= 0.5;
                    double PI = 3.14159265;
                    // rotate around the centre; both components must use the
                    // pre-rotation coordinates, hence the temporaries
                    float x_rot = static_cast<float>(std::cos(rotX * PI / 180) * x - std::sin(rotX * PI / 180) * y);
                    float y_rot = static_cast<float>(std::sin(rotX * PI / 180) * x + std::cos(rotX * PI / 180) * y);
                    x = x_rot + 0.5f;
                    y = y_rot + 0.5f;
                    wrldSrc.push_back(cv::Point3f(x * 100, y * 100, z * 100));
                }
            }
        }

        if (wrldSrc.size() > 0) {
            std::vector<cv::Point2f> projected = projectPoints(wrldSrc);
            for (int i = 0; i < projected.size(); i++) {
                if (480 - projected[i].x > 0 && projected[i].y > 0 &&
                    480 - projected[i].x < 475 && projected[i].y < 630) {
                    cv::Mat ROI = board(cv::Rect(static_cast<int>(projected[i].y),
                                                 static_cast<int>(480 - projected[i].x), 2, 2));
                    ROI.setTo(cv::Scalar(100, 100, 150, 100));
                }
            }
            if (!gpuView)
                imshow("reprojection", board);
            else {
                libfreenect2::Frame b(640, 480, 4);
                b.data = board.data;
                viewer.addFrame("RGB", &b);
                shutdown = shutdown || viewer.render();
            }
        }
        (_listener)->release(frames);

        if (!gpuView) {
            int op = cv::waitKey(50);
            if (op == 100 || (char)(op) == 'd') right -= 1;
            if (op == 115 || (char)(op) == 's') up += 1;
            if (op == 97  || (char)(op) == 'a') right += 1;
            if (op == 119 || (char)(op) == 'w') up -= 1;
            if (op == 114 || (char)(op) == 'r') rotX -= 0.5;
            if (op == 102 || (char)(op) == 'f') rotX += 0.5;
            if (op == 1113997 || op == 1048586 || op == 1048608 || op == 10 || op == 32) {
                std::cout << "right = " << right << ";\nup = " << up << ";\nrotX = " << rotX << ";\n";
                break;
            }
        } else {
            right = viewer.offsetX;
            up = viewer.offsetY;
            rotX = viewer.rot;
        }
    }

    if (!gpuView)
        cv::destroyWindow("reprojection");
    else {
        viewer.stopWindow();
    }
}
vector<Point2d> PoseEstimator::GetImageCoordinates(const vector<Point3d>& ModelCoordinates) const
{
    vector<Point2d> ImageCoordinates;
    projectPoints(ModelCoordinates, rotationMat, translationMat, cameraMat, distCoeffs, ImageCoordinates);
    return ImageCoordinates;
}
void Projector::objProjectionOffline(std::string objPath, std::string objName, bool gpuView)
{
    std::cout << "Camera init: ";
    objObject obj(objPath, objName);
    obj.loadData();
    std::cout << "DONE\n";

    cv::namedWindow("objTest", CV_WINDOW_NORMAL);
    cv::moveWindow("objTest", 0, 0);

    indices.resize(480);
    for (int i = 0; i < 480; i++)
        indices[i].resize(640);

    libfreenect2::Registration* registration = new libfreenect2::Registration(_dev->getIrCameraParams(), _dev->getColorCameraParams());
    libfreenect2::Frame undistorted(512, 424, 4), registered(512, 424, 4);
    libfreenect2::FrameMap frames;

    SimpleViewer viewer;
    bool shutdown = false;
    cv::Mat board(480, 640, CV_8UC4, cv::Scalar::all(255));
    cv::Vec3f prevNormal(-1, -1, -1);

    if (!gpuView)
    {
        cv::namedWindow("reprojection", CV_WINDOW_NORMAL);
        cv::moveWindow("reprojection", 200, 200);
        //setWindowProperty("reprojection", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    }
    else
    {
        viewer.setSize(480, 640); // TO-DO: change resolution
        viewer.initialize();
        libfreenect2::Frame b(640, 480, 4);
        b.data = board.data;
        viewer.addFrame("RGB", &b);
        shutdown = shutdown || viewer.render();
    }

    while (!shutdown)
    {
        board = cv::Mat(480, 640, CV_8UC4, cv::Scalar::all(255));
        std::vector<cv::Point3f> plnSrc;
        if (!gpuView)
            cv::imshow("reprojection", board);

        _listener->waitForNewFrame(frames);
        libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
        libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];
        registration->apply(rgb, depth, &undistorted, &registered, true, NULL, NULL);

        PlaneData pln = findRectangle(registration, &undistorted, &registered);
        if (pln.points.size() > 0)
        {
            std::vector<cv::Point2f> projected = projectPoints(pln.points);
            cv::Mat cont = cv::Mat(480, 640, CV_8UC1, cv::Scalar::all(0));
            for (size_t i = 0; i < projected.size(); i++)
            {
                // The board is indexed with row = 480 - x and col = y; keep only
                // points that land inside it (with a small safety margin).
                if (480 - projected[i].x > 0 && projected[i].y > 0 &&
                    480 - projected[i].x < 475 && projected[i].y < 630)
                {
                    cv::Mat ROI = board(cv::Rect(static_cast<int>(projected[i].y), static_cast<int>(480 - projected[i].x), 2, 2));
                    ROI.setTo(cv::Scalar(250, 100, 100, 100));
                    cont.at<uchar>(static_cast<int>(480 - projected[i].x), static_cast<int>(projected[i].y)) = 255;
                    plnSrc.push_back(pln.points[i]);
                }
            }

            // Extract the plane outline from the binary mask and draw its
            // polygon approximation onto the board.
            vector<vector<cv::Point> > contours;
            vector<cv::Vec4i> hierarchy;
            cv::GaussianBlur(cont, cont, cv::Size(7, 7), 5, 11);
            findContours(cont, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_NONE, cv::Point(0, 0));
            vector<vector<cv::Point> > contours_poly(contours.size());
            for (size_t i = 0; i < contours.size(); i++)
                cv::approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 10, true);
            for (size_t i = 0; i < contours.size(); i++)
                drawContours(board, contours_poly, static_cast<int>(i), cv::Scalar(0, 255, 0), 5); // draw each polygon, not only index 0

            // Estimate the plane normal via PCA over 300 subsampled plane points;
            // skip frames with too few points for a stable estimate.
            if (plnSrc.size() >= 300)
            {
                cv::Mat data_pts(300, 3, CV_64FC1);
                int jump = static_cast<int>(plnSrc.size()) / 300;
                for (int i = 0; i < 300; i++)
                {
                    data_pts.at<double>(i, 0) = plnSrc[i * jump].x;
                    data_pts.at<double>(i, 1) = plnSrc[i * jump].y;
                    data_pts.at<double>(i, 2) = plnSrc[i * jump].z;
                }
                cv::PCA pca_analysis(data_pts, cv::Mat(), CV_PCA_DATA_AS_ROW);
                cv::Vec3f cntr(static_cast<float>(pca_analysis.mean.at<double>(0, 0)),
                               static_cast<float>(pca_analysis.mean.at<double>(0, 1)),
                               static_cast<float>(pca_analysis.mean.at<double>(0, 2)));
                vector<cv::Point3f> eigen_vecs(2);
                vector<double> eigen_val(2);
                for (int i = 0; i < 2; ++i)
                {
                    eigen_vecs[i] = cv::Point3f(pca_analysis.eigenvectors.at<double>(i, 0),
                                                pca_analysis.eigenvectors.at<double>(i, 1),
                                                pca_analysis.eigenvectors.at<double>(i, 2));
                    eigen_val[i] = pca_analysis.eigenvalues.at<double>(i); // eigenvalues is an n x 1 column vector
                }
                // The two leading eigenvectors span the plane; their cross
                // product points along the plane normal.
                cv::Vec3f p1(static_cast<float>(eigen_vecs[0].x * eigen_val[0]),
                             static_cast<float>(eigen_vecs[0].y * eigen_val[0]),
                             static_cast<float>(eigen_vecs[0].z * eigen_val[0]));
                cv::Vec3f p2(static_cast<float>(eigen_vecs[1].x * eigen_val[1]),
                             static_cast<float>(eigen_vecs[1].y * eigen_val[1]),
                             static_cast<float>(eigen_vecs[1].z * eigen_val[1]));
                cv::Vec3f normal = cv::normalize(p1.cross(p2));
                //pln.center = cntr;
                pln.normal = normal;
                // Place the virtual camera relative to the plane centre, looking along the normal.
                obj.setCamera(cv::Point3f(pln.center.x, -pln.center.y, -pln.center.z + 150),
                              cv::Vec3f(pln.normal[0], pln.normal[1], pln.normal[2]));
            }

            if (!gpuView)
                imshow("reprojection", board);
            else
            {
                libfreenect2::Frame b(640, 480, 4);
                b.data = board.data;
                viewer.addFrame("RGB", &b);
                shutdown = shutdown || viewer.render();
            }
        }

        cv::Mat im = obj.render();
        cv::imshow("objTest", im);

        _listener->release(frames);

        if (!gpuView)
        {
            // Manual calibration: a/d pan horizontally, w/s vertically,
            // r/f rotate; Enter or Space prints the values and exits.
            int op = cv::waitKey(50);
            if (op == 100 || (char)op == 'd') right -= 1;
            if (op == 115 || (char)op == 's') up += 1;
            if (op == 97  || (char)op == 'a') right += 1;
            if (op == 119 || (char)op == 'w') up -= 1;
            if (op == 114 || (char)op == 'r') rotX -= 0.5;
            if (op == 102 || (char)op == 'f') rotX += 0.5;
            if (op == 1113997 || op == 1048586 || op == 1048608 || op == 10 || op == 32) // Enter/Space (plain and extended key codes)
            {
                std::cout << "right = " << right << ";\nup = " << up << ";\nrotX = " << rotX << ";\n";
                break;
            }
        }
        else
        {
            right = viewer.offsetX;
            up = viewer.offsetY;
            rotX = viewer.rot;
        }
    }

    if (!gpuView)
        cv::destroyWindow("reprojection");
    else
        viewer.stopWindow();
    cv::destroyWindow("objTest");
    delete registration; // release the registration helper allocated above
}
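// The PCA step above can be read in isolation: the two leading eigenvectors of
// the sampled 3D points span the dominant plane, and their cross product is
// the plane normal. A minimal standalone sketch of the same technique
// (planeNormalFromPCA is a hypothetical helper, not part of the original class):
static cv::Vec3f planeNormalFromPCA(const std::vector<cv::Point3f>& pts)
{
    // Pack the samples into an N x 3 matrix, one row per point.
    cv::Mat data(static_cast<int>(pts.size()), 3, CV_64FC1);
    for (int r = 0; r < data.rows; r++)
    {
        data.at<double>(r, 0) = pts[r].x;
        data.at<double>(r, 1) = pts[r].y;
        data.at<double>(r, 2) = pts[r].z;
    }
    cv::PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW);
    cv::Vec3f v0(static_cast<float>(pca.eigenvectors.at<double>(0, 0)),
                 static_cast<float>(pca.eigenvectors.at<double>(0, 1)),
                 static_cast<float>(pca.eigenvectors.at<double>(0, 2)));
    cv::Vec3f v1(static_cast<float>(pca.eigenvectors.at<double>(1, 0)),
                 static_cast<float>(pca.eigenvectors.at<double>(1, 1)),
                 static_cast<float>(pca.eigenvectors.at<double>(1, 2)));
    // The in-plane directions v0 and v1 are orthogonal, so their cross
    // product is a unit normal after normalization.
    return cv::normalize(v0.cross(v1));
}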
void Projector::showRectangle(bool gpuView)
{
    indices.resize(480);
    for (int i = 0; i < 480; i++)
        indices[i].resize(640);

    libfreenect2::Registration* registration = new libfreenect2::Registration(_dev->getIrCameraParams(), _dev->getColorCameraParams());
    libfreenect2::Frame undistorted(512, 424, 4), registered(512, 424, 4);
    libfreenect2::FrameMap frames;

    SimpleViewer viewer;
    bool shutdown = false;
    cv::Mat board(480, 640, CV_8UC4, cv::Scalar::all(255));

    if (!gpuView)
    {
        cv::namedWindow("reprojection", CV_WINDOW_NORMAL);
        cv::moveWindow("reprojection", 0, 0);
        //setWindowProperty("reprojection", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    }
    else
    {
        viewer.setSize(480, 640); // TO-DO: change resolution
        viewer.initialize();
        libfreenect2::Frame b(640, 480, 4);
        b.data = board.data;
        viewer.addFrame("RGB", &b);
        shutdown = shutdown || viewer.render();
    }

    while (!shutdown)
    {
        board = cv::Mat(480, 640, CV_8UC4, cv::Scalar::all(255));
        std::vector<cv::Point3f> wrldSrc;
        if (!gpuView)
            cv::imshow("reprojection", board);

        _listener->waitForNewFrame(frames);
        libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
        libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];
        registration->apply(rgb, depth, &undistorted, &registered, true, NULL, NULL);

        // Build a point cloud from the depth image, keeping points between
        // 0.5 m and 2.1 m and applying the manual pan/rotation calibration.
        const double PI = 3.14159265;
        for (int i = 0; i < 512; i++)
        {
            for (int j = 0; j < 424; j++)
            {
                float x = 0, y = 0, z = 0, color = 0;
                registration->getPointXYZRGB(&undistorted, &registered, i, j, x, y, z, color);
                if (z > 0.5 && z < 2.1)
                {
                    x = static_cast<float>(x + right / 640.0); // TO-DO: fix this scaling
                    y = static_cast<float>(y + up / 480.0);
                    // Rotate around the centre; the temporaries keep the second
                    // equation using the original (pre-rotation) x.
                    x -= 0.5f;
                    y -= 0.5f;
                    float xr = static_cast<float>(std::cos(rotX * PI / 180) * x - std::sin(rotX * PI / 180) * y);
                    float yr = static_cast<float>(std::sin(rotX * PI / 180) * x + std::cos(rotX * PI / 180) * y);
                    x = xr + 0.5f;
                    y = yr + 0.5f;
                    wrldSrc.push_back(cv::Point3f(x * 100, y * 100, z * 100));
                }
            }
        }

        PlaneData pln = findRectangle(registration, &undistorted, &registered);
        if (wrldSrc.size() > 0)
        {
            std::vector<cv::Point2f> projected = projectPoints(wrldSrc);
            for (size_t i = 0; i < projected.size(); i++)
            {
                if (480 - projected[i].x > 0 && projected[i].y > 0 &&
                    480 - projected[i].x < 475 && projected[i].y < 630)
                {
                    cv::Mat ROI = board(cv::Rect(static_cast<int>(projected[i].y), static_cast<int>(480 - projected[i].x), 2, 2));
                    ROI.setTo(cv::Scalar(100, 100, 150, 100));
                }
            }

            if (pln.points.size() > 0)
            {
                projected = projectPoints(pln.points);
                cv::Mat cont = cv::Mat(480, 640, CV_8UC1, cv::Scalar::all(0));
                for (size_t i = 0; i < projected.size(); i++)
                {
                    if (480 - projected[i].x > 0 && projected[i].y > 0 &&
                        480 - projected[i].x < 475 && projected[i].y < 630)
                    {
                        cv::Mat ROI = board(cv::Rect(static_cast<int>(projected[i].y), static_cast<int>(480 - projected[i].x), 2, 2));
                        ROI.setTo(cv::Scalar(250, 100, 100, 100));
                        cont.at<uchar>(static_cast<int>(480 - projected[i].x), static_cast<int>(projected[i].y)) = 255;
                        // Remember which plane point produced this board pixel.
                        indices[static_cast<int>(480 - projected[i].x)][static_cast<int>(projected[i].y)] = static_cast<int>(i);
                    }
                }

                vector<vector<cv::Point> > contours;
                vector<cv::Vec4i> hierarchy;
                cv::GaussianBlur(cont, cont, cv::Size(7, 7), 5, 11);
                findContours(cont, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_NONE, cv::Point(0, 0));
                vector<vector<cv::Point> > contours_poly(contours.size());
                for (size_t i = 0; i < contours.size(); i++)
                    cv::approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 10, true);
                for (size_t i = 0; i < contours.size(); i++)
                    drawContours(board, contours_poly, static_cast<int>(i), cv::Scalar(0, 255, 0), 5); // draw each polygon, not only index 0
            }

            if (!gpuView)
                imshow("reprojection", board);
            else
            {
                libfreenect2::Frame b(640, 480, 4);
                b.data = board.data;
                viewer.addFrame("RGB", &b);
                shutdown = shutdown || viewer.render();
            }
        }

        _listener->release(frames);

        if (!gpuView)
        {
            // Same manual calibration controls as in objProjectionOffline.
            int op = cv::waitKey(50);
            if (op == 100 || (char)op == 'd') right -= 1;
            if (op == 115 || (char)op == 's') up += 1;
            if (op == 97  || (char)op == 'a') right += 1;
            if (op == 119 || (char)op == 'w') up -= 1;
            if (op == 114 || (char)op == 'r') rotX -= 0.5;
            if (op == 102 || (char)op == 'f') rotX += 0.5;
            if (op == 1113997 || op == 1048586 || op == 1048608 || op == 10 || op == 32) // Enter/Space (plain and extended key codes)
            {
                std::cout << "right = " << right << ";\nup = " << up << ";\nrotX = " << rotX << ";\n";
                break;
            }
        }
        else
        {
            right = viewer.offsetX;
            up = viewer.offsetY;
            rotX = viewer.rot;
        }
    }

    if (!gpuView)
        cv::destroyWindow("reprojection");
    else
        viewer.stopWindow();
    delete registration; // release the registration helper allocated above
}
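// Both display loops above duplicate the same calibration key handling. A
// possible shared helper is sketched below (handleCalibrationKey is a
// hypothetical method, not part of the original class; it assumes right, up
// and rotX remain members of Projector):
//
//   // Applies one calibration keystroke; returns true when Enter or Space
//   // confirms the calibration and the caller should leave its loop.
//   bool Projector::handleCalibrationKey(int op)
//   {
//       switch (static_cast<char>(op))
//       {
//       case 'd': right -= 1;  break;
//       case 'a': right += 1;  break;
//       case 's': up += 1;     break;
//       case 'w': up -= 1;     break;
//       case 'r': rotX -= 0.5; break;
//       case 'f': rotX += 0.5; break;
//       }
//       if (op == 1113997 || op == 1048586 || op == 1048608 || op == 10 || op == 32)
//       {
//           std::cout << "right = " << right << ";\nup = " << up << ";\nrotX = " << rotX << ";\n";
//           return true;
//       }
//       return false;
//   }
//
// The per-frame handling in both loops would then reduce to:
//
//   if (handleCalibrationKey(cv::waitKey(50))) break;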