void ViewCull::setFrustum(const float & horizontalApeture, const float & verticalApeture, const float & focalLength, const float & clipNear, const float & clipFar) { BaseView::setFrustum(horizontalApeture, verticalApeture, focalLength, clipNear, clipFar); /// 1 / 30 of port width m_detailWidth = -clipFar * hfov() * .066f; }
void BaseView::frameAll(const BoundingBox & b) { Vector3F eye = b.center(); eye.z = b.getMax(2) + b.distance(0) / hfov() * .55f + 120.f; setEyePosition(eye); Matrix44F m; m.setTranslation(eye); *cameraSpaceR() = m; m.inverse(); *cameraInvSpaceR() = m; setFrustum(1.33f, 1.f, 26.2f, -1.f, -1000.f); }
/// in viewport void ViewCull::ndc(const Vector3F & cameraP, float & coordx, float & coordy) const { float d = -cameraP.z; if(d<1.f) d= 1.f; float h_max = d * hfov(); float h_min = -h_max; float v_max = h_max * aspectRatio(); float v_min = -v_max; coordx = (cameraP.x/m_overscan - h_min) / (h_max - h_min); coordy = (cameraP.y/m_overscan/(m_portAspectRatio / aspectRatio() ) - v_min) / (v_max - v_min); if(coordx < 0.f) coordx = 0.f; if(coordx > .999f) coordx = .999f; if(coordy < 0.f) coordy = 0.f; if(coordy > .999f) coordy = .999f; }
/// RealSense capture example: streams depth/color/infrared from the first
/// detected device and displays up to four streams in a 2x2 GLFW window.
/// NOTE(review): relies on globals declared elsewhere in this file
/// (buffers, align_color_to_depth, align_depth_to_color,
/// color_rectification_enabled) — confirm against the full source.
int main(int argc, char * argv[]) try
{
    rs::log_to_console(rs::log_severity::warn);
    //rs::log_to_file(rs::log_severity::debug, "librealsense.log");

    // Acquire the first connected device; bail out early if none is present
    rs::context ctx;
    if(ctx.get_device_count() == 0) throw std::runtime_error("No device detected. Is it plugged in?");
    rs::device & dev = *ctx.get_device(0);

    // Enable the primary streams at best quality. infrared2 is not available
    // on every device, so failure to enable it is deliberately swallowed.
    dev.enable_stream(rs::stream::depth, rs::preset::best_quality);
    dev.enable_stream(rs::stream::color, rs::preset::best_quality);
    dev.enable_stream(rs::stream::infrared, rs::preset::best_quality);
    try { dev.enable_stream(rs::stream::infrared2, 0, 0, rs::format::any, 0); } catch(...) {}

    // Compute field of view for each enabled stream
    for(int i = 0; i < 4; ++i)
    {
        auto stream = rs::stream(i);
        if(!dev.is_stream_enabled(stream)) continue;
        auto intrin = dev.get_stream_intrinsics(stream);
        std::cout << "Capturing " << stream << " at " << intrin.width << " x " << intrin.height;
        std::cout << std::setprecision(1) << std::fixed << ", fov = " << intrin.hfov() << " x " << intrin.vfov() << ", distortion = " << intrin.model() << std::endl;
    }

    // Start our device
    dev.start();

    // For the libuvc backend, this sleep is required before touching any of the camera
    // options after a device has been .start()'d
    std::this_thread::sleep_for(std::chrono::milliseconds(1000));

    // Report the status of each supported option
    /*for(int i = 0; i < RS_OPTION_COUNT; ++i)
    {
        auto option = rs::option(i);
        if(dev.supports_option(option))
        {
            std::cout << "Option " << option << ": ";
            try { std::cout << dev.get_option(option) << std::endl; }
            catch(const std::exception & e) { std::cout << e.what() << std::endl; }
        }
    }*/

    // Open a GLFW window
    glfwInit();
    std::ostringstream ss; ss << "CPP Capture Example (" << dev.get_name() << ")";
    GLFWwindow * win = glfwCreateWindow(1280, 960, ss.str().c_str(), 0, 0);
    glfwSetWindowUserPointer(win, &dev);

    // Keyboard toggles (applied on press, not release):
    //   R - color rectification   C - align color to depth
    //   D - align depth to color  E - emitter on/off (R200)
    //   A - LR auto exposure on/off (R200)
    glfwSetKeyCallback(win, [](GLFWwindow * win, int key, int scancode, int action, int mods)
    {
        // Recover the device pointer stashed via glfwSetWindowUserPointer above
        auto dev = reinterpret_cast<rs::device *>(glfwGetWindowUserPointer(win));
        if(action != GLFW_RELEASE) switch(key)
        {
        case GLFW_KEY_R: color_rectification_enabled = !color_rectification_enabled; break;
        case GLFW_KEY_C: align_color_to_depth = !align_color_to_depth; break;
        case GLFW_KEY_D: align_depth_to_color = !align_depth_to_color; break;
        case GLFW_KEY_E:
            if(dev->supports_option(rs::option::r200_emitter_enabled))
            {
                // Toggle by reading the current value and writing its negation
                int value = !dev->get_option(rs::option::r200_emitter_enabled);
                std::cout << "Setting emitter to " << value << std::endl;
                dev->set_option(rs::option::r200_emitter_enabled, value);
            }
            break;
        case GLFW_KEY_A:
            if(dev->supports_option(rs::option::r200_lr_auto_exposure_enabled))
            {
                int value = !dev->get_option(rs::option::r200_lr_auto_exposure_enabled);
                std::cout << "Setting auto exposure to " << value << std::endl;
                dev->set_option(rs::option::r200_lr_auto_exposure_enabled, value);
            }
            break;
        }
    });
    glfwMakeContextCurrent(win);

    // Render loop: one iteration per synchronized frame set
    while (!glfwWindowShouldClose(win))
    {
        // Wait for new images
        glfwPollEvents();
        dev.wait_for_frames();

        // Clear the framebuffer
        int w,h;
        glfwGetFramebufferSize(win, &w, &h);
        glViewport(0, 0, w, h);
        glClear(GL_COLOR_BUFFER_BIT);

        // Draw the images in a 2x2 grid; the stream shown in each quadrant
        // honors the current alignment/rectification toggles
        glPushMatrix();
        glfwGetWindowSize(win, &w, &h);
        glOrtho(0, w, h, 0, -1, +1);
        buffers[0].show(dev, align_color_to_depth ? rs::stream::color_aligned_to_depth : (color_rectification_enabled ? rs::stream::rectified_color : rs::stream::color), 0, 0, w/2, h/2);
        buffers[1].show(dev, align_depth_to_color ? (color_rectification_enabled ? rs::stream::depth_aligned_to_rectified_color : rs::stream::depth_aligned_to_color) : rs::stream::depth, w/2, 0, w-w/2, h/2);
        buffers[2].show(dev, rs::stream::infrared, 0, h/2, w/2, h-h/2);
        buffers[3].show(dev, rs::stream::infrared2, w/2, h/2, w-w/2, h-h/2);
        glPopMatrix();
        glfwSwapBuffers(win);
    }

    glfwDestroyWindow(win);
    glfwTerminate();
    return EXIT_SUCCESS;
}
catch(const rs::error & e)
{
    // RealSense API errors report the failed call and its arguments
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch(const std::exception & e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}