void handle_scanline_frame(const struct frame_scanline *scanline)
{
  std::cerr << "Scanline: angle " << scanline->angle
            << " range " << scanline->range_meters
            << " length " << scanline->scan_length_bytes << std::endl;

  std::vector<garmin_sample_ptr> samples;
  const unsigned char *data = scanline->line_data;
  // the payload packs four sub-scanlines back to back, n samples each
  unsigned short n = scanline->scan_length_bytes / 4;
  unsigned short i, k;

  for (i = 0; i < 4; i++) {
    double angle = scanline->angle + i * 0.5;
    std::cout << "angle " << std::fixed << std::setprecision(1) << angle << ": ";
    for (k = 0; k < n; k++) {
      unsigned short p = k + i * n; // k-th sample of the i-th sub-scanline
      std::cout << ((int)data[p]) << " ";
    }
    std::cout << std::endl;

    // store each sub-scanline as its own sample, at angle*100 + i*25
    // (hundredths of a degree)
    garmin_sample_ptr sample = boost::make_shared<garmin_sample>
      ((scanline->angle * 100) + (i * 25), scanline->range_meters);
    sample->set_samples(n, data + i * n);
    samples.push_back(sample);
  }

  print_controls(scanline);
}
void handle_response_frame(const struct frame_response *frame)
{
  print_controls(frame);
}
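// Hypothetical sketch of the frame layout handle_scanline_frame() assumes.
// Field names are taken from the handler above; the types, units and packing
// are guesses for illustration, not the actual Garmin protocol definition.
struct frame_scanline_sketch {
  double angle;                     // start angle of this frame, in degrees
  double range_meters;              // configured radar range
  unsigned short scan_length_bytes; // total payload size; the handler splits it
                                    // into 4 sub-scanlines of scan_length_bytes/4
                                    // samples each
  unsigned char line_data[1];       // one echo-strength byte per sample,
                                    // sub-scanlines packed back to back
};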
template <typename Tdata>
camera_v4l2<Tdata>::camera_v4l2(const char *device, int height_, int width_,
                                bool grayscale_)
    : camera<Tdata>(height_, width_), started(false),
      nbuffers(grayscale_ ? 1 : 3), buffers(new void*[nbuffers]),
      sizes(new int[nbuffers]) {
  cout << "Initializing V4l2 camera from device " << device << endl;
  if (grayscale_)
    cout << "V4l2 output is set to grayscale." << endl;
#ifndef __LINUX__
  eblerror("V4l2 is for linux only");
#else
  int fps = 30;
  int height1 = -1; // height returned by camera
  int width1 = -1;  // width returned by camera
  fd = open(device, O_RDWR);
  if (fd == -1)
    eblerror("could not open v4l2 device");
  struct v4l2_capability cap;
  struct v4l2_cropcap cropcap;
  struct v4l2_crop crop;
  struct v4l2_format fmt;
  memset((void*) &cap, 0, sizeof(struct v4l2_capability));
  int ret = ioctl(fd, VIDIOC_QUERYCAP, &cap);
  if (ret < 0) { // (==> this cleanup)
    eblerror("could not query v4l2 device");
  }
  if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) { // (==> this cleanup)
    eblerror("v4l2 device does not support video capture");
  }
  if (!(cap.capabilities & V4L2_CAP_STREAMING)) { // (==> this cleanup)
    eblerror("v4l2 device does not support streaming i/o");
  }
  // reset cropping to the full frame
  memset((void*) &cropcap, 0, sizeof(struct v4l2_cropcap));
  cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  if (0 == ioctl(fd, VIDIOC_CROPCAP, &cropcap)) {
    crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    crop.c = cropcap.defrect;
    ioctl(fd, VIDIOC_S_CROP, &crop);
  }
  // set format
  memset((void*) &fmt, 0, sizeof(struct v4l2_format));
  fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  // TODO: error when ratio not correct
  fmt.fmt.pix.width = width_;
  fmt.fmt.pix.height = height_;
  fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
  fmt.fmt.pix.field = V4L2_FIELD_ANY;
  if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) { // (==> this cleanup)
    eblerror("unable to set v4l2 format");
  }
  height1 = fmt.fmt.pix.height;
  width1 = fmt.fmt.pix.width;
  if (height != height1 || width != width1) {
    cout << "Warning: requested resolution " << height << "x" << width
         << " but camera changed it to " << height1 << "x" << width1 << endl;
    // enable resizing as postprocessing
    bresize = true;
  } else // already at the target resolution, disable resizing
    bresize = false;
  // set framerate
  struct v4l2_streamparm setfps;
  memset((void*) &setfps, 0, sizeof(struct v4l2_streamparm));
  setfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  setfps.parm.capture.timeperframe.numerator = 1;
  setfps.parm.capture.timeperframe.denominator = fps;
  ioctl(fd, VIDIOC_S_PARM, &setfps);
  // allocate and map the buffers
  struct v4l2_requestbuffers rb;
  memset((void*) &rb, 0, sizeof(struct v4l2_requestbuffers));
  rb.count = nbuffers;
  rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  rb.memory = V4L2_MEMORY_MMAP;
  ret = ioctl(fd, VIDIOC_REQBUFS, &rb);
  if (ret < 0) { // (==> this cleanup)
    eblerror("could not allocate v4l2 buffers");
  }
  ret = 0;
  for (int i = 0; i < nbuffers; i++) {
    struct v4l2_buffer buf;
    int r;
    memset((void*) &buf, 0, sizeof(struct v4l2_buffer));
    buf.index = i;
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    r = ioctl(fd, VIDIOC_QUERYBUF, &buf);
    // printf("i=%u, length: %u, offset: %u, r=%d\n", i, buf.length, buf.m.offset, r);
    if (r < 0)
      ret = -(i + 1);
    if (ret == 0) {
      buffers[i] = mmap(0, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                        fd, buf.m.offset);
      sizes[i] = buf.length;
      if (buffers[i] == MAP_FAILED)
        ret = -(i + 1000);
    }
  }
  if (ret < 0) {
    cout << "ret = " << ret << endl;
    if (ret > -1000) {
      cout << "query buffer " << -(1 + ret) << endl; // (==> this cleanup)
      eblerror("could not query v4l2 buffer");
    } else {
      cout << "map buffer " << -(1000 + ret) << endl; // (==> this cleanup)
      eblerror("could not map v4l2 buffer");
    }
  }
  frame = idx<Tdata>(height1, width1, nbuffers);
  print_controls();
  set_boolean_control(V4L2_CID_AUTOGAIN, false);
  set_boolean_control(V4L2_CID_AUTO_WHITE_BALANCE, false);
  // set_integer_control(V4L2_CID_BACKLIGHT_COMPENSATION, 0);
  print_controls();
#endif
}
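// The constructor above only maps the buffers; capture does not begin until
// the buffers are queued and streaming is turned on (the 'started' flag is
// still false at this point). A minimal sketch of that next step, assuming a
// start() member that mirrors the constructor's ioctl and error conventions --
// the class's actual start method may differ:
template <typename Tdata>
void camera_v4l2<Tdata>::start() {
#ifdef __LINUX__
  // queue every mmap'ed buffer so the driver can fill it
  for (int i = 0; i < nbuffers; i++) {
    struct v4l2_buffer buf;
    memset((void*) &buf, 0, sizeof(struct v4l2_buffer));
    buf.index = i;
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
      eblerror("could not queue v4l2 buffer");
  }
  // start the capture stream
  enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
    eblerror("could not start v4l2 streaming");
  started = true;
#endif
}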
int run_program(int argc, char **argv)
{
  CommandLineArgument<std::string> model_pathname;

  for (int i = 1; i < argc; i++) {
    std::string argument(argv[i]);
    if (argument == "--help") {
      print_usage();
      return 0;
    } else if (!assign_argument(argument, model_pathname)) {
      throw make_runtime_error("Do not know how to process argument '%s'",
                               argument.c_str());
    }
  }

  if (!have_argument_p(model_pathname))
    model_pathname = AVATAR::DefaultAvatarModelPathname();

  if (!file_exists_p(model_pathname->c_str()))
    throw make_runtime_error("Model file '%s' does not exist",
                             model_pathname->c_str());

  AVATAR::Avatar *avatar = AVATAR::LoadAvatar(model_pathname->c_str());
  if (avatar->numberOfAvatars() == 0) {
    delete avatar; // nothing to display
    return 0;
  }

  print_controls();

  const int minimum_avatar = 0;
  const int maximum_avatar = avatar->numberOfAvatars() - 1;
  int index = 0;
  bool quit_application_p = false;

  printf("Number of avatars: %d\n", avatar->numberOfAvatars());
  show_avatar(avatar, index);

  while (!quit_application_p) {
    char ch = (char) cv::waitKey(10);
    switch (ch) {
    case 'a':
    case 2:   // left arrow
      index = std::max(minimum_avatar, index - 1);
      show_avatar(avatar, index);
      break;
    case 'd':
    case 3:   // right arrow
      index = std::min(maximum_avatar, index + 1);
      show_avatar(avatar, index);
      break;
    case 'q':
    case 27:  // escape
      quit_application_p = true;
      break;
    case -1:  // no key pressed
      break;
    default:
      break;
    }
  }

  printf("\n");
  delete avatar;
  return 0;
}