// ######################################################################
void* WorkThreadServer::c_run(void* p)
{
  try
    {
      // block all signals in this worker thread; instead of receiving
      // signals here, we rely on the main thread to catch any
      // important signals and then twiddle with the WorkThreadServer
      // object as needed (for example, destroying it to cleanly shut
      // down all worker threads)
      sigset_t ss;
      if (sigfillset(&ss) != 0) PLFATAL("sigfillset() failed");
      if (pthread_sigmask(SIG_SETMASK, &ss, 0) != 0) PLFATAL("pthread_sigmask() failed");

      ThreadData* const dat = static_cast<ThreadData*>(p);
      WorkThreadServer* const srv = dat->srv;

      srv->run(dat);
    }
  catch (...)
    {
      REPORT_CURRENT_EXCEPTION;
      abort();
    }

  return NULL;
}
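// Illustrative sketch (not part of the original source): one way the main thread could pair with
// the signal-blocking above. Since the workers block everything, signal delivery funnels to the
// main thread, which records the signal and then destroys the server to join all workers cleanly.
// The WorkThreadServer constructor arguments and the gSignum/exampleMain names are assumptions.
#include <csignal>
#include <memory>

static volatile std::sig_atomic_t gSignum = 0;
static void mainSigHandler(int s) { gSignum = s; }

int exampleMain()
{
  std::signal(SIGINT, mainSigHandler);    // main thread keeps signals unblocked and handles them
  std::signal(SIGTERM, mainSigHandler);

  auto srv = std::make_unique<WorkThreadServer>("server", 4 /* hypothetical: name, nthreads */);

  while (gSignum == 0)
  {
    // enqueue jobs, poll results, etc.
  }

  srv.reset();   // destroying the server shuts down all worker threads cleanly
  return 0;
}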
// ##############################################################################################################
jevois::Camera::Camera(std::string const & devname, unsigned int const nbufs) :
    jevois::VideoInput(devname, nbufs), itsFd(-1), itsBuffers(nullptr), itsFormat(), itsStreaming(false),
    itsFps(0.0F)
{
  JEVOIS_TRACE(1);

  JEVOIS_TIMED_LOCK(itsMtx);

  // Get our run() thread going and wait until it is cranking; it will flip itsRunning to true as it starts:
  itsRunFuture = std::async(std::launch::async, &jevois::Camera::run, this);
  while (itsRunning.load() == false) std::this_thread::sleep_for(std::chrono::milliseconds(5));

  // Open the device:
  itsFd = open(devname.c_str(), O_RDWR | O_NONBLOCK, 0);
  if (itsFd == -1) PLFATAL("Camera device open failed on " << devname);

  // See what kinds of inputs we have and select the first one that is a camera:
  int camidx = -1;
  struct v4l2_input inp = { };
  while (true)
  {
    try { XIOCTL_QUIET(itsFd, VIDIOC_ENUMINPUT, &inp); } catch (...) { break; }
    if (inp.type == V4L2_INPUT_TYPE_CAMERA)
    {
      if (camidx == -1) camidx = inp.index;
      LDEBUG("Input " << inp.index << " [" << inp.name << "] is a camera sensor");
    }
    else LDEBUG("Input " << inp.index << " [" << inp.name << "] is NOT a camera sensor");
    ++inp.index;
  }

  if (camidx == -1) LFATAL("No valid camera input found on device " << devname);

  // Select the camera input; this seems to be required by VFE for the camera to power on:
  XIOCTL(itsFd, VIDIOC_S_INPUT, &camidx);

  // Find out what the camera can do:
  struct v4l2_capability cap = { };
  XIOCTL(itsFd, VIDIOC_QUERYCAP, &cap);

  LINFO('[' << itsFd << "] V4L2 camera " << devname << " card " << cap.card << " bus " << cap.bus_info);

  if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) LFATAL(devname << " is not a video capture device");
  if ((cap.capabilities & V4L2_CAP_STREAMING) == 0) LFATAL(devname << " does not support streaming");

  // List the supported formats:
  struct v4l2_fmtdesc fmtdesc = { };
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  while (true)
  {
    try { XIOCTL_QUIET(itsFd, VIDIOC_ENUM_FMT, &fmtdesc); } catch (...) { break; }
    LDEBUG("Format " << fmtdesc.index << " is [" << fmtdesc.description << "] fcc " << std::showbase <<
           std::hex << fmtdesc.pixelformat << " [" << jevois::fccstr(fmtdesc.pixelformat) << ']');
    ++fmtdesc.index;
  }
}
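// Illustrative sketch (not part of the original source): the format-listing loop above prints
// jevois::fccstr(fmtdesc.pixelformat). A V4L2 fourcc is just four ASCII bytes packed into a
// 32-bit little-endian value, so a minimal decoder (an assumption about what fccstr amounts to)
// could look like this:
#include <string>
#include <cstdint>

static std::string fourccToString(std::uint32_t fcc)
{
  std::string s(4, ' ');
  for (int i = 0; i < 4; ++i)
    s[i] = static_cast<char>((fcc >> (8 * i)) & 0xFF);  // e.g. V4L2_PIX_FMT_YUYV -> "YUYV"
  return s;
}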
// ######################################################################
void TaskRelevanceMapTigs2::getGistPCAMatrix()
{
  FILE* itsFile = fopen(itsGistPCAMatrixName.getVal().c_str(), "rb");
  if (itsFile == 0) PLFATAL("Cannot open GistPCAMatrix file for reading the PCA matrix");

  int gistDims = 0;
  if (fread(&gistDims, sizeof(int), 1, itsFile) != 1) LFATAL("fread failed");

  Image<float> matrixTmp(gistDims, itsGistPCADims.getVal(), NO_INIT);
  size_t sz = itsGistPCADims.getVal() * gistDims;
  if (fread(matrixTmp.beginw(), sizeof(float), sz, itsFile) != sz) LFATAL("fread failed");

  itsGistPCAMatrix = transpose(matrixTmp);
  fclose(itsFile);
}
// ######################################################################
void TaskRelevanceMapTigs::getTigsMatrix()
{
  FILE* itsFile = fopen(itsTigsMatrixName.getVal().c_str(), "rb");
  if (itsFile == 0) PLFATAL("Cannot open TigsMatrix file for reading the Tigs coefficients");

  int w, h;
  if (fread(&h, sizeof(int), 1, itsFile) != 1) LFATAL("fread failed");
  if (fread(&w, sizeof(int), 1, itsFile) != 1) LFATAL("fread failed");

  // keep in mind that the definition of Image is Image(width, height)
  Image<float> matrixTmp(h, w, NO_INIT);
  if (fread(matrixTmp.beginw(), sizeof(float), w*h, itsFile) != size_t(w*h)) LFATAL("fread failed");

  itsTigsMatrix = transpose(matrixTmp);
  fclose(itsFile);
}
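// Illustrative sketch (not part of the original source): the two loaders above expect a tiny
// binary layout -- one or two int dimensions followed by the matrix as raw floats, which is then
// transposed on load. A matching writer for the Tigs matrix file might look like the function
// below; its name and the row-major ordering are assumptions inferred from the reader code.
#include <cstdio>
#include <vector>

static bool writeTigsMatrixFile(const char* fname, int h, int w, const std::vector<float>& data)
{
  if (data.size() != size_t(w) * size_t(h)) return false;            // dimension mismatch
  FILE* f = fopen(fname, "wb");
  if (f == 0) return false;
  bool ok = fwrite(&h, sizeof(int), 1, f) == 1                        // height first, then width,
         && fwrite(&w, sizeof(int), 1, f) == 1                        // matching the fread order above
         && fwrite(data.data(), sizeof(float), data.size(), f) == data.size();
  fclose(f);
  return ok;
}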
// ##############################################################################################################
jevois::Gadget::Gadget(std::string const & devname, jevois::VideoInput * camera, jevois::Engine * engine,
                       size_t const nbufs) :
    itsFd(-1), itsNbufs(nbufs), itsBuffers(nullptr), itsCamera(camera), itsEngine(engine), itsRunning(false),
    itsFormat(), itsFps(0.0F), itsStreaming(false), itsErrorCode(0), itsControl(0), itsEntity(0)
{
  JEVOIS_TRACE(1);

  if (itsCamera == nullptr) LFATAL("Gadget requires a valid camera to work");

  jevois::VideoMapping const & m = itsEngine->getDefaultVideoMapping();
  fillStreamingControl(&itsProbe, m);
  fillStreamingControl(&itsCommit, m);

  // Get our run() thread going and wait until it is cranking; it will flip itsRunning to true as it starts:
  itsRunFuture = std::async(std::launch::async, &jevois::Gadget::run, this);
  while (itsRunning.load() == false) std::this_thread::sleep_for(std::chrono::milliseconds(5));

  // Open the device:
  itsFd = open(devname.c_str(), O_RDWR | O_NONBLOCK);
  if (itsFd == -1) PLFATAL("Gadget device open failed for " << devname);

  // Get ready to handle UVC events:
  struct v4l2_event_subscription sub = { };
  sub.type = UVC_EVENT_SETUP;     XIOCTL(itsFd, VIDIOC_SUBSCRIBE_EVENT, &sub);
  sub.type = UVC_EVENT_DATA;      XIOCTL(itsFd, VIDIOC_SUBSCRIBE_EVENT, &sub);
  sub.type = UVC_EVENT_STREAMON;  XIOCTL(itsFd, VIDIOC_SUBSCRIBE_EVENT, &sub);
  sub.type = UVC_EVENT_STREAMOFF; XIOCTL(itsFd, VIDIOC_SUBSCRIBE_EVENT, &sub);

  // Find out what the driver can do:
  struct v4l2_capability cap = { };
  XIOCTL(itsFd, VIDIOC_QUERYCAP, &cap);

  LINFO('[' << itsFd << "] UVC gadget " << devname << " card " << cap.card << " bus " << cap.bus_info);

  if ((cap.capabilities & V4L2_CAP_VIDEO_OUTPUT) == 0) LFATAL(devname << " is not a video output device");
  if ((cap.capabilities & V4L2_CAP_STREAMING) == 0) LFATAL(devname << " does not support streaming");
}
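// Illustrative sketch (not part of the original source): having subscribed to the four UVC events
// above, the gadget's run() loop would typically wait for exceptional fd activity and dequeue
// events with VIDIOC_DQEVENT. This is only an assumed outline of that loop; the real handling of
// setup/data requests is considerably more involved. The header providing UVC_EVENT_* is also an
// assumption (<linux/usb/g_uvc.h> on recent kernels).
#include <sys/select.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/usb/g_uvc.h>   // assumption: UVC gadget uapi header providing UVC_EVENT_*

static void pollUvcEventsOnce(int fd)
{
  fd_set efds; FD_ZERO(&efds); FD_SET(fd, &efds);
  struct timeval tv = { 0, 10000 };                               // 10ms timeout
  if (select(fd + 1, nullptr, nullptr, &efds, &tv) <= 0) return;  // V4L2 events show up as exceptions

  struct v4l2_event ev = { };
  if (ioctl(fd, VIDIOC_DQEVENT, &ev) != 0) return;

  switch (ev.type)
  {
  case UVC_EVENT_SETUP:     /* parse the USB control request carried in ev.u.data */ break;
  case UVC_EVENT_DATA:      /* host sent probe/commit data */ break;
  case UVC_EVENT_STREAMON:  /* allocate buffers and start streaming */ break;
  case UVC_EVENT_STREAMOFF: /* stop streaming and release buffers */ break;
  }
}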
// ######################################################################
void JobWithSemaphore::markFinished()
{
  if (! this->finishedFlag.post()) PLFATAL("Semaphore::post() failed");
}
// ######################################################################
void V4Lgrabber::start1()
{
  itsFd = open(itsDevName.getVal().c_str(), O_RDWR | O_NONBLOCK);
  if (itsFd == -1) PLFATAL("Cannot open V4L device %s", itsDevName.getVal().c_str());

  // get frame grabber capabilities:
  struct video_capability vc;
  if (ioctl_nointr(itsFd, VIDIOCGCAP, &vc) < 0)
    IDPLFATAL("Cannot get V4L device capabilities");
  IDLINFO("FrameGrabber board name is: %s", vc.name);
  IDLINFO("maxwidth = %d, maxheight = %d", vc.maxwidth, vc.maxheight);
  if (itsDims.getVal().w() > vc.maxwidth || itsDims.getVal().h() > vc.maxheight)
    IDLFATAL("Requested grab size %dx%d too large",
             itsDims.getVal().w(), itsDims.getVal().h());

  // select input channel & norm:
  struct video_channel vch;
  vch.channel = itsChannel.getVal();
  if (ioctl_nointr(itsFd, VIDIOCGCHAN, &vch) < 0)
    IDPLERROR("Cannot get V4L device channel information");
  vch.norm = VIDEO_MODE_NTSC;     // set NTSC norm
  vch.type = VIDEO_TYPE_CAMERA;   // camera input
  IDLINFO("Channel %d is '%s' [norm %d]", vch.channel, vch.name, vch.norm);
  if (ioctl_nointr(itsFd, VIDIOCSCHAN, &vch) < 0)
    IDPLERROR("Cannot set V4L device channel information");

  switch (vch.norm)
  {
  case VIDEO_MODE_PAL:   itsFrameTime = SimTime::HERTZ(25.0);  break;
  case VIDEO_MODE_NTSC:  itsFrameTime = SimTime::HERTZ(29.97); break;
  case VIDEO_MODE_SECAM: itsFrameTime = SimTime::HERTZ(25.0);  break;
  default:               itsFrameTime = SimTime::ZERO();       break;
  }

  // get specs of video buffer:
  struct video_mbuf vmb;
  if (ioctl_nointr(itsFd, VIDIOCGMBUF, &vmb) < 0)
    IDPLFATAL("Cannot get V4L device buffer");
  IDLINFO("video Mbuf: 0x%x bytes, %d frames", vmb.size, vmb.frames);
  for (int i = 0; i < vmb.frames; ++i)
    IDLINFO("buffer offset[%d] = %d", i, vmb.offsets[i]);

  // get the picture properties:
  struct video_picture vp;
  if (ioctl_nointr(itsFd, VIDIOCGPICT, &vp) != 0)
    IDPLFATAL("ioctl(VIDIOCGPICT) get picture properties failed");

  // get ready for capture, for all frames in buffer:
  switch (itsGrabMode.getVal())
  {
  case VIDFMT_GREY:    itsVmmInfo.format = VIDEO_PALETTE_GREY;    break;
  case VIDFMT_RAW:     itsVmmInfo.format = VIDEO_PALETTE_RAW;     break;
  case VIDFMT_RGB555:  itsVmmInfo.format = VIDEO_PALETTE_RGB555;  break;
  case VIDFMT_RGB565:  itsVmmInfo.format = VIDEO_PALETTE_RGB565;  break;
  case VIDFMT_RGB24:   itsVmmInfo.format = VIDEO_PALETTE_RGB24;   break;
  case VIDFMT_RGB32:   itsVmmInfo.format = VIDEO_PALETTE_RGB32;   break;
  case VIDFMT_YUYV:    itsVmmInfo.format = VIDEO_PALETTE_YUYV;    break;
  case VIDFMT_UYVY:    itsVmmInfo.format = VIDEO_PALETTE_UYVY;    break;
  case VIDFMT_YUV422:  itsVmmInfo.format = VIDEO_PALETTE_YUV422;  break;
  case VIDFMT_YUV411:  itsVmmInfo.format = VIDEO_PALETTE_YUV411;  break;
  case VIDFMT_YUV420:  itsVmmInfo.format = VIDEO_PALETTE_YUV420;  break;
  case VIDFMT_YUV422P: itsVmmInfo.format = VIDEO_PALETTE_YUV422P; break;
  case VIDFMT_YUV411P: itsVmmInfo.format = VIDEO_PALETTE_YUV411P; break;
  case VIDFMT_YUV420P: itsVmmInfo.format = VIDEO_PALETTE_YUV420P; break;
  case VIDFMT_YUV410P: itsVmmInfo.format = VIDEO_PALETTE_YUV410P; break;

  case VIDFMT_AUTO:
    // Auto selection of grab mode:
    struct V4LPalette* pal;
    LINFO("Probing for supported palettes:");

#define CHECK_PALETTE(p)                        \
    {                                           \
      vp.palette = p;                           \
      vp.depth = 32;                            \
      ioctl_nointr(itsFd, VIDIOCSPICT, &vp);    \
      ioctl_nointr(itsFd, VIDIOCGPICT, &vp);    \
      if (vp.palette == p)                      \
        LINFO("  %-22s supported", #p);         \
      else                                      \
        LINFO("  %-22s NOT supported", #p);     \
    }

    CHECK_PALETTE(VIDEO_PALETTE_GREY);
    CHECK_PALETTE(VIDEO_PALETTE_HI240);
    CHECK_PALETTE(VIDEO_PALETTE_RGB565);
    CHECK_PALETTE(VIDEO_PALETTE_RGB24);
    CHECK_PALETTE(VIDEO_PALETTE_RGB32);
    CHECK_PALETTE(VIDEO_PALETTE_RGB555);
    CHECK_PALETTE(VIDEO_PALETTE_YUV422);
    CHECK_PALETTE(VIDEO_PALETTE_YUYV);
    CHECK_PALETTE(VIDEO_PALETTE_UYVY);
    CHECK_PALETTE(VIDEO_PALETTE_YUV420);
    CHECK_PALETTE(VIDEO_PALETTE_YUV411);
    CHECK_PALETTE(VIDEO_PALETTE_RAW);
    CHECK_PALETTE(VIDEO_PALETTE_YUV422P);
    CHECK_PALETTE(VIDEO_PALETTE_YUV411P);
    CHECK_PALETTE(VIDEO_PALETTE_YUV420P);
    CHECK_PALETTE(VIDEO_PALETTE_YUV410P);
#undef CHECK_PALETTE

    // Brute force: loop through all available palettes and use the first supported one:
    for (pal = &palettes[0]; pal->pal >= 0; ++pal)
    {
      vp.palette = pal->pal;
      vp.depth = pal->depth;
      ioctl_nointr(itsFd, VIDIOCSPICT, &vp);
      ioctl_nointr(itsFd, VIDIOCGPICT, &vp);
      if (vp.palette == pal->pal)
      {
        LINFO("  Using palette \"%s\" with depth %u", pal->name, vp.depth);
        // hack
        itsGrabMode.setVal(pal->vidformat);
        itsVmmInfo.format = vp.palette;
        break;
      }
      else
        LINFO("  Palette \"%s\" not supported", pal->name);
    }
    if (pal->pal < 0)
      IDLFATAL("Auto palette selection failed - try setting manually.");
    break;

  default:
    LFATAL("Unsupported grab mode");
  }

  // get ready to grab frames, starting with buffer/frame 0:
  itsNumBufFrames = vmb.frames;
  itsTotalBufSize = vmb.size;
  itsCurrentFrame = 0;
  itsGrabbing = new bool[itsNumBufFrames];
  for (int i = 0; i < itsNumBufFrames; ++i) itsGrabbing[i] = false;
  itsVmmInfo.width = itsDims.getVal().w();
  itsVmmInfo.height = itsDims.getVal().h();
  itsVmmInfo.frame = 0;

  // decide on mmap'ed or read() access:
  if (ioctl_nointr(itsFd, VIDIOCGMBUF, &vmb) != -1)
  {
    IDLINFO("Using mmap'ed image capture");
    // setup mmap'ed access to the video buffer:
    itsMmapBuf = static_cast<byte*>(mmap((void*)0, vmb.size, PROT_READ|PROT_WRITE, MAP_SHARED, itsFd, 0));
    if (itsMmapBuf == MAP_FAILED) IDPLFATAL("mmap failed");
    itsReadBuf = Image<byte>();
  }
  else
  {
    IDLINFO("Using read() image capture");
    itsMmapBuf = NULL;
    itsReadBuf = Image<byte>(getFrameSize(itsGrabMode.getVal(), itsDims.getVal()), 1, NO_INIT);
  }

  // set picture properties:
  vp.brightness = itsBrightness.getVal();
  vp.hue = itsHue.getVal();
  vp.colour = itsColour.getVal();
  vp.contrast = itsContrast.getVal();
  vp.whiteness = itsWhiteness.getVal();
  vp.palette = itsVmmInfo.format;
  LINFO("bright=%u hue=%u color=%u contrast=%u white=%u depth=%u palette=%u",
        vp.brightness, vp.hue, vp.colour, vp.contrast, vp.whiteness, vp.depth, vp.palette);
  if (ioctl_nointr(itsFd, VIDIOCSPICT, &vp) != 0)
    IDPLERROR("ioctl(VIDIOCSPICT) set picture properties failed");
}
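// Illustrative sketch (not part of the original source): with the mmap'ed buffer and itsVmmInfo
// prepared above, a V4L1 grab cycle conventionally queues a frame with VIDIOCMCAPTURE and later
// blocks on VIDIOCSYNC before reading the pixels out of the mmap'ed region. This is only an
// assumed outline of how the companion grab routine uses that state, written against the legacy
// <linux/videodev.h> header this grabber targets; the function and parameter names are made up.
#include <sys/ioctl.h>
#include <linux/videodev.h>   // legacy V4L1 header (removed from modern kernels)

// Capture one frame into mmap'ed frame slot 'frm' and return a pointer to its pixels.
// 'buf' is the mmap'ed capture area and 'offsets' the per-frame offsets from VIDIOCGMBUF.
static const unsigned char* grabOneFrame(int fd, struct video_mmap vmm, int frm,
                                         const unsigned char* buf, const int* offsets)
{
  vmm.frame = frm;
  if (ioctl(fd, VIDIOCMCAPTURE, &vmm) < 0) return 0;   // start asynchronous capture into that frame
  if (ioctl(fd, VIDIOCSYNC, &frm) < 0) return 0;       // block until the frame is complete
  return buf + offsets[frm];                            // pixel data for this frame
}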
// ######################################################################
int submain(const int argc, const char** argv)
{
  // catch signals and redirect them for a clean exit (in particular, this gives us a chance to do useful things
  // like flush and close output files that would otherwise be left in a bogus state, like mpeg output files):
  catchsignals(&signum);

  LINFO("#############################################STARTING##############################################");

  ModelManager mgr("App Scorbot MultiGrab");

  scorbot.reset(new ScorbotSimple(mgr));
  mgr.addSubComponent(scorbot);

  setupGrabbers(mgr);

  ofs.reset(new OutputFrameSeries(mgr));
  mgr.addSubComponent(ofs);

  if (mgr.parseCommandLine(argc, argv, "FilePrefix SceneID", 2, 2) == false) return -1;

  mgr.start();

  if (grabbers.size() < NUMCAMS)
    LFATAL("Only found %" ZU " cameras instead of %d. Reboot your machine and try again.",
           grabbers.size(), NUMCAMS);

  // get our grabbers to start grabbing:
  for (size_t cameraID = 0; cameraID < grabbers.size(); ++cameraID)
    grabberThreadServer.enqueueJob(rutz::make_shared(new GrabJob(cameraID)));

  sceneDir = mgr.getExtraArg(0);
  sceneID = boost::lexical_cast<int>(mgr.getExtraArg(1));

  pthread_t displayThread;
  pthread_create(&displayThread, NULL, &displayThreadMethod, NULL);

  // Create the interactive window:
  userInteractiveWindow = new XWinManaged(Dims(640,480), -1, -1, "User Interactive");
  userInteractiveWindow->setVisible(false);
  userInteractiveWindow->setPosition(0, 0);

  // Main loop:
  int runnumber = 0;
  while (true)
  {
    if (signum != 0) break;

    // home the robot once in a while:
    if ((runnumber % 5) == 0)
    {
      int gogo = 0;
      getInt("Perform robot homing sequence and press ENTER", gogo);
    }

    // select the scene:
    getInt("Enter scene ID (-1 to exit):", sceneID);
    if (sceneID == -1) break; // abort on scene -1

    // STEP 1. Load the scene file
    SceneSetup setup = loadSceneSetup(sceneID);

    // STEP 2. Show the interactive window:
    userInteractiveWindow->setVisible(true);

    // STEP 3. Display the background image and ask the user to place it on the scene
    Image< PixRGB<byte> > backgroundImage = Raster::ReadRGB(setup.setupPath + "/" + setup.backgroundFileName);
    backgroundImage = rescale(backgroundImage, Dims(640, 480));
    writeText(backgroundImage, Point2D<int>(0, 0), "Please place this background on the scene and press ENTER.");
    userInteractiveWindow->drawImage(backgroundImage, 0, 0, true);
    LINFO("Place background map on scene and add houses, trees, and other background objects. "
          "See User Interactive window for instructions...");

    // eat all previous mouse clicks and key presses, just in case:
    while (userInteractiveWindow->getLastMouseClick() != Point2D<int>(-1, -1)) { }
    while (userInteractiveWindow->getLastKeyPress() != -1) { }

    // wait for ENTER:
    while (userInteractiveWindow->getLastKeyPress() != 36) usleep(100000);
    LINFO("Background map done. Make sure you have built a nice scene.");

    // STEP 4. Display each object and ask the user to place it on the scene and specify its bounding box
    for (size_t i = 0; i < setup.objects.size(); ++i)
      setup.objects[i].outline = promptPlaceObjectOnScene(setup, i);

    // STEP 5. Hide the interactive window
    userInteractiveWindow->setVisible(false);

    // STEP 6. Write out the outlines to a file
    {
      std::string objectFileName = sformat("%s/RobotScene-s%04d-polygons.txt", dataDir, sceneID);
      std::ofstream objectFile(objectFileName.c_str());
      LINFO("Saving the object bounding boxes into: %s", objectFileName.c_str());
      for (size_t i = 0; i < setup.objects.size(); ++i)
      {
        for (size_t j = 0; j < setup.objects[i].outline.size(); ++j)
        {
          Point2D<int> & pnt = setup.objects[i].outline[j];
          if (j != 0) objectFile << ',';
          objectFile << pnt.i << ',' << pnt.j;
        }
        objectFile << std::endl;
      }
    }

    // STEP 7. Execute the path and record the videos
    for (pathID = 0; pathID < int(setup.pathIndex.size()); ++pathID)
    {
      if (signum != 0) break;

      // create a directory for this scene / light:
      const std::string dir = sformat("%s/RobotScene-s%04d-p%02d", dataDir, sceneID, pathID);
      const std::string cmd = sformat("/bin/mkdir -p %s", dir.c_str());
      if (system(cmd.c_str()) == -1) PLFATAL("Could not create directory %s", dir.c_str());

      int gogo = pathID;
      getInt("Set light and press ENTER to start video recording", gogo);

      // make sure we don't have too many pending disk writes:
      while (writer.size() > 1000)
      {
        LINFO("Waiting for image writer thread, queue size = %" ZU "...", writer.size());
        usleep(1000000);
      }

      LINFO("Running Scene %04d Path %02d ...", sceneID, setup.pathIndex[pathID]);
      executePath(setup.pathIndex[pathID]);
    }
    if (signum != 0) break;

    // STEP 8. Instruct the user to place the objects back into the bins
    userInteractiveWindow->setVisible(true);
    userInteractiveWindow->setPosition(0, 0);
    for (size_t i = 0; i < setup.objects.size(); ++i) promptReturnObjectToTray(setup, i);
    userInteractiveWindow->setVisible(false);

    // STEP 9. Ready for the next scene
    ++sceneID;
    ++runnumber;
  }

  // stop grabbing:
  keepgoing = false;

  // wait for all pics to be written (note: this just waits until the queue of pending jobs is empty; the writer's
  // destructor will wait until all jobs are complete):
  while (writer.size())
  {
    LINFO("Waiting for image writer thread, queue size = %" ZU "...", writer.size());
    usleep(500000);
  }
  writer.flushQueue(250000, true); // make sure all is done

  LINFO("Cleaning up... Stand by...");
  usleep(2000000);

  // stop all our ModelComponents
  mgr.stop();

  LINFO("Finished.");
  return 0;
}
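// Illustrative sketch (not part of the original source): submain() leans on a small getInt()
// prompt helper that is defined elsewhere in this program. A plausible minimal version is shown
// below; its exact behavior (in particular keeping the previous value on empty input) is an
// assumption, not a copy of the real helper.
#include <iostream>
#include <sstream>
#include <string>

static void getInt(const std::string& prompt, int& value)
{
  std::cout << prompt << " [" << value << "]: " << std::flush;
  std::string line;
  std::getline(std::cin, line);
  std::istringstream iss(line);
  int v;
  if (iss >> v) value = v;   // keep the previous value if the user just pressed ENTER
}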