// ######################################################################
GenericFrame RasterInputSeries::readFrame()
{
  if (itsFrameNumber < 0)
    LFATAL("frame number is %d, but readFrame() requires a "
           "non-negative frame number", itsFrameNumber);

  // figure out the file name to use:
  const std::string fname(computeInputFileName(itsStem, itsFrameNumber));

  // check if we've already read that file; if so, then return an
  // empty image
  if (fname == itsPrevFilename)
  {
    LINFO("repeated input file skipped: %s", fname.c_str());
    return GenericFrame();
  }

  // check whether the file exists; if not, then return an empty image:
  if (!Raster::fileExists(fname, itsRasterFileFormat.getVal()))
    {
      LINFO("no such file: %s", fname.c_str());
      return GenericFrame();
    }

  // load the image:
  const GenericFrame ima =
    Raster::ReadFrame(fname, itsRasterFileFormat.getVal());

  itsPrevFilename = fname;

  textLog(itsLogFile.getVal(), "ReadFrame", fname);

  return ima;
}
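// For orientation, here is a small standalone sketch of the "empty frame as
// sentinel" convention used by readFrame() above: a default-constructed frame
// means "skipped or missing input". The Frame struct, readNext() helper and
// file list below are made up for illustration; they are not the
// GenericFrame / RasterInputSeries API.
#include <cstdio>
#include <string>
#include <vector>

struct Frame {
  std::string data;                                 // empty == "no frame"
  bool initialized() const { return !data.empty(); }
};

// Pretend reader: returns an empty Frame for missing or repeated files.
Frame readNext(const std::vector<std::string>& files, size_t n, std::string& prev)
{
  if (n >= files.size()) return Frame();            // past end of stream
  if (files[n] == prev)  return Frame();            // repeated input file: skip it
  prev = files[n];
  return Frame{files[n]};
}

int main()
{
  const std::vector<std::string> files = { "frame000.png", "frame000.png", "frame001.png" };
  std::string prev;
  for (size_t n = 0; n < 4; ++n) {
    Frame f = readNext(files, n, prev);
    std::printf("frame %zu: %s\n", n, f.initialized() ? f.data.c_str() : "(empty)");
  }
  return 0;
}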
// ######################################################################
GenericFrame RasterlistInputSeries::readFrame()
{
  if (itsFrameNumber < 0)
    LFATAL("expected a non-negative frame number, but got %d",
           itsFrameNumber);

  if (size_t(itsFrameNumber) >= itsFiles.size())
    // out of input; end of stream
    return GenericFrame();

  if (!Raster::fileExists(itsFiles[itsFrameNumber]))
    {
      LINFO("at line %d of %s: no such file: %s",
            itsFrameNumber, itsListFname.c_str(),
            itsFiles[itsFrameNumber].c_str());
      return GenericFrame();
    }

  const GenericFrame result =
    Raster::ReadFrame(itsFiles[itsFrameNumber]);

  //if (result.frameSpec() != itsFrameSpec)
  //  LFATAL("at line %d of %s: expected %s to have format '%s', "
  //         "but got format '%s'",
  //         itsFrameNumber, itsListFname.c_str(),
  //         itsFiles[itsFrameNumber].c_str(),
  //         itsFrameSpec.getDescription().c_str(),
  //         result.frameSpec().getDescription().c_str());

  return result;
}
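// The frame list (itsFiles) is indexed directly by frame number above. As a
// hedged sketch of how such a list file might be loaded, one filename per
// line (a hypothetical helper, not the actual RasterlistInputSeries loader):
#include <fstream>
#include <string>
#include <vector>

// Read one filename per line; blank lines are skipped.
std::vector<std::string> loadFileList(const std::string& listFname)
{
  std::vector<std::string> files;
  std::ifstream in(listFname.c_str());
  std::string line;
  while (std::getline(in, line))
    if (!line.empty()) files.push_back(line);
  return files;
}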
Example #3
// ######################################################################
GenericFrame PngParser::getFrame()
{
  if (rep->isGray())       return GenericFrame(parseGray());
  else if (rep->isColor()) return GenericFrame(parseRGB());
  // else...
  rep->onError("unsupported image type (neither grayscale nor RGB)");
  /* can't happen */ return GenericFrame();
}
Example #4
GenericFrame JpegParser::getFrame()
{
  if (rep->rgb.initialized())
    return GenericFrame(rep->rgb);

  // else...
  ASSERT(rep->gray.initialized());
  return GenericFrame(rep->gray);
}
Example #5
// ######################################################################
GenericFrame V4Lgrabber::readFrame()
{
  const GenericFrame frame =
    itsStreamingMode.getVal()
    ? GenericFrame(this->grabRaw())
    : GenericFrame(this->grabSingleRaw());

  if (itsListener.get() != 0)
    itsListener->onRawFrame(frame);

  return frame;
}
Example #6
Point2D evolveBrain(Image<PixRGB<byte> > &img, DescriptorVec& descVec, int ii)
{

  nub::ref<StdBrain>  brain = dynCastWeak<StdBrain>(mgr->subComponent("Brain"));
  nub::ref<SimEventQueueConfigurator> seqc =
    dynCastWeak<SimEventQueueConfigurator>(mgr->subComponent("SimEventQueueConfigurator"));
  nub::soft_ref<SimEventQueue> seq  = seqc->getQ();

  LINFO("Evolve Brain");

  if (mgr->started()){    //give the image to the brain

    if (img.initialized())
      {
        //place the image in the inputFrame queue
        rutz::shared_ptr<SimEventInputFrame>
          e(new SimEventInputFrame(brain.get(), GenericFrame(img), 0));
        seq->post(e);
       // brain->input(img, seq);
        descVec.setInputImg(img);
      }

    SimTime end_time = seq->now() + SimTime::MSECS(3.0);

    while (seq->now() < end_time)
    {
      brain->evolve(*seq); //evolve the brain

      // Any new WTA winner?
      if (SeC<SimEventWTAwinner> e = seq->check<SimEventWTAwinner>(brain.get()))
      {
        const Point2D winner = e->winner().p;

        //get the saliency map output
        if (debug)
        {
          if (SeC<SimEventSaliencyMapOutput> smo =
              seq->check<SimEventSaliencyMapOutput>(brain.get(), SEQ_ANY))
          {
            Image<float> smImg = smo->sm();
            //SHOWIMG(rescale(smImg, smImg.getWidth()*16, smImg.getHeight()*16));
          }
        }
        seq->evolve();

        return winner;
      }


      seq->evolve();
      LINFO("Evolve 1\n");

    }
  }

  return Point2D();

}
Example #7
void Logger::saveSingleEventFrame(MbariImage< PixRGB<byte> >& img,
        int frameNum,
        MbariVisualEvent::VisualEvent *event) {
    ASSERT(event->frameInRange(frameNum));

    // create the file stem
    string evnum;
    if (itsSaveEventFeatures.getVal().length() > 0)
        evnum = sformat("%s_evt%04d_", itsSaveEventFeatures.getVal().c_str(), event->getEventNum() );
    else
        evnum = sformat("evt%04d_", event->getEventNum());

    Dims maxDims = event->getMaxObjectDims();
    Dims d((float)maxDims.w()*itsScaleW, (float)maxDims.h()*itsScaleH);

    // compute the correct bounding box and cut it out
    Rectangle bbox1 = event->getToken(frameNum).bitObject.getBoundingBox();
    Rectangle bbox = Rectangle::tlbrI(bbox1.top()*itsScaleH, bbox1.left()*itsScaleW,
                                    bbox1.bottomI()*itsScaleH, bbox1.rightI()*itsScaleW);
    //Point2D cen = event.getToken(frameNum).bitObject.getCentroid();

    // first the horizontal direction
    int wpad = (d.w() - bbox.width()) / 2;
    int ll = bbox.left() - wpad;
    //int ll = cen.i - d.w() / 2;
    int rr = ll + d.w();
    if (ll < 0) {
        rr -= ll;
        ll = 0;
    }
    if (rr >= img.getWidth()) {
        rr = img.getWidth() - 1;
        ll = rr - d.w();
    }

    // now the same thing with the vertical direction
    int hpad = (d.h() - bbox.height()) / 2;
    int tt = bbox.top() - hpad;
    //int tt = cen.j - d.h() / 2;
    int bb = tt + d.h();
    if (tt < 0) {
        bb -= tt;
        tt = 0;
    }
    if (bb >= img.getHeight()) {
        bb = img.getHeight() - 1;
        tt = bb - d.h();
    }

    Rectangle bboxFinal = Rectangle::tlbrI(tt, ll, bb, rr);
    bboxFinal = bboxFinal.getOverlap(Rectangle(Point2D<int>(0, 0), img.getDims() - 1));

    // scale if needed and cut out the rectangle and save it
    Image< PixRGB<byte> > cut = crop(img, bboxFinal);
    itsOfs->writeFrame(GenericFrame(cut), evnum, FrameInfo(evnum, SRC_POS));
}
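// The pad-and-clamp logic above (wpad/ll/rr and hpad/tt/bb) centers a
// fixed-size window on the event's bounding box and shifts it back inside
// the image when it sticks out past an edge. A standalone sketch of the
// one-dimensional case, with hypothetical names (this is not the Logger or
// Rectangle API):
#include <cstdio>

// Center a window of targetWidth on a box starting at boxLeft with boxWidth,
// then shift it so it stays within [0, imgW).
void padAndClamp(int boxLeft, int boxWidth, int targetWidth, int imgW,
                 int& ll, int& rr)
{
  const int wpad = (targetWidth - boxWidth) / 2;
  ll = boxLeft - wpad;
  rr = ll + targetWidth;
  if (ll < 0)     { rr -= ll; ll = 0; }                        // hangs over the left edge: shift right
  if (rr >= imgW) { rr = imgW - 1; ll = rr - targetWidth; }    // hangs over the right edge: shift left
}

int main()
{
  int ll = 0, rr = 0;
  padAndClamp(/*boxLeft=*/5, /*boxWidth=*/20, /*targetWidth=*/60, /*imgW=*/100, ll, rr);
  std::printf("window: [%d, %d]\n", ll, rr);                   // prints: window: [0, 60]
  return 0;
}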
// ######################################################################
GenericFrame QuickTimeGrabber::readFrame()
{
  if (!this->started())
    LFATAL("start() must be called before readFrame()");

#ifndef HAVE_QUICKTIME_QUICKTIME_H
  LFATAL("you must have QuickTime installed to use QuickTimeGrabber");
  /* can't happen */ return GenericFrame();
#else
  ASSERT(rep != 0);
  return rep->readFrame();
#endif
}
// ######################################################################
GenericFrame QuickTimeGrabber::Impl::readFrame()
{
  if (!itsStreamStarted)
    this->startStream();

  while (1)
    {
      itsGotFrame = false;
      itsErrorMsg = "";
      if (noErr != SGIdle(itsSeqGrab.it))
        LFATAL("SGIdle() failed");

      if (itsErrorMsg.length() > 0)
        {
          // some error specific to SGIdle occurred - any errors
          // returned from the data proc will also show up here and we
          // don't want to write over them

          // in QT 4 you would always encounter a cDepthErr error
          // after a user drags the window; this failure condition has
          // been greatly relaxed in QT 5. It may still occur, but
          // should only apply to vDigs that really control the screen

          // you don't always know where these errors originate from,
          // some may come from the VDig...

          LFATAL("QuickTimeGrabber error during SGIdle (%s)",
                 itsErrorMsg.c_str());

          // ...to fix this we simply call SGStop and SGStartRecord
          // again; calling stop allows the SG to release and
          // re-prepare for grabbing, hopefully fixing any problems.
          // This is obviously a very relaxed approach
          SGStop(itsSeqGrab.it);
          SGStartRecord(itsSeqGrab.it);
        }

      if (itsGotFrame)
        return GenericFrame(itsCurrentImage);

      usleep(20000);
    }
}
Example #10
// Main function
int main(const int argc, const char **argv)
{

  MYLOGVERB = LOG_INFO;  // suppress debug messages

  // Instantiate a ModelManager:
  ModelManager manager("Gist Features Extraction");

  // we cannot use saveResults() on our various ModelComponent objects
  // here, so let's not export the related command-line options.
  manager.allowOptions(OPTEXP_ALL & (~OPTEXP_SAVE));

  // Instantiate our various ModelComponents:
  nub::soft_ref<SimEventQueueConfigurator>
    seqc(new SimEventQueueConfigurator(manager));
  manager.addSubComponent(seqc);

  nub::soft_ref<InputMPEGStream>
    ims(new InputMPEGStream(manager, "Input MPEG Stream", "InputMPEGStream"));
  manager.addSubComponent(ims);

  nub::soft_ref<StdBrain> brain(new StdBrain(manager));
  manager.addSubComponent(brain);

  nub::ref<SpatialMetrics> metrics(new SpatialMetrics(manager));
  manager.addSubComponent(metrics);

  manager.exportOptions(MC_RECURSE);
  metrics->setFOAradius(30); // FIXME
  metrics->setFoveaRadius(30); // FIXME

  // setting up the GIST ESTIMATOR
  manager.setOptionValString(&OPT_GistEstimatorType,"Std");
  //manager.setOptionValString(&OPT_GistEstimatorType,"FFT");

  // Request a bunch of option aliases (shortcuts to lists of options):
  REQUEST_OPTIONALIAS_NEURO(manager);

  // Parse command-line:
  if (manager.parseCommandLine(argc, argv, "<*.mpg or *_gistList.txt>",
                               1, 1) == false)
    return(1);

  nub::soft_ref<SimEventQueue> seq = seqc->getQ();

  // if the file passed ends with _gistList.txt
  // we have a different protocol
  bool isGistListInput = false;
  int ifLen = manager.getExtraArg(0).length();
  if(ifLen > 13 &&
     manager.getExtraArg(0).find("_gistList.txt",ifLen - 13) !=
     std::string::npos)
    isGistListInput = true;

  // NOTE: this could now be controlled by a command-line option
  // --preload-mpeg=true
  manager.setOptionValString(&OPT_InputMPEGStreamPreload, "true");

  // do post-command-line configs:
  std::vector<std::string> tag;
  std::vector<int> start;
  std::vector<int> num;
  unsigned int cLine = 0; int cIndex = 0;
  if(isGistListInput)
    {
      LINFO("we have a gistList input");
      getGistFileList(manager.getExtraArg(0).c_str(), tag, start, num);
      cIndex = start[0];
    }
  else
    {
      LINFO("we have an mpeg input");
      ims->setFileName(manager.getExtraArg(0));
      manager.setOptionValString(&OPT_InputFrameDims,
                                 convertToString(ims->peekDims()));
    }

  // frame delay in seconds
  //double fdelay = 33.3667/1000.0; // real time
  double fdelay = 3.3667/1000.0;

  // let's get all our ModelComponent instances started:
  manager.start();

  // get the GistEstimator
  nub::soft_ref<GistEstimatorStd> ge;////// =
  ///////    dynCastWeak<GistEstimatorStd>(brain->getGE());
  LFATAL("fixme");
  if (ge.isInvalid()) LFATAL("I am useless without a GistEstimator");

  // MAIN LOOP
  SimTime prevstime = SimTime::ZERO();
  int fNum = 0;
  Image< PixRGB<byte> > inputImg;  Image< PixRGB<byte> > dispImg;
  Image<double> cgist;
  std::string folder =  "";
  std::string::size_type sPos = manager.getExtraArg(0).rfind("/",ifLen);
  if(sPos != std::string::npos)
    folder = manager.getExtraArg(0).substr(0,sPos+1);

  LINFO("let's start");
  while(1)
  {
    // has the time come for a new frame?
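    // (the left-hand side extrapolates simulated time roughly half a step
    // past seq->now(), i.e. now + 0.5*(now - prevstime), so a new frame is
    // fetched once that extrapolated time passes the next multiple of fdelay)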
    if (fNum == 0 ||
        (seq->now() - 0.5 * (prevstime - seq->now())).secs() - fNum * fdelay > fdelay)
    {
      // load new frame
      std::string fName;
      if(isGistListInput)
        {
          if (cLine >= tag.size()) break;  // end of input list

          // open the current file
          char tNumStr[100]; sprintf(tNumStr,"%06d",cIndex);
          fName = folder + tag[cLine] + std::string(tNumStr) + ".ppm";

          inputImg = Raster::ReadRGB(fName);
          cIndex++;

          if(cIndex >= start[cLine] + num[cLine])
            {
              cLine++;
              if (cLine < tag.size()) cIndex = start[cLine];
            }

          // reformat the file name to a gist name
          int fNameLen = fName.length();
          unsigned int uPos = fName.rfind("_",fNameLen);
          fName = fName.substr(0,uPos)+ ".ppm";

        }
      else
        {
          fName = manager.getExtraArg(0);
          inputImg = ims->readRGB(); //Raster::ReadRGB(manager.getExtraArg(1));
          if (inputImg.initialized() == false) break;  // end of input stream
          // format new frame
          inputImg = crop(inputImg,
                          Rectangle(Point2D<int>(0,25),
                                    Dims(inputImg.getHeight(),
                                         inputImg.getWidth()-25+1)));
          cIndex = fNum+1;
        }

      dispImg = inputImg;
      LINFO("\nnew frame :%d",fNum);

      // pass input to brain:
      rutz::shared_ptr<SimEventInputFrame>
        e(new SimEventInputFrame(brain.get(), GenericFrame(inputImg), 0));
      seq->post(e); // post the image to the brain

      // get the gist feature vector
      cgist = ge->getGist();
      //for(uint k = 0; k < cgist.getSize(); k++) LINFO("%d: %f",k, cgist.getVal(0,k));

//       // setup display at the start of stream
//       if (fNum == 0)
//       {
//         int s = SQ_SIZE;
//         inputWin = new XWinManaged(Dims(w, h), 0, 0, manager.getExtraArg(0).c_str());
//         wList.add(inputWin);
//         gistWin = new XWinManaged(Dims(NUM_GIST_COL * s, NUM_GIST_FEAT * s), 0,0, "Gist");
//         wList.add(gistWin);
//       }

//       // display the input image and the gist histogram
//       drawGrid(dispImg, w/4,h/4,1,1,PixRGB<byte>(255,255,255));
//       inputWin->drawImage(dispImg,0,0);
//       gistWin->drawImage(ge->getGistHistogram(SQ_SIZE),0,0);

      // SAVE GIST FEATURES TO A FILE
      saveData(cgist, fName, cIndex-1);
      //LINFO("\nFrame number just saved:%d",fNum);Raster::waitForKey();

      // increase frame count
      fNum++;
    }

    // evolve brain:
    prevstime = seq->now(); // time before current step
    const SimStatus status = seq->evolve();
    if (SIM_BREAK == status) // Brain decided it's time to quit
      break;

  }

  // stop all our ModelComponents
  manager.stop();

  // all done!
  return 0;
}
Example #11
void Logger::save(const Image< PixRGB<byte> >& img,
        const uint frameNum,
        const string& resultName,
        const int resNum) {
    itsOfs->writeFrame(GenericFrame(img), getFileStem(resultName, resNum), FrameInfo(resultName, SRC_POS));
}
Example #12
void Logger::save(const Image<float>& img,
        const uint frameNum,
        const string& resultName,
        const int resNum) {
    itsOfs->writeFrame(GenericFrame(img, FLOAT_NORM_0_255), getFileStem(resultName, resNum), FrameInfo(resultName, SRC_POS));
}
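// FLOAT_NORM_0_255 asks the writer to map the float image onto displayable
// bytes before saving. As a rough standalone sketch of what such a mapping
// does (plain min/max rescaling; the actual GenericFrame conversion is not
// reproduced here), with made-up names:
#include <algorithm>
#include <cstdio>
#include <vector>

// Map arbitrary float values onto [0, 255] by min/max rescaling.
std::vector<unsigned char> normalizeTo255(const std::vector<float>& in)
{
  std::vector<unsigned char> out(in.size(), 0);
  if (in.empty()) return out;
  const float lo = *std::min_element(in.begin(), in.end());
  const float hi = *std::max_element(in.begin(), in.end());
  const float range = (hi > lo) ? (hi - lo) : 1.0f;   // guard against a flat image
  for (size_t k = 0; k < in.size(); ++k)
    out[k] = static_cast<unsigned char>(255.0f * (in[k] - lo) / range + 0.5f);
  return out;
}

int main()
{
  const std::vector<float> sal = { -1.5f, 0.0f, 2.5f, 7.0f };
  for (unsigned char v : normalizeTo255(sal)) std::printf("%d ", int(v));
  std::printf("\n");                                   // prints: 0 45 120 255
  return 0;
}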
Example #13
// ######################################################################
GenericFrame MgzJDecoder::readFrame()
{

  // Grab the journal entry for this frame and allocate an appropriate GenericFrame
  MgzJEncoder::journalEntry entry = itsJournal.at(itsFrameNum);
  const Dims dims(entry.width, entry.height);
  const GenericFrame::NativeType pix_type = GenericFrame::NativeType(entry.pix_type);
  const int num_pix = dims.sz();
  GenericFrame frame;

  //Read in the compressed image to a buffer
  uint64 comp_image_buf_size = entry.end_byte - entry.start_byte;
  byte * comp_image_buf = new byte[comp_image_buf_size];
  itsFile.seekg(entry.start_byte, std::ios::beg);
  itsFile.read((char*)comp_image_buf, comp_image_buf_size);

  //Prepare zlib to do the decompression
  z_stream strm;
  strm.zalloc   = Z_NULL;
  strm.zfree    = Z_NULL;
  strm.opaque   = Z_NULL;
  strm.avail_in = 0;
  strm.next_in  = Z_NULL;
  int ret = inflateInit(&strm);
  if (ret != Z_OK)
    LFATAL("Could not initialize zlib!");

  strm.avail_in = comp_image_buf_size;
  strm.next_in  = comp_image_buf;
  switch(pix_type)
  {
    case GenericFrame::GRAY_U8:
      {
        Image<byte> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(byte);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::GRAY_U16:
      {
        Image<uint16> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(uint16);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::GRAY_F32:
      {
        Image<float> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(float);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img, entry.flags);

        break;
      }
    case GenericFrame::RGB_U8:
      {
        Image<PixRGB<byte> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<byte>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::RGB_U16:
      {
        Image<PixRGB<uint16> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<uint16>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::RGB_F32:
      {
        Image<PixRGB<float> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<float>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img, entry.flags);

        break;
      }
      case GenericFrame::VIDEO:
      {
        const size_t vidSize = getFrameSize(VideoFormat(entry.flags), dims);
        ArrayHandle<byte> vidBuffer(new ArrayData<byte>(Dims(vidSize,1), NO_INIT));
        strm.avail_out = vidSize;
        strm.next_out = (unsigned char*)vidBuffer.uniq().dataw();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(VideoFrame(vidBuffer, dims, VideoFormat(entry.flags), bool(entry.byte_swap)));
        break;
      }
    default:
      LFATAL("Could Not Open Frame Of Type: %d!", pix_type);
  }
  
  inflateEnd(&strm);
  delete [] comp_image_buf;
  return frame;
}
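// For reference, a minimal standalone zlib round-trip using the same inflate
// pattern as readFrame() above (inflateInit, one-shot inflate with Z_FINISH
// into an output buffer of known size, inflateEnd), without the MGZJ journal
// framing. Buffer handling here is simplified for the example; link with -lz.
#include <cstdio>
#include <cstring>
#include <vector>
#include <zlib.h>

int main()
{
  const char* text = "hello hello hello hello";
  const uLong srcLen = static_cast<uLong>(std::strlen(text)) + 1;

  // Compress with the convenience wrapper so there is something to inflate.
  std::vector<Bytef> comp(compressBound(srcLen));
  uLongf compLen = static_cast<uLongf>(comp.size());
  if (compress(comp.data(), &compLen, reinterpret_cast<const Bytef*>(text), srcLen) != Z_OK)
    { std::fprintf(stderr, "compress failed\n"); return 1; }

  // Decompress with the raw z_stream API, as in MgzJDecoder::readFrame().
  std::vector<Bytef> out(srcLen);
  z_stream strm;
  std::memset(&strm, 0, sizeof(strm));                 // zalloc/zfree/opaque = Z_NULL
  if (inflateInit(&strm) != Z_OK)
    { std::fprintf(stderr, "inflateInit failed\n"); return 1; }

  strm.avail_in  = static_cast<uInt>(compLen);
  strm.next_in   = comp.data();
  strm.avail_out = static_cast<uInt>(out.size());
  strm.next_out  = out.data();

  const int ret = inflate(&strm, Z_FINISH);            // single shot: output buffer is big enough
  inflateEnd(&strm);
  if (ret != Z_STREAM_END)
    { std::fprintf(stderr, "inflate failed: %d\n", ret); return 1; }

  std::printf("round-trip: %s\n", reinterpret_cast<const char*>(out.data()));
  return 0;
}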
Example #14
// ######################################################################
void Logger::run(nub::soft_ref<MbariResultViewer> rv, MbariImage<PixRGB <byte> >& img,
                                        MbariVisualEvent::VisualEventSet& eventSet, const Dims scaledDims)
{
    // adjust scaling if needed
    Dims d = img.getDims();
    itsScaleW = (float)d.w()/(float)scaledDims.w();
    itsScaleH = (float)d.h()/(float)scaledDims.h();

    // initialize property vector and FOE estimator
    MbariVisualEvent::PropertyVectorSet pvs;

    // this is a list of all the events that have a token in this frame
    std::list<MbariVisualEvent::VisualEvent *> eventFrameList;

    // this is a complete list of all those events that are ready to be written
    std::list<MbariVisualEvent::VisualEvent *> eventListToSave;

    // get event frame list for this frame and those events that are ready to be saved
    // this is a list of all the events that have a token in this frame
    eventFrameList = eventSet.getEventsForFrame(img.getFrameNum());

    // this is a complete list of all those events that are ready to be written
    eventListToSave = eventSet.getEventsReadyToSave(img.getFrameNum());

    // write out eventSet?
    if (itsSaveEventsName.getVal().length() > 0 ) saveVisualEvent(eventSet, eventFrameList);

    // write out summary ?
    if (itsSaveSummaryEventsName.getVal().length() > 0) saveVisualEventSummary(Version::versionString(), eventListToSave);

    // flag events that have been saved for delete
    std::list<MbariVisualEvent::VisualEvent *>::iterator i;
    for (i = eventListToSave.begin(); i != eventListToSave.end(); ++i)
        (*i)->flagWriteComplete();

    // write out positions?
    if (itsSavePositionsName.getVal().length() > 0) savePositions(eventFrameList);

    MbariVisualEvent::PropertyVectorSet pvsToSave = eventSet.getPropertyVectorSetToSave();

    // write out property vector set?
    if (itsSavePropertiesName.getVal().length() > 0) saveProperties(pvsToSave);

    // TODO: this is currently not used...look back in history to where this got cut-out
    // need to obtain the property vector set?
    if (itsLoadPropertiesName.getVal().length() > 0) pvs = eventSet.getPropertyVectorSet();

    // get a list of events for this frame
    eventFrameList = eventSet.getEventsForFrame(img.getFrameNum());

    // write out eventSet to XML?
    if (itsSaveXMLEventSetName.getVal().length() > 0) {
        saveVisualEventSetToXML(eventFrameList,
                img.getFrameNum(),
                img.getMetaData().getTC(),
                itsFrameRange);
    }

    const int circleRadiusRatio = 40;
    const int circleRadius = img.getDims().w() / circleRadiusRatio;

    Image< PixRGB<byte> > output = rv->createOutput(img,
            eventSet,
            circleRadius,
            itsScaleW, itsScaleH);

    // write  ?
    if (itsSaveOutput.getVal())
        itsOfs->writeFrame(GenericFrame(output), "results", FrameInfo("results", SRC_POS));

    // display output ?
    rv->display(output, img.getFrameNum(), "Results");

    // need to save any event clips?
    if (itsSaveEventNumsAll) {
        //save all events
        std::list<MbariVisualEvent::VisualEvent *>::iterator i;
        for (i = eventFrameList.begin(); i != eventFrameList.end(); ++i)
            saveSingleEventFrame(img, img.getFrameNum(), *i);
    } else {
        // need to save any particular event clips?
        uint csavenum = numSaveEventClips();
        for (uint idx = 0; idx < csavenum; ++idx) {
            uint evnum = getSaveEventClipNum(idx);
            if (!eventSet.doesEventExist(evnum)) continue;

            MbariVisualEvent::VisualEvent *event = eventSet.getEventByNumber(evnum);
            if (event->frameInRange(img.getFrameNum()))
                saveSingleEventFrame(img, img.getFrameNum(), event);
        }
    }

    //flag events that have been saved for delete otherwise takes too much memory
    for (i = eventListToSave.begin(); i != eventListToSave.end(); ++i)
        (*i)->flagForDelete();
    while (!eventFrameList.empty()) eventFrameList.pop_front();
    while (!eventListToSave.empty()) eventListToSave.pop_front();

}
Example #15
// ######################################################################
GenericFrame XCgrabberFlex::readFrame()
{
  return GenericFrame(this->grabRaw());
}
int main(const int argc, const char **argv)
{

        MYLOGVERB = LOG_INFO;
        mgr = new ModelManager("Test LabelMeSaliency");

        nub::soft_ref<SimEventQueueConfigurator>
                seqc(new SimEventQueueConfigurator(*mgr));
        mgr->addSubComponent(seqc);

        //our brain
        nub::ref<StdBrain>  brain(new StdBrain(*mgr));
        mgr->addSubComponent(brain);


        mgr->exportOptions(MC_RECURSE);
        mgr->setOptionValString(&OPT_RawVisualCortexChans, "IOC");
        //mgr.setOptionValString(&OPT_RawVisualCortexChans, "I");
        //mgr->setOptionValString(&OPT_RawVisualCortexChans, "GNO");
        //mgr.setOptionValString(&OPT_RawVisualCortexChans, "N");
        //manager.setOptionValString(&OPT_UseOlderVersion, "false");
        // set the FOA and fovea radii
        mgr->setOptionValString(&OPT_SaliencyMapType, "Fast");
        mgr->setOptionValString(&OPT_SMfastInputCoeff, "1");

        mgr->setOptionValString(&OPT_WinnerTakeAllType, "Fast");
        mgr->setOptionValString(&OPT_SimulationTimeStep, "0.2");

        mgr->setModelParamVal("FOAradius", 128, MC_RECURSE);
        mgr->setModelParamVal("FoveaRadius", 128, MC_RECURSE);

        mgr->setOptionValString(&OPT_IORtype, "Disc");

        if (mgr->parseCommandLine(
                                (const int)argc, (const char**)argv, "<path to images>", 1, 1) == false)
                return(1);

        nub::soft_ref<SimEventQueue> seq = seqc->getQ();

        mgr->start();
        //nub::ref<StdBrain>  brain = dynCastWeak<StdBrain>(mgr->subComponent("Brain"));

        //"/lab/ilab15/tmp/objectsDB/mit/labelMe/05june05_static_indoor",

        ComplexChannel *cc =
                &*dynCastWeak<ComplexChannel>(brain->getVC());

        TestImages testImages(mgr->getExtraArg(0).c_str(), TestImages::MIT_LABELME);

        Image<float> allObjImg = Raster::ReadFloat("allObjImg.pfm", RASFMT_PFM);
        inplaceNormalize(allObjImg, 0.0F, 1.0F);

        printf("## \"Filename\", \"Size\",\"fovea Radius\",\"Number of objects\",\"Salient Location\", \"Hits\",");
        printf("\"Obj Saliency Max\",\"Obj Saliency Min\",\"Obj Saliency Sum\",\"Obj Saliency Area\"");
        printf("\"Dist Saliency Max\",\"Dist Saliency Min\",\"Dist Saliency Sum\",\"Dist Saliency Area\"");
        printf("\n");

        for(uint scene=0; scene<testImages.getNumScenes(); scene++)
        {

                //get the image
                LINFO("Get scene %i", scene);
                Image<PixRGB<byte> > img = testImages.getScene(scene);
                std::string sceneFile = testImages.getSceneFilename(scene);
                LINFO("Size %ix%i", img.getWidth(), img.getHeight());


                //set the fovea and foa radius to be 1/4 the size of the image width
                int fRadius = 128; //(img.getWidth()/16);
                //TODO: fixme
                //brain->getSM()->setFoveaRadius(fRadius);
                //brain->getSM()->setFOAradius(fRadius);

                initRandomNumbers();

                if (testImages.getNumObj() > 0)  //if we have any labled objects
                {

                        //bias the vc
                        Image<float> mask = rescale(allObjImg, img.getDims());
                        biasVC(*cc, mask);
                        //evolve the brain

                        Image<float> SMap;

                        rutz::shared_ptr<SimEventInputFrame> //place the image in the queue
                                e(new SimEventInputFrame(brain.get(), GenericFrame(img), 0));
                        seq->post(e);

                        //set the task relevance map

                        Point2D<int> winner;
                        float interestLevel=100.0F;
                        int nHits=0;
                        int nTimes=95;

                        printf("[ ");
                        Point2D<int> lastLoc(-1,-1);
                        while(interestLevel > 0.01F && nTimes < 100) //do until no more activation
                        {
                                nTimes++;
                                LINFO("InterestLevel %f", interestLevel);
                                Point2D<int> currentWinner = evolveBrain(img, SMap, &interestLevel, seq);

                                if (debug)
                                {
                                        if (lastLoc.isValid())
                                        {
                                                drawLine(img, lastLoc, currentWinner,
                                                         PixRGB<byte>(0, 255, 0), 4);
                                        } else {
                                                drawCircle(img, currentWinner, fRadius-10, PixRGB<byte>(255,0,0), 3);
                                        }
                                        lastLoc = currentWinner;


                                         drawCircle(img, currentWinner, fRadius, PixRGB<byte>(0,255,0), 3);
                                }

                                //check if the winner is inside an object (all objects)
                                int hit = -1;
                                for (uint obj=0; obj<testImages.getNumObj(); obj++)
                                {
                                        int lineWidth = int(img.getWidth()*0.003);
                                        std::vector<Point2D<int> > objPoly = testImages.getObjPolygon(obj);
                                        if (debug)
                                        {
                                                Point2D<int> p1 = objPoly[0];
                                                for(uint i=1; i<objPoly.size(); i++)
                                                {
                                                        drawLine(img, p1, objPoly[i], PixRGB<byte>(255, 0, 0), lineWidth);
                                                        p1 = objPoly[i];
                                                }
                                                drawLine(img, p1, objPoly[0], PixRGB<byte>(255, 0, 0), lineWidth); //close the polygon
                                        }

                                        // if (testImages.pnpoly(objPoly, winner))
                                        //  hit = 1;
                                        if (testImages.pnpoly(objPoly, currentWinner))
                                        {
                                                hit = obj;
                                        }

                                }
                                printf("%i ", hit);
                                if (hit != -1)
                                {
                                        winner = currentWinner;
                                        nHits++;
                                }

                        }

                        if (debug)
                        {
                                Raster::WriteRGB(img, "IORSaliency.ppm");
                                Image<PixRGB<byte> > tmp  = rescale(img, 512, 512);
                                SHOWIMG(tmp);
                        }
                        printf("] ");
                        printf("\"%s\",\"%ix%i\",\"%i\",\"%i\",\"(%i,%i)\",\"%i\"",
                                        sceneFile.c_str(), img.getWidth(), img.getHeight(), fRadius,
                                        testImages.getNumObj(), winner.i, winner.j, nHits);
                        printf("\n");

                        if (debug)
                        {
                                Image<PixRGB<byte> > tmp  = rescale(img, 512, 512);
                                SHOWIMG(tmp);

                        }


                        //Compute the saliency ratio
                        /*Image<byte> imgMask;
                        //get the obj mask
                        for (uint obj=0; obj<testImages.getNumObj(); obj++)
                        {
                        LINFO("Adding obj %i", obj);
                        Image<byte> objMask = testImages.getObjMask(obj);
                        if (imgMask.initialized())
                        imgMask += objMask;
                        else
                        imgMask = objMask;
                        }
                        if (debug) SHOWIMG(rescale((Image<float>)imgMask, 512, 512));

                        LINFO("Mask %ix%i", imgMask.getWidth(), imgMask.getHeight());
                        Image<float> distMask = chamfer34(imgMask, (byte)255);
                        Image<float> objMask = binaryReverse(distMask, 255.0F);

                        //normalize mask from 0 to 1
                        inplaceNormalize(objMask, 0.0F, 1.0F);
                        inplaceNormalize(distMask, 0.0F, 1.0F);

                        if (debug) SHOWIMG(rescale((Image<float>)objMask, 512, 512));
                        if (debug) SHOWIMG(rescale((Image<float>)distMask, 512, 512));

                        //resize the saliency map to the orig img size
                        SMap = rescale(SMap, imgMask.getDims());

                        float objMin, objMax, objSum, objArea;
                        getMaskedMinMaxSumArea(SMap, objMask, objMin, objMax, objSum, objArea);

                        float distMin, distMax, distSum, distArea;
                        getMaskedMinMaxSumArea(SMap, distMask, distMin, distMax, distSum, distArea);

                        printf("\"%f\",\"%f\",\"%f\",\"%f\",\"%f\",\"%f\",\"%f\",\"%f\"",
                        objMax, objMin, objSum, objArea,
                        distMax, distMin, distSum, distArea);
                        printf("\n");
                        */
                } else {
                        printf("##%s has no objects \n", sceneFile.c_str());
                }
        }

        return 0;
}
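// The hit test above goes through testImages.pnpoly(objPoly, currentWinner).
// As a standalone reference for what such a point-in-polygon test typically
// computes, here is the classic even-odd ray-crossing version (an equivalent
// sketch, not the TestImages implementation):
#include <cstdio>
#include <vector>

struct Pt { int i, j; };        // stand-in for Point2D<int>

// Count how many polygon edges a ray going right from p crosses;
// an odd count means p lies inside the polygon.
bool pointInPolygon(const std::vector<Pt>& poly, const Pt& p)
{
  bool inside = false;
  for (size_t a = 0, b = poly.size() - 1; a < poly.size(); b = a++) {
    const bool crosses =
      ((poly[a].j > p.j) != (poly[b].j > p.j)) &&
      (p.i < (double)(poly[b].i - poly[a].i) * (p.j - poly[a].j) /
                 (double)(poly[b].j - poly[a].j) + poly[a].i);
    if (crosses) inside = !inside;
  }
  return inside;
}

int main()
{
  const std::vector<Pt> square = { {0,0}, {10,0}, {10,10}, {0,10} };
  std::printf("inside=%d outside=%d\n",
              int(pointInPolygon(square, Pt{5,5})),
              int(pointInPolygon(square, Pt{15,5})));
  return 0;
}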
Example #17
// ######################################################################
GenericFrame XMLInput::readFrame()
{
  if (itsTestImages.get() == 0)
    LFATAL("No scene data. Need xml file");

  if (!itsGetObjects.getVal())
    itsCurrentSceneNum = itsFrameNum;

  //If we don't have the frame number, then return an empty image
  if (itsCurrentSceneNum >= itsTestImages->getNumScenes())
  {
    LINFO("No more scenes");
    return GenericFrame();
  }


  //Get the scene


  TestImages::SceneData sceneData = itsTestImages->getSceneData(itsCurrentSceneNum);
  rutz::shared_ptr<TestImages::SceneData> scene(new TestImages::SceneData);
  scene->description = sceneData.description;
  scene->filename = sceneData.filename;
  scene->type = sceneData.type;
  scene->useType = sceneData.useType;

//  LINFO("Scene %s", sceneData.filename.c_str());
  Image<PixRGB<byte> > sceneImg;
  if (itsGetObjects.getVal())
  {

    if (itsObjectNum < sceneData.objects.size())
    {
      TestImages::ObjData objData = sceneData.objects[itsObjectNum];
      std::vector<Point2D<int> > objPoly = objData.polygon;

      Image<PixRGB<byte> > img = itsTestImages->getScene(itsCurrentSceneNum);

      //Get the bounding box
      Rectangle rect = findBoundingRect(objPoly, img.getDims());
      sceneImg = crop(img, rect);

      scene->objects.push_back(objData);
      itsObjectNum++;
      if (itsObjectNum >= sceneData.objects.size())
      {
        itsCurrentSceneNum++;
        itsObjectNum = 0;
      }
    }



  } else {
    scene->objects = sceneData.objects;
    sceneImg = itsTestImages->getScene(itsCurrentSceneNum);

    if (itsDrawPolygons.getVal())
    {
      for(uint i=0; i<sceneData.objects.size(); i++)
      {
        TestImages::ObjData objData = sceneData.objects[i];

        if (itsFilterObjectName.getVal() == objData.name || itsFilterObjectName.getVal().empty())
        {
          std::vector<Point2D<int> > objPoly = objData.polygon;
          Point2D<int> p1 = objPoly[0];
          for(uint i=1; i<objPoly.size(); i++)
          {
            drawLine(sceneImg, p1, objPoly[i], PixRGB<byte>(0, 255, 0), 0);
            p1 = objPoly[i];
          }
          drawLine(sceneImg, p1, objPoly[0], PixRGB<byte>(0, 255, 0)); //close the polygon

          writeText(sceneImg, objPoly[0]+10, objData.name.c_str(), PixRGB<byte>(255,255,255), PixRGB<byte>(0,0,0));
        }

      }
    }

  }

  if (!sceneData.dims.isEmpty())
    sceneImg = rescale(sceneImg, sceneData.dims);
  scene->dims = sceneImg.getDims();


  GenericFrame frame(sceneImg);
  frame.addMetaData(std::string("SceneData"), scene);

  return frame;
}
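// findBoundingRect() above turns the object's polygon into an axis-aligned
// crop window. A rough standalone equivalent (min/max over the vertices,
// clamped to the image size) with made-up types; this is not the actual
// INVT helper, and it assumes the polygon is non-empty:
#include <algorithm>
#include <cstdio>
#include <vector>

struct Pt   { int x, y; };
struct Rect { int left, top, right, bottom; };       // inclusive corners

// Axis-aligned bounding box of a polygon, clamped to a w-by-h image.
Rect boundingRect(const std::vector<Pt>& poly, int w, int h)
{
  Rect r = { poly[0].x, poly[0].y, poly[0].x, poly[0].y };
  for (const Pt& p : poly) {
    r.left   = std::min(r.left,   p.x);
    r.top    = std::min(r.top,    p.y);
    r.right  = std::max(r.right,  p.x);
    r.bottom = std::max(r.bottom, p.y);
  }
  r.left   = std::max(0, r.left);
  r.top    = std::max(0, r.top);
  r.right  = std::min(w - 1, r.right);
  r.bottom = std::min(h - 1, r.bottom);
  return r;
}

int main()
{
  const std::vector<Pt> poly = { {12, 40}, {80, 35}, {60, 90} };
  const Rect r = boundingRect(poly, 100, 100);
  std::printf("tlbr: %d %d %d %d\n", r.top, r.left, r.bottom, r.right);  // 35 12 90 80
  return 0;
}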