Code Example #1
File: vision.cpp Project: alon/burst
void vision::saveImageRaw(){

  //First you have to declare an ALVisionImage to get the video buffer.
  // ( definition included in alvisiondefinitions.h and alvisiondefinitions.cpp )
  ALVisionImage* imageIn;

  //Now you can get the pointer to the video structure.
  try
  {
    imageIn = ( ALVisionImage* ) ( camera->call<int>( "getDirectRawImageLocal", name ) );
  }
  catch( ALError& e )
  {
    log->error( "vision", "could not call the getDirectRawImageLocal method of the NaoCam module" );
    return;
  }

  std::cout << imageIn->toString();

  static int saved_frames = 0;
  const int MAX_FRAMES = 150;
  if (saved_frames > MAX_FRAMES)
    return;

  string EXT(".NFRM");
  string BASE("/");
  int NUMBER = saved_frames;
  string FOLDER("/home/root/frames");
  stringstream FRAME_PATH;

  FRAME_PATH << FOLDER << BASE << NUMBER << EXT;
  fstream fout(FRAME_PATH.str().c_str(), fstream::out | fstream::binary);

  // Retrieve joints
  //vector<float> joints = getVisionBodyAngles();

  // Lock and write image
  fout.write(reinterpret_cast<const char*>(imageIn->getFrame()), IMAGE_BYTE_SIZE);

  // Write joints
  //for (vector<float>::const_iterator i = joints.begin(); i < joints.end();
  //     i++) {
  //    fout << *i << " ";
  //}

  // Write sensors
  //vector<float> sensor_data = getAllSensors();
  //for (vector<float>::const_iterator i = sensor_data.begin();
  //     i != sensor_data.end(); i++) {
  //    fout << *i << " ";
  //}

  fout.close();
  cout << "Saved frame #" << saved_frames++ << endl;

  //Now you have finished with the image, you have to release it in the V.I.M.
  try {
    camera->call<int>( "releaseDirectRawImage", name );
  } catch (ALError& e) {
    log->error( "vision", "could not call the releaseDirectRawImage method of the NaoCam module" );
  }
}
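
For reference, here is a minimal sketch of how one of the saved raw frames could be read back on a desktop machine. It is not part of the project: the byte count is an assumption (e.g. a 320x240 YUV422 frame) and must match the IMAGE_BYTE_SIZE used on the robot, while the path follows from FOLDER, BASE, NUMBER and EXT above for frame 0.

// Sketch only: read one saved .NFRM frame back into a buffer.
#include <fstream>
#include <iostream>
#include <vector>

int main()
{
  // Assumption: 320x240 YUV422 frame; adjust to the robot's IMAGE_BYTE_SIZE.
  const std::size_t IMAGE_BYTE_SIZE = 320 * 240 * 2;
  std::vector<char> frame( IMAGE_BYTE_SIZE );

  std::ifstream fin( "/home/root/frames/0.NFRM", std::ifstream::in | std::ifstream::binary );
  if ( !fin )
  {
    std::cerr << "could not open frame file" << std::endl;
    return 1;
  }

  fin.read( &frame[0], frame.size() );
  std::cout << "read " << fin.gcount() << " bytes" << std::endl;
  return 0;
}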
Code Example #2
File: vision.cpp Project: alon/burst
/**
 * saveImage : save the last image received.
 * @param pName path of the file
 */
void vision::saveImage( std::string pName ){

  //First you have to declare an ALVisionImage to get the video buffer.
  // ( definition included in alvisiondefinitions.h and alvisiondefinitions.cpp )
  ALVisionImage* imageIn;

  //Now you can get the pointer to the video structure.
  try
  {
    imageIn = ( ALVisionImage* ) ( camera->call<int>( "getImageLocal", name ) );
  }
  catch( ALError& e )
  {
    log->error( "vision", "could not call the getImageLocal method of the NaoCam module" );
    return;
  }

  std::cout << imageIn->toString();

  //You can get some information about the image.
  int width = imageIn->fWidth;
  int height = imageIn->fHeight;
  int nbLayers = imageIn->fNbLayers;
  int colorSpace = imageIn->fColorSpace;
  long long timeStamp = imageIn->fTimeStamp;
  int seconds = (int)(timeStamp/1000000LL);

  //You can get a pointer to the image data.
  uInt8 *dataPointerIn = imageIn->getFrame();

  //Now you create an OpenCV image and save it to a file.
  IplImage* image = cvCreateImage( cvSize( width, height ), 8, nbLayers );

//  image->imageData = ( char* ) imageIn->getFrame();
  image->imageData = ( char* ) dataPointerIn;

  // Keep the file name in a std::string so the buffer returned by c_str()
  // stays valid; taking c_str() of a temporary would leave a dangling pointer.
  std::string imageName = pName + DecToString(seconds) + ".jpg";

  cvSaveImage( imageName.c_str(), image );
  cvReleaseImage( &image );

  //Now you have finished with the image, you have to release it in the V.I.M.
  try
  {
    camera->call<int>( "releaseImage", name );
  } catch( ALError& e )
  {
    log->error( "vision", "could not call the releaseImage method of the NaoCam module" );
  }

}
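
One design note on the OpenCV part: cvCreateImage allocates its own pixel buffer, which is then bypassed when imageData is re-pointed at the camera buffer. With the OpenCV 1.x C API, a common alternative is to create only an image header and attach the external buffer to it. Below is a minimal sketch of that approach, not taken from the project; it assumes the image was requested in a BGR-compatible color space, and saveBufferAsJpeg (and its parameters) are hypothetical names for the values read from ALVisionImage above.

// Sketch only: wrap an external camera buffer in an IplImage header
// instead of allocating a full image and re-pointing imageData.
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <string>

void saveBufferAsJpeg( unsigned char* dataPointerIn, int width, int height,
                       int nbLayers, const std::string& fileName )
{
  // Create only the header; no pixel buffer is allocated here.
  IplImage* image = cvCreateImageHeader( cvSize( width, height ), IPL_DEPTH_8U, nbLayers );

  // Attach the external buffer; the step is the number of bytes per row.
  cvSetData( image, dataPointerIn, width * nbLayers );

  cvSaveImage( fileName.c_str(), image );

  // Release only the header; the buffer itself stays owned by the V.I.M.
  cvReleaseImageHeader( &image );
}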