String AudioChannelSet::getDescription() const
{
    // Returns a human-readable name for this channel layout.  Discrete
    // layouts are labelled by their channel count; every other known layout
    // is matched by exact equality against its factory method.
    if (isDiscreteLayout())            return String ("Discrete #") + String (size());
    if (*this == disabled())           return "Disabled";
    if (*this == mono())               return "Mono";
    if (*this == stereo())             return "Stereo";
    if (*this == createLCR())          return "LCR";
    if (*this == createLRS())          return "LRS";
    if (*this == createLCRS())         return "LCRS";
    if (*this == quadraphonic())       return "Quadraphonic";
    if (*this == pentagonal())         return "Pentagonal";
    if (*this == hexagonal())          return "Hexagonal";
    if (*this == octagonal())          return "Octagonal";
    if (*this == ambisonic())          return "Ambisonic";
    // NOTE(review): the x.0 layouts below are labelled "x.1 Surround" and the
    // true x.1 layouts are distinguished only by "(+Lfe)".  This looks like a
    // deliberate naming convention rather than a typo, and callers/hosts may
    // depend on these exact strings -- confirm before changing any of them.
    if (*this == create5point0())      return "5.1 Surround";
    if (*this == create5point1())      return "5.1 Surround (+Lfe)";
    if (*this == create6point0())      return "6.1 Surround";
    if (*this == create6point0Music()) return "6.1 (Music) Surround";
    if (*this == create6point1())      return "6.1 Surround (+Lfe)";
    if (*this == create7point0())      return "7.1 Surround (Rear)";
    if (*this == create7point1())      return "7.1 Surround (Rear +Lfe)";
    if (*this == create7point1AC3())   return "7.1 AC3 Surround (Rear + Lfe)";
    if (*this == createFront7point0()) return "7.1 Surround (Front)";
    if (*this == createFront7point1()) return "7.1 Surround (Front +Lfe)";

    // Fallback for layouts without a dedicated name.
    return "Unknown";
}
Beispiel #2
0
bool VisuoThread::getFixation(Bottle &bStereo)
{
    // Reports the current fixation point -- the centre of the left camera
    // image, duplicated for both stereo coordinates -- appends it to the
    // given bottle and starts the tracker on it.  Always returns true.
    Vector fix(4);

    imgMutex.wait();
    if (img[LEFT]==NULL)
    {
        // No image grabbed yet: fall back to the centre of a 320x240 frame.
        fix[0]=fix[2]=160;
        fix[1]=fix[3]=120;
    }
    else
    {
        fix[0]=fix[2]=0.5*img[LEFT]->width();
        fix[1]=fix[3]=0.5*img[LEFT]->height();
    }
    imgMutex.post();

    for (size_t idx=0; idx<fix.size(); idx++)
        bStereo.addDouble(fix[idx]);

    // Kick off tracking around the fixation point with a 40-pixel window.
    int side=40;
    startTracker(fix,side);

    return true;
}
Beispiel #3
0
int main (int argc, char **argv) {
  // Entry point for the stereo UVC camera ROS node.
  ros::init(argc, argv, "uvc_camera_stereo");

  // Driver object built on the public and private ("~") node handles;
  // presumably capture/publishing is set up in the constructor -- confirm
  // in uvc_camera::StereoCamera.
  uvc_camera::StereoCamera stereo(ros::NodeHandle(), ros::NodeHandle("~"));

  // Service ROS callbacks until the node is shut down.
  ros::spin();
  return 0;
}
void StereochemistryTest::setBondStereochemistry()
{
    chemkit::Molecule molecule;
    chemkit::Atom *C1 = molecule.addAtom("C");
    chemkit::Atom *C2 = molecule.addAtom("C");
    chemkit::Bond *C1_C2 = molecule.addBond(C1, C2, chemkit::Bond::Double);

    chemkit::Stereochemistry stereo(&molecule);
    stereo.setStereochemistry(C1_C2, chemkit::Stereochemistry::Cis);
    QVERIFY(stereo.stereochemistry(C1_C2) == chemkit::Stereochemistry::Cis);
}
void StereochemistryTest::setAtomStereochemistry()
{
    chemkit::Molecule molecule;
    chemkit::Atom *C1 = molecule.addAtom("C");
    chemkit::Atom *C2 = molecule.addAtom("C");

    chemkit::Stereochemistry stereo(&molecule);
    stereo.setStereochemistry(C1, chemkit::Stereochemistry::R);
    QVERIFY(stereo.stereochemistry(C1) == chemkit::Stereochemistry::R);

    stereo.setStereochemistry(C2, chemkit::Stereochemistry::S);
    QVERIFY(stereo.stereochemistry(C2) == chemkit::Stereochemistry::S);
}
Beispiel #6
0
void VisuoThread::updateMotionCUT()
{
    // Polls both motionCUT ports (non-blocking), pushes a single detected
    // moving blob per camera into that camera's time-stamped buffer, and
    // updates the stereo target when in motion-tracking mode.

    // Detect motion (read(false) returns NULL when no new data is pending)
    Bottle *bMotion[2];
    bMotion[LEFT]=mCUTPort[LEFT].read(false);
    bMotion[RIGHT]=mCUTPort[RIGHT].read(false);

    double tNow=Time::now();

    // NOTE(review): 'detected' is never read below -- dead variable.
    bool detected=true;

    motMutex.wait();

    // Coordinates default to (0,0) for any camera without a detection.
    Vector stereo(4); stereo=0.0;
    for(int cam=0; cam<2; cam++)
    {
        // if even the first element of the buffer is far (in time)
        // clear the whole buffer
        if(buffer[cam].size()>0 && tNow-buffer[cam].back().t>timeTol)
            buffer[cam].clear();

        // If only a single moving blob has been detected
        if (bMotion[cam] && bMotion[cam]->size()==1)
        {
            Item item;
            item.t=Time::now();
            item.size=bMotion[cam]->get(0).asList()->get(2).asDouble();
            item.p=cvPoint(bMotion[cam]->get(0).asList()->get(0).asInt(),bMotion[cam]->get(0).asList()->get(1).asInt());
            buffer[cam].push_back(item);

            // Blob centre (u,v) for this camera in the stereo vector.
            stereo[2*cam]=bMotion[cam]->get(0).asList()->get(0).asDouble();
            stereo[2*cam+1]=bMotion[cam]->get(0).asList()->get(1).asDouble();
        }

        // Avoid time inconsistencies: drop buffered entries older than timeTol
        while (buffer[cam].size() && (tNow-buffer[cam].front().t)>timeTol)
            buffer[cam].pop_front();
    }


    if(trackMode==MODE_TRACK_MOTION)
        stereo_target.set(stereo);


    motMutex.post();
}
Beispiel #7
0
 /** This constructor adds framebuffer-related hints with default values:
  *  RGBA8 colour, 24-bit depth, 8-bit stencil, no accumulation buffer,
  *  no auxiliary buffers, no multisampling, monitor-default refresh rate,
  *  mono (non-stereo) and non-sRGB framebuffer. */
 FramebufferHints::FramebufferHints()
 {
     // set default values
     redBits(8);
     greenBits(8);
     blueBits(8);
     alphaBits(8);
     depthBits(24);
     stencilBits(8);
     accumRedBits(0);
     accumGreenBits(0);
     accumBlueBits(0);
     accumAlphaBits(0);
     auxBuffers(0);
     samples(0);
     refreshRate(0);
     stereo(false);
     sRGBCapable(false);
 }
Beispiel #8
0
/**
 * This is the method you need to call to build a complete PiSDF graph.
 *
 * Creates an empty top-level graph with a single hierarchical body vertex
 * ("top") whose subgraph is supplied by stereo(), then registers it with
 * Spider as the graph to execute.
 */
void init_stereo(){
	PiSDFGraph* top = Spider::createGraph(
		/*Edges*/    0,
		/*Params*/   0,
		/*InputIf*/  0,
		/*OutputIf*/ 0,
		/*Config*/   0,
		/*Body*/     1);

	// Attach the stereo subgraph as the only body vertex of the top graph.
	Spider::addHierVertex(
		/* Graph */  top,
		/*Name*/     "top",
		/*Graph*/    stereo(),
		/*InputIf*/  0,
		/*OutputIf*/ 0,
		/*Params*/   0);

	Spider::setGraph(top);
}
int main(int argc, char* argv[])
{
	// Command-pattern demo: build receivers, wrap them in command objects,
	// load the remote's slots, print its layout, then press every on/off
	// button in order.
	shared_ptr<RemoteControl> remoteControl(new RemoteControl());

	// Receivers the commands act on.
	shared_ptr<Light> livingRoomLight(new Light("Living Room"));
	shared_ptr<Light> kitchenLight(new Light("Kitchen"));
	shared_ptr<CeilingFan> ceilingFan(new CeilingFan("Living Room"));
	shared_ptr<GarageDoor> garageDoor(new GarageDoor(""));
	shared_ptr<Stereo> stereo(new Stereo("Living Room"));

	// One on/off command pair per device.  The garage-door commands are
	// created but never wired to a slot, matching the original demo.
	shared_ptr<LightOnCommand> livingRoomLightOn(new LightOnCommand(livingRoomLight));
	shared_ptr<LightOffCommand> livingRoomLightOff(new LightOffCommand(livingRoomLight));
	shared_ptr<LightOnCommand> kitchenLightOn(new LightOnCommand(kitchenLight));
	shared_ptr<LightOffCommand> kitchenLightOff(new LightOffCommand(kitchenLight));
	shared_ptr<CeilingFanOnCommand> ceilingFanOn(new CeilingFanOnCommand(ceilingFan));
	shared_ptr<CeilingFanOffCommand> ceilingFanOff(new CeilingFanOffCommand(ceilingFan));
	shared_ptr<GarageDoorUpCommand> garageDoorUp(new GarageDoorUpCommand(garageDoor));
	shared_ptr<GarageDoorDownCommand> garageDoorDown(new GarageDoorDownCommand(garageDoor));
	shared_ptr<StereoOnWithCDCommand> stereoOnWithCD(new StereoOnWithCDCommand(stereo));
	shared_ptr<StereoOffCommand> stereoOff(new StereoOffCommand(stereo));

	// Wire each slot with its on/off pair.
	remoteControl->setCommand(0, livingRoomLightOn, livingRoomLightOff);
	remoteControl->setCommand(1, kitchenLightOn, kitchenLightOff);
	remoteControl->setCommand(2, ceilingFanOn, ceilingFanOff);
	remoteControl->setCommand(3, stereoOnWithCD, stereoOff);

	cout << remoteControl->toString() << endl;

	// Press on, then off, for every configured slot.
	for (int slot = 0; slot < 4; ++slot)
	{
		remoteControl->onButtonWasPushed(slot);
		remoteControl->offButtonWasPushed(slot);
	}

	return 0;
}
Beispiel #10
0
/**
 * This is the method you need to call to build a complete PiSDF graph.
 *
 * Allocates an empty top-level graph on the given stack, attaches the
 * stereo(archi, stack) subgraph as its single hierarchical body vertex
 * and returns the resulting top graph.
 *
 * @param archi target architecture description, forwarded to the graphs
 * @param stack allocator used for every graph object created here
 * @return the newly built top-level graph (owned by the stack allocator)
 */
PiSDFGraph* init_stereo(Archi* archi, Stack* stack) {
    PiSDFGraph* top = CREATE(stack, PiSDFGraph)(
                          /*Edges*/    0,
                          /*Params*/   0,
                          /*InputIf*/  0,
                          /*OutputIf*/ 0,
                          /*Config*/   0,
                          /*Body*/     1,
                          /*Archi*/    archi,
                          /*Stack*/    stack);

    // The stereo subgraph is the only body vertex of the top graph.
    top->addHierVertex(
        /*Name*/     "top",
        /*Graph*/    stereo(archi, stack),
        /*InputIf*/  0,
        /*OutputIf*/ 0,
        /*Params*/   0);

    return top;
}
Beispiel #11
0
/*!*****************************************************************************
 *******************************************************************************
\note  process_blobs
\date  Feb 1999
   
\remarks 

    filtering and differentiation of the sensor readings: converts raw
    two-camera 2D blobs into 3D blob state (position/velocity/acceleration)
    via either a learned coordinate transformation (3D mode) or calibrated
    stereo triangulation (2D mode), then filters and predicts ahead.

 *******************************************************************************
 Function Parameters: [in]=input,[out]=output

 \param[in]     raw   :  raw blobs as input
 \param[out]    blobs :  the processed blob information (module-level state)

 ******************************************************************************/
void
process_blobs(Blob2D raw[][2+1])

{

  int i,j,n;
  double pos,vel,acc;
  int rfID;
  static Blob3D *traw;            /* transformed raw blobs, allocated once */
  static int firsttime = TRUE;
  double x[2+2+1], x1[2+1], x2[2+1];
  
  /* one-time allocation of the transformed-blob workspace */
  if (firsttime) {
    firsttime = FALSE;
    traw = (Blob3D *) my_calloc(max_blobs+1,sizeof(Blob3D),MY_STOP);
  }
  
  for (i=1; i<=max_blobs; ++i) {

    if (!raw_blob_overwrite_flag) {

      /* the status of a blob is the AND of the two components */
      traw[i].status = raw[i][1].status && raw[i][2].status;

      switch (stereo_mode) {
	case VISION_3D_MODE:
	  /* coordinate transformation */
	  if (traw[i].status) {
	      x[1] = raw[i][1].x[1];
	      x[2] = raw[i][1].x[2];
	      x[3] = raw[i][2].x[1];
	      x[4] = raw[i][2].x[2];
	      rfID = 0;
	      /* NOTE(review): 'x' is passed as both input and query argument
		 -- presumably intended by the LWPR API; confirm */
	      if (predictLWPROutput(BLOB2ROBOT, 
				    x, x, 0.0, TRUE, traw[i].x, &rfID) == 0) {
		  traw[i].status = FALSE;
	      }
	  }
	break;
	  
	case VISION_2D_MODE:
	  if (traw[i].status) {
	      x1[1] =  raw[i][1].x[1];
	      x1[2] =  raw[i][1].x[2];
	      x2[1] =  raw[i][2].x[1];
	      x2[2] =  raw[i][2].x[2];
#if 1
	      /* stereo calculation based on the calibration */
	      stereo (x1, x2, traw[i].x);
#endif
	  }
	break;
      }
	  
    } else {
      /* use the learning model for the coordinate transformation 
	 (but not in simulation) */
      traw[i] = raw_blobs[i];
      /* CGA: should be updated to use 2D blobs, not 3D blobs. */
    }

    /* post processing */

    /* if the status changed from FALSE to TRUE, it is important
       to re-initialize the filtering */
    if (traw[i].status && !blobs[i].status) {
      for (j= _X_; j<= _Z_; ++j) {
	for (n=0; n<=FILTER_ORDER; ++n) {
	  /* fblob[_X_]/[_Y_]/[_Z_] hold the position/velocity/acceleration
	     filter states respectively */
	  blobpp[i].fblob[_X_][j].filt[n] = traw[i].x[j];
	  blobpp[i].fblob[_Y_][j].filt[n] = 0;
	  blobpp[i].fblob[_Z_][j].filt[n] = 0;
	  blobpp[i].fblob[_X_][j].raw[n]  = traw[i].x[j];
	  blobpp[i].fblob[_Y_][j].raw[n]  = 0;
	  blobpp[i].fblob[_Z_][j].raw[n]  = 0;
	}
      }
    }

    if (blobpp[i].filter_type == KALMAN) {
      ; /* Kalman filtering not implemented here */
    } else { /* default is butterworth filtering */

      for (j= _X_; j<= _Z_; ++j) {

	if (traw[i].status) {

	  blobpp[i].pending = FALSE;
	  blobs[i].status   = TRUE;

	  /* filter and differentiate */
	  pos = filt(traw[i].x[j],&(blobpp[i].fblob[_X_][j]));
	  vel = (pos - blobpp[i].fblob[_X_][j].filt[2])*(double) vision_servo_rate;
	  vel = filt(vel,&(blobpp[i].fblob[_Y_][j]));
	  acc = (vel - blobpp[i].fblob[_Y_][j].filt[2])*(double) vision_servo_rate;
	  acc = filt(acc,&(blobpp[i].fblob[_Z_][j]));
	  if (blobpp[i].acc[j] != NOT_USED) {
	    acc = blobpp[i].acc[j];
	  }

	} else {

	  /* blob not seen: coast on the predicted state for up to
	     MAX_PENDING frames, then freeze and mark the blob invalid */
	  if (++blobpp[i].pending <= MAX_PENDING) {
	    pos = blobpp[i].predicted_state.x[j];
	    vel = blobpp[i].predicted_state.xd[j];
	    acc = blobpp[i].predicted_state.xdd[j];
	    // Note: leave blobs[i].status as is, as we don't want 
	    // to set something true which was not true before
	  } else {
	    pos = blobs[i].blob.x[j];
	    vel = 0.0;
	    acc = 0.0;
	    blobs[i].status   = FALSE;
	  }

	  /* feed the filters with the "fake" data to avoid transients
	     when the target status becomes normal */
	  pos = filt(pos,&(blobpp[i].fblob[_X_][j]));
	  vel = filt(vel,&(blobpp[i].fblob[_Y_][j]));
	  acc = filt(acc,&(blobpp[i].fblob[_Z_][j]));

	}

	/* predict ahead */
	blobs[i].blob.x[j]   = pos;
	blobs[i].blob.xd[j]  = vel;
	blobs[i].blob.xdd[j] = acc;

	if (blobs[i].status) {
	  
	  /* shift the published state forward by the lookahead 'predict' */
	  predict_state(&(blobs[i].blob.x[j]),&(blobs[i].blob.xd[j]),
			&(blobs[i].blob.xdd[j]),predict);
	  
	  
	  /* predict the next state (one vision servo tick ahead) */
	  blobpp[i].predicted_state.x[j]   = pos;
	  blobpp[i].predicted_state.xd[j]  = vel;
	  blobpp[i].predicted_state.xdd[j] = acc;
	  
	  predict_state(&(blobpp[i].predicted_state.x[j]),
			&(blobpp[i].predicted_state.xd[j]),
			&(blobpp[i].predicted_state.xdd[j]),
			1./(double)vision_servo_rate);
	}


      }
    }
  }

}
Beispiel #12
0
//init the logger
INITIALIZE_EASYLOGGINGPP


int main(int argc, char* argv[])
{
  // Live viewer for a matrix-vision stereo rig.  Grabs distorted,
  // undistorted and rectified image pairs and displays them until quit.
  // Interactive keys:
  //   'q' quit, 'b' toggle binning, 'f' print framerates,
  //   'c' write the current image pairs to disk.
  // Returns 0 on both success and configuration failure (kept for
  // compatibility with the original behaviour).

  //tag for LOG-information
  std::string tag = "MAIN\t";

  LOG(INFO) << tag << "Application started.";

  //create a device manager for the matrix vision cameras
  mvIMPACT::acquire::DeviceManager devMgr;

  //create cameras (initialised by Utility::initCameras below)
  Camera *left;
  Camera *right;

  //all the driver stuff from matrix vision for the cameras is done here
  if(!Utility::initCameras(devMgr,left,right))
    return 0;


  //init the stereo system AFTER init the cameras!
  Stereosystem stereo(left,right);

  //collect some configuration parameter written in /configs/default.yml
  std::vector<std::string> nodes;
  //the required node holds the extrinsic/intrinsic parameter directory
  nodes.push_back("inputParameter");


  std::string config = "./configs/default.yml";

  cv::FileStorage fs;

  //check if the config has all the collected configuration parameters
  if(!Utility::checkConfig(config,nodes,fs))
  {
    return 0;
  }

  //put the collected parameters into a variable
  std::string inputParameter;
  fs["inputParameter"] >> inputParameter;

  //load the camera matrices, dist coeffs, R, T, ....
  if(!stereo.loadIntrinsic(inputParameter+"/intrinsic.yml"))
    return 0;
  if(!stereo.loadExtrinisic(inputParameter +"/extrinsic.yml"))
    return 0;

  //set the exposure time for left and right camera
  left->setExposure(24000);
  right->setExposure(24000);


  char key = 0;
  bool running = true;
  unsigned int binning = 0;

  //create windows
  cv::namedWindow("Left Distorted" ,1);
  cv::namedWindow("Right Distorted" ,1);

  cv::namedWindow("Left Undistorted" ,1);
  cv::namedWindow("Right Undistorted" ,1);

  cv::namedWindow("Left Rectified" ,1);
  cv::namedWindow("Right Rectified" ,1);

  //Stereopair is a struct holding a left and right image
  Stereopair distorted;
  Stereopair undistorted;
  Stereopair rectified;


  //endless loop until 'q' is pressed
  while(running)
  {

    //get the different types of images from the stereo system...
    stereo.getImagepair(distorted);
    stereo.getUndistortedImagepair(undistorted);
    stereo.getRectifiedImagepair(rectified);

    //... and show them all (stereopair.mLeft/mRight give access to the images)
    cv::imshow("Left Distorted", distorted.mLeft);
    cv::imshow("Right Distorted", distorted.mRight);

    cv::imshow("Left Undistorted", undistorted.mLeft);
    cv::imshow("Right Undistorted", undistorted.mRight);

    cv::imshow("Left Rectified", rectified.mLeft);
    cv::imshow("Right Rectified", rectified.mRight);

    key = cv::waitKey(10);

    //quit program
    if(key == 'q')
    {
      running = false;
      break;
    }
    //switch binning mode
    else if(key == 'b')
    {
      binning = (binning == 0) ? 1 : 0;
      //after binning the rectification has to be reset to pick up the
      //parameters matching the new image size
      left->setBinning(binning);
      right->setBinning(binning);
      stereo.resetRectification();
    }
    //show FPS of the cameras
    else if(key == 'f')
    {
      //fixed: the Left/Right labels were swapped in the original output
      std::cout << "FPS: Left "<< left->getFramerate() << " " << "Right " << right->getFramerate() << std::endl;
    }
    //capture the images and write them to disk
    else if (key == 'c')
    {
      cv::imwrite("Left Distorted.jpg",distorted.mLeft);
      cv::imwrite("Right Distorted.jpg",distorted.mRight);

      cv::imwrite("Left Undistorted.jpg",undistorted.mLeft);
      cv::imwrite("Right Undistorted.jpg",undistorted.mRight);

      cv::imwrite("Left Rectified.jpg",rectified.mLeft);
      cv::imwrite("Right Rectified.jpg",rectified.mRight);
    }
  }
  return 0;
}
Beispiel #13
0
int main(int argc, char** argv)
{	
    /*Polynomial2 poly2;
    poly2.kuu = -1; 
    poly2.kuv = 1; 
    poly2.kvv= -1; 
    poly2.ku = 0.25; 
    poly2.kv = 0.25; 
    poly2.k1 = 5;
    
    CurveRasterizer<Polynomial2> raster(1, 1, -100, 100, poly2);
    CurveRasterizer2<Polynomial2> raster2(1, 1, -100, 100, poly2);

    auto tr0 = clock();
    int x1 = 0;
    int x2 = 0;
    for (int i = 0; i < 10000000; i++)
    {
        raster.step();
        x1 += raster.x;
    }
    auto tr1 = clock();
    
    for (int i = 0; i < 10000000; i++)
    {
        raster2.step();
        x2 += raster2.x;
    }
    auto tr2 = clock();
    
    cout << "optimized " << double(tr1 - tr0) / CLOCKS_PER_SEC << endl;
    cout << "simple " << double(tr2 - tr1) / CLOCKS_PER_SEC << endl;
    cout << x1 << " " << x2 << endl;
    return 0;*/
    ifstream paramFile(argv[1]);
    if (not paramFile.is_open())
    {
        cout << argv[1] << " : ERROR, file is not found" << endl;
        return 0;
    }
    
    array<double, 6> params;
    
    cout << "EU Camera model parameters :" << endl;
    for (auto & p: params) 
    {
        paramFile >> p;
        cout << setw(10) << p;
    }
    cout << endl;
    paramFile.ignore();
    
    array<double, 6> cameraPose;
    cout << "Camera pose wrt the robot :" << endl;
    for (auto & e: cameraPose) 
    {
        paramFile >> e;
        cout << setw(10) << e;
    }
    cout << endl;
    paramFile.ignore();
    Transformation<double> TbaseCamera(cameraPose.data());
    
    array<double, 6> robotPose1, robotPose2;

    
    
    
    
    SGMParameters stereoParams2;
    stereoParams2.salientPoints = false;
    stereoParams2.verbosity = 3;
    stereoParams2.hypMax = 1;
//    stereoParams.salientPoints = false;
    paramFile >> stereoParams2.u0;
    paramFile >> stereoParams2.v0;
    paramFile >> stereoParams2.dispMax;
    paramFile >> stereoParams2.scale;
    
    paramFile.ignore();
    string imageDir;
    getline(paramFile, imageDir);
    string imageInfo, imageName;
    getline(paramFile, imageInfo);
    istringstream imageStream(imageInfo);
    imageStream >> imageName;
    for (auto & x : robotPose1) imageStream >> x;

    Mat8u img1 = imread(imageDir + imageName, 0);
    cout << "Image name: "<< imageDir + imageName << endl;
    cout << "Image size: "<<img1.size()<<endl;;
    
    stereoParams2.u0 = 50;
    stereoParams2.v0 = 50;
    stereoParams2.uMax = img1.cols;
    stereoParams2.vMax = img1.rows;
    stereoParams2.setEqualMargin();
//    stereoParams2.salientPoints = true;
    
    int counter = 2;
    EnhancedCamera camera(params.data());
    DepthMap depth;
    depth.setDefault();
    
//    stereoParams2.dispMax = 40;
//    stereoParams2.descRespThresh = 2;
//    stereoParams2.scaleVec = vector<int>{1};
    
    MotionStereoParameters stereoParams(stereoParams2);
    stereoParams.verbosity = 1;
    stereoParams.descLength = 5;
//    stereoParams.descRespThresh = 2;
//    stereoParams.scaleVec = vector<int>{1};
    
    MotionStereo stereo(&camera, &camera, stereoParams);
    stereo.setBaseImage(img1);
    
    
    //do SGM to init the depth
    getline(paramFile, imageInfo);
    getline(paramFile, imageInfo);
    getline(paramFile, imageInfo);
    imageStream.str(imageInfo);
    imageStream.clear();
    imageStream >> imageName;
    for (auto & x : robotPose2) imageStream >> x;
    Mat8u img2 = imread(imageDir + imageName, 0);
    Transformation<double> T01(robotPose1.data()), T02(robotPose2.data());
    Transformation<double> TleftRight = T01.compose(TbaseCamera).inverseCompose(T02.compose(TbaseCamera));
    EnhancedSGM stereoSG(TleftRight, &camera, &camera, stereoParams2);
    
    Timer timer;
    stereoSG.computeStereo(img1, img2, depth);
    depth.filterNoise();
    cout << timer.elapsed() << endl; 
    Mat32f res, sigmaRes;
    Mat32f res2, sigmaRes2;
    depth.toInverseMat(res2);
    depth.sigmaToMat(sigmaRes2);
    imshow("res" + to_string(counter), res2 *0.12);
    imshow("sigma " + to_string(counter), sigmaRes2*20);
    cv::waitKey(0);
    
    counter++;
    while (getline(paramFile, imageInfo))
    {
        istringstream imageStream(imageInfo);
        
        imageStream >> imageName;
        for (auto & x : robotPose2) imageStream >> x;
    
        Transformation<double> T01(robotPose1.data()), T02(robotPose2.data());
        Transformation<double> TleftRight = T01.compose(TbaseCamera).inverseCompose(T02.compose(TbaseCamera));
        
        Mat8u img2 = imread(imageDir + imageName, 0);

//        depth.setDefault();
        timer.reset();
        cout << TleftRight << endl;
        DepthMap depth2 = stereo.compute(TleftRight, img2, depth, counter - 3);
        depth = depth2;
        depth.filterNoise();
        cout << timer.elapsed() << endl; 
        depth.toInverseMat(res);
        depth.sigmaToMat(sigmaRes);
//        imwrite(imageDir + "res" + to_string(counter++) + ".png", depth*200);
        
        imshow("sigma " + to_string(counter), sigmaRes*20);
        imshow("d sigma " + to_string(counter), (sigmaRes - sigmaRes2)*20  + 0.5);
//        cout << (sigmaRes - sigmaRes2)(Rect(150, 150, 15, 15)) << endl;
        imshow("res " + to_string(counter), res *0.12);
        counter++; 
        waitKey();
    }
    waitKey();
    return 0;
}
int main(int argc, char** argv)
{	
    /*Polynomial2 poly2;
    poly2.kuu = -1; 
    poly2.kuv = 1; 
    poly2.kvv= -1; 
    poly2.ku = 0.25; 
    poly2.kv = 0.25; 
    poly2.k1 = 5;
    
    CurveRasterizer<Polynomial2> raster(1, 1, -100, 100, poly2);
    CurveRasterizer2<Polynomial2> raster2(1, 1, -100, 100, poly2);

    auto tr0 = clock();
    int x1 = 0;
    int x2 = 0;
    for (int i = 0; i < 10000000; i++)
    {
        raster.step();
        x1 += raster.x;
    }
    auto tr1 = clock();
    
    for (int i = 0; i < 10000000; i++)
    {
        raster2.step();
        x2 += raster2.x;
    }
    auto tr2 = clock();
    
    cout << "optimized " << double(tr1 - tr0) / CLOCKS_PER_SEC << endl;
    cout << "simple " << double(tr2 - tr1) / CLOCKS_PER_SEC << endl;
    cout << x1 << " " << x2 << endl;
    return 0;*/
    ifstream paramFile(argv[1]);
    if (not paramFile.is_open())
    {
        cout << argv[1] << " : ERROR, file is not found" << endl;
        return 0;
    }
    
    array<double, 6> params;
    
    cout << "EU Camera model parameters :" << endl;
    for (auto & p: params) 
    {
        paramFile >> p;
        cout << setw(10) << p;
    }
    cout << endl;
    paramFile.ignore();
    
    array<double, 6> cameraPose;
    cout << "Camera pose wrt the robot :" << endl;
    for (auto & e: cameraPose) 
    {
        paramFile >> e;
        cout << setw(10) << e;
    }
    cout << endl;
    paramFile.ignore();
    Transformation<double> TbaseCamera(cameraPose.data());
    
    array<double, 6> robotPose1, robotPose2;

    
    
    SGMParameters stereoParams;
    stereoParams.verbosity = 3;
    stereoParams.salientPoints = false;
    paramFile >> stereoParams.u0;
    paramFile >> stereoParams.v0;
    paramFile >> stereoParams.dispMax;
    paramFile >> stereoParams.scale;
    paramFile.ignore();
    string imageDir;
    getline(paramFile, imageDir);
    string imageInfo, imageName;
    getline(paramFile, imageInfo);
    istringstream imageStream(imageInfo);
    imageStream >> imageName;
    for (auto & x : robotPose1) imageStream >> x;

    Mat8u img1 = imread(imageDir + imageName, 0);
    stereoParams.uMax = img1.cols;
    stereoParams.vMax = img1.rows;
    stereoParams.setEqualMargin();
    
    int counter = 2;
    EnhancedCamera camera(params.data());
    while (getline(paramFile, imageInfo))
    {
        istringstream imageStream(imageInfo);
        
        imageStream >> imageName;
        for (auto & x : robotPose2) imageStream >> x;
    
        Transformation<double> T01(robotPose1.data()), T02(robotPose2.data());
        Transformation<double> TleftRight = T01.compose(TbaseCamera).inverseCompose(T02.compose(TbaseCamera));
        
        Mat8u img2 = imread(imageDir + imageName, 0);

        EnhancedSGM stereo(TleftRight, &camera, &camera, stereoParams);

        DepthMap depth;
        auto t2 = clock();
        stereo.computeStereo(img1, img2, depth);
        auto t3 = clock();
        cout << double(t3 - t2) / CLOCKS_PER_SEC << endl;
        
        Mat32f dMat;
        depth.toInverseMat(dMat);
//        imwrite(imageDir + "res" + to_string(counter++) + ".png", depth*200);
        imwrite(imageDir + "res" + to_string(counter++) + ".png", dMat*30);
    }
    return 0;
}
int main(int /*argc*/, char** /*argv*/)
{
  // Variational epipolar stereo demo on a fixed pair of test images:
  // loads two frames with known poses, derives the fundamental matrix and
  // relative SE3 transform, runs the solver and displays the disparities.
  try
  {
    imp::ImageCv32fC1::Ptr cv_im0;
    imp::cvBridgeLoad(cv_im0,
                      "/home/mwerlberger/data/epipolar_stereo_test/00000.png",
                      imp::PixelOrder::gray);

    imp::ImageCv32fC1::Ptr cv_im1;
    imp::cvBridgeLoad(cv_im1,
                      "/home/mwerlberger/data/epipolar_stereo_test/00001.png",
                      imp::PixelOrder::gray);

    // rectify images for testing

    // Upload both frames to the GPU.
    imp::cu::ImageGpu32fC1::Ptr im0 = std::make_shared<imp::cu::ImageGpu32fC1>(*cv_im0);
    imp::cu::ImageGpu32fC1::Ptr im1 = std::make_shared<imp::cu::ImageGpu32fC1>(*cv_im1);


    // Hard-coded ground-truth poses of the two frames in the world frame.
    Eigen::Quaterniond q_world_im0(0.14062777, 0.98558398, 0.02351040, -0.09107859);
    Eigen::Quaterniond q_world_im1(0.14118687, 0.98569744, 0.01930722, -0.08996696);
    Eigen::Vector3d t_world_im0(-0.12617580, 0.50447008, 0.15342121);
    Eigen::Vector3d t_world_im1(-0.11031053, 0.50314023, 0.15158643);

    imp::cu::PinholeCamera cu_cam(414.09, 413.799, 355.567, 246.337);

    // im0: fixed image; im1: moving image
    imp::cu::Matrix3f F_fix_mov;
    imp::cu::Matrix3f F_mov_fix;
    Eigen::Matrix3d F_fm, F_mf;
    { // compute fundamental matrix
      Eigen::Matrix3d R_world_mov = q_world_im1.matrix();
      Eigen::Matrix3d R_world_fix = q_world_im0.matrix();
      Eigen::Matrix3d R_fix_mov = R_world_fix.inverse()*R_world_mov;

      // in ref coordinates
      Eigen::Vector3d t_fix_mov = R_world_fix.inverse()*(-t_world_im0 + t_world_im1);

      // Essential matrix E = [t]_x * R, then F = K^-T * E * K^-1.
      Eigen::Matrix3d tx_fix_mov;
      tx_fix_mov << 0, -t_fix_mov[2], t_fix_mov[1],
          t_fix_mov[2], 0, -t_fix_mov[0],
          -t_fix_mov[1], t_fix_mov[0], 0;
      Eigen::Matrix3d E_fix_mov = tx_fix_mov * R_fix_mov;
      Eigen::Matrix3d K;
      K << cu_cam.fx(), 0, cu_cam.cx(),
          0, cu_cam.fy(), cu_cam.cy(),
          0, 0, 1;

      Eigen::Matrix3d Kinv = K.inverse();
      F_fm = Kinv.transpose() * E_fix_mov * Kinv;
      F_mf = F_fm.transpose();
    } // end .. compute fundamental matrix
    // convert the Eigen-thingy to something that we can use in CUDA
    for(size_t row=0; row<F_fix_mov.rows(); ++row)
    {
      for(size_t col=0; col<F_fix_mov.cols(); ++col)
      {
        F_fix_mov(row,col) = (float)F_fm(row,col);
        F_mov_fix(row,col) = (float)F_mf(row,col);
      }
    }

    // compute SE3 transformation
    imp::cu::SE3<float> T_world_fix(
          static_cast<float>(q_world_im0.w()), static_cast<float>(q_world_im0.x()),
          static_cast<float>(q_world_im0.y()), static_cast<float>(q_world_im0.z()),
          static_cast<float>(t_world_im0.x()), static_cast<float>(t_world_im0.y()),
          static_cast<float>(t_world_im0.z()));
    imp::cu::SE3<float> T_world_mov(
          static_cast<float>(q_world_im1.w()), static_cast<float>(q_world_im1.x()),
          static_cast<float>(q_world_im1.y()), static_cast<float>(q_world_im1.z()),
          static_cast<float>(t_world_im1.x()), static_cast<float>(t_world_im1.y()),
          static_cast<float>(t_world_im1.z()));
    imp::cu::SE3<float> T_mov_fix = T_world_mov.inv() * T_world_fix;
    // end .. compute SE3 transformation

    std::cout << "T_mov_fix:\n" << T_mov_fix << std::endl;


    // Solver configuration: preconditioned Huber-L1 on epipolar lines.
    std::unique_ptr<imp::cu::VariationalEpipolarStereo> stereo(
          new imp::cu::VariationalEpipolarStereo());

    stereo->parameters()->verbose = 1;
    stereo->parameters()->solver = imp::cu::StereoPDSolver::EpipolarPrecondHuberL1;
    stereo->parameters()->ctf.scale_factor = 0.8f;
    stereo->parameters()->ctf.iters = 30;
    stereo->parameters()->ctf.warps  = 5;
    stereo->parameters()->ctf.apply_median_filter = true;
    stereo->parameters()->lambda = 20;

    stereo->addImage(im0);
    stereo->addImage(im1);

    // Initial depth proposal: constant mean, zero variance.
    imp::cu::ImageGpu32fC1::Ptr cu_mu = std::make_shared<imp::cu::ImageGpu32fC1>(im0->size());
    imp::cu::ImageGpu32fC1::Ptr cu_sigma2 = std::make_shared<imp::cu::ImageGpu32fC1>(im0->size());
    cu_mu->setValue(-5.f);
    cu_sigma2->setValue(0.0f);

    stereo->setFundamentalMatrix(F_mov_fix);
    stereo->setIntrinsics({cu_cam, cu_cam});
    stereo->setExtrinsics(T_mov_fix);
    stereo->setDepthProposal(cu_mu, cu_sigma2);
    
    stereo->solve();

    std::shared_ptr<imp::cu::ImageGpu32fC1> d_disp = stereo->getDisparities();


    {
      imp::Pixel32fC1 min_val,max_val;
      imp::cu::minMax(*d_disp, min_val, max_val);
      std::cout << "disp: min: " << min_val.x << " max: " << max_val.x << std::endl;
    }

    imp::cu::cvBridgeShow("im0", *im0);
    imp::cu::cvBridgeShow("im1", *im1);
//    *d_disp *= -1;
    {
      imp::Pixel32fC1 min_val,max_val;
      imp::cu::minMax(*d_disp, min_val, max_val);
      std::cout << "disp: min: " << min_val.x << " max: " << max_val.x << std::endl;
    }

    // Show disparities with a fixed range and with auto min/max scaling.
    imp::cu::cvBridgeShow("disparities", *d_disp, -18.0f, 11.0f);
    imp::cu::cvBridgeShow("disparities minmax", *d_disp, true);
    cv::waitKey();
  }
  catch (std::exception& e)
  {
    std::cout << "[exception] " << e.what() << std::endl;
    assert(false);
  }

  return EXIT_SUCCESS;

}
Beispiel #16
0
int main(int argc, char **argv)
{
	/* Command-line front end for the PCI MAX FM transmitter: parses the
	 * options with getopt_long and dispatches each one to its handler,
	 * then closes the device. */
	int c, option_index;

	if (argc < 2)
		print_help(argv[0], -1);

	while (1) {
		option_index = 0;

		/* The option string is assembled at compile time from the
		 * enabled feature flags. */
		c = getopt_long(argc, argv,
#if PCIMAXFM_ENABLE_TX_TOGGLE
				"t::"
#endif /* PCIMAXFM_ENABLE_TX_TOGGLE */
				"f::p::s::"
#if PCIMAXFM_ENABLE_RDS
#if PCIMAXFM_ENABLE_RDS_TOGGLE
				"g::"
#endif /* PCIMAXFM_ENABLE_RDS_TOGGLE */
				"r:"
#endif /* PCIMAXFM_ENABLE_RDS */
				"d::vqehH",
				long_options, &option_index);

		if (c == -1)
			break;

		switch (c) {
#if PCIMAXFM_ENABLE_TX_TOGGLE
			case 't':
				tx(optarg);
				break;
#endif /* PCIMAXFM_ENABLE_TX_TOGGLE */
			case 'f':
				freq(optarg);
				break;
			case 'p':
				power(optarg);
				break;
			case 's':
				stereo(optarg);
				break;
#if PCIMAXFM_ENABLE_RDS
#if PCIMAXFM_ENABLE_RDS_TOGGLE
			case 'g':
				rds_signal(optarg);
				break;
#endif /* PCIMAXFM_ENABLE_RDS_TOGGLE */
			case 'r':
				rds(optarg);
				break;
#endif /* PCIMAXFM_ENABLE_RDS */
			case 'd':
				device(optarg);
				break;
			case 'v':
				verbosity = 1;
				DEBUG_MSG("Verbose output.");
				break;
			case 'q':
				verbosity = -1;
				break;
			/* NOTE(review): the three cases below have no break and
			 * fall through towards default/exit(1) -- presumably
			 * print_version/print_help(_rds) terminate the process
			 * themselves; confirm before adding breaks. */
			case 'e':
				print_version(argv[0]);
			case 'h':
				print_help(argv[0], 0);
#if PCIMAXFM_ENABLE_RDS
			case 'H':
				print_help_rds(0);
#endif /* PCIMAXFM_ENABLE_RDS */
			default:
				exit(1);
		}
	}

	dev_close();

	return 0;
}
INITIALIZE_EASYLOGGINGPP

/**
 * Interactive rectified-stereo capture tool.
 *
 * Loads stereo calibration from the config-specified parameter directory,
 * shows the live rectified pair, and saves numbered image pairs on demand.
 * Keys: 'c' capture pair, 'b' toggle binning, 'f' print framerates, 'q' quit.
 * Returns 0 in all cases (errors are logged).
 */
int main(int argc, char* argv[])
{
  std::string tag = "MAIN\t";

  LOG(INFO) << tag << "Application started.";
  mvIMPACT::acquire::DeviceManager devMgr;

  // Initialize to nullptr so a failed initCameras() cannot leave them dangling.
  Camera *left = nullptr;
  Camera *right = nullptr;

  if(!Utility::initCameras(devMgr,left,right))
    return 0;

  Stereosystem stereo(left,right);

  // Config keys this tool requires in ./configs/default.yml.
  std::vector<std::string> nodes;
  nodes.push_back("inputParameter");
  nodes.push_back("capturedImagesRectified");

  std::string config = "./configs/default.yml";

  cv::FileStorage fs;

  if(!Utility::checkConfig(config,nodes,fs))
  {
    return 0;
  }

  std::string inputParameter;
  fs["inputParameter"] >> inputParameter;

  std::string outputDirectory;
  fs["capturedImagesRectified"] >> outputDirectory;

  // Calibration data is mandatory for rectified capture.
  if(!stereo.loadIntrinsic(inputParameter+"/intrinsic.yml"))
    return 0;
  if(!stereo.loadExtrinisic(inputParameter +"/extrinsic.yml"))
    return 0;

  Stereopair s;

  left->setExposure(24000);
  right->setExposure(24000);

  std::string pathLeft = outputDirectory+"/left";
  std::string pathRight = outputDirectory+"/right";

  unsigned int imageNumber = 0;

  // If previous captures exist, either wipe them or continue numbering
  // after the existing files.
  if(Utility::directoryExist(pathLeft) || Utility::directoryExist(pathRight))
  {
    std::vector<std::string> tmp1, tmp2;
    Utility::getFiles(pathLeft,tmp1);
    Utility::getFiles(pathRight,tmp2);

    if(tmp1.size() != 0 || tmp2.size() != 0)
    {
      std::cout << "Output directory not empty, clean files? [y/n] " <<std::endl;
      char key = getchar();
      if(key == 'y')
      {
        std::system(std::string("rm " + pathLeft + "/* -f ").c_str());
        std::system(std::string("rm " + pathRight + "/* -f ").c_str());
      }
      else if(key == 'n')
      {
        imageNumber = tmp1.size();
        LOG(INFO) << tag << "Start with image number " << imageNumber << std::endl;
      }
    }
  }

  if(Utility::createDirectory(pathLeft) && Utility::createDirectory(pathRight))
  {
    LOG(INFO) << tag << "Successfully created directories for captured images." << std::endl;
  }
  else
  {
    LOG(ERROR) << tag << "Unable to create directories for captured images." <<std::endl;
    return 0;
  }

  char key = 0;
  bool running = true;
  unsigned int binning = 0;

  cv::namedWindow("Left" ,1);
  cv::namedWindow("Right" ,1);

  while(running)
  {
    stereo.getRectifiedImagepair(s);
    cv::imshow("Left", s.mLeft);
    cv::imshow("Right", s.mRight);

    key = cv::waitKey(10);

    if(key == 'c')
    {
      // Zero-pad the running index to at least four digits so the files sort
      // lexicographically (left_0000.jpg, ...). Unlike the old if/else chain,
      // this also stays consistent for imageNumber >= 1000.
      std::string number = std::to_string(imageNumber);
      std::string prefix = number.length() < 4 ? std::string(4 - number.length(), '0')
                                               : std::string();

      cv::imwrite(std::string(pathLeft+"/left_"+prefix+number+".jpg"),s.mLeft);
      cv::imwrite(std::string(pathRight+"/right_"+prefix+number+".jpg"),s.mRight);
      ++imageNumber;
    }
    else if(key == 'q')
    {
      running = false;
      break;
    }
    else if(key == 'b')
    {
      // Toggle 0/1 binning on both cameras; the rectification maps depend on
      // the image size, so they must be rebuilt afterwards.
      binning = (binning == 0) ? 1 : 0;

      left->setBinning(binning);
      right->setBinning(binning);
      stereo.resetRectification();
    }
    else if(key == 'f')
    {
      // BUGFIX: the labels were swapped -- the left camera's framerate was
      // printed under the "Right" label and vice versa.
      std::cout << "FPS: Left "<< left->getFramerate() << " " << "Right " << right->getFramerate() << std::endl;
    }
  }
  return 0;
}
Beispiel #18
0
int main(int argc, char** argv)
{	
    /*Polynomial2 poly2;
    poly2.kuu = -1; 
    poly2.kuv = 1; 
    poly2.kvv= -1; 
    poly2.ku = 0.25; 
    poly2.kv = 0.25; 
    poly2.k1 = 5;
    
    CurveRasterizer<Polynomial2> raster(1, 1, -100, 100, poly2);
    CurveRasterizer2<Polynomial2> raster2(1, 1, -100, 100, poly2);

    auto tr0 = clock();
    int x1 = 0;
    int x2 = 0;
    for (int i = 0; i < 10000000; i++)
    {
        raster.step();
        x1 += raster.x;
    }
    auto tr1 = clock();
    
    for (int i = 0; i < 10000000; i++)
    {
        raster2.step();
        x2 += raster2.x;
    }
    auto tr2 = clock();
    
    cout << "optimized " << double(tr1 - tr0) / CLOCKS_PER_SEC << endl;
    cout << "simple " << double(tr2 - tr1) / CLOCKS_PER_SEC << endl;
    cout << x1 << " " << x2 << endl;
    return 0;*/
    ifstream paramFile(argv[1]);
    if (not paramFile.is_open())
    {
        cout << argv[1] << " : ERROR, file is not found" << endl;
        return 0;
    }
    
    array<double, 6> params1;
    array<double, 6> params2;
    
    cout << "First EU Camera model parameters :" << endl;
    for (auto & p: params1) 
    {
        paramFile >> p;
        cout << setw(10) << p;
    }
    cout << endl;
    paramFile.ignore();
    
    cout << "Second EU Camera model parameters :" << endl;
    for (auto & p: params2) 
    {
        paramFile >> p;
        cout << setw(10) << p;
    }
    cout << endl;
    paramFile.ignore();
    
    array<double, 6> cameraPose;
    cout << "Camera pose wrt the robot :" << endl;
    for (auto & e: cameraPose) 
    {
        paramFile >> e;
        cout << setw(10) << e;
    }
    cout << endl;
    paramFile.ignore();
    Transformation<double> TbaseCamera(cameraPose.data());
    
    array<double, 6> robotPose1, robotPose2;
    cout << "First robot's pose :" << endl;
    for (auto & e: robotPose1) 
    {
        paramFile >> e;
        cout << setw(10) << e;
    }
    cout << endl;
    cout << "Second robot's pose :" << endl;
    for (auto & e: robotPose2) 
    {
        paramFile >> e;
        cout << setw(10) << e;
    }
    cout << endl;
    paramFile.ignore();
    Transformation<double> T01(robotPose1.data()), T02(robotPose2.data());
    
    Transformation<double> TleftRight = T01.compose(TbaseCamera).inverseCompose(T02.compose(TbaseCamera));
    
    SGMParameters stereoParams;
    stereoParams.flawCost = 5;
    stereoParams.verbosity = 0;
    stereoParams.hypMax = 1;
//    stereoParams.salientPoints = false;
    paramFile >> stereoParams.u0;
    paramFile >> stereoParams.v0;
    paramFile >> stereoParams.dispMax;
    paramFile >> stereoParams.scale;
    paramFile.ignore();
//    stereoParams.lambdaStep = 3;
//    stereoParams.lambdaJump = 15;
    string fileName1, fileName2;
    while(getline(paramFile, fileName1))
    {
        getline(paramFile, fileName2);
        
        Mat8u img1 = imread(fileName1, 0);
        Mat8u img2 = imread(fileName2, 0);
        Mat16s img1lap, img2lap;

        stereoParams.uMax = img1.cols;
        stereoParams.vMax = img1.rows;
        stereoParams.setEqualMargin();
    //    
    //    Laplacian(img1, img1lap, CV_16S, 3);
    //    Laplacian(img2, img2lap, CV_16S, 3);
    //    
    //    GaussianBlur(img1, img1, Size(3, 3), 0, 0);
    //    GaussianBlur(img2, img2, Size(3, 3), 0, 0);
    //    
    //    img1lap = img1lap + 128;
    //    img2lap = img2lap + 128;
    //    img1lap.copyTo(img1);
    //    img2lap.copyTo(img2);
    ////    
        
        Timer timer;
        EnhancedCamera camera1(params1.data()), camera2(params2.data());
        EnhancedSGM stereo(TleftRight, &camera1, &camera2, stereoParams);
        cout << "    initialization time : " << timer.elapsed() << endl;
        Mat8u out1, out2;
        
        img1.copyTo(out1);
        img2.copyTo(out2);
        
    //    
    //    for (auto & x : {Point(320, 300), Point(500, 300), Point(750, 300), Point(350, 500), Point(600, 450)})
    //    {
    //        out1(x) = 0;
    //        out1(x.y + 1, x.x) = 0;
    //        out1(x.y, x.x + 1) = 255;
    //        out1(x.y + 1, x.x + 1) = 255;
    //        stereo.traceEpipolarLine(x, out2);
    //    }
        

    //    Mat32f res;
    //    timer.reset();
    //    stereo.computeCurveCost(img1, img2);
    //    cout << timer.elapsed() << endl;
    //    timer.reset();
    //    stereo.computeDynamicProgramming();
    //    cout << timer.elapsed() << endl;
    //    timer.reset();
    //    stereo.reconstructDisparity();
    //    cout << timer.elapsed() << endl;
    //    timer.reset();
    //    stereo.computeDepth(res);
    //    cout << timer.elapsed() << endl;
        
        DepthMap depth;
        Mat32f depthMat;
        timer.reset();
        
    //    stereo.computeStereo(img1, img2, depthMat);
        stereo.computeStereo(img1, img2, depth);
        cout << "    stereo total time : " << timer.elapsed() << endl;
        
        depth.toInverseMat(depthMat);
        
        imshow("out1", out1);
        imshow("out2", out2);
        imshow("res", depthMat/ 3);
        imshow("disp", stereo.disparity() * 256);
        cout << stereo.disparity()(350, 468) << " " << stereo.disparity()(350, 469) << endl;
        waitKey(); 
    }
    return 0;
}
INITIALIZE_EASYLOGGINGPP

int main(int argc, char const *argv[])
{

  std::string tag = "MAIN\t";

  LOG(INFO) << tag << "Application started.";
  mvIMPACT::acquire::DeviceManager devMgr;

  Camera *left;
  Camera *right;

  if(!Utility::initCameras(devMgr,left,right))
  {
    return 0;
  }

  Stereosystem stereo(left,right);

  std::vector<std::string> nodes;
  nodes.push_back("inputImages");
  nodes.push_back("outputParameter");

  std::string config = "./configs/default.yml";

  cv::FileStorage fs;


  if(!Utility::checkConfig(config,nodes,fs))
  {
    return 0;
  }


  std::string dirPath;
  fs["inputImages"] >> dirPath;

  std::string parameterOutput;
  fs["outputParameter"] >> parameterOutput;

  if(!Utility::createDirectory(parameterOutput))
  {
    LOG(ERROR) << tag << "Unable to create output directory for parameter" << std::endl;
  }

  std::vector<std::string> filenamesLeft;
  std::vector<std::string> filenamesRight;

  Utility::getFiles(dirPath+"/left", filenamesLeft);
  Utility::getFiles(dirPath+"/right", filenamesRight);

  std::vector<cv::Mat> imagesLeft, imagesRight;

  if(filenamesLeft.size() == filenamesRight.size()) {
    for(unsigned int i = 0; i < filenamesLeft.size(); ++i){
      imagesLeft.push_back(cv::imread(std::string(dirPath+"/left/"+filenamesLeft[i])));
      imagesRight.push_back(cv::imread(std::string(dirPath+"/right/"+filenamesRight[i])));
    }
  }

  double stereoRMS = stereo.calibrate(imagesLeft, imagesRight,20, cv::Size(11,8));

  std::cout << "RMS after stereorectification: " <<  stereoRMS << std::endl;

  std::cout << "press 's' to save the new parameters." << std::endl;
  char key = getchar();
  if(key == 's')
  {
    stereo.saveIntrinsic(parameterOutput + "/intrinsic.yml");
    stereo.saveExtrinsic(parameterOutput + "/extrinsic.yml");
  }

  return 0;
}