Exemple #1
0
void test_drawMatchPoints(double alpha,int winn,int winp,char * fic1,char* fic2,char * fic_s){
  image im1(fic1);
  image im2(fic2);

  image* sortie=im1.drawMatchPoints(im2,50,winn,winp,&image::zncc,true);
  sortie->EcrireImagePGM(fic_s);
}
// Returns the exporter icon best suited for the requested pixel size.
// When both icons exist, prefers the smallest icon that is still at least
// `size` wide; when only one exists, falls back to it unconditionally.
// Returns a null Image() when no suitable icon is available.
Image ProjectExporter::getBestIconForSize (int size, bool returnNullIfNothingBigEnough) const
{
    Drawable* im = nullptr;   // non-owning pointer into im1/im2 below

    ScopedPointer<Drawable> im1 (getSmallIcon());
    ScopedPointer<Drawable> im2 (getBigIcon());

    if (im1 != nullptr && im2 != nullptr)
    {
        // Both icons exist: pick the smallest one that is still big enough.
        // NOTE(review): if neither icon is >= size, `im` stays null and a
        // null Image is returned even when returnNullIfNothingBigEnough is
        // false — confirm that this is the intended behaviour.
        if (im1->getWidth() >= size && im2->getWidth() >= size)
            im = im1->getWidth() < im2->getWidth() ? im1 : im2;
        else if (im1->getWidth() >= size)
            im = im1;
        else if (im2->getWidth() >= size)
            im = im2;
    }
    else
    {
        // Only one (or neither) icon exists: take whichever is available.
        im = im1 != nullptr ? im1 : im2;
    }

    if (im == nullptr)
        return Image();

    // Rejects only when BOTH dimensions are too small; an icon that is wide
    // enough but not tall enough (or vice versa) is still accepted.
    if (returnNullIfNothingBigEnough && im->getWidth() < size && im->getHeight() < size)
        return Image();

    return rescaleImageForIcon (*im, size);
}
Exemple #3
0
// compute distances from rows of inmat1 to rows of inmat2
// Returns an nr1 x nr2 numeric matrix whose (i,j) entry is the distance from
// row i of inmat1 to row j of inmat2; the per-row work is done by the
// do1ival functor on the Thrust backend.
RcppExport SEXP rthpdist(SEXP inmat1,SEXP inmat2, SEXP nthreads)
{  
   Rcpp::NumericMatrix im1(inmat1);
   Rcpp::NumericMatrix im2(inmat2);
   int nr1 = im1.nrow();
   int nc = im1.ncol();   // assumes im2 has the same column count — TODO confirm at call site
   int nr2 = im2.nrow();
   
   #if RTH_OMP
   omp_set_num_threads(INT(nthreads));
   #elif RTH_TBB
   // tbb::task_scheduler_init init(INT(nthreads));
   // for unknown reasons, this code does not work under TBB
   return Rcpp::wrap(-1);
   #endif
   
   // copy both input matrices into device vectors (R column-major layout)
   thrust::device_vector<double> dmat1(im1.begin(),im1.end());
   thrust::device_vector<double> dmat2(im2.begin(),im2.end());
   // make space for the output
   thrust::device_vector<double> ddst(nr1*nr2);
   // iterators for row number of inmat1
   thrust::counting_iterator<int> iseqb(0);
   thrust::counting_iterator<int> iseqe = iseqb + nr1;
   // for each i in [iseqb,iseqe) find the distances from row i in inmat1 
   // to all rows of inmat2
   thrust::for_each(iseqb,iseqe,
      do1ival(dmat1.begin(),dmat2.begin(),ddst.begin(),nr1,nc,nr2));
   // copy the distances back into the R result matrix
   Rcpp::NumericMatrix rout(nr1,nr2);
   thrust::copy(ddst.begin(),ddst.end(),rout.begin());
   return rout;
}
// Writes the original input image plus one BMP per scale to disk, using
// filenames of the form "<prefix>_<processor>_<NN>_<SS>.bmp" (numbers
// zero-padded to two digits). In debug mode it additionally dumps the raw
// scale-space matrices as .txt files and 8-bit BMP previews.
void ScaleSpaceImage::show(std::string prefix, std::string processor, unsigned int image_number, bool debug)
{
  // NOTE(review): image_nr is incremented at the end but never read in this
  // function — looks like dead state; confirm before removing.
  static int image_nr = 0;
  cv::Mat to_write;

  // Zero-pad the image number to two digits ("7" -> "07").
  std::string image_number_string;
  if (image_number < 10)
  {
    image_number_string = "0";
  }
  image_number_string += std::to_string(image_number);
  // NOTE(review): unlike the per-scale names below, there is no "_" between
  // processor and the image number here — confirm whether intentional.
  cv::imwrite(prefix + "_" + processor + image_number_string + "_original.bmp", input);

  for (unsigned int i = 0; i < nr_scales; ++i)
  {
    // Build "<prefix>_<processor>_<NN>_<SS>" for this scale.
    std::string s;
    s = prefix + "_" + processor + "_";
    s += image_number_string;
    s += "_";
    if (i < 10)
    {
      s += "0";
    }
    s += std::to_string(i);
    if (debug)
    {
      // Dump the first scale-space channel as both BMP (scaled to 0..255)
      // and a plain-text matrix dump.
      std::string ds1 = s + "_1.bmp";
      std::string ds1txt = s + "_1.txt";
      std::ofstream im1(ds1txt);

      scale_space_images[0][i].convertTo(to_write, CV_8UC1, 255.0);
      cv::imwrite(ds1.c_str(), to_write);
      im1 << scale_space_images[0][i];

      // Second channel, if present.
      if (scale_space_images.size() > 1)
      {
        std::string ds2 = s + "_2.bmp";
        std::string ds2txt = s + "_2.txt";
        std::ofstream im2(ds2txt);

        scale_space_images[1][i].convertTo(to_write, CV_8UC1, 255.0);
        cv::imwrite(ds2.c_str(), to_write);
        im2 << scale_space_images[1][i];
      }
    }
    s += ".bmp";

    // Gaussian mode writes the (rescaled) scale-space image itself;
    // otherwise the precomputed output image for this scale is written.
    if (gaussian)
    {
      scale_space_images[0][i].convertTo(to_write, CV_8UC1, 255.0);
    }
    else
    {
      to_write = output[i];
    }
    cv::imwrite(s.c_str(), to_write);
  }
  image_nr++;
}
Exemple #5
0
void testDblDepth(int winn,int winp,char * fic1,char * fic2,char * ficSortie){
  image im1(fic1);
  image im2(fic2);

  image* imSortie=im1.dblMatchProfPoints(im2,winn,winp,&image::ssd,false);
  imSortie->recadre(0,255);
  imSortie->EcrireImagePGM(ficSortie);
}
/**
 *  Test that for Jpeg files that use the JFIF colorspace, they are
 *  directly embedded into the PDF (without re-encoding) when that
 *  makes sense.
 *
 *  The test is run twice: once through the drawBitmap() path and once
 *  through the drawImage() path; both must embed the JFIF jpeg verbatim
 *  and must NOT embed the CMYK jpeg verbatim.
 */
DEF_TEST(PDFJpegEmbedTest, r) {
    const char test[] = "PDFJpegEmbedTest";
    SkAutoTUnref<SkData> mandrillData(
            load_resource(r, test, "mandrill_512_q075.jpg"));
    SkAutoTUnref<SkData> cmykData(load_resource(r, test, "CMYK.jpg"));
    // Resources missing: silently skip (load_resource is expected to have
    // reported the problem).
    if (!mandrillData || !cmykData) {
        return;
    }
    ////////////////////////////////////////////////////////////////////////////
    // Pass 1: draw decoded bitmaps into the PDF canvas.
    SkDynamicMemoryWStream pdf;
    SkAutoTUnref<SkDocument> document(SkDocument::CreatePDF(&pdf));
    SkCanvas* canvas = document->beginPage(642, 1028);

    canvas->clear(SK_ColorLTGRAY);

    SkBitmap bm1(bitmap_from_data(mandrillData));
    canvas->drawBitmap(bm1, 65.0, 0.0, nullptr);
    SkBitmap bm2(bitmap_from_data(cmykData));
    canvas->drawBitmap(bm2, 0.0, 512.0, nullptr);

    canvas->flush();
    document->endPage();
    document->close();
    SkAutoTUnref<SkData> pdfData(pdf.copyToData());
    SkASSERT(pdfData);
    pdf.reset();

    // The JFIF jpeg bytes must appear verbatim inside the PDF stream.
    REPORTER_ASSERT(r, is_subset_of(mandrillData, pdfData));

    // This JPEG uses a nonstandard colorspace - it can not be
    // embedded into the PDF directly.
    REPORTER_ASSERT(r, !is_subset_of(cmykData, pdfData));
    ////////////////////////////////////////////////////////////////////////////
    // Pass 2: same checks through the SkImage drawing path.
    pdf.reset();
    document.reset(SkDocument::CreatePDF(&pdf));
    canvas = document->beginPage(642, 1028);

    canvas->clear(SK_ColorLTGRAY);

    SkAutoTUnref<SkImage> im1(SkImage::NewFromEncoded(mandrillData));
    canvas->drawImage(im1, 65.0, 0.0, nullptr);
    SkAutoTUnref<SkImage> im2(SkImage::NewFromEncoded(cmykData));
    canvas->drawImage(im2, 0.0, 512.0, nullptr);

    canvas->flush();
    document->endPage();
    document->close();
    pdfData.reset(pdf.copyToData());
    SkASSERT(pdfData);
    pdf.reset();

    REPORTER_ASSERT(r, is_subset_of(mandrillData, pdfData));

    // This JPEG uses a nonstandard colorspace - it can not be
    // embedded into the PDF directly.
    REPORTER_ASSERT(r, !is_subset_of(cmykData, pdfData));
}
//For each part of cloud:
// Rasterises the rail point cloud into a density image, cleans it with a
// morphological pipeline (calibration, closing, thresholding, region
// growing), then removes every cloud point whose image cell ended up at 0.
void scnreader_model::cleanNoise(int f){
    //we fix the width of 1 pixel
    double dense=0.04;
    //we keep the cloud which we must clean
    QVector<PointGL> lespoints=this->lesRails.getCloud();
    //we keep distance between xmin and xmax
    // NOTE(review): if distanceMinMax() heap-allocates the returned array,
    // `val` leaks here — confirm ownership and free it if so.
    double* val=distanceMinMax(lespoints);
    double dist=val[1]-val[0];
    double xmin=val[0];
    // NOTE(review): implicit double -> int truncation; confirm val[2] is an
    // integral z value.
    int zmin=val[2];
    //calculate width of image
    int width=(int)(dist/dense);
    //initialize the image
    ImageProcessing im1(width+1, (f-this->ftpd));
    this->im=im1;
    //For each point of this part:
    for(int i=0; i<lespoints.size(); i++)
    {
        //we calculate the coordinate corresponding to it
        int c=(int) ((lespoints.at(i).getX()-xmin)/dense);
        int l=(int) lespoints.at(i).getZ()-zmin;
        //we increase this position
        im.increase(l,c);
    }
    //we do a calibration of gray level between 0 and 255
    im.calibration();
    //closing
    im.fermeture();
    //we do a thresholding with threshold=125 => binarization
    im.thresholding(0);
    //region growing
    im.growingRegion();
    //record
    im.enregistre(this->nomFile);
    //For each point of this part:
    for(int i=0; i<lespoints.size(); i++)
    {
        //we calculate the coordinate corresponding to it
        int c=(int) ((lespoints.at(i).getX()-xmin)/dense);
        int l=(int) lespoints.at(i).getZ()-zmin;
        //if the value of position equals 0, we remove this point
        if(im.getValue(l,c)==0)
        {
            lespoints.removeAt(i);
            //update after remove
            i--;
        }
    }
    //update of tracks
    this->lesRails.initialization(lespoints);
}
Exemple #8
0
// Crops the member image `matrix` to the ellipse inscribed in `rect`:
// pixels inside the ellipse keep their value, pixels outside become white,
// and the returned Mat is the bounding-rectangle region of that result.
cv::Mat images::ellipseCrop(QGraphicsRectItem * rect)
{
	auto r = rect->rect();
	cv::Rect cropRegion(r.x(), r.y(), r.width(), r.height());
	cv::Point ellipseCenter(r.x() + r.width()/2, r.y() + r.height()/2);
	cv::Size ellipseAxes(r.width()/2, r.height()/2);

	// Mask that keeps the ellipse interior: black canvas, white filled ellipse.
	cv::Mat keepMask(matrix.rows, matrix.cols, CV_8UC1, cv::Scalar(0,0,0));
	cv::ellipse( keepMask, ellipseCenter, ellipseAxes, 0, 0, 360, cv::Scalar( 255, 255, 255), -1, 8 );

	// Mask that whitens the exterior: white canvas, black filled ellipse.
	cv::Mat whitenMask(matrix.rows, matrix.cols, CV_8UC1, cv::Scalar(255,255,255));
	cv::ellipse( whitenMask, ellipseCenter, ellipseAxes, 0, 0, 360, cv::Scalar( 0, 0, 0), -1, 8 );

	// AND keeps the interior pixels; XOR with the exterior mask flips the
	// zeroed outside region to white.
	cv::Mat result;
	cv::bitwise_and(matrix, keepMask, result);
	cv::bitwise_xor(result, whitenMask, result);
	return result(cropRegion);
}
// Renders the two flipdot display images from the current run data: lines
// 1-3 (fonts/formats from the settings dialog) go to widget1, lines 4-6 to
// widget2. When real display data is enabled, both images are also sent to
// the hardware via sendDisplayData().
void DisplayRenderer::render(RunData *runData, bool realDisplayData)
{
    this->runData = runData;

    // Monochrome canvas sized to the dot matrix; white pen, no antialiasing
    // so every pixel maps to one flipdot.
    QImage im(widget1->getDotSize().width(), widget1->getDotSize().height(), QImage::Format_Mono);
	im.fill(0);
    QPainter p(&im);
    p.setPen(QColor(Qt::white));
    p.setRenderHint(QPainter::TextAntialiasing, false);

	// render display 1
    int ySum=0;
    for(int n=1; n<=3; n++)
	{
        QFont font = settingsDialog->getFont(n);
        font.setStyleStrategy(QFont::NoAntialias);
        p.setFont(font);
        int y = settingsDialog->getFont(n).pointSize();
        ySum+=y;
        // Baseline advances by the font size, then the per-line offset is
        // subtracted both from the draw position and the running sum.
        p.drawText(0, ySum-settingsDialog->getOffset(n), buildString(settingsDialog->getFormat(n)));
        ySum-=settingsDialog->getOffset(n);
    }
    widget1->setImage(im);
	
	QImage im1(im);		// save a copy of image 1
	// NOTE(review): `im` is filled and repainted below while QPainter `p` is
	// still active on it, and while im1 shares its data (QImage is
	// implicitly shared) — this relies on Qt's detach behaviour; confirm.

	// render display 2
    im.fill(0);
    ySum=0;
    for(int n=4; n<=6; n++)
	{
        QFont font = settingsDialog->getFont(n);
        font.setStyleStrategy(QFont::NoAntialias);
        p.setFont(font);
        int y = font.pointSize();
        ySum+=y;
        p.drawText(0, ySum-settingsDialog->getOffset(n), buildString(settingsDialog->getFormat(n)));
        ySum-=settingsDialog->getOffset(n);
    }
    widget2->setImage(im);
	
	if(realDisplayData && settingsDialog->getEnabled())
	{
		sendDisplayData(im1, im);
	}
}
// Verifies the sink's locked/unlocked behaviour: unlocking clears the missed
// events, notifications added while unlocked are NOT stored, and
// notifications added while locked ARE forwarded to UnlockMissedEvents.
void
Ut_UnlockNotificationSink::testEnableDisableLocking ()
{
    sink->setLockedState (true);
    gUnlockMissedEventsStub->stubReset ();

    // Test notifications (three identical IM application events)
    NotificationParameters im_params;
    im_params.add (GenericNotificationParameterFactory::eventTypeKey (),
                   EVENT_IM);
    Notification im1 (200, GID, UID, im_params,
                      Notification::ApplicationEvent, -1);
    Notification im2 (200, GID, UID, im_params,
                      Notification::ApplicationEvent, -1);
    Notification im3 (200, GID, UID, im_params,
                      Notification::ApplicationEvent, -1);


    // To trigger the notification clearing...
    sink->setLockedState (false);
    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("clearAll"), 1);

    // Try to add one notification while unlocked: it must NOT be recorded
    sink->addNotification (im1);

    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("addNotification"), 0);
    gUnlockMissedEventsStub->stubReset ();

    sink->setLockedState (true);

    // Try to add some notifications while locked: both must be recorded
    sink->addNotification (im2);
    sink->addNotification (im3);

    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("addNotification"), 2);
    QVERIFY (gUnlockMissedEventsStub->stubLastParameters<int> (0) 
             == (int) UnlockMissedEvents::NotifyMessage);
    gUnlockMissedEventsStub->stubReset ();

    // Unlocking again clears the stored events once more
    sink->setLockedState (false);

    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("clearAll"), 1);
}
// Builds a black/white per-pixel difference image between the baseline and
// the freshly rendered image (black = pixels differ, white = identical),
// saves it under "<basedir>.diff/", and uploads it.
void atWrapper::uploadDiff( QString basedir, QString dir, QString filename )
{

    qDebug() << basedir;
    QImage im1( basedir + ".baseline/" + filename );
    QImage im2( basedir + "/" + filename );

    QImage im3(im1.size(), QImage::Format_ARGB32);

    // Normalise both inputs to ARGB32 so scanlines can be compared as uint*.
    im1 = im1.convertToFormat(QImage::Format_ARGB32);
    im2 = im2.convertToFormat(QImage::Format_ARGB32);

    // NOTE(review): assumes im2 is at least as large as im1 — a smaller im2
    // would make the scanline walk read out of bounds; confirm callers
    // guarantee matching sizes.
    for ( int y=0; y<im1.height(); ++y )
    {
        uint *s = (uint *) im1.scanLine(y);
        uint *d = (uint *) im2.scanLine(y);
        uint *w = (uint *) im3.scanLine(y);

        for ( int x=0; x<im1.width(); ++x )
        {
            if (*s != *d)
                *w = 0xff000000;   // opaque black: pixel differs
            else
                *w = 0xffffffff;   // opaque white: pixel identical
        w++;
        s++;
        d++;
        }
    }

    im3.save( basedir + ".diff/" + filename ,"PNG");

    // Read the freshly written diff back and hand it to the uploader.
    // NOTE(review): the open() return value is ignored; on failure an empty
    // payload would be uploaded silently.
    QFile file( basedir + ".diff/" + filename );
    file.open( QIODevice::ReadOnly );
    QByteArray contents = file.readAll();
    file.close();

    uploadFailed( dir + ".diff", filename, contents );

}
// Benchmark driver for the Tiramisu-generated optical-flow pipeline:
// prepares input images (synthetic 10x10 ramps or PNGs from disk), allocates
// every intermediate buffer, runs one warm-up call, times NB_TESTS runs, and
// prints the median time plus the resulting u/v flow vectors.
int main(int, char**)
{
    std::vector<std::chrono::duration<double,std::milli>> duration_vector_1;
    std::vector<std::chrono::duration<double,std::milli>> duration_vector_2;

#if SYNTHETIC_INPUT
    // Small deterministic inputs: both frames get the same i*i+j*j ramp.
    Halide::Buffer<uint8_t> im1(10, 10);
    Halide::Buffer<uint8_t> im2(10, 10);

    for (int i = 0; i < 10; i++)
	    for (int j = 0; j < 10; j++)
	    {
		    im1(i, j) = (uint8_t) i*i+j*j;
		    im2(i, j) = (uint8_t) i*i+j*j;
	    }
#else
    Halide::Buffer<uint8_t> im1 = Halide::Tools::load_image("./utils/images/rgb.png");
    Halide::Buffer<uint8_t> im2 = Halide::Tools::load_image("./utils/images/rgb.png");
#endif

    // Gradient buffers plus the linear-system scratch buffers used by the
    // Lucas-Kanade solve. _NC and w are defined outside this file.
    Halide::Buffer<float> Ix_m(im1.width(), im1.height());
    Halide::Buffer<float> Iy_m(im1.width(), im1.height());
    Halide::Buffer<float> It_m(im1.width(), im1.height());
    Halide::Buffer<int> C1(_NC);
    Halide::Buffer<int> C2(_NC);
    Halide::Buffer<int> SIZES(2);
    Halide::Buffer<int> u(_NC);
    Halide::Buffer<int> v(_NC);
    Halide::Buffer<float> A(2, 4*w*w);
    Halide::Buffer<float> tA(4*w*w, 2);
    Halide::Buffer<double> pinvA(4*w*w, 2);
    Halide::Buffer<double> det(1);
    Halide::Buffer<float> tAA(2, 2);
    Halide::Buffer<double> X(2, 2);

    // Corner coordinates to track. NOTE(review): exactly 8 corners are
    // initialised here — assumes _NC >= 8; confirm.
    SIZES(0) = im1.height();
    SIZES(1) = im1.width();
    C1(0) = 500; C2(0) = 400;
    C1(1) = 800; C2(1) = 900;
    C1(2) = 200; C2(2) = 400;
    C1(3) = 400; C2(3) = 200;
    C1(4) = 400; C2(4) = 500;
    C1(5) = 800; C2(5) = 200;
    C1(6) = 200; C2(6) = 900;
    C1(7) = 900; C2(7) = 200;

    // Zero all scratch buffers before the first run.
    det(0) = 0;
    init_buffer(Ix_m, (float) 0);
    init_buffer(Iy_m, (float) 0);
    init_buffer(It_m, (float) 0);
    init_buffer(A, (float) 0);
    init_buffer(tA, (float) 0);
    init_buffer(pinvA, (double) 0);
    init_buffer(tAA, (float) 0);
    init_buffer(X, (double) 0);

    // Warm up
    optical_flow_tiramisu(SIZES.raw_buffer(), im1.raw_buffer(), im2.raw_buffer(),
			  Ix_m.raw_buffer(), Iy_m.raw_buffer(), It_m.raw_buffer(),
			  C1.raw_buffer(), C2.raw_buffer(), u.raw_buffer(), v.raw_buffer(), A.raw_buffer(), pinvA.raw_buffer(), det.raw_buffer(), tAA.raw_buffer(), tA.raw_buffer(), X.raw_buffer());

    // Tiramisu: timed runs
    for (int i=0; i<NB_TESTS; i++)
    {
        auto start1 = std::chrono::high_resolution_clock::now();
        optical_flow_tiramisu(SIZES.raw_buffer(), im1.raw_buffer(), im2.raw_buffer(),
			  Ix_m.raw_buffer(), Iy_m.raw_buffer(), It_m.raw_buffer(),
			  C1.raw_buffer(), C2.raw_buffer(), u.raw_buffer(), v.raw_buffer(), A.raw_buffer(), pinvA.raw_buffer(), det.raw_buffer(), tAA.raw_buffer(), tA.raw_buffer(), X.raw_buffer());
        auto end1 = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double,std::milli> duration1 = end1 - start1;
        duration_vector_1.push_back(duration1);
    }

    std::cout << "Time: " << median(duration_vector_1) << std::endl;

#if SYNTHETIC_INPUT
    // With the small synthetic input, dump every buffer for inspection.
    print_buffer(im1);
    print_buffer(im2);

    print_buffer(Ix_m);
    print_buffer(Iy_m);
    print_buffer(It_m);

    print_buffer(A);
    print_buffer(tA);
    print_buffer(tAA);
    print_buffer(det);
    print_buffer(X);
    print_buffer(pinvA);
#endif

    std::cout << "Output" << std::endl;

    // Final flow vectors for the tracked corners.
    print_buffer(u);
    print_buffer(v);

    return 0;
}
Exemple #13
0
// Main-window constructor: builds all dialogs and helper objects, wires up
// the signal/slot network between the timeline, tables, renderer, publisher
// and network layer, shows the public second-screen widget, and starts a
// fresh competition from the default dialog values.
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
	ui(new Ui::MainWindow)
{
    QSettings set;
    myMainWindow = this;
    localTimeOffset_ms = 0;

    ui->setupUi(this);

	showMaximized();

	// Child dialogs (owned by this window via Qt parenting).
	dialogNewCompetition = new DialogNewCompetition(this);
	dialogGeneralSettings = new DialogGeneralSettings(this);
	dialogDisplaySettings = new DialogDisplaySettings(this);
	dialogCsvUploadCommand = new DialogCsvUploadCommand(this);
	dialogTimeStampGenerator = 0;
	dialogTimeStampGenerator2 = new DialogTimeStampGenerator2(this);
	formTimeStampList = 0;
	serverConnected = false;
    myCompetition=0;
    publisher=0;
    udpSocket = 0;


    // NOTE: operator new throws on failure rather than returning null, so
    // these null checks are belt-and-braces; kept for consistency.
    boxStatesView = new BoxStatesView(&boxStates, 0);
    if(boxStatesView){
        ui->scrollArea_boxStates->setWidget(boxStatesView);

    }else{
        qDebug("Error on creating boxStatesView");
        qApp->exit();
    }

	connect(dialogNewCompetition, SIGNAL(accepted()), this, SLOT(newCompetition()));

	// show default images on the flipdot displays
	QImage im1(":/flipdot1_default.bmp");
	ui->flipdotDisplayWidget_1->setImage(im1);
	QImage im2(":/flipdot2_default.bmp");
	ui->flipdotDisplayWidget_2->setImage(im2);


	connect(ui->actionAboutQt, SIGNAL(triggered()), qApp, SLOT(aboutQt()));


	connect(&boxStates, SIGNAL(receivedNewTimeStamp(TimeStamp*)), this, SLOT(addNewTimeStamp(TimeStamp*)));
	
    renderer = new DisplayRenderer(ui->flipdotDisplayWidget_1,
                                   ui->flipdotDisplayWidget_2,
                                   dialogDisplaySettings, this);
    if(renderer == 0){
        qDebug("Error on creating renderer-object in MainWindow-contructor");
        qApp->exit();
    }
	publisher=new Publisher(this);
    if(publisher == 0){
        qDebug("Error on creating publisher-object in MainWindow-contructor");
        qApp->exit();
    }
	
    connect(publisher, SIGNAL(publishRunNow(RunData*)), renderer, SLOT(render(RunData*)));
	connect(renderer, SIGNAL(sendDatagram(QByteArray)), this, SLOT(sendPC2TB_Packet(QByteArray)));
	connect(dialogDisplaySettings->getRenderer(), SIGNAL(sendDatagram(QByteArray)), this, SLOT(sendPC2TB_Packet(QByteArray)));

    // show second screen (public)
    publicWidget = new PublicWidget();
    // Fix: this previously re-checked `publisher` (copy-paste from above)
    // instead of the publicWidget that was just created.
    if(publicWidget == 0){
        qDebug("Error on creating publicWidget in MainWindow-contructor");
        qApp->exit();
    }
    QRect screenres = QApplication::desktop()->screenGeometry(1); // get coordinates of the external monitor
    publicWidget->move(QPoint(screenres.x(), screenres.y()));
    //publicWidget->showFullScreen();
	publicWidget->showMaximized();
    publicWidget->setWindowTitle(tr("Öffentlicher Monitor"));
    // use closeEvent for closing this widget

    currentFile = QFileInfo(set.value(SS_RECENT_PATH).toString() + "/"); // set current file to the recent path, with no filename
    newCompetition(); // create new competition with standard values from the dialogNewCompetition
	ui->infoscreen->clear();

	// Selection synchronisation between timeline, run table and timestamp table.
	connect(ui->timeline, SIGNAL(selectionChanged(TimeStamp*)), ui->tableWidget_main, SLOT(selectRun(TimeStamp*)));
	connect(ui->tableWidget_main, SIGNAL(runSelectionChanged(RunData*)), ui->timeline, SLOT(selectRun(RunData*)));
	connect(ui->tableWidget_main, SIGNAL(selectTimeStamp(TimeStamp*)), ui->timeline, SLOT(selectTimeStamp(TimeStamp*)));
	connect(ui->timeline, SIGNAL(selectionChanged(TimeStamp*)), ui->tableWidget_timeStamps, SLOT(selectTimeStamp(TimeStamp*)));
	connect(&assignmentManager, SIGNAL(unassignedTimeStamp(TimeStamp*)), ui->tableWidget_timeStamps, SLOT(selectTimeStamp(TimeStamp*)));
	connect(ui->tableWidget_timeStamps, SIGNAL(selectionChanged(TimeStamp*)), ui->timeline, SLOT(selectTimeStamp(TimeStamp*)));
	connect(ui->tableWidget_timeStamps, SIGNAL(assign()), this, SLOT(on_pushButton_assignTime_clicked()));
	
	connect(this, SIGNAL(showFoundRun(RunData*)), ui->tableWidget_main, SLOT(selectRun(RunData*)));
	connect(this, SIGNAL(showFoundRun(RunData*)), ui->timeline, SLOT(selectRun(RunData*)));

    connect(&backupTimer, SIGNAL(timeout()), this, SLOT(backupCurrentCompetition()));

	ui->tableWidget_timeStamps->init();
	ui->tableWidget_main->regenerateTable();
	
	// Global shortcut for manually triggering a timestamp.
	manualTrigger=new QShortcut(QKeySequence("CTRL+Space"), this);
	connect(manualTrigger, SIGNAL(activated()), this, SLOT(on_pushButton_manualTrigger_clicked()));
#ifndef Q_OS_WIN
	connect(&fdisk, SIGNAL(finished()), this, SLOT(writeBetweenRating()));
#endif

    setOnlineMode(false);
	
	ui->infoscreen->appendInfo(tr("Willkommen!"));

    // show welcome-screen:
    WelcomeScreen* w = new WelcomeScreen(this);
    w->show();
}
Exemple #14
0
	// Computes the zero-mean normalised cross-correlation (ZNCC) between the
	// ROIs of two images, optionally weighting each pixel by weightMatrix and
	// optionally ignoring pixels at the borneinf/bornesup sentinel values.
	// Returns a score in [-1, 1], or -1 for degenerate variance, or -3 when
	// too few (< 75%) pixels were usable under the bornes filter.
	double Zncc::computeTpl(image::Image const& im1_, image::Image const& im2_, float const* weightMatrix)
	{
		// preconds
		JFR_PRECOND( im1_.depth() == depth, "Image 1 depth is different from the template parameter" );
		JFR_PRECOND( im2_.depth() == depth, "Image 2 depth is different from the template parameter" );
		JFR_PRECOND( im1_.channels() == im2_.channels(), "The channels number of both images are different" );
		JFR_PRECOND( !useWeightMatrix || weightMatrix, "Template parameter tells to use weightMatrix but no one is given" );
		
		// adjust ROIs to match size, assuming that it is reduced when set out of the image
		// FIXME weightMatrix should be a cv::Mat in order to have a ROI too, and to adjust it
		cv::Size size1; cv::Rect roi1 = im1_.getROI(size1);
		cv::Size size2; cv::Rect roi2 = im2_.getROI(size2);
		int dw = roi1.width - roi2.width, dh = roi1.height - roi2.height;
		// Shrink/shift the larger ROI so both have equal width, depending on
		// which image edge the smaller ROI was clipped against.
		if (dw != 0)
		{
			cv::Rect &roiA = (dw<0 ? roi1 : roi2), &roiB = (dw<0 ? roi2 : roi1);
			cv::Size &sizeA = (dw<0 ? size1 : size2);
			if (roiA.x == 0) { roiB.x += dw; roiB.width -= dw; } else
			if (roiA.x+roiA.width == sizeA.width) { roiB.width -= dw; }
		}
		// Same adjustment for the heights.
		if (dh != 0)
		{
			cv::Rect &roiA = (dh<0 ? roi1 : roi2), &roiB = (dh<0 ? roi2 : roi1);
			cv::Size &sizeA = (dh<0 ? size1 : size2);
			if (roiA.y == 0) { roiB.y += dh; roiB.height -= dh; } else
			if (roiA.y+roiA.height == sizeA.height) { roiB.height -= dh; }
		}
		image::Image im1(im1_); im1.setROI(roi1);
		image::Image im2(im2_); im2.setROI(roi2);

		// some variables initialization
		int height = im1.height();
		int width = im1.width();
		// step1/step2 are the per-row element strides beyond the ROI width.
		int step1 = im1.step1() - width;
		int step2 = im2.step1() - width;
		
		double mean1 = 0., mean2 = 0.;
		double sigma1 = 0., sigma2 = 0., sigma12 = 0.;
		double zncc_sum = 0.;
		double zncc_count = 0.;
		double zncc_total = 0.;
		
		worktype const* im1ptr = reinterpret_cast<worktype const*>(im1.data());
		worktype const* im2ptr = reinterpret_cast<worktype const*>(im2.data());
		
		float const* wptr = weightMatrix;
		double w;
		
		// start the loops: accumulate weighted sums, squared sums and the
		// cross term in a single pass.
		for(int i = 0; i < height; ++i) 
		{
			for(int j = 0; j < width; ++j) 
			{
				worktype im1v = *(im1ptr++);
				worktype im2v = *(im2ptr++);
				if (useWeightMatrix) w = *(wptr++); else w = 1;
				if (useBornes) zncc_total += w;
				
//std::cout << "will correl ? " << useBornes << ", " << (int)im1v << ", " << (int)im2v << std::endl;
				// Skip saturated/sentinel pixels when bornes filtering is on.
				if (!useBornes || (im1v != borneinf && im1v != bornesup && im2v != borneinf && im2v != bornesup))
				{
//std::cout << "correl one pixel" << std::endl;
#if 0
					double im1vw, im2vw;
					if (useWeightMatrix)
						{ im1vw = im1v * w; im2vw = im2v * w; } else
						{ im1vw = im1v;     im2vw = im2v;     }
					zncc_count += w;
					mean1 += im1vw;
					mean2 += im2vw;
					sigma1 += im1v * im1vw;
					sigma2 += im2v * im2vw;
					zncc_sum += im1v * im2vw;
#else
					zncc_count += w;
					mean1 += im1v * w;
					mean2 += im2v * w;
					sigma1 += im1v * im1v * w;
					sigma2 += im2v * im2v * w;
					zncc_sum += im1v * im2v * w;
#endif
				}
			}
			im1ptr += step1;
			im2ptr += step2;
		}
		
		// Reject the match when fewer than 75% of the weighted pixels were usable.
		if (useBornes) if (zncc_count / zncc_total < 0.75)
			{ /*std::cout << "zncc failed: " << zncc_count << "," << zncc_total << std::endl;*/ return -3; }
		
		// finish: turn the accumulated sums into means/deviations and the
		// final normalised correlation score.
		mean1 /= zncc_count;
		mean2 /= zncc_count;
		sigma1 = sigma1/zncc_count - mean1*mean1;
		sigma2 = sigma2/zncc_count - mean2*mean2;
		sigma1 = sigma1 > 0.0 ? sqrt(sigma1) : 0.0; // test for numerical rounding errors to avoid nan
		sigma2 = sigma2 > 0.0 ? sqrt(sigma2) : 0.0;
		sigma12 = sigma1*sigma2;
// std::cout << "normal: zncc_sum " << zncc_sum << ", count " << zncc_count << ", mean12 " << mean1*mean2 << ", sigma12 " << sigma1*sigma2 << std::endl;
		zncc_sum = (sigma12 < 1e-6 ? -1 : (zncc_sum/zncc_count - mean1*mean2) / sigma12);
		
		JFR_ASSERT(zncc_sum >= -1.01, "");
		return zncc_sum;
	}
Exemple #15
0
void test_matchPoints(double alpha,int winn,int winp,char * fic1,char* fic2){
  image im1(fic1);
  image comp(fic2);
  im1.matchPoints(comp,50,winn,winp,&image::zncc,true);
}
// Trains a small sigmoid MLP (width*height inputs -> 15 hidden -> 10 outputs)
// to classify randomly rotated MNIST digits, then runs a 1000-sample
// validation pass and saves the network. Relies on file-scope globals:
// report/image/label streams, width/height, inputMNIST(), rotate(), etc.
int main(int argc, char* argv[])
{
	std::vector<Mat<float> > mse;
	
	//Neural Networks settings :
	Topology topo;
	unsigned int nbrneurons = 25;
	unsigned int nbrlayer = 1;
	unsigned int nbrinput = width*height;
	unsigned int nbroutput = 10;
	
	//topo.push_back(nbrinput,NTNONE);	//input layer
	topo.push_back(nbrinput,NTSIGMOID);	//input layer
	
	//topo.push_back(nbrneurons, NTSIGMOID);
	topo.push_back(15, NTSIGMOID);
	
	//topo.push_back(nbroutput, NTSOFTMAX);	//linear output
	topo.push_back(nbroutput, NTSIGMOID);	//linear output
	
	NN<float> nn(topo);
	nn.learning = false;
	//------------------------------
	
	//DATASET SETTINGS :
	// Open the report file plus the raw MNIST image/label binaries
	// (global streams declared elsewhere in this file).
	report.open(report_fn.c_str(), ios::out);
    image.open(training_image_fn.c_str(), ios::in | ios::binary); // Binary image file
    label.open(training_label_fn.c_str(), ios::in | ios::binary ); // Binary label file

	// Reading file headers
	// Skip the MNIST headers: 16 bytes for images, 8 for labels.
    char number;
    for (int i = 1; i <= 16; ++i) {
        image.read(&number, sizeof(char));
	}
    for (int i = 1; i <= 8; ++i) {
        label.read(&number, sizeof(char));
	}
	
	//------------------------------
	
	//checking rotation :
	Mat<float> im1(8,8, (char)1);
	Mat<float> im2( rotate(im1,PI/2.0f) );
	
	im1.afficher();
	im2.afficher();
	//--------------------------------
	
	//checking arctan !!!
	float y = -4.0f;
	float x = 4.0f;
	std::cout << arctan(y,x)*180.0f/(PI) << std::endl;
	//--------------------------------------------------
	
	//checking reading :
	char labelval = 0;
	// NOTE(review): this `theta` is shadowed by the per-iteration theta in
	// the loops below and only used for this one smoke check.
	float theta = PI/2;
	im1 = inputMNIST(labelval);
	im2 = rotate(im1,theta);
	im2.afficher();
		
	std::cout << "Rotation of : " << theta*180.0f/PI << std::endl;
	//---------------------------------------------------
	
	
	
	// Training loop: each pass reads a digit, rotates it by a random angle,
	// crops to size x size, and trains on digits 0..8 only.
	int iteration = 25000;
	int offx = 2;
	int offy = 2;
	int size = 28;
	int countSuccess = 0;
	
	while( iteration)
	{
		Mat<float> rotatedinput( inputMNIST(labelval) );
		//let us choose the rotation :
		float theta = ((float)(rand()%360))*PI/180.0f;
		
		//let us apply it :
		rotatedinput = extract( rotate(rotatedinput, theta), offx,offy, offx+(size-1), offy+(size-1) );
		
		Mat<float> input( reshapeV( rotatedinput ) );
		Mat<float> target( 0.0f, 10,1);
		target.set( 1.0f, labelval+1, 1);
		
		// NOTE(review): iteration is only decremented inside this branch, so
		// samples with labelval >= 9 are skipped without consuming an
		// iteration — confirm that this filtering is intentional.
		if(labelval < 9)
		{
			Mat<float> output( nn.feedForward( input) );
	
			int idmax = idmin( (-1.0f)*output).get(1,1);
	
			transpose( operatorL(target,output) ).afficher();
	
			std::cout << " LEARNING ITERATION : " << iteration << " ; IDMAX = " << idmax << std::endl;
	
	
			nn.backProp(target);
			//nn.backPropCrossEntropy(target);
			
			
			//counting :
			if(idmax == labelval+1)
			{
				countSuccess++;
			}
			
			//-------------------
			
			// Every 1000 iterations: log the running success count and reset it.
			if( iteration % 1000 == 0)
			{
				std::cout << " TEST : " << countSuccess << " / " << 1000 << std::endl;
				mse.push_back(Mat<float>((float)countSuccess,1,1));
		
				writeInFile(std::string("./mse.txt"), mse);
		
				countSuccess = 0;
			}
			
			iteration--;
			
			
		}
		
		
		
	}
	
	std::cout << " VALIDATION TEST : in progress.." << std::endl;
	
	// Validation loop: same pipeline, but digits 0..4 only and no backprop.
	iteration = 1000;
	int success = 0;
	while( iteration)
	{
		Mat<float> rotatedinput( inputMNIST(labelval) );
		//let us choose the rotation :
		//float theta = rand()%360;
		float theta = ((float)(rand()%360))*PI/180.0f;
		
		//let us apply it :
		rotatedinput = extract( rotate(rotatedinput, theta), offx,offy, offx+(size-1), offy+(size-1) );
		
		Mat<float> input( reshapeV( rotatedinput ) );
		Mat<float> target( 0.0f, 10,1);
		target.set( 1.0f, labelval+1, 1);
		
		if(labelval < 5)
		{
			Mat<float> output( nn.feedForward( input));
			int idmax = idmin( (-1.0f)*output).get(1,1);
		
			transpose(output).afficher();
			std::cout << " ITERATION : " << iteration << " ; IDMAX = " << idmax << std::endl;
		
			if(idmax == labelval+1)
			{
				success++;
			}
			
			iteration--;
		}
		
	}
	
	std::cout << "VALIDATION TEST : " << success << " / 1000." << std::endl;
	
	report.close();
	label.close();
	image.close();
	
	nn.save(std::string("neuralnetworksDIGITROTATED"));
		
	return 0;
}
/** Displays the image view with its current image/ texts.
 *
 *  Animates a carousel transition from currentImage toward newImage along
 *  the shortest direction around the (circular) image list, then draws the
 *  two frames adjacent to the fractional position, clipped to the widget
 *  rectangle.
 */
void
CAImageSelector::display( bool active ) {
    CAImageView::display( active );

    CL_Rect crAll( 0,0, CA_APP->width, CA_APP->height );
    CL_Rect crImage( left,top, right, bottom );

    CA_APP->graphicContext->set_cliprect( crImage );

    // Animate:
    //
    float diff = (float)newImage - currentImage;
    float numImages2 = (float)numImages/2.0;
    bool forward = false;

    // Move forward or back?
    //
    if( (diff <  numImages2 && diff > 0.0) ||
            (diff < -numImages2) ) {
        forward = true;
    }

    // Correct frame difference:
    //
    if( diff < -numImages2 ) diff += numImages;
    if( diff >  numImages2 ) diff -= numImages;

    // Advance 1/6 of the remaining distance each frame (ease-out).
    if( forward ) currentImage += fabs( diff ) / 6.0;
    else          currentImage -= fabs( diff ) / 6.0;

    // Wrap the fractional position into [0, numImages].
    if( currentImage< 0.0 )             currentImage+=(float)numImages;
    if( currentImage>(float)numImages ) currentImage-=(float)numImages;

    // The two frames straddling the fractional position, and the blend rest.
    int image1 = (int)floor( currentImage );
    int image2 = (int)ceil( currentImage );
    if( image2>=numImages ) image2=0;
    float rest = currentImage-image1;
    int center;
    // NOTE(review): these sprites are rebuilt from the source images on
    // every display() call — potentially costly; confirm acceptable.
    CL_SpriteDescription im1_desc = CL_SpriteDescription();
    CL_SpriteDescription im2_desc = CL_SpriteDescription();
    im1_desc.add_frame(image[image1].image);
    im2_desc.add_frame(image[image2].image);
    CL_Sprite im1(*CA_APP->graphicContext,im1_desc);
    CL_Sprite im2(*CA_APP->graphicContext,im2_desc);

    if( direction==Horizontal ) {
        // Slide horizontally: frame 2 sits one full width to the right.
        center = (left+right)/2;
        int image1Pos = (int)(center - rest*width - image[image1].image.get_width()/2);
        im1.draw ( *CA_APP->graphicContext,image1Pos, top+barHeight);
        im2.draw ( *CA_APP->graphicContext,image1Pos + width, top+barHeight);
    } else {
        // Slide vertically: frame 2 sits one full height below.
        center = (top+bottom)/2;
        int image1Pos = (int)(center - rest*height - image[image1].image.get_height()/2);
        im1.draw ( *CA_APP->graphicContext,(left+right-image[image1].image.get_width())/2, image1Pos);
        im2.draw ( *CA_APP->graphicContext,(left+right-image[image1].image.get_width())/2, image1Pos + height);
    }

    displayArrows( active );

    CA_APP->graphicContext->set_cliprect( crAll );
}