/***************************************************************************************************
 Methods
 Authors: Alexander Gómez Villa - Sebastian Guzman Obando - German Diez Valencia
 Description: Adds salt noise by setting n randomly chosen pixels to white (255);
 handles both single-channel (gray-level) and 3-channel (color) images.
***************************************************************************************************/
void Simage::salt2(cv::Mat &image, int n)
{
    for (int k = 0; k < n; k++) {
        // rand() is the MFC random number generator
        // try qrand with Qt
        int i = rand() % image.cols;
        int j = rand() % image.rows;

        if (image.channels() == 1) {            // if the image is gray-level
            cv::Mat_<uchar> im2 = image;
            im2(j, i) = 255;
        } else if (image.channels() == 3) {     // if there are 3 channels (color image)
            cv::Mat_<cv::Vec3b> im2 = image;
            im2(j, i)[0] = 255;
            im2(j, i)[1] = 255;
            im2(j, i)[2] = 255;
        }
    }
}
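A minimal usage sketch for the method above; the image path and the assumption that Simage is default-constructible are illustrative only, not taken from the original project.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png");    // hypothetical path; any 1- or 3-channel image
    if (img.empty()) return 1;

    Simage s;                                  // assumes Simage can be default-constructed
    s.salt2(img, 3000);                        // sprinkle 3000 white (salt) pixels

    cv::imwrite("salted.png", img);
    return 0;
}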
void test_drawMatchPoints(double alpha, int winn, int winp, char *fic1, char *fic2, char *fic_s)
{
    image im1(fic1);
    image im2(fic2);
    image *sortie = im1.drawMatchPoints(im2, 50, winn, winp, &image::zncc, true);
    sortie->EcrireImagePGM(fic_s);
}
Image ProjectExporter::getBestIconForSize (int size, bool returnNullIfNothingBigEnough) const
{
    Drawable* im = nullptr;

    ScopedPointer<Drawable> im1 (getSmallIcon());
    ScopedPointer<Drawable> im2 (getBigIcon());

    if (im1 != nullptr && im2 != nullptr)
    {
        if (im1->getWidth() >= size && im2->getWidth() >= size)
            im = im1->getWidth() < im2->getWidth() ? im1 : im2;
        else if (im1->getWidth() >= size)
            im = im1;
        else if (im2->getWidth() >= size)
            im = im2;
    }
    else
    {
        im = im1 != nullptr ? im1 : im2;
    }

    if (im == nullptr)
        return Image();

    if (returnNullIfNothingBigEnough && im->getWidth() < size && im->getHeight() < size)
        return Image();

    return rescaleImageForIcon (*im, size);
}
// compute distances from rows of inmat1 to rows of inmat2
RcppExport SEXP rthpdist(SEXP inmat1, SEXP inmat2, SEXP nthreads)
{
    Rcpp::NumericMatrix im1(inmat1);
    Rcpp::NumericMatrix im2(inmat2);
    int nr1 = im1.nrow();
    int nc  = im1.ncol();
    int nr2 = im2.nrow();

    #if RTH_OMP
    omp_set_num_threads(INT(nthreads));
    #elif RTH_TBB
    // tbb::task_scheduler_init init(INT(nthreads));
    // for unknown reasons, this code does not work under TBB
    return Rcpp::wrap(-1);
    #endif

    thrust::device_vector<double> dmat1(im1.begin(), im1.end());
    thrust::device_vector<double> dmat2(im2.begin(), im2.end());

    // make space for the output
    thrust::device_vector<double> ddst(nr1 * nr2);

    // iterators for row number of inmat1
    thrust::counting_iterator<int> iseqb(0);
    thrust::counting_iterator<int> iseqe = iseqb + nr1;

    // for each i in [iseqb,iseqe) find the distances from row i in inmat1
    // to all rows of inmat2
    thrust::for_each(iseqb, iseqe,
        do1ival(dmat1.begin(), dmat2.begin(), ddst.begin(), nr1, nc, nr2));

    Rcpp::NumericMatrix rout(nr1, nr2);
    thrust::copy(ddst.begin(), ddst.end(), rout.begin());
    return rout;
}
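The do1ival functor is defined elsewhere and is not shown here. Purely as a hedged sketch of what a row-distance functor with that argument list could look like, assuming plain Euclidean distance, raw device pointers, and R's column-major layout:

// Hypothetical sketch only: the real do1ival may be organized differently.
struct do1ival_sketch
{
    const double *m1, *m2;
    double *dst;
    int nr1, nc, nr2;

    do1ival_sketch(const double *m1_, const double *m2_, double *dst_,
                   int nr1_, int nc_, int nr2_)
        : m1(m1_), m2(m2_), dst(dst_), nr1(nr1_), nc(nc_), nr2(nr2_) {}

    __host__ __device__
    void operator()(int i) const
    {
        // distances from row i of matrix 1 to every row j of matrix 2
        for (int j = 0; j < nr2; ++j) {
            double s = 0.0;
            for (int k = 0; k < nc; ++k) {
                // R matrices are column-major: element (i,k) lives at k*nrow + i
                double d = m1[k * nr1 + i] - m2[k * nr2 + j];
                s += d * d;
            }
            dst[j * nr1 + i] = sqrt(s);   // column-major nr1 x nr2 output, matching rout
        }
    }
};

A functor shaped like this would be handed raw pointers (e.g. thrust::raw_pointer_cast(dmat1.data())) rather than the device iterators shown in the call above; the real functor evidently accepts iterators directly.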
void ScaleSpaceImage::show(std::string prefix, std::string processor, unsigned int image_number, bool debug)
{
    static int image_nr = 0;
    cv::Mat to_write;
    std::string image_number_string;
    if (image_number < 10) {
        image_number_string = "0";
    }
    image_number_string += std::to_string(image_number);
    cv::imwrite(prefix + "_" + processor + image_number_string + "_original.bmp", input);
    for (unsigned int i = 0; i < nr_scales; ++i) {
        std::string s;
        s = prefix + "_" + processor + "_";
        s += image_number_string;
        s += "_";
        if (i < 10) {
            s += "0";
        }
        s += std::to_string(i);
        if (debug) {
            std::string ds1 = s + "_1.bmp";
            std::string ds1txt = s + "_1.txt";
            std::ofstream im1(ds1txt);
            scale_space_images[0][i].convertTo(to_write, CV_8UC1, 255.0);
            cv::imwrite(ds1.c_str(), to_write);
            im1 << scale_space_images[0][i];
            if (scale_space_images.size() > 1) {
                std::string ds2 = s + "_2.bmp";
                std::string ds2txt = s + "_2.txt";
                std::ofstream im2(ds2txt);
                scale_space_images[1][i].convertTo(to_write, CV_8UC1, 255.0);
                cv::imwrite(ds2.c_str(), to_write);
                im2 << scale_space_images[1][i];
            }
        }
        s += ".bmp";
        if (gaussian) {
            scale_space_images[0][i].convertTo(to_write, CV_8UC1, 255.0);
        } else {
            to_write = output[i];
        }
        cv::imwrite(s.c_str(), to_write);
    }
    image_nr++;
}
void testDblDepth(int winn, int winp, char *fic1, char *fic2, char *ficSortie)
{
    image im1(fic1);
    image im2(fic2);
    image *imSortie = im1.dblMatchProfPoints(im2, winn, winp, &image::ssd, false);
    imSortie->recadre(0, 255);
    imSortie->EcrireImagePGM(ficSortie);
}
/**
 *  Test that JPEG files which use the JFIF colorspace are embedded into
 *  the PDF directly (without re-encoding) when that makes sense.
 */
DEF_TEST(PDFJpegEmbedTest, r) {
    const char test[] = "PDFJpegEmbedTest";
    SkAutoTUnref<SkData> mandrillData(
            load_resource(r, test, "mandrill_512_q075.jpg"));
    SkAutoTUnref<SkData> cmykData(load_resource(r, test, "CMYK.jpg"));
    if (!mandrillData || !cmykData) {
        return;
    }
    ////////////////////////////////////////////////////////////////////////
    SkDynamicMemoryWStream pdf;
    SkAutoTUnref<SkDocument> document(SkDocument::CreatePDF(&pdf));
    SkCanvas* canvas = document->beginPage(642, 1028);

    canvas->clear(SK_ColorLTGRAY);

    SkBitmap bm1(bitmap_from_data(mandrillData));
    canvas->drawBitmap(bm1, 65.0, 0.0, nullptr);
    SkBitmap bm2(bitmap_from_data(cmykData));
    canvas->drawBitmap(bm2, 0.0, 512.0, nullptr);

    canvas->flush();
    document->endPage();
    document->close();
    SkAutoTUnref<SkData> pdfData(pdf.copyToData());
    SkASSERT(pdfData);
    pdf.reset();

    REPORTER_ASSERT(r, is_subset_of(mandrillData, pdfData));

    // This JPEG uses a nonstandard colorspace - it cannot be
    // embedded into the PDF directly.
    REPORTER_ASSERT(r, !is_subset_of(cmykData, pdfData));
    ////////////////////////////////////////////////////////////////////////
    pdf.reset();
    document.reset(SkDocument::CreatePDF(&pdf));
    canvas = document->beginPage(642, 1028);

    canvas->clear(SK_ColorLTGRAY);

    SkAutoTUnref<SkImage> im1(SkImage::NewFromEncoded(mandrillData));
    canvas->drawImage(im1, 65.0, 0.0, nullptr);
    SkAutoTUnref<SkImage> im2(SkImage::NewFromEncoded(cmykData));
    canvas->drawImage(im2, 0.0, 512.0, nullptr);

    canvas->flush();
    document->endPage();
    document->close();
    pdfData.reset(pdf.copyToData());
    SkASSERT(pdfData);
    pdf.reset();

    REPORTER_ASSERT(r, is_subset_of(mandrillData, pdfData));

    // This JPEG uses a nonstandard colorspace - it cannot be
    // embedded into the PDF directly.
    REPORTER_ASSERT(r, !is_subset_of(cmykData, pdfData));
}
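The is_subset_of() helper is defined elsewhere in the Skia test file. A plausible sketch, assuming it only checks whether the JPEG's bytes occur verbatim inside the generated PDF (which is what "embedded without re-encoding" means here), could look like this:

// Hypothetical sketch only; the real test helper may differ. Needs <cstring> for memcmp.
static bool is_subset_of_sketch(SkData* part, SkData* whole) {
    if (!part || !whole || part->size() > whole->size()) {
        return false;
    }
    const uint8_t* p = part->bytes();
    const uint8_t* w = whole->bytes();
    size_t last = whole->size() - part->size();
    for (size_t i = 0; i <= last; ++i) {
        // brute-force search for the JPEG byte stream inside the PDF stream
        if (0 == memcmp(w + i, p, part->size())) {
            return true;
        }
    }
    return false;
}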
FireflyOptimizator::FireflyOptimizator(QString imagePath, QString imageXml, float threshold,
                                       int MaxGenerations, int PopulationSize, float gamma)
    : MaxGenerations(MaxGenerations), PopulationSize(PopulationSize), imagePath(imagePath),
      imageXml(imageXml), gamma(gamma), threshold(threshold)
{
    SegmentedImage im2("/Users/zulli/Documents/city/sun_abnjbjjzwfckjhyx.jpg",
                       "/Users/zulli/Documents/city/sun_abnjbjjzwfckjhyx.xml");

    listaCor = vector<Vec3b*>(0);

    groundTruth = Segmentation(im2, threshold, listaCor);
    //regions = *groundTruth.getRegions();

    init();
}
// This is an extra version of the function
// to illustrate the use of cv::Mat_
// works only for a 1-channel image
void salt2(cv::Mat image, int n) {
    // use image with a Mat_ template
    cv::Mat_<uchar> im2(image);

    // or with references:
    // cv::Mat_<uchar>& im2 = reinterpret_cast<cv::Mat_<uchar>&>(image);

    int i, j;
    for (int k = 0; k < n; k++) {
        // rand() is the MFC random number generator
        i = rand() % image.cols;
        j = rand() % image.rows;

        if (im2.type() == CV_8UC1) { // gray-level image
            im2(j, i) = 255;
        }
    }
}
cv::Mat images::ellipseCrop(QGraphicsRectItem *rect)
{
    cv::Rect myRoi(rect->rect().x(), rect->rect().y(), rect->rect().width(), rect->rect().height());
    cv::Mat croppedImage;
    cv::Point center(rect->rect().x() + rect->rect().width() / 2,
                     rect->rect().y() + rect->rect().height() / 2);
    cv::Size size(rect->rect().width() / 2, rect->rect().height() / 2);

    cv::Mat im1(matrix.rows, matrix.cols, CV_8UC1, cv::Scalar(255, 255, 255));
    cv::Mat im2(matrix.rows, matrix.cols, CV_8UC1, cv::Scalar(0, 0, 0));

    // im2: filled white ellipse on black -> the AND below keeps only the pixels inside the ellipse
    cv::ellipse(im2, center, size, 0, 0, 360, cv::Scalar(255, 255, 255), -1, 8);
    // im1: filled black ellipse on white -> the XOR below turns everything outside the ellipse white
    cv::ellipse(im1, center, size, 0, 0, 360, cv::Scalar(0, 0, 0), -1, 8);

    cv::bitwise_and(matrix, im2, croppedImage);
    cv::bitwise_xor(croppedImage, im1, croppedImage);

    return croppedImage(myRoi);
}
void SBConvolve::SBConvolveImpl::fillKImage(ImageView<std::complex<double> > im,
                                            double kx0, double dkx, double dkxy,
                                            double ky0, double dky, double dkyx) const
{
    dbg<<"SBConvolve fillKImage\n";
    dbg<<"kx = "<<kx0<<" + i * "<<dkx<<" + j * "<<dkxy<<std::endl;
    dbg<<"ky = "<<ky0<<" + i * "<<dkyx<<" + j * "<<dky<<std::endl;
    ConstIter pptr = _plist.begin();
    assert(pptr != _plist.end());
    GetImpl(*pptr)->fillKImage(im, kx0, dkx, dkxy, ky0, dky, dkyx);
    if (++pptr != _plist.end()) {
        ImageAlloc<std::complex<double> > im2(im.getBounds());
        for (; pptr != _plist.end(); ++pptr) {
            GetImpl(*pptr)->fillKImage(im2.view(), kx0, dkx, dkxy, ky0, dky, dkyx);
            im *= im2;
        }
    }
}
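The multiply-accumulate loop is an application of the standard convolution theorem (nothing specific to this implementation): the k-space image of a convolution is the pointwise product of the k-space images of its components,

\[
\widetilde{(f_1 * f_2 * \cdots * f_n)}(k_x, k_y) \;=\; \prod_{p=1}^{n} \tilde f_p(k_x, k_y),
\]

which is why the first component is written directly into im and every further component is multiplied in via im *= im2.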
void Ut_UnlockNotificationSink::testEnableDisableLocking ()
{
    sink->setLockedState (true);
    gUnlockMissedEventsStub->stubReset ();

    // Test notifications
    NotificationParameters im_params;
    im_params.add (GenericNotificationParameterFactory::eventTypeKey (), EVENT_IM);

    Notification im1 (200, GID, UID, im_params, Notification::ApplicationEvent, -1);
    Notification im2 (200, GID, UID, im_params, Notification::ApplicationEvent, -1);
    Notification im3 (200, GID, UID, im_params, Notification::ApplicationEvent, -1);

    // To trigger the notification clearing...
    sink->setLockedState (false);
    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("clearAll"), 1);

    // Try to add one notification
    sink->addNotification (im1);
    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("addNotification"), 0);

    gUnlockMissedEventsStub->stubReset ();
    sink->setLockedState (true);

    // Try to add some notifications...
    sink->addNotification (im2);
    sink->addNotification (im3);
    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("addNotification"), 2);
    QVERIFY (gUnlockMissedEventsStub->stubLastParameters<int> (0) ==
             (int) UnlockMissedEvents::NotifyMessage);

    gUnlockMissedEventsStub->stubReset ();
    sink->setLockedState (false);
    QCOMPARE (gUnlockMissedEventsStub->stubCallCount ("clearAll"), 1);
}
void atWrapper::uploadDiff( QString basedir, QString dir, QString filename )
{
    qDebug() << basedir;

    QImage im1( basedir + ".baseline/" + filename );
    QImage im2( basedir + "/" + filename );
    QImage im3( im1.size(), QImage::Format_ARGB32 );

    im1 = im1.convertToFormat(QImage::Format_ARGB32);
    im2 = im2.convertToFormat(QImage::Format_ARGB32);

    for ( int y = 0; y < im1.height(); ++y )
    {
        uint *s = (uint *) im1.scanLine(y);
        uint *d = (uint *) im2.scanLine(y);
        uint *w = (uint *) im3.scanLine(y);

        for ( int x = 0; x < im1.width(); ++x )
        {
            if (*s != *d)
                *w = 0xff000000;
            else
                *w = 0xffffffff;
            w++;
            s++;
            d++;
        }
    }

    im3.save( basedir + ".diff/" + filename, "PNG" );

    QFile file( basedir + ".diff/" + filename );
    file.open( QIODevice::ReadOnly );
    QByteArray contents = file.readAll();
    file.close();

    uploadFailed( dir + ".diff", filename, contents );
}
void writeDataCosts(MRF::CostVal *dataCostArray, int width, int height, int nLabels)
{
    CShape sh(width, height, 1);
    CIntImage im(sh);
    CByteImage im2(sh);

    for (int l = 0; l < nLabels; l++) {
        int n = l;
        int vmax = 0;
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                int v = dataCostArray[n];
                n += nLabels;
                if (v > vmax)
                    vmax = v;
                im.Pixel(x, y, 0) = v;
            }
        }
        ScaleAndOffset(im, im2, (float)(255.0 / vmax), 0);

        char fname[1000];
        sprintf(fname, "datacost%d.png", l);
        WriteImageVerb(im2, fname, 1);
    }
}
int main(int, char**)
{
    std::vector<std::chrono::duration<double,std::milli>> duration_vector_1;
    std::vector<std::chrono::duration<double,std::milli>> duration_vector_2;

#if SYNTHETIC_INPUT
    Halide::Buffer<uint8_t> im1(10, 10);
    Halide::Buffer<uint8_t> im2(10, 10);
    for (int i = 0; i < 10; i++)
        for (int j = 0; j < 10; j++)
        {
            im1(i, j) = (uint8_t) i*i+j*j;
            im2(i, j) = (uint8_t) i*i+j*j;
        }
#else
    Halide::Buffer<uint8_t> im1 = Halide::Tools::load_image("./utils/images/rgb.png");
    Halide::Buffer<uint8_t> im2 = Halide::Tools::load_image("./utils/images/rgb.png");
#endif

    Halide::Buffer<float> Ix_m(im1.width(), im1.height());
    Halide::Buffer<float> Iy_m(im1.width(), im1.height());
    Halide::Buffer<float> It_m(im1.width(), im1.height());
    Halide::Buffer<int> C1(_NC);
    Halide::Buffer<int> C2(_NC);
    Halide::Buffer<int> SIZES(2);
    Halide::Buffer<int> u(_NC);
    Halide::Buffer<int> v(_NC);
    Halide::Buffer<float> A(2, 4*w*w);
    Halide::Buffer<float> tA(4*w*w, 2);
    Halide::Buffer<double> pinvA(4*w*w, 2);
    Halide::Buffer<double> det(1);
    Halide::Buffer<float> tAA(2, 2);
    Halide::Buffer<double> X(2, 2);

    SIZES(0) = im1.height();
    SIZES(1) = im1.width();
    C1(0) = 500; C2(0) = 400;
    C1(1) = 800; C2(1) = 900;
    C1(2) = 200; C2(2) = 400;
    C1(3) = 400; C2(3) = 200;
    C1(4) = 400; C2(4) = 500;
    C1(5) = 800; C2(5) = 200;
    C1(6) = 200; C2(6) = 900;
    C1(7) = 900; C2(7) = 200;
    det(0) = 0;

    init_buffer(Ix_m, (float) 0);
    init_buffer(Iy_m, (float) 0);
    init_buffer(It_m, (float) 0);
    init_buffer(A, (float) 0);
    init_buffer(tA, (float) 0);
    init_buffer(pinvA, (double) 0);
    init_buffer(tAA, (float) 0);
    init_buffer(X, (double) 0);

    // Warm up
    optical_flow_tiramisu(SIZES.raw_buffer(), im1.raw_buffer(), im2.raw_buffer(),
                          Ix_m.raw_buffer(), Iy_m.raw_buffer(), It_m.raw_buffer(),
                          C1.raw_buffer(), C2.raw_buffer(), u.raw_buffer(), v.raw_buffer(),
                          A.raw_buffer(), pinvA.raw_buffer(), det.raw_buffer(),
                          tAA.raw_buffer(), tA.raw_buffer(), X.raw_buffer());

    // Tiramisu
    for (int i = 0; i < NB_TESTS; i++)
    {
        auto start1 = std::chrono::high_resolution_clock::now();

        optical_flow_tiramisu(SIZES.raw_buffer(), im1.raw_buffer(), im2.raw_buffer(),
                              Ix_m.raw_buffer(), Iy_m.raw_buffer(), It_m.raw_buffer(),
                              C1.raw_buffer(), C2.raw_buffer(), u.raw_buffer(), v.raw_buffer(),
                              A.raw_buffer(), pinvA.raw_buffer(), det.raw_buffer(),
                              tAA.raw_buffer(), tA.raw_buffer(), X.raw_buffer());

        auto end1 = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double,std::milli> duration1 = end1 - start1;
        duration_vector_1.push_back(duration1);
    }

    std::cout << "Time: " << median(duration_vector_1) << std::endl;

#if SYNTHETIC_INPUT
    print_buffer(im1);
    print_buffer(im2);
    print_buffer(Ix_m);
    print_buffer(Iy_m);
    print_buffer(It_m);
    print_buffer(A);
    print_buffer(tA);
    print_buffer(tAA);
    print_buffer(det);
    print_buffer(X);
    print_buffer(pinvA);
#endif

    std::cout << "Output" << std::endl;
    print_buffer(u);
    print_buffer(v);

    return 0;
}
void presentation(){
    auto next_button = hwlib::target::pin_in(hwlib::target::pins::d2);
    Timer t;

    // matrix led(64, 32, true, target::pins::d43, target::pins::d42, target::pins::d39,
    //            target::pins::d25, target::pins::d26, target::pins::d27, target::pins::d28,
    //            target::pins::d33, target::pins::d34, target::pins::d35, target::pins::d36,
    //            target::pins::d37, target::pins::d38);
    matrix led(64, 32, true);
    led.start();

    /* Colors with different brightness
    for (int y=0; y<16; y++){
        for (int x=0; x<32; x++){
            led.drawPixel(x, y, 0, 35, 0);
        }
    }
    for (int y=0; y<16; y++){
        for (int x=32; x<64; x++){
            led.drawPixel(x, y, 0, 75, 0);
        }
    }
    for (int y=16; y<32; y++){
        for (int x=0; x<32; x++){
            led.drawPixel(x, y, 0, 255, 0);
        }
    }
    for (int y=16; y<32; y++){
        for (int x=32; x<64; x++){
            led.drawPixel(x, y, 0, 150, 0);
        }
    }
    led.swap_buffer(false);
    while (next_button.get()){
    }
    */

    led.clear();
    Image im(led, simpson, vector(15, 0));
    im.draw();
    led.swap_buffer(false);
    while (next_button.get()){
    }
    t.delayMilliseconds(1000);

    led.clear();
    AnimatedImage a_im(led, animation, vector(15, 1));
    while (next_button.get()){
        a_im.draw(5, false);
    }
    t.delayMilliseconds(1000);

    led.clear();
    Image im2(led, hu, vector(48, 0));
    im2.draw();
    String title(led, tahoma10, (char*)"hu.nl", 0xFA0000, vector(0, 10));
    title.draw();
    // Dutch: "The Hogeschool van Utrecht was founded in 1988 in the city and province of Utrecht."
    const char *text = "De Hogeschool van Utrecht ontstond in 1988 in de stad en provincie Utrecht.";
    String str(led, tahoma10, (char*)text, 0x878BFF, vector(0, 28));
    while (next_button.get()){
        str.scroll(15, false);
    }
    t.delayMilliseconds(1000);
}
double Explorer<Correl>::exploreTranslation(image::Image const& im1, image::Image const& im2_,
    int xmin, int xmax, int xstep, int ymin, int ymax, int ystep,
    double &xres, double &yres, float const* weightMatrix)
{
    cv::Rect roi = im1.getROI();
    // image::Image im2(im2_, cv::Rect(0,0,im2_.width(),im2_.height()));
    image::Image im2(im2_);
    double score;
    double best_score = -1.;
    int bestx = -1, besty = -1;

    if (xmin < 0) xmin = 0;
    if (xmax >= im2.width ()) xmax = im2.width ()-1;
    if (ymin < 0) ymin = 0;
    if (ymax >= im2.height()) ymax = im2.height()-1;

    int sa_w = (xmax-xmin+1), sa_h = (ymax-ymin+1); // search area
    if (sa_w < 5) xstep = 1;
    if (sa_h < 5) ystep = 1;
    int nresults = (sa_w+2)*(sa_h+2);
    double *results = new double[nresults]; // add 1 border for interpolation
    for(int i = 0; i < nresults; i++) results[i] = -1e6;

    // explore
    for(int y = ymin; y <= ymax; y += ystep)
        for(int x = xmin; x <= xmax; x += xstep)
            DO_CORRELATION(im1, im2, weightMatrix, x, y, score, best_score, bestx, besty, roi);

    // refine
    // JFR_DEBUG("refine (" << bestx << "," << besty << " " << best_score << ")");
    // TODO refine several local maxima
    // TODO refine by dichotomy for large steps ?
    int newbestx = bestx, newbesty = besty;
    for(int y = besty-ystep+1; y <= besty+ystep-1; y++)
        for(int x = bestx-xstep+1; x <= bestx+xstep-1; x++)
        {
            if (x == bestx && y == besty) continue;
            DO_CORRELATION(im1, im2, weightMatrix, x, y, score, best_score, newbestx, newbesty, roi);
        }

    // ensure that all values that will be used by interpolation are computed
    int newnewbestx = newbestx, newnewbesty = newbesty;
    /*
    if (((newbestx == bestx-xstep+1 || newbestx == bestx+xstep-1) && (newbesty-ymin)%ystep) ||
        ((newbesty == besty-ystep+1 || newbesty == besty+ystep-1) && (newbestx-xmin)%xstep))
    {
        if (newbestx == bestx-xstep+1)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx-1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
        if (newbestx == bestx+xstep-1)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx+1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
        if (newbesty == besty-ystep+1)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty-1, score, best_score, newnewbestx, newnewbesty, roi);
        if (newbesty == besty+ystep-1)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty+1, score, best_score, newnewbestx, newnewbesty, roi);
    }*/
    // JFR_DEBUG("extra interpol (" << newbestx << "," << newbesty << " " << best_score << ")");
    do {
        newbestx = newnewbestx, newbesty = newnewbesty;
        if (newbestx > 0 && RESULTS(newbesty,newbestx-1) < -1e5)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx-1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
        if (newbestx < im2.width()-1 && RESULTS(newbesty,newbestx+1) < -1e5)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx+1, newbesty, score, best_score, newnewbestx, newnewbesty, roi);
        if (newbesty > 0 && RESULTS(newbesty-1,newbestx) < -1e5)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty-1, score, best_score, newnewbestx, newnewbesty, roi);
        if (newbesty < im2.height()-1 && RESULTS(newbesty+1,newbestx) < -1e5)
            DO_CORRELATION(im1, im2, weightMatrix, newbestx, newbesty+1, score, best_score, newnewbestx, newnewbesty, roi);
    } while (newbestx != newnewbestx || newbesty != newnewbesty); // FIXME this could go out of bounds
    // JFR_DEBUG("final : " << newnewbestx << "," << newnewbesty << " " << best_score);
    bestx = newbestx;
    besty = newbesty;

    // TODO interpolate the score as well
    // interpolate x
    double a1 = RESULTS(besty,bestx-1), a2 = RESULTS(besty,bestx-0), a3 = RESULTS(besty,bestx+1);
    if (a1 > -1e5 && a3 > -1e5)
        jmath::parabolicInterpolation(a1, a2, a3, xres);
    else
        xres = 0;
    // JFR_DEBUG("interpolating " << a1 << " " << a2 << " " << a3 << " gives shift " << xres << " plus " << bestx+0.5);
    xres += bestx+0.5;

    // interpolate y
    a1 = RESULTS(besty-1,bestx), a2 = RESULTS(besty-0,bestx), a3 = RESULTS(besty+1,bestx);
    if (a1 > -1e5 && a3 > -1e5)
        jmath::parabolicInterpolation(a1, a2, a3, yres);
    else
        yres = 0;
    // JFR_DEBUG("interpolating " << a1 << " " << a2 << " " << a3 << " gives shift " << yres << " plus " << besty+0.5);
    yres += besty+0.5;

    delete[] results;
    return best_score;
}
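The exact convention used by jmath::parabolicInterpolation is not visible in this snippet. As a reference sketch only, the standard three-point parabolic peak interpolation for scores a1, a2, a3 taken at offsets -1, 0, +1 is:

// Textbook three-point parabolic peak interpolation (sketch; the jafar jmath
// routine used above may apply a different convention or output range).
static inline double parabolicPeakOffset(double a1, double a2, double a3)
{
    double denom = a1 - 2.0 * a2 + a3;   // curvature term; negative at a true maximum
    // Vertex of the parabola through (-1,a1), (0,a2), (1,a3); lies in [-0.5, 0.5]
    // whenever a2 is the largest of the three samples.
    return (denom != 0.0) ? (a1 - a3) / (2.0 * denom) : 0.0;
}

The function above then adds bestx+0.5 (resp. besty+0.5) to the interpolated shift, i.e. it reports the result in a pixel-center coordinate convention.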
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    QSettings set;
    myMainWindow = this;
    localTimeOffset_ms = 0;

    ui->setupUi(this);
    showMaximized();

    dialogNewCompetition = new DialogNewCompetition(this);
    dialogGeneralSettings = new DialogGeneralSettings(this);
    dialogDisplaySettings = new DialogDisplaySettings(this);
    dialogCsvUploadCommand = new DialogCsvUploadCommand(this);
    dialogTimeStampGenerator = 0;
    dialogTimeStampGenerator2 = new DialogTimeStampGenerator2(this);
    formTimeStampList = 0;
    serverConnected = false;
    myCompetition = 0;
    publisher = 0;
    udpSocket = 0;

    boxStatesView = new BoxStatesView(&boxStates, 0);
    if(boxStatesView){
        ui->scrollArea_boxStates->setWidget(boxStatesView);
    }else{
        qDebug("Error on creating boxStatesView");
        qApp->exit();
    }

    connect(dialogNewCompetition, SIGNAL(accepted()), this, SLOT(newCompetition()));

    // show default images on the flipdot displays
    QImage im1(":/flipdot1_default.bmp");
    ui->flipdotDisplayWidget_1->setImage(im1);
    QImage im2(":/flipdot2_default.bmp");
    ui->flipdotDisplayWidget_2->setImage(im2);

    connect(ui->actionAboutQt, SIGNAL(triggered()), qApp, SLOT(aboutQt()));
    connect(&boxStates, SIGNAL(receivedNewTimeStamp(TimeStamp*)), this, SLOT(addNewTimeStamp(TimeStamp*)));

    renderer = new DisplayRenderer(ui->flipdotDisplayWidget_1, ui->flipdotDisplayWidget_2, dialogDisplaySettings, this);
    if(renderer == 0){
        qDebug("Error on creating renderer-object in MainWindow-constructor");
        qApp->exit();
    }

    publisher = new Publisher(this);
    if(publisher == 0){
        qDebug("Error on creating publisher-object in MainWindow-constructor");
        qApp->exit();
    }

    connect(publisher, SIGNAL(publishRunNow(RunData*)), renderer, SLOT(render(RunData*)));
    connect(renderer, SIGNAL(sendDatagram(QByteArray)), this, SLOT(sendPC2TB_Packet(QByteArray)));
    connect(dialogDisplaySettings->getRenderer(), SIGNAL(sendDatagram(QByteArray)), this, SLOT(sendPC2TB_Packet(QByteArray)));

    // show second screen (public)
    publicWidget = new PublicWidget();
    if(publisher == 0){
        qDebug("Error on creating publicWidget in MainWindow-constructor");
        qApp->exit();
    }
    QRect screenres = QApplication::desktop()->screenGeometry(1); // get coordinates of the external monitor
    publicWidget->move(QPoint(screenres.x(), screenres.y()));
    //publicWidget->showFullScreen();
    publicWidget->showMaximized();
    publicWidget->setWindowTitle(tr("Öffentlicher Monitor")); // German: "Public monitor"
    // use closeEvent for closing this widget

    currentFile = QFileInfo(set.value(SS_RECENT_PATH).toString() + "/"); // set current file to the recent path, with no filename

    newCompetition(); // create new competition with standard values from the dialogNewCompetition

    ui->infoscreen->clear();

    connect(ui->timeline, SIGNAL(selectionChanged(TimeStamp*)), ui->tableWidget_main, SLOT(selectRun(TimeStamp*)));
    connect(ui->tableWidget_main, SIGNAL(runSelectionChanged(RunData*)), ui->timeline, SLOT(selectRun(RunData*)));
    connect(ui->tableWidget_main, SIGNAL(selectTimeStamp(TimeStamp*)), ui->timeline, SLOT(selectTimeStamp(TimeStamp*)));
    connect(ui->timeline, SIGNAL(selectionChanged(TimeStamp*)), ui->tableWidget_timeStamps, SLOT(selectTimeStamp(TimeStamp*)));
    connect(&assignmentManager, SIGNAL(unassignedTimeStamp(TimeStamp*)), ui->tableWidget_timeStamps, SLOT(selectTimeStamp(TimeStamp*)));
    connect(ui->tableWidget_timeStamps, SIGNAL(selectionChanged(TimeStamp*)), ui->timeline, SLOT(selectTimeStamp(TimeStamp*)));
    connect(ui->tableWidget_timeStamps, SIGNAL(assign()), this, SLOT(on_pushButton_assignTime_clicked()));
    connect(this, SIGNAL(showFoundRun(RunData*)), ui->tableWidget_main, SLOT(selectRun(RunData*)));
    connect(this, SIGNAL(showFoundRun(RunData*)), ui->timeline, SLOT(selectRun(RunData*)));
    connect(&backupTimer, SIGNAL(timeout()), this, SLOT(backupCurrentCompetition()));

    ui->tableWidget_timeStamps->init();
    ui->tableWidget_main->regenerateTable();

    manualTrigger = new QShortcut(QKeySequence("CTRL+Space"), this);
    connect(manualTrigger, SIGNAL(activated()), this, SLOT(on_pushButton_manualTrigger_clicked()));

#ifndef Q_OS_WIN
    connect(&fdisk, SIGNAL(finished()), this, SLOT(writeBetweenRating()));
#endif

    setOnlineMode(false);

    ui->infoscreen->appendInfo(tr("Willkommen!")); // German: "Welcome!"

    // show welcome-screen:
    WelcomeScreen* w = new WelcomeScreen(this);
    w->show();
}
double Zncc::computeTpl(image::Image const& im1_, image::Image const& im2_, float const* weightMatrix)
{
    // preconds
    JFR_PRECOND( im1_.depth() == depth, "Image 1 depth is different from the template parameter" );
    JFR_PRECOND( im2_.depth() == depth, "Image 2 depth is different from the template parameter" );
    JFR_PRECOND( im1_.channels() == im2_.channels(), "The channels number of both images are different" );
    JFR_PRECOND( !useWeightMatrix || weightMatrix, "Template parameter tells to use weightMatrix but no one is given" );

    // adjust ROIs to match size, assuming that it is reduced when set out of the image
    // FIXME weightMatrix should be a cv::Mat in order to have a ROI too, and to adjust it
    cv::Size size1; cv::Rect roi1 = im1_.getROI(size1);
    cv::Size size2; cv::Rect roi2 = im2_.getROI(size2);
    int dw = roi1.width - roi2.width, dh = roi1.height - roi2.height;
    if (dw != 0)
    {
        cv::Rect &roiA = (dw<0 ? roi1 : roi2), &roiB = (dw<0 ? roi2 : roi1);
        cv::Size &sizeA = (dw<0 ? size1 : size2);
        if (roiA.x == 0) { roiB.x += dw; roiB.width -= dw; }
        else if (roiA.x+roiA.width == sizeA.width) { roiB.width -= dw; }
    }
    if (dh != 0)
    {
        cv::Rect &roiA = (dh<0 ? roi1 : roi2), &roiB = (dh<0 ? roi2 : roi1);
        cv::Size &sizeA = (dh<0 ? size1 : size2);
        if (roiA.y == 0) { roiB.y += dh; roiB.height -= dh; }
        else if (roiA.y+roiA.height == sizeA.height) { roiB.height -= dh; }
    }
    image::Image im1(im1_); im1.setROI(roi1);
    image::Image im2(im2_); im2.setROI(roi2);

    // some variables initialization
    int height = im1.height();
    int width = im1.width();
    int step1 = im1.step1() - width;
    int step2 = im2.step1() - width;

    double mean1 = 0., mean2 = 0.;
    double sigma1 = 0., sigma2 = 0., sigma12 = 0.;
    double zncc_sum = 0.;
    double zncc_count = 0.;
    double zncc_total = 0.;

    worktype const* im1ptr = reinterpret_cast<worktype const*>(im1.data());
    worktype const* im2ptr = reinterpret_cast<worktype const*>(im2.data());
    float const* wptr = weightMatrix;
    double w;

    // start the loops
    for(int i = 0; i < height; ++i)
    {
        for(int j = 0; j < width; ++j)
        {
            worktype im1v = *(im1ptr++);
            worktype im2v = *(im2ptr++);
            if (useWeightMatrix) w = *(wptr++); else w = 1;
            if (useBornes) zncc_total += w;
            //std::cout << "will correl ? " << useBornes << ", " << (int)im1v << ", " << (int)im2v << std::endl;
            if (!useBornes || (im1v != borneinf && im1v != bornesup && im2v != borneinf && im2v != bornesup))
            {
                //std::cout << "correl one pixel" << std::endl;
#if 0
                double im1vw, im2vw;
                if (useWeightMatrix) { im1vw = im1v * w; im2vw = im2v * w; }
                else { im1vw = im1v; im2vw = im2v; }
                zncc_count += w;
                mean1 += im1vw;
                mean2 += im2vw;
                sigma1 += im1v * im1vw;
                sigma2 += im2v * im2vw;
                zncc_sum += im1v * im2vw;
#else
                zncc_count += w;
                mean1 += im1v * w;
                mean2 += im2v * w;
                sigma1 += im1v * im1v * w;
                sigma2 += im2v * im2v * w;
                zncc_sum += im1v * im2v * w;
#endif
            }
        }
        im1ptr += step1;
        im2ptr += step2;
    }

    if (useBornes)
        if (zncc_count / zncc_total < 0.75)
        {
            /*std::cout << "zncc failed: " << zncc_count << "," << zncc_total << std::endl;*/
            return -3;
        }

    // finish
    mean1 /= zncc_count;
    mean2 /= zncc_count;
    sigma1 = sigma1/zncc_count - mean1*mean1;
    sigma2 = sigma2/zncc_count - mean2*mean2;
    sigma1 = sigma1 > 0.0 ? sqrt(sigma1) : 0.0; // test for numerical rounding errors to avoid nan
    sigma2 = sigma2 > 0.0 ? sqrt(sigma2) : 0.0;
    sigma12 = sigma1*sigma2;

    // std::cout << "normal: zncc_sum " << zncc_sum << ", count " << zncc_count << ", mean12 " << mean1*mean2 << ", sigma12 " << sigma1*sigma2 << std::endl;
    zncc_sum = (sigma12 < 1e-6 ? -1 : (zncc_sum/zncc_count - mean1*mean2) / sigma12);

    JFR_ASSERT(zncc_sum >= -1.01, "");
    return zncc_sum;
}
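Written out, the accumulators above compute the usual (optionally weighted) zero-mean normalized cross-correlation; with weights w_{ij}, N = \sum_{ij} w_{ij} taken over the pixels that pass the bounds test, and pixel values a_{ij}, b_{ij}:

\[
\mathrm{ZNCC}(a,b) = \frac{\tfrac{1}{N}\sum_{ij} w_{ij}\,a_{ij}b_{ij} - \bar a\,\bar b}{\sigma_a\,\sigma_b},
\qquad
\bar a = \tfrac{1}{N}\sum_{ij} w_{ij}\,a_{ij},
\quad
\sigma_a^2 = \tfrac{1}{N}\sum_{ij} w_{ij}\,a_{ij}^2 - \bar a^2 ,
\]

with the function returning -1 when the denominator is numerically zero and -3 when fewer than 75% of the (weighted) pixels are usable, exactly as in the code above.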
CHECK(im == im);
CHECK_FALSE(im == im2);
CHECK_FALSE(im2 == im3);
CHECK(im < im3);
CHECK_FALSE(im < im2);

// Check that width is correct
CHECK(im.width() == 4);
CHECK(im2.width() == 4);

// Check that height is correct
CHECK(im.height() == 4);
CHECK(im2.height() == 4);

CHECK(im(0,0) == 0);
CHECK(im2(0,0) == 0);
im2(0,0) = 1;
CHECK(im2(0,0) == 1);
im2.set(514);
CHECK(im2(0,0) == 514);
CHECK(im2(1,1) == 514);

// Check that size is correct
CHECK(im.size() == 32);
CHECK(im2.size() == 32);

// Check that row_size is correct
CHECK(im.row_size() == 8);
CHECK(im2.row_size() == 8);

// Check that get_premultiplied is correct
int main(int argc, char* argv[])
{
    std::vector<Mat<float> > mse;

    // Neural network settings:
    Topology topo;
    unsigned int nbrneurons = 25;
    unsigned int nbrlayer = 1;
    unsigned int nbrinput = width*height;
    unsigned int nbroutput = 10;

    //topo.push_back(nbrinput, NTNONE);      // input layer
    topo.push_back(nbrinput, NTSIGMOID);     // input layer
    //topo.push_back(nbrneurons, NTSIGMOID);
    topo.push_back(15, NTSIGMOID);
    //topo.push_back(nbroutput, NTSOFTMAX);  // linear output
    topo.push_back(nbroutput, NTSIGMOID);    // linear output

    NN<float> nn(topo);
    nn.learning = false;

    //------------------------------
    // DATASET SETTINGS:
    report.open(report_fn.c_str(), ios::out);
    image.open(training_image_fn.c_str(), ios::in | ios::binary);  // Binary image file
    label.open(training_label_fn.c_str(), ios::in | ios::binary);  // Binary label file

    // Reading file headers
    char number;
    for (int i = 1; i <= 16; ++i) {
        image.read(&number, sizeof(char));
    }
    for (int i = 1; i <= 8; ++i) {
        label.read(&number, sizeof(char));
    }

    //------------------------------
    // checking rotation:
    Mat<float> im1(8, 8, (char)1);
    Mat<float> im2( rotate(im1, PI/2.0f) );
    im1.afficher();
    im2.afficher();

    //--------------------------------
    // checking arctan:
    float y = -4.0f;
    float x = 4.0f;
    std::cout << arctan(y, x)*180.0f/(PI) << std::endl;

    //--------------------------------------------------
    // checking reading:
    char labelval = 0;
    float theta = PI/2;
    im1 = inputMNIST(labelval);
    im2 = rotate(im1, theta);
    im2.afficher();
    std::cout << "Rotation of : " << theta*180.0f/PI << std::endl;

    //---------------------------------------------------

    int iteration = 25000;
    int offx = 2;
    int offy = 2;
    int size = 28;
    int countSuccess = 0;

    while( iteration )
    {
        Mat<float> rotatedinput( inputMNIST(labelval) );
        // let us choose the rotation:
        float theta = ((float)(rand()%360))*PI/180.0f;
        // let us apply it:
        rotatedinput = extract( rotate(rotatedinput, theta), offx, offy, offx+(size-1), offy+(size-1) );

        Mat<float> input( reshapeV( rotatedinput ) );
        Mat<float> target( 0.0f, 10, 1);
        target.set( 1.0f, labelval+1, 1);

        if(labelval < 9)
        {
            Mat<float> output( nn.feedForward( input) );
            int idmax = idmin( (-1.0f)*output).get(1,1);

            transpose( operatorL(target, output) ).afficher();
            std::cout << " LEARNING ITERATION : " << iteration << " ; IDMAX = " << idmax << std::endl;

            nn.backProp(target);
            //nn.backPropCrossEntropy(target);

            // counting:
            if(idmax == labelval+1)
            {
                countSuccess++;
            }

            //-------------------
            if( iteration % 1000 == 0)
            {
                std::cout << " TEST : " << countSuccess << " / " << 1000 << std::endl;
                mse.push_back(Mat<float>((float)countSuccess, 1, 1));
                writeInFile(std::string("./mse.txt"), mse);
                countSuccess = 0;
            }

            iteration--;
        }
    }

    std::cout << " VALIDATION TEST : in progress.." << std::endl;

    iteration = 1000;
    int success = 0;
    while( iteration )
    {
        Mat<float> rotatedinput( inputMNIST(labelval) );
        // let us choose the rotation:
        //float theta = rand()%360;
        float theta = ((float)(rand()%360))*PI/180.0f;
        // let us apply it:
        rotatedinput = extract( rotate(rotatedinput, theta), offx, offy, offx+(size-1), offy+(size-1) );

        Mat<float> input( reshapeV( rotatedinput ) );
        Mat<float> target( 0.0f, 10, 1);
        target.set( 1.0f, labelval+1, 1);

        if(labelval < 5)
        {
            Mat<float> output( nn.feedForward( input) );
            int idmax = idmin( (-1.0f)*output).get(1,1);

            transpose(output).afficher();
            std::cout << " ITERATION : " << iteration << " ; IDMAX = " << idmax << std::endl;

            if(idmax == labelval+1)
            {
                success++;
            }

            iteration--;
        }
    }

    std::cout << "VALIDATION TEST : " << success << " / 1000." << std::endl;

    report.close();
    label.close();
    image.close();

    nn.save(std::string("neuralnetworksDIGITROTATED"));

    return 0;
}
/** Displays the image view with its current image/texts. */
void CAImageSelector::display( bool active )
{
    CAImageView::display( active );

    CL_Rect crAll( 0, 0, CA_APP->width, CA_APP->height );
    CL_Rect crImage( left, top, right, bottom );
    CA_APP->graphicContext->set_cliprect( crImage );

    // Animate:
    //
    float diff = (float)newImage - currentImage;
    float numImages2 = (float)numImages/2.0;
    bool forward = false;

    // Move forward or back?
    //
    if( (diff < numImages2 && diff > 0.0) || (diff < -numImages2) )
    {
        forward = true;
    }

    // Correct frame difference:
    //
    if( diff < -numImages2 ) diff += numImages;
    if( diff > numImages2 ) diff -= numImages;

    if( forward ) currentImage += fabs( diff ) / 6.0;
    else          currentImage -= fabs( diff ) / 6.0;

    if( currentImage < 0.0 ) currentImage += (float)numImages;
    if( currentImage > (float)numImages ) currentImage -= (float)numImages;

    int image1 = (int)floor( currentImage );
    int image2 = (int)ceil( currentImage );
    if( image2 >= numImages ) image2 = 0;
    float rest = currentImage - image1;

    int center;
    CL_SpriteDescription im1_desc = CL_SpriteDescription();
    CL_SpriteDescription im2_desc = CL_SpriteDescription();
    im1_desc.add_frame( image[image1].image );
    im2_desc.add_frame( image[image2].image );
    CL_Sprite im1( *CA_APP->graphicContext, im1_desc );
    CL_Sprite im2( *CA_APP->graphicContext, im2_desc );

    if( direction==Horizontal )
    {
        center = (left+right)/2;
        int image1Pos = (int)(center - rest*width - image[image1].image.get_width()/2);
        im1.draw( *CA_APP->graphicContext, image1Pos, top+barHeight );
        im2.draw( *CA_APP->graphicContext, image1Pos + width, top+barHeight );
    }
    else
    {
        center = (top+bottom)/2;
        int image1Pos = (int)(center - rest*height - image[image1].image.get_height()/2);
        im1.draw( *CA_APP->graphicContext, (left+right-image[image1].image.get_width())/2, image1Pos );
        im2.draw( *CA_APP->graphicContext, (left+right-image[image1].image.get_width())/2, image1Pos + height );
    }

    displayArrows( active );
    CA_APP->graphicContext->set_cliprect( crAll );
}