void trainTree(string treeFile, string trainDir)
{
    SerializeHelper sHelp = SerializeHelper();
    ImageReader imReader = ImageReader();
    vector<Mat> depthImages = imReader.readDepthImages(trainDir);
    vector<Mat> classifiedImages = imReader.readClassifiedImages(trainDir);

    int times = clock();
    // Forest parameters: 6 classes, depth 15, 500 features, 100 thresholds,
    // 0.05 subsampling, 10 min samples per node, 0 background penalty,
    // feature range, threshold range.
    Forest forest = Forest(6, 15, 500, 100, 0.05, 10, 0,
                           pair<double, double>(150, 150),
                           pair<double, double>(-255, 255));
    // 150 images per tree; three trees are built at once.
    forest.makeTrees(depthImages, classifiedImages, 150, 3);
    int timed = clock();
    cout << "Making trees took " << (timed - times) << " ticks.\n" << endl;

    //Forest forest = sHelp.loadForest("MediumTree100F1000.txt");
    sHelp.serializeForest(forest, treeFile);
    string graphvix = forest.getTrees().at(0)->graphvizPrint(-1, NULL);
}
std::vector<float> ShapeEstimation::computePixelLuminance(ImageReader imageIn, ImageReader mask, float &sigma){
    int rows = imageIn.getImageHeight();
    int cols = imageIn.getImageWidth();
    std::vector<float> pixelLuminances;
    int pixelsInObject = 0;
    float objectLuminanceSum = 0.0f;

    for(int row = 0; row < rows; row++){
        for(int col = 0; col < cols; col++){
            QColor imageColor = QColor(imageIn.pixelAt(row, col));
            QColor maskColor = QColor(mask.pixelAt(row, col));
            if(maskColor.red() > 150){
                pixelsInObject += 1;
                float luminance = 0.213f * float(imageColor.red())
                                + 0.715f * float(imageColor.green())
                                + 0.072f * float(imageColor.blue());
                if(luminance == 0.0f){
                    luminance = 0.0001f;
                }
                pixelLuminances.push_back(luminance / 255.0f);
                objectLuminanceSum += log(luminance / 255.0f);
            } else {
                if(DEPTHMAPBACKGROUND){
                    pixelLuminances.push_back(1.0f);
                } else {
                    pixelLuminances.push_back(0.0f);
                }
            }
        }
    }
    sigma = exp(objectLuminanceSum / float(pixelsInObject));
    return pixelLuminances;
}
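For reference, the sigma returned above is the log-average (geometric mean) of the normalized object luminances, computed with Rec. 709 luma weights over the N masked pixels:

\sigma = \exp\!\left(\frac{1}{N}\sum_{p \in \text{mask}} \ln\frac{L_p}{255}\right),
\qquad
L_p = 0.213\,R_p + 0.715\,G_p + 0.072\,B_p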
void Retexture::calculateTexture(std::vector<Vector3f> T, std::vector<Vector3f> background,
                                 std::vector<Vector3f> image, std::vector<float> deltaX,
                                 std::vector<float> deltaY, std::vector<Vector3f> &result,
                                 ImageReader mask)
{
    int width = mask.getImageWidth();
    std::cout << width << std::endl;
    float cmaxAverage = 0.0f;

    for (int i = 0; i < T.size(); i++) {
        int x = i % width;
        int y = i / width;
        if(QColor(mask.pixelAt(y, x)).red() < 150) {
            // Black part of the mask, don't do anything
            result.push_back(background[i]);
            continue;
        }
        int t_x = fmod(float(x) + m_s * deltaX[i], width);
        int t_y = fmod(float(y) + m_s * deltaY[i], mask.getImageHeight());
        Vector3f t_value = T[t_y * width + t_x];
        Vector3f resultValue = (1.f - m_f) * t_value + m_f * image[i];
        float resultAverage = (resultValue[0] + resultValue[1] + resultValue[2]) / 3.0f;
        if(cmaxAverage < resultAverage){
            cmaxAverage = resultAverage;
        }
        result.push_back(resultValue);
    }
}
void LoadTextureAMT::processCPU_Texture2D(const RJNode& header)
{
    if(ji::hasChild(header, "images")) {
        const RJNode& images = ji::getChild(header, "images");
        if(images.is_array() && images.size() > 0) {
            m_target = GL_TEXTURE_2D;
            const RJNode& img = images.at(0);
            std::string imageFilePath = ji::read(img, "filePath", DV_STRING);

            // Fail loading if given extension is not supported or file is invalid.
            ImageReader* reader = getImageFileByExtension(imageFilePath);
            if(!reader || !reader->isValid()) {
                utils::deleteAndNull(reader);
                return;
            }
            m_imageReaders[0] = reader;
            m_imageGLFormat = reader->getOpenGLInternalFormat();
        }
    }
}
/**
 * prepShowImage - Save a normalized copy of an image so it can be viewed for modification
 *
 * @param self          ruby-required module (not included in the ruby method call)
 * @param rubyfilename  Name of the file to normalize
 * @param rubyoutname   Name of the output file to write
 */
extern "C" VALUE method_prepShowImage(VALUE self, VALUE rubyfilename, VALUE rubyoutname)
{
    std::string strfname( StringValueCStr( rubyfilename ) );
    std::string stroutname( StringValueCStr( rubyoutname ) );

    ImageReader imr;
    imr.prepShowImage(strfname, stroutname);
    return self;
}
/**
 * Entry point to the application
 *
 * @param argc Number of program arguments
 * @param argv Array of program arguments
 *
 * @return 0 on success, non-zero on failure
 */
int main(int argc, char* argv[])
{
    if (argc != 3) {
        cout << "Usage: " << argv[0] << " [searchTerm] [numImages]" << endl;
        return -1;
    }

    char search[] = "-s";
    char* searchTerm = argv[1];
    char num[] = "-n";
    char* numImages = argv[2];
    char* flickrFlags[] = { argv[0], search, searchTerm, num, numImages };

    Flickr flickr;
    if (flickr.init(5, flickrFlags) < 0) {
        cout << "Error initializing flickr!" << endl;
        return -1;
    }

    list<string> urlBuf;
    if (flickr.gather_images(urlBuf) < 0) {
        cout << "Error querying Flickr for images!" << endl;
        return -1;
    }

    ImageReader ir;
    TileImage tile;
    for (list<string>::iterator i = urlBuf.begin(); i != urlBuf.end(); i++) {
        string url = *i;
        string fileName = safeFilename(url);
        if (fileExists(fileName)) {
            cout << "Exists: " << fileName << endl;
            continue;
        }
        if (ir.read_tile_image(url, tile) < 0) {
            cout << "Error reading url: " << url << endl;
            continue;
        }
        try {
            Magick::Image image(tile.get_magick());
            image.write(fileName);
        } catch (Magick::Exception& ex) {
            // Catch by reference to avoid copying/slicing the exception object.
            cout << "Error saving file: " << fileName << endl;
            continue;
        }
        cout << "Saved: " << fileName << endl;
    }
    return 0;
}
// A BMP file consists of a header and the color data.
// The header stores information such as the image size, among other things.
// The BMP header has a fixed size and a standard structure,
// so we know that the image size is stored at a given position in the header.
// More about the BMP format here:
// http://www.dragonwins.com/domains/getteched/bmp/bmpfileformat.htm
// http://www.fileformat.info/format/bmp/egff.htm
// https://en.wikipedia.org/wiki/BMP_file_format
void Image::read_image(string filepath)
{
    // How to use the ready-made image loading code:
    // the line below reads the image.
    ImageReader* imagereader = new ImageReader(filepath);
    // The remaining lines do what we need, i.e. they mainly copy
    // the header and the colors into the right places in the Image object.
    width = imagereader->get_width();
    height = imagereader->get_height();
    headersize = imagereader->get_header_size();
    header = new char[headersize];

    // Copy the header (because we do not want to overwrite the original image).
    for ( int i = 0; i < headersize; i++ ) {
        header[i] = imagereader->get_header_byte(i);
    }

    // Copy the colors into the Pixel objects.
    image = new Pixel*[height];                 // dynamic array of pixel rows
    for ( int i = 0; i < height; i++ ) {
        image[i] = new Pixel[width];            // dynamic array of pixels
        for ( int j = 0; j < width; j++ ) {
            image[i][j].set_red(imagereader->get_data_byte(i, j, ImageReader::RED));
            image[i][j].set_blue(imagereader->get_data_byte(i, j, ImageReader::BLUE));
            image[i][j].set_green(imagereader->get_data_byte(i, j, ImageReader::GREEN));
        }
    }
    delete imagereader;
}
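As a minimal illustration of the fixed header layout the comments above describe, the sketch below (not part of this codebase) reads the image size directly from a BMP file. It assumes the common BITMAPINFOHEADER layout, where width and height are stored as little-endian 32-bit integers at file offsets 18 and 22, and a little-endian host; the file name is hypothetical.

// Standalone sketch: read image width/height straight from a BMP header.
#include <cstdint>
#include <fstream>
#include <iostream>

int main() {
    std::ifstream file("example.bmp", std::ios::binary);  // hypothetical file name
    if (!file) return 1;
    int32_t width = 0, height = 0;
    file.seekg(18);                                        // width lives at offset 18
    file.read(reinterpret_cast<char*>(&width), sizeof(width));
    file.read(reinterpret_cast<char*>(&height), sizeof(height));  // height follows at offset 22
    std::cout << "BMP size: " << width << " x " << height << "\n";
    return 0;
}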
void LoadTextureAMT::processCPU_CubeMap(const RJNode& header)
{
    if(ji::hasChild(header, "images")) {
        const RJNode& images = ji::getChild(header, "images");
        if(images.is_array() && images.size() > 0) {
            m_target = GL_TEXTURE_CUBE_MAP;
            for(const RJNode& img : images) {
                std::string targetStr = ji::read(img, "target", DV_STRING);
                ETextureTarget target = textureTarget::fromString(targetStr);
                std::string filePath = ji::read(img, "filePath", DV_STRING);

                ImageReader* reader = getImageFileByExtension(filePath);
                if(!reader || !reader->isValid()) {
                    // Skip this face if the extension is unsupported or the file is invalid.
                    utils::deleteAndNull(reader);
                    continue;
                }
                switch(target) {
                case ETextureTarget::CubeMapNegativeX: m_imageReaders[0] = reader; break;
                case ETextureTarget::CubeMapPositiveX: m_imageReaders[1] = reader; break;
                case ETextureTarget::CubeMapNegativeY: m_imageReaders[2] = reader; break;
                case ETextureTarget::CubeMapPositiveY: m_imageReaders[3] = reader; break;
                case ETextureTarget::CubeMapNegativeZ: m_imageReaders[4] = reader; break;
                case ETextureTarget::CubeMapPositiveZ: m_imageReaders[5] = reader; break;
                default: assert(0); break;
                }
            }
        }
    }
}
/**
 * method_readFiles - main entry method for Imgproc. Reads the files and returns the results.
 *
 * @param self ruby-required module (not included in ruby method call)
 * @param rubyfilenames The ruby-formatted string array of filenames
 *        (including extension and path!!!)
 * @param rubynumQ ruby-formatted number of questions on the test
 * @param rubyReadname ruby bool value determining whether the name should be read
 */
extern "C" VALUE method_readFiles(VALUE self, VALUE rubyfilenames, VALUE rubynumQ, VALUE rubyReadname)
{
    int numQ = NUM2INT( rubynumQ );
    int numFiles = int( RARRAY_LEN( rubyfilenames ) );
    bool readName = RTEST( rubyReadname );

    // Convert the ruby string array into a vector of C++ filenames.
    std::vector<std::string> filenames;
    for( long i = 0; i < numFiles; i++ ) {
        VALUE rubyfn = rb_ary_entry(rubyfilenames, i);
        std::string strfname( StringValueCStr( rubyfn ) );
        filenames.push_back( strfname );
    }

    ImageReader imr;
    std::vector< std::vector< std::vector< float > > > results( numFiles );

    // asynchronize image reads
    //ResGroup group;
    //vector<const ResThread::ResultValue*> results;
    vector< vector< float > > res;
    for( int i = 0; i < numFiles; i++ ) {
        //group.addThread( filenames[i], numQ, readName );
        res = imr.readImage( filenames[i], numQ, readName );
        results[i] = res;
    }
    //group.join();
    //group.getResults( results );
    assert( numFiles == int(results.size()) );

    VALUE rbStudents = rb_ary_new();
    // Go through each student
    for( int i = 0; i < numFiles; i++ ) {
        // Go through each of the student's answers
        VALUE rbStudentAnswers = rb_ary_new();
        int sz = int( results[i].size() );
        for( int k = 0; k < sz; k++ ) {
            int wsize = int( (results[i])[k].size() );
            VALUE rbStudentAnswerComponents = rb_ary_new();
            for( int w = 0; w < wsize; w++ ) {
                VALUE ansDouble = DBL2NUM( (results[i])[k][w] );
                rb_ary_push( rbStudentAnswerComponents, ansDouble );
            }
            rb_ary_push( rbStudentAnswers, rbStudentAnswerComponents );
        }
        rb_ary_push( rbStudents, rbStudentAnswers );
    }
    return rbStudents;
}
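A minimal sketch of how these extern "C" entry points might be registered with the Ruby C API. The module name "Imgproc" and the init symbol Init_imgproc are assumptions based on the doc comments above, not taken from the project.

// Hypothetical extension init (module name and init symbol are assumed).
#include <ruby.h>

extern "C" VALUE method_readFiles(VALUE self, VALUE rubyfilenames, VALUE rubynumQ, VALUE rubyReadname);
extern "C" VALUE method_prepShowImage(VALUE self, VALUE rubyfilename, VALUE rubyoutname);

extern "C" void Init_imgproc()
{
    VALUE mImgproc = rb_define_module("Imgproc");
    // argc counts the ruby-visible arguments (self is passed implicitly).
    rb_define_module_function(mImgproc, "readFiles",
                              RUBY_METHOD_FUNC(method_readFiles), 3);
    rb_define_module_function(mImgproc, "prepShowImage",
                              RUBY_METHOD_FUNC(method_prepShowImage), 2);
}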
int main(int argc, const char *argv[])
{
    const std::string str = "~/Desktop/brain/brain_001.dcm";
    ImageReader reader;
    reader.SetFileName(str.c_str());
    if(!reader.Read()) {
        std::cerr << "Error: could not read " << str << std::endl;
        return 1;
    }
    // Query the image only after a successful read.
    Image &image = reader.GetImage();
    unsigned int ndim = image.GetNumberOfDimensions();
    std::cout << ndim << std::endl;
    return 0;
}
static PyObject *
image_read(PyObject *self, PyObject *args)
{
    PyObject *pyim;
    PyObject *pyFP;
    int file_type;

    if(!PyArg_ParseTuple(args, "OOi", &pyim, &pyFP, &file_type))
    {
        return NULL;
    }
    if(!PyFile_Check(pyFP))
    {
        PyErr_SetString(PyExc_TypeError, "Expected a file object");
        return NULL;
    }

    image *i = (image *)PyCObject_AsVoidPtr(pyim);
    FILE *fp = PyFile_AsFile(pyFP);
    if(!fp || !i)
    {
        PyErr_SetString(PyExc_ValueError, "Bad arguments");
        return NULL;
    }

    ImageReader *reader = ImageReader::create((image_file_t)file_type, fp, i);
    //if(!reader->ok())
    //{
    //    PyErr_SetString(PyExc_IOError, "Couldn't create image reader");
    //    delete reader;
    //    return NULL;
    //}
    if(!reader->read())
    {
        PyErr_SetString(PyExc_IOError, "Couldn't read image contents");
        delete reader;
        return NULL;
    }
    delete reader;

    Py_INCREF(Py_None);
    return Py_None;
}
void imageLoader::run(){
    mutex.lock();
    while( image ){
        //Load data
        QString filepath = file;
        auto loading = std::move( image );
        mutex.unlock();
        emit image_fetched();

        ImageReader reader; //TODO: initialize in constructor?
        reader.read( *loading, filepath );
        emit image_loaded( loading.get() );

        mutex.lock(); //Make sure to lock it again, as we need it for the while loop check
    }
    mutex.unlock(); //Make sure to unlock it when the while loop exits
}
/**
Function responsible for preparing the ROM and ROFS image SID data.

@internalComponent
@released

@param aImgVsExeStatus - Global integrated container which holds the image, exe and attribute value status.
*/
void SidChecker::Check(ImgVsExeStatus& aImgVsExeStatus)
{
    ImageReaderPtrList::iterator begin = iImageReaderList.begin();
    ImageReaderPtrList::iterator end = iImageReaderList.end();
    ExeVsIdDataMap::iterator exeBegin;
    ExeVsIdDataMap::iterator exeEnd;
    ExeVsIdDataMap exeVsIdDataMap;
    ImageReader* imageReader = KNull;
    String imageName;

    while(begin != end)
    {
        imageReader = *begin;
        imageName = imageReader->ImageName();
        ExceptionReporter(GATHERINGIDDATA, (char*)KSid.c_str(), (char*)imageName.c_str()).Log();
        imageReader->PrepareExeVsIdMap();
        exeVsIdDataMap = imageReader->GetExeVsIdMap();
        exeBegin = exeVsIdDataMap.begin();
        exeEnd = exeVsIdDataMap.end();

        if((aImgVsExeStatus[imageName].size() == 0)
            || (aImgVsExeStatus[imageName][exeBegin->first].iIdData == KNull))
        {
            while(exeBegin != exeEnd)
            {
                if(!iSidAll)
                {
                    if(ReaderUtil::IsExe(&exeBegin->second->iUid))
                    {
                        iSidVsExeMap.insert(std::make_pair(exeBegin->second->iSid, exeBegin->first));
                    }
                }
                else
                {
                    iSidVsExeMap.insert(std::make_pair(exeBegin->second->iSid, exeBegin->first));
                }
                aImgVsExeStatus[imageName][exeBegin->first].iIdData = exeBegin->second;
                aImgVsExeStatus[imageName][exeBegin->first].iExeName = exeBegin->first;
                ++exeBegin;
            }
        }
        ++begin;
    }
}
void ShapeEstimation::cropMask(ImageReader mask, std::vector<float> &pixelLuminances){
    int rows = mask.getImageHeight();
    int cols = mask.getImageWidth();
    for(int row = 0; row < rows; row++){
        for(int col = 0; col < cols; col++){
            QColor maskColor = QColor(mask.pixelAt(row, col));
            int index = mask.indexAt(row, col);
            // Only pixels outside the mask (red <= 150) are overwritten.
            if(maskColor.red() <= 150){
                if(DEPTHMAPBACKGROUND){
                    pixelLuminances[index] = 1.0f;
                } else {
                    pixelLuminances[index] = 0.0f;
                }
            }
        }
    }
}
void runPrediction(string treeFile, string testDir, bool writeToFile, string outputFileName)
{
    SerializeHelper sHelp = SerializeHelper();
    Forest forest = sHelp.loadForest(treeFile);
    string graphvix = forest.getTrees().at(0)->graphvizPrint(-1, NULL);
    ofstream graphvizFile("graphvizForest.txt");
    graphvizFile << graphvix;

    ImageReader imReader = ImageReader();
    vector<Mat> testDepthImages = imReader.readTrainingImages(testDir);

    for(int k = 0; k < testDepthImages.size(); k++)
    {
        Mat classified = forest.classifyImage(testDepthImages.at(k));
        std::ostringstream path;
        path << outputFileName << "/" << k + 1 << "Y.png";
        string windowName = path.str();
        //namedWindow( windowName, WINDOW_AUTOSIZE );
        //Mat cimg = convertToColorForBaby(classified);
        if(writeToFile)
        {
            //imwrite(windowName, cimg);
            imwrite(windowName, classified);
        }
        //imshow(windowName, cimg);
        //imshow(windowName, classified);
        waitKey(30);
    }
}
void loadImages(void)
{
    ImageReader myReader;

    //-------------------------------------------------------------
    //  We read the left image, right image, and disparity map
    //-------------------------------------------------------------
    myReader.setSource(const_cast<char*>(leftImagePath.c_str()));
    gMyLeftImage = myReader.getFrame(false);        // left image

    myReader.setSource(const_cast<char*>(righttImagePath.c_str()));
    gMyRightImage = myReader.getFrame(false);       // right image

    //  Normally, I should be able to read the disparity map directly into my
    //  DepthMap's raster.  Then I could render the map as a gray-level image
    //  in one subwindow and draw it as a mesh in another.  However, at this
    //  point, the "readInto" component of the ImageReader class has not been
    //  implemented, so I had to separate the two operations.
    myReader.setSource(const_cast<char*>(disparityImagePath.c_str()));
    gMyDisparity = static_cast<RasterImage_gray*>(myReader.getFrame(false));    // disparity map

    // initially we display the right image in the upper-right subwindow
    gUpperRightDisplay = gMyRightImage;
}
bool ImageData::load(ImageReader &reader)
{
    if (reader.error())
        return false;

    resize(reader.width(), reader.height(), reader.pixelBytes());
    Log::debug("  Height: %d Width: %d Bpp: %d\n", width, height, bpp);

    /*
     * Copy the row data to the image buffer in reverse Y order, suitable
     * for texture upload.
     */
    unsigned char *ptr = &pixels[bpp * width * (height - 1)];
    while (reader.nextRow(ptr))
        ptr -= bpp * width;

    return !reader.error();
}
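A self-contained sketch of the same reverse-Y copy, with illustrative names only (not part of this codebase): rows are written from the end of a flat buffer toward the start, so source row 0 ends up as the bottom row, which is how texture data is typically expected when the texture origin is at the bottom-left.

// Copy an image stored top-down into a bottom-up flat buffer.
#include <cstring>
#include <vector>

void copyRowsBottomUp(const unsigned char* src, std::vector<unsigned char>& dst,
                      int width, int height, int bpp)
{
    const size_t rowBytes = static_cast<size_t>(bpp) * width;
    dst.resize(rowBytes * height);
    // Start at the last row of the destination and walk backwards.
    unsigned char* ptr = &dst[rowBytes * (height - 1)];
    for (int y = 0; y < height; ++y) {
        std::memcpy(ptr, src + rowBytes * y, rowBytes);
        ptr -= rowBytes;
    }
}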
int ForceRGBCommand::run(const char** args, unsigned int numArgs)
{
    if ( numArgs < 2 ) {
        fprintf(stderr, "Usage: ImageTool forcergb <input> <output> [-filequality 0-100] [-pad N,N,N]\n");
        fprintf(stderr, "\te.g. ImageTool forcergb input.jpg output.jpg\n");
        return IMAGECORE_INVALID_USAGE;
    }

    int ret = open(args[0], args[1]);
    if (ret != IMAGECORE_SUCCESS) {
        return ret;
    }

    // Defaults
    unsigned int compressionQuality = 75;

    // Optional args
    unsigned int numOptional = numArgs - 2;
    if ( numOptional > 0 ) {
        unsigned int numPairs = numOptional / 2;
        for ( unsigned int i = 0; i < numPairs; i++ ) {
            const char* argName = args[2 + i * 2 + 0];
            const char* argValue = args[2 + i * 2 + 1];
            if( strcmp(argName, "-filequality") == 0 ) {
                compressionQuality = clamp(0, 100, atoi(argValue));
            } else if( strcmp(argName, "-pad") == 0 ) {
                int ret = populateBuckets(argValue);
                if (ret != IMAGECORE_SUCCESS) {
                    return ret;
                }
            }
        }
    }

    ImageReader* reader = ImageReader::create(m_Source);
    if( reader == NULL ) {
        fprintf(stderr, "error: unknown or corrupt image format for '%s'\n", m_InputFilePath);
        return IMAGECORE_INVALID_FORMAT;
    }

    EImageFormat outputFormat = ImageWriter::formatFromExtension(args[1], reader->getFormat());
    unsigned int colorProfileSize = 0;
    reader->getColorProfile(colorProfileSize);
    if( colorProfileSize != 0 && reader->getFormat() == kImageFormat_JPEG ) {
        reader->setReadOptions(ImageReader::kReadOption_ApplyColorProfile);
        ImageRGBA* image = ImageRGBA::create(reader->getWidth(), reader->getHeight());
        if( reader->readImage(image) ) {
            ImageWriter* writer = ImageWriter::createWithFormat(kImageFormat_JPEG, m_Output);
            if (writer == NULL) {
                fprintf(stderr, "error: unable to create ImageWriter\n");
                return IMAGECORE_OUT_OF_MEMORY;
            }
            writer->setWriteOptions(ImageWriter::kWriteOption_WriteDefaultColorProfile);
            writer->setSourceReader(reader);
            writer->setQuality(compressionQuality);
            if( !writer->writeImage(image) ) {
                ret = IMAGECORE_WRITE_ERROR;
            }
            delete writer;
        } else {
            fprintf(stderr, "error: unable to read input image\n");
            ret = IMAGECORE_READ_ERROR;
        }
        delete image;
    } else {
        ImageWriter* imageWriter = ImageWriter::createWithFormat(outputFormat, m_Output);
        unsigned int writeOptions = 0;
        writeOptions |= ImageWriter::kWriteOption_WriteExifOrientation;
        writeOptions |= ImageWriter::kWriteOption_WriteDefaultColorProfile;
        if( imageWriter != NULL ) {
            imageWriter->setWriteOptions(writeOptions);
            if( imageWriter->copyLossless(reader) ) {
                ret = IMAGECORE_SUCCESS;
            } else {
                fprintf(stderr, "error: unable to perform lossless copy.\n");
                ret = IMAGECORE_INVALID_OPERATION;
            }
            delete imageWriter;
        }
    }

    delete reader;
    reader = NULL;
    close();
    return ret;
}
void Retexture::calculateMixedMaterial(std::vector<Vector3f> glass, std::vector<Vector3f> notGlass,
                                       std::vector<Vector3f> background, std::vector<Vector3f> image,
                                       std::vector<float> deltaX, std::vector<float> deltaY,
                                       std::vector<Vector3f> &result, ImageReader mask,
                                       ImageReader materialMask, ImageReader glassColors)
{
    int width = mask.getImageWidth();
    std::cout << width << std::endl;
    float cmaxAverage = 0.0f;
    std::vector<Vector3f> T = glass;

    for (int i = 0; i < T.size(); i++) {
        int x = i % width;
        int y = i / width;
        if(QColor(mask.pixelAt(y, x)).red() < 150) {
            // Black part of the mask: just keep the background.
            result.push_back(background[i]);
            continue;
        }
        // Dark pixels of the material mask use the non-glass texture,
        // everything else uses the glass texture.
        if(QColor(materialMask.pixelAt(y, x)).red() < 50
            && QColor(materialMask.pixelAt(y, x)).blue() < 50
            && QColor(materialMask.pixelAt(y, x)).green() < 50){
            T = notGlass;
            m_s = 20.0f;
        } else {
            T = glass;
            m_s = 50.0f;
        }
        int t_x = fmod(float(x) + m_s * deltaX[i], width);
        int t_y = fmod(float(y) + m_s * deltaY[i], mask.getImageHeight());
        Vector3f t_value = T[t_y * width + t_x];
        Vector3f resultValue = (1.f - m_f) * t_value + m_f * image[i];
        float resultAverage = (resultValue[0] + resultValue[1] + resultValue[2]) / 3.0f;
        if(cmaxAverage < resultAverage){
            cmaxAverage = resultAverage;
        }
        result.push_back(resultValue);
    }
    std::cout << cmaxAverage << std::endl;

    T = glass;
    for (int i = 0; i < T.size(); i++) {
        int x = i % width;
        int y = i / width;
        if(QColor(mask.pixelAt(y, x)).red() < 150) {
            // Black part of the mask, don't do anything
            continue;
        }
        if(QColor(materialMask.pixelAt(y, x)).red() < 50
            && QColor(materialMask.pixelAt(y, x)).blue() < 50
            && QColor(materialMask.pixelAt(y, x)).green() < 50) {
            // Black part of the material mask, don't do anything
            continue;
        }
        QColor stainedGlass = QColor(glassColors.pixelAt(y, x));
        Vector3f darkness = Vector3f(2.0f, 2.0f, 2.0f);
        // Vector3f color = Vector3f(float(stainedGlass.red())/255.0f, float(stainedGlass.green())/255.0f, float(stainedGlass.blue())/255.0f);
        Vector3f color = Vector3f(1.0f, 1.0f, 1.0f);
        Vector3f resultValue = result[i];
        resultValue[0] = fmin(pow((resultValue[0] * color[0] / cmaxAverage), darkness[0]) * 255.0f, 255.0f);
        resultValue[1] = fmin(pow((resultValue[1] * color[1] / cmaxAverage), darkness[1]) * 255.0f, 255.0f);
        resultValue[2] = fmin(pow((resultValue[2] * color[2] / cmaxAverage), darkness[2]) * 255.0f, 255.0f);
        result[i] = resultValue;
    }
}
int _tmain(int argc, _TCHAR* argv[])
{
    ImageReader imgReader;
    imgReader.LoadFile("roadresult.tif");

    //cv::Mat img, image;
    //try {
    //    image = cv::imread("roadresult.tif", CV_LOAD_IMAGE_UNCHANGED );
    //}
    //catch (cv::Exception e) {
    //    cout << e.err << std::endl;
    //}
    //cv::cvtColor(image, img, CV_RGB2GRAY);
    //int iType = img.type();

    int iType = imgReader.m_data.depth();
    if (!imgReader.m_data.data)   // Check for invalid input
    {
        cout << "Could not open or find the image" << std::endl;
        return -1;
    }

    //cv::Mat src;
    //imgReader.m_data.convertTo(src, CV_8U);
    //cv::Mat src_gray, threshold_output;
    // need 3 or 4 channel
    //cv::cvtColor(src, src_gray, CV_BGR2GRAY);
    //cv::blur(imgReader.m_data, src_gray, Size(1, 1));

    std::vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;

    // 8uc1 32fc1
    //threshold(imgReader.m_data, imgReader.m_data, 100, 255, THRESH_BINARY);
    // need 8uc1, 32sc1
    //int iii = CV_32SC1;
    //cv::Mat helpframe2;
    //imgReader.m_data.convertTo(helpframe2, CV_32S);
    //iii = helpframe2.depth();

    findContours(imgReader.m_data, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    vector<vector<Point>> contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    //vector<Point2f> center(contours.size());
    //vector<float> radius(contours.size());
    vector<MyStruct> angle_rect;

    for (int i = 0; i < (int)contours.size(); i++)
    {
        approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 3, true);
        boundRect[i] = boundingRect(cv::Mat(contours_poly[i]));
        //minEnclosingCircle((Mat)contours_poly[i], center[i], radius[i]);

        MyStruct ms;
        if (contours_poly[i].size() > 1)
        {
            Point tl, tr, bl, br;
            tl = boundRect[i].tl();
            tr = tl;
            tr.x = tl.x + boundRect[i].width - 1;
            br = boundRect[i].br();
            br.x -= 1;
            br.y -= 1;
            bl = br;
            bl.x = br.x - boundRect[i].width + 1;

            vector<Point>::iterator iter1 = std::find(contours_poly[i].begin(), contours_poly[i].end(), tr);
            vector<Point>::iterator iter2 = std::find(contours_poly[i].begin(), contours_poly[i].end(), bl);
            if (iter1 != contours_poly[i].end() || iter2 != contours_poly[i].end())
            {
                ms.type = 1;
                ms.rc = boundRect[i];
            }
            else
            {
                ms.type = 0;
                ms.rc = boundRect[i];
            }
        }
        else
        {
            ms.rc = boundRect[i];
            ms.type = 0;
        }
        angle_rect.push_back(ms);
    }

    Mat drawing = Mat::zeros(imgReader.m_data.size(), CV_8UC3);
    RNG rng(12345);
    for (int i = 0; i < (int)contours.size(); i++)
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        drawContours( drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
        //rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 1, 8, 0 );
        //circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
    }

    // remove the big ones
    std::vector<MyStruct> calcRect;
    for (std::vector<MyStruct>::iterator iter = angle_rect.begin(); iter != angle_rect.end(); iter++)
    {
        MyStruct ms = *iter;
        bool bValid = true;
        for (int i = 0; i < (int)angle_rect.size(); i++)
        {
            if (ms.rc == angle_rect[i].rc)
                continue;
            if ((ms.rc & angle_rect[i].rc) == angle_rect[i].rc)  // intersection is boundRect[i], so boundRect is inside rc, rc is invalid
            {
                bValid = false;
                break;
            }
        }
        if (bValid)
        {
            calcRect.push_back(ms);
        }
    }

    //for (int i = 0; i < (int)calcRect.size(); i++)
    //{
    //    Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
    //    //drawContours( drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
    //    rectangle( drawing, calcRect[i].tl(), calcRect[i].br(), color, 1, 8, 0 );
    //    //circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
    //}

    float angle_threshold = 3.0;
    int position_threshold = 100;

    // sort rect
    std::sort(calcRect.begin(), calcRect.end(), compareMyStruct);

    // calc
    //std::vector<Rect> campRect;
    for (int j = 0; j < (int)calcRect.size(); j++)
    {
        for (int i = 0; i < (int)calcRect.size(); i++)
        {
            if (calcRect[j].rc == calcRect[i].rc)
            {
                continue;
            }
            if (!isRectPositionValid(calcRect[j].rc, calcRect[i].rc, position_threshold)
                || !isRectAngleValid(calcRect[j], calcRect[i], angle_threshold, false))
                continue;

            // merge together
            MyStruct ms_merge;
            ms_merge.type = calcRect[j].type;
            ms_merge.rc = calcRect[j].rc | calcRect[i].rc;
            // check that the merged angle is still valid
            if (!isRectAngleValid(calcRect[j], ms_merge, angle_threshold))
                continue;
            //cout << "lalala" << endl;
            //campRect.push_back(calcRect[i]);
            calcRect[i] = calcRect[j] = ms_merge;
        }
    }
    //std::sort(calcRect.begin(), calcRect.end(), compareRect);

    for (int i = 0; i < (int)calcRect.size(); i++)
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        //drawContours( drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
        rectangle( drawing, calcRect[i].rc.tl(), calcRect[i].rc.br(), color, 1, 8, 0 );
        //circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
    }
    //TRACE("%s, %d, %.2f", "what ever", 3, 5.2);

    /// Show in a window
    namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
    imshow( "Contours", drawing );
    cv::imwrite("bound_3.tif", drawing);
    //namedWindow( "Display window", WINDOW_AUTOSIZE );   // Create a window for display.
    //imshow( "Display window", imgReader.m_data );       // Show our image inside it.

    waitKey(0);
    return 0;
}
// Constructor of the ImageAcquition class (current directory).
ImageAcquition::ImageAcquition(ImageReader& imagerdr)
    : imageName(imagerdr.getImageName(false)),
      imageLocation(imagerdr.getImageLoc())
{
    // nothing to be done here
}
ImageReader::ImageReader(const ImageReader& x)
{
    setNative(x.getNative());
}
void ShapeEstimation::estimateShape(ImageReader imageIn, ImageReader mask, std::vector<float>& depthMap,
                                    std::vector<Vector3f>& normalMap, std::vector<float> &gradientX,
                                    std::vector<float> &gradientY){
    BilateralFilter bf;
    int rows = imageIn.getImageHeight();
    int cols = imageIn.getImageWidth();
    float sigma = 0.0f;
    std::vector<float> luminances = computePixelLuminance(imageIn, mask, sigma);
    m_luminances = luminances;
    sigmoidalCompression(luminances, sigma);
    std::cout << rows << " " << cols << std::endl;

    float bilateralSigmaSpatial = m_bilateralSmoothing * float(cols);
    float bilateralSigmaL = 255.0f;
    std::cout << "convolve" << std::endl;
    luminances = bf.convolve(imageIn, luminances, bilateralSigmaSpatial, bilateralSigmaL);
    std::cout << "inversion" << std::endl;
    sigmoidalInversion(luminances, sigma);
    //cropMask(mask, luminances);
    std::vector<Vector3f> normals = gradientField(mask, luminances, gradientX, gradientY);

    // write out depth map
    if(DEBUG){
        QImage output(cols, rows, QImage::Format_RGB32);
        // Local pixel buffer; renamed so it does not shadow the depthMap output parameter.
        QRgb *depthPixels = reinterpret_cast<QRgb *>(output.bits());
        for(int i = 0; i < rows; i++){
            for(int j = 0; j < cols; j++){
                int index = imageIn.indexAt(i, j);
                float color = luminances[index] * 255.0f;
                QColor colorOut = QColor(floor(color), floor(color), floor(color));
                depthPixels[imageIn.indexAt(i, j)] = colorOut.rgb();
            }
        }
        output.save("images/depthMap.png");
    }

    // write out normal map
    if(DEBUG){
        float maxRed = 0.0f;
        float maxGreen = 0.0f;
        float minRed = 10000.0f;
        float minGreen = 10000.0f;
        for(int i = 0; i < rows; i++){
            for(int j = 0; j < cols; j++){
                int index = imageIn.indexAt(i, j);
                float red = normals[index](0);
                if(red > maxRed){
                    maxRed = red;
                }
                if(red < minRed){
                    minRed = red;
                }
                float green = normals[index](1);
                if(green > maxGreen){
                    maxGreen = green;
                }
                if(green < minGreen){
                    minGreen = green;
                }
            }
        }
        QImage output(cols, rows, QImage::Format_RGB32);
        // Local pixel buffer; renamed so it does not shadow the normalMap output parameter.
        QRgb *normalPixels = reinterpret_cast<QRgb *>(output.bits());
        for(int i = 0; i < rows; i++){
            for(int j = 0; j < cols; j++){
                int index = mask.indexAt(i, j);
                QColor colorOut = QColor(0, 0, 0);
                if(QColor(mask.pixelAt(i, j)).red() > 150){
                    float red = (255) * (normals[index](0) - minRed) / (maxRed - minRed);
                    float green = (255) * (normals[index](1) - minGreen) / (maxGreen - minGreen);
                    float blue = normals[index](2) * 255.0f;
                    colorOut = QColor(fabs(floor(red)), fabs(floor(green)), floor(blue));
                }
                normalPixels[mask.indexAt(i, j)] = colorOut.rgb();
            }
        }
        output.save("images/normalmap.png");
    }

    depthMap = luminances;
    normalMap = normals;
}
C2DTextureResource::C2DTextureResource(const std::string &filePath, const int flags) : CTextureResource(filePath)
{
    GLenum iFormat;
    int levels;
    //uint32_t size;
    ImageReader *img;

    m_textureId = 0;
    m_target = GL_TEXTURE_2D;
    m_type = ETT_TEX2D;

    // Fail loading if given extension is not supported.
    if((img = getImageFileByExtension(filePath)) == 0) {
        return;
    }

    // Fail loading if loaded file was invalid.
    if(!img->isValid()) {
        delete img;  // avoid leaking the reader on the failure path
        return;
    }

    iFormat = img->getOpenGLInternalFormat(flags);

    glActiveTexture(GL_TEXTURE0);
    glGenTextures(1, &m_textureId);
    glBindTexture(m_target, m_textureId);

    if(flags & ETF_MIPMAPS) {
        levels = img->getLevelAmount();
        if(levels == 1) {
            // If mipmaps are needed and the image has only the base level,
            // set the number of mipmap levels to generate.
            levels = gmath::imageLevelCount(img->getWidth(0), img->getHeight(0), 1);
        }
    }
    else {
        levels = 1;
    }

    glTexStorage2D(m_target, levels, iFormat, img->getWidth(0), img->getHeight(0));

    for(int i = 0; i < img->getLevelAmount(); i++) {
        if(img->isCompressedFormat()) {
            glCompressedTexSubImage2D(m_target, i, 0, 0, img->getWidth(i), img->getHeight(i),
                                      img->getFormat(), img->getLevelSize(i), img->getData(i));
        }
        else {
            glTexSubImage2D(m_target, i, 0, 0, img->getWidth(i), img->getHeight(i),
                            img->getFormat(), GL_UNSIGNED_BYTE, img->getData(i));
        }
    }

    glTexParameteri(m_target, GL_TEXTURE_BASE_LEVEL, 0);

    if((img->getLevelAmount() == 1) && (flags & ETF_MIPMAPS)) {
        glGenerateMipmap(m_target);
    }

    if(flags & ETF_MIPMAPS) {
        glTexParameteri(m_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        glTexParameteri(m_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    }
    else {
        glTexParameteri(m_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(m_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(m_target, GL_TEXTURE_MAX_LEVEL, 0);
    }

    // Anisotropy filtering
    float maximumAnistropy;
    glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &maximumAnistropy);
    glTexParameterf(m_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, maximumAnistropy);

    //size = approxSize(iFormat, img->getXBlocks(0), img->getYBlocks(0), 1, flags & ETF_MIPMAPS);

    glBindTexture(m_target, 0);
    delete img;
    m_loaded = true;
}
std::vector<Vector3f> ShapeEstimation::gradientField(ImageReader mask, std::vector<float> &pixelLuminances,
                                                     std::vector<float> &gradientX, std::vector<float> &gradientY){
    int rows = mask.getImageHeight();
    int cols = mask.getImageWidth();
    float gxNormalize = 0.0f;
    float gyNormalize = 0.0f;

    // Forward differences of the luminance, tracking the largest magnitude for normalization.
    for(int row = 0; row < rows; row++){
        for(int col = 0; col < cols; col++){
            int index = mask.indexAt(row, col);
            if(row + 1 < rows){
                int indexUp = mask.indexAt(row + 1, col);
                float dY = pixelLuminances[indexUp] - pixelLuminances[index];
                gradientY.push_back(dY);
                if(fabs(dY) > gyNormalize){
                    gyNormalize = fabs(dY);
                }
            } else {
                gradientY.push_back(0.0f);
            }
            if(col + 1 < cols){
                int indexRight = mask.indexAt(row, col + 1);
                float dX = pixelLuminances[indexRight] - pixelLuminances[index];
                gradientX.push_back(dX);
                if(fabs(dX) > gxNormalize){
                    gxNormalize = fabs(dX);
                }
            } else {
                gradientX.push_back(0.0f);
            }
        }
    }
    assert(gradientX.size() == gradientY.size());

    // Reshape the normalized gradients to control curvature, then restore their scale.
    for(int i = 0; i < gradientX.size(); i++){
        gradientX[i] = gradientReshapeRecursive(gradientX[i] / gxNormalize, m_curvature) * gxNormalize;
    }
    for(int i = 0; i < gradientY.size(); i++){
        gradientY[i] = gradientReshapeRecursive(gradientY[i] / gyNormalize, m_curvature) * gyNormalize;
    }

    // Surface normals from the gradient field: cross the two tangent vectors.
    std::vector<Vector3f> normals;
    for(int i = 0; i < rows; i++){
        for(int j = 0; j < cols; j++){
            QColor maskColor = QColor(mask.pixelAt(i, j));
            if(maskColor.red() > 150){
                Eigen::Vector3f gx = Vector3f(1.0f, 0.0f, gradientX[mask.indexAt(i, j)]);
                Eigen::Vector3f gy = Vector3f(0.0f, 1.0f, gradientY[mask.indexAt(i, j)]);
                Eigen::Vector3f normal = gx.cross(gy);
                normal = normal.normalized();
                normals.push_back(normal);
            } else {
                normals.push_back(Vector3f(0.0, 0.0, 0.0));
            }
        }
    }
    //pixelLuminances = gradientX;
    return normals;
}
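In vector form, the per-pixel normal computed above is the cross product of the two tangent directions of the luminance surface, followed by normalization to unit length:

\mathbf{n} \;\propto\;
\begin{pmatrix} 1 \\ 0 \\ \partial_x L \end{pmatrix}
\times
\begin{pmatrix} 0 \\ 1 \\ \partial_y L \end{pmatrix}
=
\begin{pmatrix} -\partial_x L \\ -\partial_y L \\ 1 \end{pmatrix}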
void LoadTextureAMT::processGPU_Texture2D()
{
    ImageReader* reader = m_imageReaders[0];
    if(!reader) {
        return;
    }

    glActiveTexture(GL_TEXTURE0);
    glGenTextures(1, &m_textureId);
    glBindTexture(m_target, m_textureId);

    int levels;
    if(m_loadMipmaps) {
        levels = reader->getLevelAmount();
        if(levels == 1) {
            // If need mipmaps and image has only base level, set number of mipmap levels to generate
            levels = gmath::imageLevelCount(reader->getWidth(0), reader->getHeight(0), 1);
        }
    }
    else {
        levels = 1;
    }

    glTexStorage2D(m_target, levels, m_imageGLFormat, reader->getWidth(0), reader->getHeight(0));

    for(int i = 0; i < reader->getLevelAmount(); i++) {
        if(reader->isCompressedFormat()) {
            glCompressedTexSubImage2D(m_target, i, 0, 0, reader->getWidth(i), reader->getHeight(i),
                                      reader->getFormat(), reader->getLevelSize(i), reader->getData(i));
        }
        else {
            glTexSubImage2D(m_target, i, 0, 0, reader->getWidth(i), reader->getHeight(i),
                            reader->getFormat(), GL_UNSIGNED_BYTE, reader->getData(i));
        }
    }

    glTexParameteri(m_target, GL_TEXTURE_BASE_LEVEL, 0);

    if((reader->getLevelAmount() == 1) && m_loadMipmaps) {
        glGenerateMipmap(m_target);
    }

    if(m_loadMipmaps) {
        glTexParameteri(m_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        glTexParameteri(m_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    }
    else {
        glTexParameteri(m_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(m_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(m_target, GL_TEXTURE_MAX_LEVEL, 0);
    }

    // Anisotropy filtering
    float maximumAnistropy;
    glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &maximumAnistropy);
    glTexParameterf(m_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, maximumAnistropy);

    glBindTexture(m_target, 0);

    utils::deleteAndNull(reader);
    m_imageReaders[0] = nullptr;

    m_context->m_target = m_target;
    m_context->m_textureId = m_textureId;
}