/**
 * @brief Convert a 3-plane 8-bit RGB image into a 3-plane float Lab image.
 *
 * Reads the R/G/B planes from @p pIn (m_matX/m_matY/m_matZ, byte data) and
 * writes the L/a/b planes into @p pOut (float data), converting one pixel at
 * a time via the per-pixel RGBtoLab overload.
 *
 * @param pIn  Source image; must hold MAT_Tbyte data (asserted).
 * @param pOut Destination image; must hold MAT_Tfloat data (asserted).
 *             Assumed to have the same dimensions as @p pIn — TODO confirm
 *             callers guarantee this; no size check is performed here.
 */
void ColorSpace::RGBtoLab(CoImage* pIn, CoImage* pOut)
{
    assert(pIn->GetType() == MAT_Tbyte);
    assert(pOut->GetType() == MAT_Tfloat);

    BYTE* pbR = pIn->m_matX.data.ptr[0];
    BYTE* pbG = pIn->m_matY.data.ptr[0];
    BYTE* pbB = pIn->m_matZ.data.ptr[0];
    float* prL = pOut->m_matX.data.fl[0];
    float* pra = pOut->m_matY.data.fl[0];
    float* prb = pOut->m_matZ.data.fl[0];

    // Hoist the pixel count: GetHeight()/GetWidth() are loop-invariant and
    // were previously re-evaluated in the condition on every iteration.
    const int nPixels = pIn->GetHeight() * pIn->GetWidth();
    for (int i = 0; i < nPixels; i++)
    {
        RGBtoLab(pbR[i], pbG[i], pbB[i], &prL[i], &pra[i], &prb[i]);
    }
}
/**
 * @brief Produce an abstracted (stylized) CGImage from the image's pixel data.
 *
 * Pipeline (operating in Lab colorspace so filters touch only lightness):
 *   RGB -> Lab, bilateral filter, edge extraction, two more bilateral passes,
 *   lightness quantization, edge overlay, Lab -> RGB, then wrap the modified
 *   RGB buffer in a CGImage.
 *
 * @param stylization  Edge-extraction strength passed to createEdges().
 * @param quantization Number of lightness quantization levels.
 * @return A new CGImageRef the caller owns (Create rule) — caller must
 *         CGImageRelease() it.
 */
CGImageRef Image::createAbstraction(float stylization, uint quantization)
{
    pixel4b *rgbPixels = (pixel4b *) CFDataGetMutableBytePtr(_data);

    // Convert from RGB to Lab colorspace to perform operations on lightness channel.
    RGBtoLab(rgbPixels, _pixels);

    // Initial bilateral filter.
    bilateral();

    // Extract edges.
    pixel3f *edges = createEdges(stylization);

    // Additional bilateral filtering.
    bilateral();
    bilateral();

    // Quantize lightness channel.
    quantize(quantization);

    // Overlay edges.
    overlayEdges(edges);

    // Convert back to RGB colorspace.
    LabtoRGB(_pixels, rgbPixels);

    // Create an image from the modified data.
    CGContextRef context = CGBitmapContextCreate(
        rgbPixels,
        _width,
        _height,
        _bitsPerComponent,
        _bytesPerRow,
        _colorSpaceRef,
        _bitmapInfo
    );
    CGImageRef image = CGBitmapContextCreateImage(context);

    // BUG FIX: the context comes from a CF "Create" function, so we own it
    // and must release it; the original leaked one context per call.
    CGContextRelease(context);

    delete[] edges;

    return image;
}
int main( int argc, char * argv[]){ /**** Read the IO ****/ if (argc<3){ qWarning( "Usage: %s texton_file type [params ..]", argv[0] ); qWarning( " type :"); qWarning( " FilterBank [nTextons filterbank_size]" ); qWarning( " Color [nTextons]" ); qWarning( " HoG [nTextons L/A/B]" ); qWarning( " Location [nTextons]" ); // qWarning( " BBox [nTextons]" ); return 1; } QString save_filename = argv[1]; QString type = argv[2]; type = type.toLower(); int n_textons = N_TEXTONS; if (argc>3) n_textons = QString( argv[3] ).toInt(); QSharedPointer<Feature> filter; if (type == "filterbank"){ float filterbank_size = FILTER_BANK_SIZE; if (argc>4) filterbank_size = QString( argv[4] ).toFloat(); filter = QSharedPointer<Feature>( new FilterBank( filterbank_size ) ); } else if (type == "color"){ filter = QSharedPointer<Feature>( new ColorFeature() ); } else if (type == "location"){ filter = QSharedPointer<Feature>( new LocationFeature() ); } else if (type == "hog"){ HogFeature::HogFeatureType type = HogFeature::L; if (argc>4){ QString t = QString( argv[4] ).toLower(); if (t=="a") type = HogFeature::A; if (t=="b") type = HogFeature::B; } filter = QSharedPointer<Feature>( new HogFeature(type) ); } else if (type == "bbox"){ filter = QSharedPointer<Feature>( new BBoxFeature() ); } else qFatal( "Unknown feature %s", qPrintable( type ) ); // Declare all variables we need for both training and evaluation QVector< ColorImage > images; QVector< Image<float> > lab_images; QVector< LabelImage > labels; QVector< QString > names; /**** Training ****/ qDebug("(train) Loading the database"); loadImages( images, labels, names, TRAIN ); // Color Conversion qDebug("(train) Converting to Lab"); lab_images = RGBtoLab( images ); // Training qDebug("(train) Training Textons"); Texton texton( filter, n_textons ); texton.train( lab_images, names ); /**** Evaluation ****/ qDebug("(test) Loading the database"); loadImages( images, labels, names, ALL ); // Color Conversion qDebug("(test) Converting to Lab"); 
lab_images = RGBtoLab( images ); // Evalutation qDebug("(test) Textonizing"); QVector< Image<short> > textons = texton.textonize( lab_images, names ); /**** Storing textons ****/ saveTextons( save_filename, textons, names ); }