Example #1
	UMatData* allocate(int dims0, const int* sizes, int type, void* data,
			size_t* step, int flags, UMatUsageFlags usageFlags) const {
		if (data != 0) {
			CV_Error(Error::StsAssert, "The data should normally be NULL!");
			// probably this is safe to do in such extreme case
			return stdAllocator->allocate(dims0, sizes, type, data, step, flags,
					usageFlags);
		}
		PyEnsureGIL gil;

		int depth = CV_MAT_DEPTH(type);
		int cn = CV_MAT_CN(type);
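		// f is 1 on 64-bit builds and 0 on 32-bit builds; it selects the fallback
		// NumPy type below (NPY_ULONGLONG vs NPY_UINT) for depths not handled explicitly.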
		const int f = (int) (sizeof(size_t) / 8);
		int typenum =
				depth == CV_8U  ? NPY_UBYTE  :
				depth == CV_8S  ? NPY_BYTE   :
				depth == CV_16U ? NPY_USHORT :
				depth == CV_16S ? NPY_SHORT  :
				depth == CV_32S ? NPY_INT    :
				depth == CV_32F ? NPY_FLOAT  :
				depth == CV_64F ? NPY_DOUBLE :
				f * NPY_ULONGLONG + (f ^ 1) * NPY_UINT;
		int i, dims = dims0;
		cv::AutoBuffer<npy_intp> _sizes(dims + 1);
		for (i = 0; i < dims; i++)
			_sizes[i] = sizes[i];
		if (cn > 1)
			_sizes[dims++] = cn;
		PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
		if (!o)
			CV_Error_(Error::StsError,
					("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
		return allocate(o, dims0, sizes, type, step);
	}
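All of these examples revolve around the CV_Error / CV_Error_ macros, which throw a cv::Exception instead of returning an error code (CV_Error_ additionally takes a printf-style, parenthesized argument list). A minimal, self-contained sketch of that pattern; the helper name checkPositive is made up purely for illustration:

#include <opencv2/core.hpp>
#include <iostream>

// Hypothetical helper, only here to demonstrate the error-reporting pattern.
static void checkPositive(int value)
{
    if (value <= 0)
        CV_Error_(cv::Error::StsOutOfRange,
                  ("value must be positive, got %d", value)); // formatted message, throws cv::Exception
}

int main()
{
    try
    {
        checkPositive(-3);
    }
    catch (const cv::Exception& e)
    {
        std::cerr << "OpenCV error: " << e.what() << std::endl; // message includes code, file and line
    }
    return 0;
}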
Example #2
template<typename Distance, typename IndexType> void
buildIndex_(void*& index, const Mat& data, const IndexParams& params, const Distance& dist = Distance())
{
    typedef typename Distance::ElementType ElementType;
    if(DataType<ElementType>::type != data.type())
        CV_Error_(Error::StsUnsupportedFormat, ("type=%d\n", data.type()));
    if(!data.isContinuous())
        CV_Error(Error::StsBadArg, "Only continuous arrays are supported");

    ::cvflann::Matrix<ElementType> dataset((ElementType*)data.data, data.rows, data.cols);
    IndexType* _index = new IndexType(dataset, get_params(params), dist);

    try
    {
        _index->buildIndex();
    }
    catch (...)
    {
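        // the index was allocated with new above: release the partially-built
        // index before propagating the exception to the caller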
        delete _index;
        _index = NULL;

        throw;
    }

    index = _index;
}
Example #3
    void inpaint(const Mat &src, const Mat &mask, Mat &dst, const int algorithmType)
    {
        dst.create( src.size(), src.type() );

        switch ( algorithmType )
        {
            case xphoto::INPAINT_SHIFTMAP:
                shiftMapInpaint <Tp, cn>(src, mask, dst);
                break;
            default:
                CV_Error_( CV_StsNotImplemented,
                    ("Unsupported algorithm type (=%d)", algorithmType) );
                break;
        }
    }
Example #4
void cv::setWindowTitle(const String& winname, const String& title)
{
    CvWindow* window = icvFindWindowByName(winname.c_str());

    if (!window)
    {
        namedWindow(winname);
        window = icvFindWindowByName(winname.c_str());
    }

    if (!window)
        CV_Error(Error::StsNullPtr, "NULL window");

    if (noErr != SetWindowTitleWithCFString(window->window, CFStringCreateWithCString(NULL, title.c_str(), kCFStringEncodingASCII)))
        CV_Error_(Error::StsError, ("Failed to set \"%s\" window title to \"%s\"", winname.c_str(), title.c_str()));
}
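A minimal usage sketch for cv::setWindowTitle; the window name and title strings below are arbitrary placeholders:

#include <opencv2/highgui.hpp>

int main()
{
    cv::namedWindow("view");                      // created explicitly here, although the
                                                  // implementation above also creates it on demand
    cv::setWindowTitle("view", "Result preview"); // placeholder title
    cv::waitKey(0);
    return 0;
}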
Example #5
    /*! This function implements simple dct-based image denoising,
     *  link: http://www.ipol.im/pub/art/2011/ys-dct/
     *
     *  \param src : source image (rgb, or gray)
     *  \param dst : destination image
     *  \param sigma : expected noise standard deviation
     *  \param psize : size of block side where dct is computed
     */
    void dctDenoising(const Mat &src, Mat &dst, const double sigma, const int psize)
    {
        CV_Assert( src.channels() == 3 || src.channels() == 1 );

        int xtype = CV_MAKE_TYPE( CV_32F, src.channels() );
        Mat img( src.size(), xtype );
        src.convertTo(img, xtype);

        if ( img.type() == CV_32FC3 )
            rgbDctDenoising( img, img, sigma, psize );
        else if ( img.type() == CV_32FC1 )
            grayDctDenoising( img, img, sigma, psize );
        else
            CV_Error_( CV_StsNotImplemented,
            ("Unsupported source image format (=%d)", img.type()) );

        img.convertTo( dst, src.type() );
    }
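A hedged usage sketch for the function above, assuming the opencv_contrib xphoto module is available; the file names and the sigma/psize values are placeholders, not taken from the original code:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/xphoto.hpp>
#include <iostream>

int main()
{
    cv::Mat src = cv::imread("input.png");   // placeholder path
    if (src.empty())
        return 1;

    cv::Mat dst;
    try
    {
        // sigma: expected noise standard deviation, psize: DCT block size (illustrative values)
        cv::xphoto::dctDenoising(src, dst, 15.0, 16);
    }
    catch (const cv::Exception& e)
    {
        std::cerr << "dctDenoising failed: " << e.what() << std::endl;
        return 1;
    }
    cv::imwrite("denoised.png", dst);
    return 0;
}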
Example #6
bool CvCascadeImageReader::PosReader::create( const String _filename )
{
    if ( file )
        fclose( file );
    file = fopen( _filename.c_str(), "rb" );

    if( !file )
        return false;
    short tmp = 0;
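    // the .vec header stores count and vecSize, followed by two short fields
    // that are read into tmp and discarded here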
    if( fread( &count, sizeof( count ), 1, file ) != 1 ||
        fread( &vecSize, sizeof( vecSize ), 1, file ) != 1 ||
        fread( &tmp, sizeof( tmp ), 1, file ) != 1 ||
        fread( &tmp, sizeof( tmp ), 1, file ) != 1 )
        CV_Error_( CV_StsParseError, ("wrong file format for %s\n", _filename.c_str()) );
    base = sizeof( count ) + sizeof( vecSize ) + 2*sizeof( tmp );
    if( feof( file ) )
        return false;
    last = 0;
    vec = (short*) cvAlloc( sizeof( *vec ) * vecSize );
    CV_Assert( vec );
    return true;
}
Example #7
 Impl(const char* fname)
 {
     // http://support.microsoft.com/kb/316609
     int numRetries = 5;
     do
     {
         handle = ::CreateFileA(fname, GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
                             OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
         if (INVALID_HANDLE_VALUE == handle)
         {
             if (ERROR_SHARING_VIOLATION == GetLastError())
             {
                 numRetries--;
                 Sleep(250);
                 continue;
             }
             else
             {
                 CV_Error_(Error::StsAssert, ("Can't open lock file: %s", fname));
             }
         }
         break;
     } while (numRetries > 0);
 }
Example #8
    /*! The function reconstructs the selected image area from known area.
    *  \param src : source image.
    *  \param mask : inpainting mask, 8-bit 1-channel image. Zero pixels indicate the area that needs to be inpainted.
    *  \param dst : destination image.
    *  \param algorithmType : inpainting method.
    */
    void inpaint(const Mat &src, const Mat &mask, Mat &dst, const int algorithmType)
    {
        CV_Assert( mask.channels() == 1 && mask.depth() == CV_8U );
        CV_Assert( src.rows == mask.rows && src.cols == mask.cols );

        switch ( src.type() )
        {
            case CV_8SC1:
                inpaint <char,   1>( src, mask, dst, algorithmType );
                break;
            case CV_8SC2:
                inpaint <char,   2>( src, mask, dst, algorithmType );
                break;
            case CV_8SC3:
                inpaint <char,   3>( src, mask, dst, algorithmType );
                break;
            case CV_8SC4:
                inpaint <char,   4>( src, mask, dst, algorithmType );
                break;
            case CV_8UC1:
                inpaint <uchar,  1>( src, mask, dst, algorithmType );
                break;
            case CV_8UC2:
                inpaint <uchar,  2>( src, mask, dst, algorithmType );
                break;
            case CV_8UC3:
                inpaint <uchar,  3>( src, mask, dst, algorithmType );
                break;
            case CV_8UC4:
                inpaint <uchar,  4>( src, mask, dst, algorithmType );
                break;
            case CV_16SC1:
                inpaint <short,  1>( src, mask, dst, algorithmType );
                break;
            case CV_16SC2:
                inpaint <short,  2>( src, mask, dst, algorithmType );
                break;
            case CV_16SC3:
                inpaint <short,  3>( src, mask, dst, algorithmType );
                break;
            case CV_16SC4:
                inpaint <short,  4>( src, mask, dst, algorithmType );
                break;
            case CV_16UC1:
                inpaint <ushort, 1>( src, mask, dst, algorithmType );
                break;
            case CV_16UC2:
                inpaint <ushort, 2>( src, mask, dst, algorithmType );
                break;
            case CV_16UC3:
                inpaint <ushort, 3>( src, mask, dst, algorithmType );
                break;
            case CV_16UC4:
                inpaint <ushort, 4>( src, mask, dst, algorithmType );
                break;
            case CV_32SC1:
                inpaint <int,    1>( src, mask, dst, algorithmType );
                break;
            case CV_32SC2:
                inpaint <int,    2>( src, mask, dst, algorithmType );
                break;
            case CV_32SC3:
                inpaint <int,    3>( src, mask, dst, algorithmType );
                break;
            case CV_32SC4:
                inpaint <int,    4>( src, mask, dst, algorithmType );
                break;
            case CV_32FC1:
                inpaint <float,  1>( src, mask, dst, algorithmType );
                break;
            case CV_32FC2:
                inpaint <float,  2>( src, mask, dst, algorithmType );
                break;
            case CV_32FC3:
                inpaint <float,  3>( src, mask, dst, algorithmType );
                break;
            case CV_32FC4:
                inpaint <float,  4>( src, mask, dst, algorithmType );
                break;
            case CV_64FC1:
                inpaint <double, 1>( src, mask, dst, algorithmType );
                break;
            case CV_64FC2:
                inpaint <double, 2>( src, mask, dst, algorithmType );
                break;
            case CV_64FC3:
                inpaint <double, 3>( src, mask, dst, algorithmType );
                break;
            case CV_64FC4:
                inpaint <double, 4>( src, mask, dst, algorithmType );
                break;
            default:
                CV_Error_( CV_StsNotImplemented,
                    ("Unsupported source image format (=%d)",
                    src.type()) );
                break;
        }
    }
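A hedged usage sketch for the dispatcher above, again assuming the opencv_contrib xphoto module; the image paths are placeholders:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/xphoto.hpp>
#include <iostream>

int main()
{
    cv::Mat src = cv::imread("damaged.png");                     // placeholder path
    cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE); // zero pixels = area to inpaint
    if (src.empty() || mask.empty())
        return 1;

    cv::Mat dst;
    try
    {
        cv::xphoto::inpaint(src, mask, dst, cv::xphoto::INPAINT_SHIFTMAP);
    }
    catch (const cv::Exception& e)
    {
        std::cerr << "inpaint failed: " << e.what() << std::endl; // e.g. unsupported source format
        return 1;
    }
    cv::imwrite("restored.png", dst);
    return 0;
}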
Example #9
CV_IMPL CvSubdiv2DPoint *
cvSubdivDelaunay2DInsert( CvSubdiv2D * subdiv, CvPoint2D32f pt )
{
    CvSubdiv2DPoint *point = 0;
    CvSubdiv2DPointLocation location = CV_PTLOC_ERROR;

    CvSubdiv2DPoint *curr_point = 0, *first_point = 0;
    CvSubdiv2DEdge curr_edge = 0, deleted_edge = 0, base_edge = 0;
    int i, max_edges;

    if( !subdiv )
        CV_Error( CV_StsNullPtr, "" );

    if( !CV_IS_SUBDIV2D(subdiv) )
        CV_Error( CV_StsBadFlag, "" );

    location = cvSubdiv2DLocate( subdiv, pt, &curr_edge, &curr_point );

    switch (location)
    {
    case CV_PTLOC_ERROR:
        CV_Error( CV_StsBadSize, "" );

    case CV_PTLOC_OUTSIDE_RECT:
        CV_Error( CV_StsOutOfRange, "" );

    case CV_PTLOC_VERTEX:
        point = curr_point;
        break;

    case CV_PTLOC_ON_EDGE:
        deleted_edge = curr_edge;
        subdiv->recent_edge = curr_edge = cvSubdiv2DGetEdge( curr_edge, CV_PREV_AROUND_ORG );
        cvSubdiv2DDeleteEdge( subdiv, deleted_edge );
        /* no break */

    case CV_PTLOC_INSIDE:

        assert( curr_edge != 0 );
        subdiv->is_geometry_valid = 0;

        curr_point = cvSubdiv2DAddPoint( subdiv, pt, 0 );
        base_edge = cvSubdiv2DMakeEdge( subdiv );
        first_point = cvSubdiv2DEdgeOrg( curr_edge );
        cvSubdiv2DSetEdgePoints( base_edge, first_point, curr_point );
        cvSubdiv2DSplice( base_edge, curr_edge );

        do
        {
            base_edge = cvSubdiv2DConnectEdges( subdiv, curr_edge,
                                                cvSubdiv2DSymEdge( base_edge ));
            curr_edge = cvSubdiv2DGetEdge( base_edge, CV_PREV_AROUND_ORG );
        }
        while( cvSubdiv2DEdgeDst( curr_edge ) != first_point );

        curr_edge = cvSubdiv2DGetEdge( base_edge, CV_PREV_AROUND_ORG );

        max_edges = subdiv->quad_edges * 4;
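        /* the loop below walks around the newly inserted point and flips any edge
           whose opposite vertex violates the in-circle (Delaunay) condition */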

        for( i = 0; i < max_edges; i++ )
        {
            CvSubdiv2DPoint *temp_dst = 0, *curr_org = 0, *curr_dst = 0;
            CvSubdiv2DEdge temp_edge = cvSubdiv2DGetEdge( curr_edge, CV_PREV_AROUND_ORG );

            temp_dst = cvSubdiv2DEdgeDst( temp_edge );
            curr_org = cvSubdiv2DEdgeOrg( curr_edge );
            curr_dst = cvSubdiv2DEdgeDst( curr_edge );

            if( icvIsRightOf( temp_dst->pt, curr_edge ) > 0 &&
                icvIsPtInCircle3( curr_org->pt, temp_dst->pt,
                                  curr_dst->pt, curr_point->pt ) < 0 )
            {
                cvSubdiv2DSwapEdges( curr_edge );
                curr_edge = cvSubdiv2DGetEdge( curr_edge, CV_PREV_AROUND_ORG );
            }
            else if( curr_org == first_point )
            {
                break;
            }
            else
            {
                curr_edge = cvSubdiv2DGetEdge( cvSubdiv2DNextEdge( curr_edge ),
                                               CV_PREV_AROUND_LEFT );
            }
        }
        break;
    default:
        CV_Error_(CV_StsError, ("cvSubdiv2DLocate returned invalid location = %d", location) );
    }

    return curr_point;
}
Example #10
void BackgroundSubtractorGMG::initializeType(InputArray _image, double min, double max)
{
    minVal = min;
    maxVal = max;

    if (minVal == maxVal)
    {
        CV_Error_(CV_StsBadArg,("minVal and maxVal cannot be the same."));
    }

    /*
     * Parameter validation
     */
    if (maxFeatures <= 0)
    {
        CV_Error_(CV_StsBadArg,
                ("maxFeatures parameter must be 1 or greater. Instead, it is %d.",maxFeatures));
    }
    if (learningRate < 0.0 || learningRate > 1.0)
    {
        CV_Error_(CV_StsBadArg,
                ("learningRate parameter must be in the range [0.0,1.0]. Instead, it is %f.",
                learningRate));
    }
    if (numInitializationFrames < 1)
    {
        CV_Error_(CV_StsBadArg,
                ("numInitializationFrames must be at least 1. Instead, it is %d.",
                        numInitializationFrames));
    }
    if (quantizationLevels < 1)
    {
        CV_Error_(CV_StsBadArg,
                ("quantizationLevels must be at least 1 (preferably more). Instead it is %d.",
                        quantizationLevels));
    }
    if (backgroundPrior < 0.0 || backgroundPrior > 1.0)
    {
        CV_Error_(CV_StsBadArg,
                ("backgroundPrior must be a probability, between 0.0 and 1.0. Instead it is %f.",
                        backgroundPrior));
    }

    /*
     * Detect and accommodate the image depth
     */
    Mat image = _image.getMat();
    numChannels = image.channels();

    /*
     * Color quantization [0 | | | | max] --> [0 | | max]
     *  (0) Use double as intermediary to convert all types to int.
     *  (i) Shift min to 0,
     *  (ii) max/(num intervals) = factor.  x/factor * factor = quantized result, after integer operation.
     */
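    /*
     * Worked example (illustrative numbers, not taken from the original code):
     * with min = 0, max = 255 and quantizationLevels = 16, factor = 255 / 16 ~= 15.94;
     * a pixel value x = 100 gives (int)(x / factor) = 6, and 6 * factor ~= 95.6,
     * i.e. the value is snapped to one of 16 representative levels.
     */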

    /*
     * Data Structure Initialization
     */
    imWidth = image.cols;
    imHeight = image.rows;
    numPixels = image.total();
    pixels.resize(numPixels);
    frameNum = 0;

    // used to iterate through matrix of type unknown at compile time
    //elemSize = image.elemSize();
    //elemSize1 = image.elemSize1();

    vector<PixelModelGMG>::iterator pixel;
    vector<PixelModelGMG>::iterator pixel_end = pixels.end();
    for (pixel = pixels.begin(); pixel != pixel_end; ++pixel)
    {
        pixel->setMaxFeatures(maxFeatures);
    }

    fgMaskImage = Mat::zeros(imHeight, imWidth, CV_8UC1);  // 8-bit unsigned mask. 255 for FG, 0 for BG
    posteriorImage = Mat::zeros(imHeight, imWidth, CV_32FC1);  // float for storing probabilities. Can be viewed directly with imshow.
    isDataInitialized = true;
}
Example #11
int main(int /*argc*/, char** /*argv*/) {

	std::cout.setf(std::ios_base::fixed, std::ios_base::floatfield);
	std::cout.precision(2);

    try {
        const std::vector<std::string> vsTestImagePaths = {
            {"data/airplane.png"},
            {"data/baboon.png"},
			{ "data/cameraman.tif" },
			{ "data/lena.png" },
            {"data/logo.tif"},
			{ "data/logo_noise.tif" },
            {"data/peppers.png"},
        };
        for(const std::string& sTestImagePath : vsTestImagePaths) {
            const cv::Mat oInput = cv::imread(sTestImagePath);
            if(oInput.empty() || oInput.type()!=CV_8UC3)
                CV_Error_(-1,("Could not load image at '%s', check local paths",sTestImagePath.c_str()));

			std::cout << "\n ***************************************** \n\n";

            // COMPRESSION
            cv::Mat_<uchar> Y,Cb,Cr;
            conv_rgb2ycbcr(oInput,USE_SUBSAMPLING,Y,Cb,Cr);
            const std::vector<cv::Mat_<uchar>> vBlocks_Y = decoup(Y);
            const std::vector<cv::Mat_<uchar>> vBlocks_Cb = decoup(Cb);
            const std::vector<cv::Mat_<uchar>> vBlocks_Cr = decoup(Cr);

			/* Test de-conversion */
			cv::Mat image_unconvert;
			conv_ycbcr2rgb(Y, Cb, Cr, USE_SUBSAMPLING, image_unconvert);
			cv::Mat diff;
			cv::absdiff(oInput, image_unconvert, diff);


            std::vector<cv::Mat_<uchar>> vBlocks;
            vBlocks.insert(vBlocks.end(),vBlocks_Y.begin(),vBlocks_Y.end());
            vBlocks.insert(vBlocks.end(),vBlocks_Cb.begin(),vBlocks_Cb.end());
            vBlocks.insert(vBlocks.end(),vBlocks_Cr.begin(),vBlocks_Cr.end());
            std::vector<cv::Mat_<float>> vDCTBlocks(vBlocks.size());

			/* Test block - unblock*/
			//const cv::Mat_<uchar> test = decoup_inv(vBlocks, Y.size());

			for (size_t b = 0; b < vBlocks.size(); ++b)
			{
				vDCTBlocks[b] = dct(vBlocks[b]);

				/* Test i_dct*/
				cv::Mat_<uchar> original = vBlocks[b];
				cv::Mat_<float> dct = vDCTBlocks[b];
				cv::Mat_<uchar> inverse = dct_inv(vDCTBlocks[b]);			
			}
			
			// Quantization
            std::vector<cv::Mat_<short>> vQuantifDCTBlocks(vDCTBlocks.size());
            for(size_t b=0; b<vDCTBlocks.size(); ++b)
                vQuantifDCTBlocks[b] = quantif(vDCTBlocks[b],USE_QUANT_QUALITY);

            std::vector<std::array<short,8*8>> vInlinedBlocks(vQuantifDCTBlocks.size());
			for (size_t b = 0; b < vQuantifDCTBlocks.size(); ++b)
			{
				vInlinedBlocks[b] = zigzag(vQuantifDCTBlocks[b]);

				// Test zigzag ...
				/*
				cv::Mat_<short> original = vQuantifDCTBlocks[b];
				std::array<short, 8 * 8> arr = vInlinedBlocks[b];
				cv::Mat_<short> inverse = zigzag_inv(vInlinedBlocks[b]);
				*/
			}
                
            const HuffOutput<short> oCode = huff(vInlinedBlocks);

            // @@@@ TODO: check compression rate here...
			cv::Size s = oInput.size();
			int nbPixel = s.height * s.width;

			// Size in bits

			double size_before = 8 * nbPixel * oInput.channels();
			double size_after_color = 8 * (Y.size().area() + Cb.size().area() + Cr.size().area());
			double size_after_dct = 8 * 8 * 8 * vInlinedBlocks.size();
			double size_after_pipeline = oCode.string.size();

			double compressionRate_after_color = 1 - (size_after_color / size_before);
			double compressionRate_after_dct = 1 - (size_after_dct / size_after_color);
			double compressionRate_after_pipeline = 1 - (size_after_pipeline / size_before);

			/*
			double compressionRate_after_color = size_before / size_after_color;
			double compressionRate_after_dct = size_after_color / size_after_dct;
			double compressionRate_after_pipeline = size_before / size_after_pipeline;
			*/

			std::cout << "Image: " << sTestImagePath << "\n";

			std::cout << "Size before color   : " << size_before / (1000.0 * 8.0) << " kB\n";
			std::cout << "Size after color    : " << size_after_color / (1000.0 * 8.0) << " kB\n";
			std::cout << "Size after dct      : " << size_after_dct / (1000.0 * 8.0) << " kB\n";
			std::cout << "Size after pipeline : " << size_after_pipeline / (1000.0 * 8.0) << " kB\n";

			std::cout << "Compression rate (color only)       : " << 100.0 * compressionRate_after_color << " %\n";
			std::cout << "Compression rate (dct+quant+zigzag) : " << 100.0 * compressionRate_after_dct << " %\n";
			std::cout << "Compression rate (full pipeline)    : " << 100.0 * compressionRate_after_pipeline << " %\n";

            // DECOMPRESSION
            const std::vector<std::array<short,8*8>> vInlinedBlocks_decompr = huff_inv<8*8>(oCode);


			// Comment to test dct
            std::vector<cv::Mat_<short>> vQuantifDCTBlocks_decompr(vInlinedBlocks_decompr.size());
            for(size_t b=0; b<vInlinedBlocks_decompr.size(); ++b)
                vQuantifDCTBlocks_decompr[b] = zigzag_inv(vInlinedBlocks_decompr[b]);

			// Uncomment to test dct
			//std::vector<cv::Mat_<short>> vQuantifDCTBlocks_decompr(vInlinedBlocks.size());
			//for (size_t b = 0; b<vInlinedBlocks.size(); ++b)
			//	vQuantifDCTBlocks_decompr[b] = zigzag_inv(vInlinedBlocks[b]);
			
			// Comment to test dct
            std::vector<cv::Mat_<float>> vDCTBlocks_decompr(vQuantifDCTBlocks_decompr.size());
            for(size_t b=0; b<vQuantifDCTBlocks_decompr.size(); ++b)
                vDCTBlocks_decompr[b] = quantif_inv(vQuantifDCTBlocks_decompr[b],USE_QUANT_QUALITY);

			// Uncomment to test dct
			//std::vector<cv::Mat_<float>> vDCTBlocks_decompr(vQuantifDCTBlocks_decompr.size());
			//for (size_t b = 0; b<vQuantifDCTBlocks_decompr.size(); ++b)
			//	vDCTBlocks_decompr[b] = quantif_inv(vQuantifDCTBlocks_decompr[b], USE_QUANT_QUALITY);

			// Comment to test quantization
            std::vector<cv::Mat_<uchar>> vBlocks_decompr(vDCTBlocks_decompr.size());
			for (size_t b = 0; b<vDCTBlocks_decompr.size(); ++b)
				vBlocks_decompr[b] = dct_inv(vDCTBlocks_decompr[b]);

			// Uncomment to test inverse quantization
			//std::vector<cv::Mat_<uchar>> vBlocks_decompr(vDCTBlocks.size());
			//for (size_t b = 0; b < vDCTBlocks.size(); ++b)
			//    vBlocks_decompr[b] = dct_inv(vDCTBlocks[b]);

            const std::vector<cv::Mat_<uchar>> vBlocks_Y_decompr(vBlocks_decompr.begin(),vBlocks_decompr.begin()+vBlocks_Y.size());
            const std::vector<cv::Mat_<uchar>> vBlocks_Cb_decompr(vBlocks_decompr.begin()+vBlocks_Y.size(),vBlocks_decompr.begin()+vBlocks_Y.size()+vBlocks_Cb.size());
            const std::vector<cv::Mat_<uchar>> vBlocks_Cr_decompr(vBlocks_decompr.begin()+vBlocks_Y.size()+vBlocks_Cb.size(),vBlocks_decompr.end());
            const cv::Mat_<uchar> Y_decompr = decoup_inv(vBlocks_Y_decompr,Y.size());
            const cv::Mat_<uchar> Cb_decompr = decoup_inv(vBlocks_Cb_decompr,Cb.size());
            const cv::Mat_<uchar> Cr_decompr = decoup_inv(vBlocks_Cr_decompr,Cr.size());
            cv::Mat oInput_decompr;
            conv_ycbcr2rgb(Y_decompr,Cb_decompr,Cr_decompr,USE_SUBSAMPLING,oInput_decompr);

            cv::Mat oDisplay;
            cv::hconcat(oInput,oInput_decompr,oDisplay);
            cv::Mat oDiff;
            cv::absdiff(oInput,oInput_decompr,oDiff);
            cv::hconcat(oDisplay,oDiff,oDisplay);
            cv::imshow(sTestImagePath.substr(sTestImagePath.find_last_of("/\\")+1),oDisplay);
            cv::waitKey(1);
        }
        std::cout << "all done; press any key on a window to quit..." << std::endl;
        cv::waitKey(0);
        return 0;
    }
    catch(const cv::Exception& e) {
        std::cerr << "Caught cv::Exceptions: " << e.what() << std::endl;
    }
    catch(const std::runtime_error& e) {
        std::cerr << "Caught std::runtime_error: " << e.what() << std::endl;
    }
    catch(...) {
        std::cerr << "Caught unhandled exception." << std::endl;
    }
    return 1;
}
Example #12
int main(int /*argc*/, char** /*argv*/) {

	// Sliding-window parameters for the LZ77 codec
	size_t n1 = 9;
	size_t N = 18;

    try {
        // note: by default, imread always returns 3-ch images unless the cv::IMREAD_GRAYSCALE flag is set (here we hardcode it based on prior knowledge)
        const std::vector<ImagePathFlag> vsTestImages = {
            {"data/test1.png",cv::IMREAD_GRAYSCALE},
            {"data/test2.png",cv::IMREAD_GRAYSCALE},
            {"data/test3.png",cv::IMREAD_GRAYSCALE},
            {"data/test4.png",cv::IMREAD_COLOR},
            {"data/test5.png",cv::IMREAD_COLOR},
            {"data/test6.png",cv::IMREAD_COLOR},
            {"data/test7.png",cv::IMREAD_COLOR},
            {"data/test8.jpg",cv::IMREAD_COLOR},
            {"data/test9.bmp",cv::IMREAD_COLOR},
            {"data/test10.bmp",cv::IMREAD_COLOR},
        };
        for(const ImagePathFlag& oImagePathFlag : vsTestImages) {
            cv::Mat oInputImg = cv::imread(oImagePathFlag.first,oImagePathFlag.second);
            if(oInputImg.empty())
                CV_Error_(-1,("Could not load image at '%s', check local paths",oImagePathFlag.first.c_str()));

			std::vector<uint8_t> formatSignal = format_signal(oInputImg);

			std::vector<LZ77Code> encodeSignal = lz77_encode(formatSignal, N, n1);

			std::vector<uint8_t> decode = lz77_decode(encodeSignal, N, n1);

			std::cout << "\n***** New Data *******";
			std::cout << "\nTaille plain (byte): " << (formatSignal.size());
			std::cout << "\nTaille encode (byte): " << (encodeSignal.size());
			double taux = 1.0 - (double)encodeSignal.size() / (double)formatSignal.size();
			std::cout << "\nTaux compression: " << std::to_string(taux);

			if (decode.size() > formatSignal.size())
			{
				decode.pop_back();
			}

			//cv::Mat oOutputImg = reformat_image(decode, oInputImg.size());
			/*
			bool areImageEquals = std::equal(oInputImg.begin<uchar>(), oInputImg.end<uchar>(), oOutputImg.begin<uchar>());
			if (areImageEquals)
			{
				std::cout << "Yay!\n";
			}
			else {
				std::cout << "Ohhhh :(\n";
			}*/
			
            // ... @@@@ TODO (make sure decoding also provides the original image!)
            
        }
		ghettoTestEncode();
    }
    catch(const cv::Exception& e) {
        std::cerr << "Caught cv::Exceptions: " << e.what() << std::endl;
    }
    catch(const std::runtime_error& e) {
        std::cerr << "Caught std::runtime_error: " << e.what() << std::endl;
    }
    catch(const std::exception& e) {
        std::cerr << "Caught std::exception: " << e.what() << std::endl;
    }
    return 0;
}