Example 1
Matrix<Color> read_pcx(std::string filename)
{
    std::ifstream myfile(filename, std::ios::in|std::ios::binary);
    if(myfile)
    {
        unsigned char header[128];
        myfile.read(reinterpret_cast<char*>(header), 128);
        unsigned char id = header[0];
        unsigned char version = header[1];
        unsigned char encoding = header[2];
        unsigned char bpp = header[3];
        unsigned xstart = 256*header[5]+header[4];
        unsigned ystart = 256*header[7]+header[6];
        unsigned xend = 256*header[9]+header[8];
        unsigned yend = 256*header[11]+header[10];
        unsigned hres = 256*header[13]+header[12];
        unsigned vres = 256*header[15]+header[14];
        unsigned char* palette = &header[16];
        unsigned char reserved1 = header[64];
        unsigned char numbitplanes = header[65];
        unsigned bytesperline = 256*header[67]+header[66];
        unsigned palettetype = 256*header[69]+header[68];
        unsigned hscreensize = 256*header[71]+header[70];
        unsigned vscreensize = 256*header[73]+header[72];
        unsigned char* reserved2 = &header[74];
        Matrix<Color> img(yend-ystart+1, xend-xstart+1);

        char c, runcount, runvalue;
        std::size_t ScanLineLength = numbitplanes*bytesperline;
        std::size_t LinePaddingSize = ((numbitplanes*bytesperline)*(8/bpp))-((xend - xstart)+1);

        for(std::size_t i=0;i<yend-ystart+1;++i)
        {
            std::size_t idx = 0;
            while(idx < ScanLineLength)
            {
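                // PCX RLE: a byte whose two top bits are set holds a run length
                // in its low 6 bits and is followed by the value to repeat;
                // any other byte is a single literal value.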
                myfile.get(c);
                if((c & 0xC0) == 0xC0)
                {
                    runcount = c & 0x3F;
                    myfile.get(runvalue);
                }
                else
                {
                    runcount = 1;
                    runvalue = c;
                }
                while(runcount && idx < ScanLineLength)
                {
                    if(idx<ScanLineLength/3)
                        img(i, idx).red(runvalue);
                    else if(idx<2*ScanLineLength/3)
                        img(i, idx-ScanLineLength/3).green(runvalue);
                    else
                        img(i, idx-2*ScanLineLength/3).blue(runvalue);

                    --runcount, ++idx;
                }
            }
        }

        myfile.close();
        return img;
    }
    std::cout<<"Unable to read the file : "<<filename<<std::endl;
    return Matrix<Color>();
}
Example 2
int main(){
    
   
   
    cv::VideoCapture cap(0);
    ThumbTracker tracker;
    ThumbMap map;
    
    std::vector<cv::Point2f> screen;
    screen.push_back(cv::Point2f(0, 0));
    screen.push_back(cv::Point2f(0, 30));
    screen.push_back(cv::Point2f(40, 30));
    screen.push_back(cv::Point2f(40, 0));
    
    cv::Mat templateImg; // set first captured image as template image
    cap >> templateImg;
    Thumbnail templateThumb(templateImg);
    tracker.setTemplateThumb(templateThumb);
    
    // add it to map
    map.addKeyFrame(templateThumb);
    int currentKeyFrameIndex = 0;
    
    cv::Mat pose;
    

    while (true)
    {
        
        
        
        cv::Mat img;
        cap >> img; //get image from camera
        
        
        float finalScore;
        
        struct timeval tv;
        gettimeofday(&tv,NULL);
        unsigned long s0 = 1000000 * tv.tv_sec + tv.tv_usec;
        
        //make thumbnail for current image
        Thumbnail compThumb(img);
        tracker.setCompareThumb(compThumb);
        
        
        //tracking : find the transform between current image and template image
        cv::Mat currentKeyFrameImg = tracker.IteratePose(pose,finalScore);
        
        cv::Scalar color = cv::Scalar(0, 0, 255); //red
        
        
        if(finalScore > 2.0e6){ // tracking failed (the difference between current and template image is too large)

            //see if there is some other Keyframe better
            int best = map.findBestKeyFrame(compThumb, finalScore);
            
            
            if(best != -1 && finalScore < 2.0e6){ // found one; use it as the tracking template
                tracker.setTemplateThumb(*map.getKeyFrame(best));
                currentKeyFrameIndex = best;
                pose = cv::Mat();
            }else{ // nothing found
                pose = cv::Mat();
            }
        }else{  //tracking is OK, draw some information

            //draw pose
            std::vector<cv::Point2f> trans;
            cv::transform(screen, trans, pose);
            for (size_t i = 0; i < screen.size(); ++i) {
                cv::Point2f& r1 = trans[i % 4];
                cv::Point2f& r2 = trans[(i + 1) % 4];
                cv::line(img, r1*16 , r2*16 , color, 3, CV_AA);
            }
            
            //draw thumbnail image
            cv::cvtColor(currentKeyFrameImg, currentKeyFrameImg, cv::COLOR_GRAY2BGR);
            cv::resize(currentKeyFrameImg, currentKeyFrameImg, cv::Size(120,90 ));            
            cv::Rect roi = cv::Rect(10, 10, currentKeyFrameImg.cols, currentKeyFrameImg.rows);
            cv::addWeighted( img(roi), 0.0, currentKeyFrameImg, 1.0, 0.0, img(roi));
            
            //draw current template index
            std::stringstream stream;
            stream<<"KF: "<<currentKeyFrameIndex;
            cv::putText(img, stream.str(),cv::Point(50,50), CV_FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255,255,255),2);
        }
        
        gettimeofday(&tv,NULL);
        unsigned long s1 = 1000000 * tv.tv_sec + tv.tv_usec;
        std::cout << "one round cost time: " << s1 - s0 << " micro" << std::endl;
        std::cout << "score: " << finalScore << " micro" << std::endl;
        
        imshow ("Image", img);

        int k = cv::waitKey(1);
        if (k == 13){
            //Press Enter to add one keyframe
            map.addKeyFrame(compThumb);
            tracker.setTemplateThumb(compThumb);
            currentKeyFrameIndex = map.getSize()-1;
        }else if(k == 32){
            break; // Press Space to exit
        }
        
    }

}
Example 3
	void ReloadPreviews()
	{
		Freeze();

		m_ScrolledPanel->DestroyChildren();
		m_ItemSizer->Clear();

		m_LastTerrainSelection = NULL; // clear any reference to deleted button

		AtlasMessage::qGetTerrainGroupPreviews qry(m_Name.c_str(), imageWidth, imageHeight);
		qry.Post();

		std::vector<AtlasMessage::sTerrainGroupPreview> previews = *qry.previews;

		bool allLoaded = true;

		for (size_t i = 0; i < previews.size(); ++i)
		{
			if (!previews[i].loaded)
				allLoaded = false;

			// Construct the wrapped-text label
			wxString name = previews[i].name.c_str();

			// Add spaces into the displayed name so there are more wrapping opportunities
			wxString labelText = name;
			labelText.Replace(_T("_"), _T(" "));
			wxStaticText* label = new wxStaticText(m_ScrolledPanel, wxID_ANY, labelText, wxDefaultPosition, wxDefaultSize, wxALIGN_CENTER);
			label->Wrap(imageWidth);

			unsigned char* buf = (unsigned char*)(malloc(previews[i].imageData.GetSize()));
			// imageData.GetBuffer() gives a Shareable<unsigned char>*, which
			// is stored the same way as an unsigned char*, so we can just copy it.
			memcpy(buf, previews[i].imageData.GetBuffer(), previews[i].imageData.GetSize());
			wxImage img (imageWidth, imageHeight, buf);

			wxButton* button = new wxBitmapButton(m_ScrolledPanel, wxID_ANY, wxBitmap(img));
			// Store the texture name in the clientdata slot
			button->SetClientObject(new wxStringClientData(name));

			wxSizer* imageSizer = new wxBoxSizer(wxVERTICAL);
			imageSizer->Add(button, wxSizerFlags().Center());
			imageSizer->Add(label, wxSizerFlags().Proportion(1).Center());
			m_ItemSizer->Add(imageSizer, wxSizerFlags().Expand().Center());
		}

		m_ScrolledPanel->Fit();
		Layout();

		Thaw();

		// If not all textures were loaded yet, run a timer to reload the previews
		// every so often until they've all finished
		if (allLoaded && m_Timer.IsRunning())
		{
			m_Timer.Stop();
		}
		else if (!allLoaded && !m_Timer.IsRunning())
		{
			m_Timer.Start(2000);
		}
	}
Example 4
int mri(
		float* img, 
		float complex* f, 
		float* mask, 
		float lambda,
		int N1,
		int N2)
{
	int i, j;

	float complex* f0	    = (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dx	    = (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dy	    = (float complex*) calloc(N1*N2,sizeof(float complex));

	float complex* dx_new   = (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dy_new   = (float complex*) calloc(N1*N2,sizeof(float complex));

	float complex* dtildex	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dtildey	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* u_fft2	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* u		= (float complex*) calloc(N1*N2,sizeof(float complex));

	float complex* fftmul	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* Lap		= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* diff		= (float complex*) calloc(N1*N2,sizeof(float complex));

	float sum = 0;

	for(i=0; i<N1; i++)
		for(j=0; j<N2; j++)
			sum += (SQR(crealf(f(i,j))/N1) + SQR(cimagf(f(i,j))/N1));

	float normFactor = 1.f/sqrtf(sum);
	float scale		 = sqrtf(N1*N2);

	for(i=0; i<N1; i++) {
		for(j=0; j<N2; j++) {
			f(i, j)  = f(i, j)*normFactor;
			f0(i, j) = f(i, j);
		}
	}
	Lap(N1-1, N2-1)	= 0.f;
	Lap(N1-1, 0)	= 1.f; 
	Lap(N1-1, 1)	= 0.f;
	Lap(0, N2-1)	= 1.f;
	Lap(0, 0)		= -4.f; 
	Lap(0, 1)		= 1.f;
	Lap(1, N2-1)	= 0.f;
	Lap(1, 0)		= 1.f; 
	Lap(1, 1)		= 0.f;

	float complex *w1;
	float complex *w2;
	float complex *buff;

	dft_init(&w1, &w2, &buff, N1, N2);
	dft(Lap, Lap, w1, w2, buff, N1, N2);

	for(i=0;i<N1;i++)
		for(j=0;j<N2;j++)					
			fftmul(i,j) = 1.0/((lambda/Gamma1)*mask(i,j) - Lap(i,j) + Gamma2);

	int OuterIter,iter;
	for(OuterIter= 0; OuterIter<MaxOutIter; OuterIter++) {
		for(iter = 0; iter<MaxIter; iter++) {

			for(i=0;i<N1;i++)	
				for(j=0;j<N2;j++)
					diff(i,j)  = dtildex(i,j)-dtildex(i,(j-1)>=0?(j-1):0) + dtildey(i,j)- dtildey((i-1)>=0?(i-1):0,j) ;

			dft(diff, diff, w1, w2, buff, N1, N2);

			for(i=0;i<N1;i++)
				for(j=0;j<N2;j++)
					u_fft2(i,j) = fftmul(i,j)*(f(i,j)*lambda/Gamma1*scale-diff(i,j)+Gamma2*u_fft2(i,j)) ;

			idft(u, u_fft2, w1, w2, buff, N1, N2);

			for(i=0;i<N1;i++) {
				for(j=0;j<N2;j++) {
					float tmp;
					float Thresh=1.0/Gamma1;
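					// Isotropic shrinkage (soft-thresholding) of the gradient
					// magnitude; the (tmp<Thresh) term guards against division by zero.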

					dx(i,j)     = u(i,j<(N2-1)?(j+1):j)-u(i,j)+dx(i,j)-dtildex(i,j) ;
					dy(i,j)     = u(i<(N1-1)?(i+1):i,j)-u(i,j)+dy(i,j)-dtildey(i,j) ;

					tmp = sqrtf(SQR(crealf(dx(i,j)))+SQR(cimagf(dx(i,j))) + SQR(crealf(dy(i,j)))+SQR(cimagf(dy(i,j))));
					tmp = max(0,tmp-Thresh)/(tmp+(tmp<Thresh));
					dx_new(i,j) =dx(i,j)*tmp;
					dy_new(i,j) =dy(i,j)*tmp;
					dtildex(i,j) = 2*dx_new(i,j) - dx(i,j);
					dtildey(i,j) = 2*dy_new(i,j) - dy(i,j);
					dx(i,j)      = dx_new(i,j);
					dy(i,j)      = dy_new(i,j);
				}
			}
		}
		for(i=0;i<N1;i++) {
			for(j=0;j<N2;j++) {
				f(i,j) += f0(i,j) - mask(i,j)*u_fft2(i,j)/scale;  
			}
		}
	}

	for(i=0; i<N1; i++) {
		for(j=0; j<N2; j++) {
			img(i, j) = sqrt(SQR(crealf(u(i, j))) + SQR(cimagf(u(i, j))));
		}
	}

	free(f0); free(dx); free(dy);
	free(dx_new); free(dy_new);
	free(dtildex); free(dtildey);
	free(u_fft2); free(u);
	free(fftmul); free(Lap); free(diff);

	free(w1);
	free(w2);
	free(buff);
	return 0;
}
Example 5
	int minarea( int /*argc*/, char** /*argv*/ )
	{
	    help();
	
	    Mat img(500, 500, CV_8UC3);
	    RNG& rng = theRNG();
	
	    for(;;)
	    {
	        int i, count = rng.uniform(1, 101);
	        vector<Point> points;
	
	        // Generate a random set of points
	        for( i = 0; i < count; i++ )
	        {
	            Point pt;
	            pt.x = rng.uniform(img.cols/4, img.cols*3/4);
	            pt.y = rng.uniform(img.rows/4, img.rows*3/4);
	
	            points.push_back(pt);
	        }
	
	        // Find the minimum area enclosing bounding box
	        RotatedRect box = minAreaRect(Mat(points));
	
	        // Find the minimum area enclosing triangle
	        vector<Point2f> triangle;
	
	        minEnclosingTriangle(points, triangle);
	
	        // Find the minimum area enclosing circle
	        Point2f center, vtx[4];
	        float radius = 0;
	        minEnclosingCircle(Mat(points), center, radius);
	        box.points(vtx);
	
	        img = Scalar::all(0);
	
	        // Draw the points
	        for( i = 0; i < count; i++ )
	            circle( img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA );
	
	        // Draw the bounding box
	        for( i = 0; i < 4; i++ )
	            line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, LINE_AA);
	
	        // Draw the triangle
	        for( i = 0; i < 3; i++ )
	            line(img, triangle[i], triangle[(i+1)%3], Scalar(255, 255, 0), 1, LINE_AA);
	
	        // Draw the circle
	        circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, LINE_AA);
	
	        imshow( "Rectangle, triangle & circle", img );
	
	        char key = (char)waitKey();
	        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
	            break;
	    }
	
	    return 0;
	}
Example 6
void LCGWSpinBBHNR1::Computehpc(double t)
{
	double hpl, hcr;
	double AmpT;
	dcomplex Yp;
	dcomplex Yn;
	dcomplex img(0.0, 1.0);
	dcomplex h, dhdtau;
	
	hpl = 0.0;
	hcr = 0.0;
	
#ifdef _DEBUG_GW_  
    if((DEBUGDispAll)&&(t>1000.)){
        DispAllParam(MT->o);
        DEBUGDispAll=false;
    }
#endif
    

	
	if ((t>tStartWave)&&(t<=tEndWave)){
		
		
		for(int iH=2; iH<lmax+1; iH++){
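			// Sum over the (l, m) spherical-harmonic modes: for each l (iH) and
			// m (jH), combine the +m and -m contributions weighted by the
			// harmonics Yp and Yn.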
			for(int jH=1; jH<iH+1; jH++){
				Yp = SpherHarm(iH, jH);
				Yn = SpherHarm(iH, -jH);
                
                if((OutputType==1)||(OutputType==2))
                    dhdtau = (dhpharmdtau(iH, jH, t) - img * dhcharmdtau(iH, jH, t))*Yp + (dhpharmdtau(iH, -jH, t) - img * dhcharmdtau(iH, -jH, t))*Yn;
                
                if((OutputType==0)||(OutputType==2))
                    h = (hpharm(iH, jH, t) - img * hcharm(iH, jH, t))*Yp + (hpharm(iH, -jH, t) - img * hcharm(iH, -jH, t))*Yn;
                
                //if((t>1000.)&&(t<1100.))
                //    Cout << t << " " << t/M << " "  << OutputType << " " << iH << " " << jH << " " << real(h) << " " << imag(h) << " " << real(dhdtau) << " " << imag(dhdtau) << Endl; 
                
                
                
                if(OutputType==1){
                    h = dhdtau/M;
                }
                
                if(OutputType==2){
                    h = h/M - dhdtau*(t-tShiftHybObs)/(M*M);
                    h *= LC::TSUN;
                }
                    
                //if((OutputType==2)&&(t>100.)&&(jH==2))
                //    Cout << " " << real(h) << " " << imag(h) << Endl; 
                
				hpl += real(h);
				hcr += -imag(h);

				//Cout << iH << " " << jH << " : " << h << " " << hpharm(iH, jH, t) << " " << hcharm(iH, jH, t) << " " << hpl << " " << hcr << " " << hpharm(iH, -jH, t) << " " << hcharm(iH, -jH, t)  << Endl;
			} 
			
		}
		
		/*	
		 if(lmax < 3){
		 Yp = SpherHarm(2, 2);
		 Yn = SpherHarm(2, -2);
		 h = (hpharm(2, 2, t) - img * hcharm(2, 2, t))*Yp + (hpharm(2, -2, t) - img * hcharm(2, -2, t))*Yn;
		 hpl += real(h);
		 hcr += -imag(h);
		 
		 Yp = SpherHarm(2, 1);
		 Yn = SpherHarm(2, -1);
		 h = (hpharm(2, 1, t) - img * hcharm(2, 1, t))*Yp + (hpharm(2, -1, t) - img * hcharm(2, -1, t))*Yn;
		 hpl += real(h);
		 hcr += -imag(h);
		 }
		 
		 
		 if(lmax < 4){
		 Yp = SpherHarm(3, 3);
		 Yn = SpherHarm(3, -3);
		 h = (hpharm(3, 3, t) - img * hcharm(3, 3, t))*Yp + (hpharm(3, -3, t) - img * hcharm(3, -3, t))*Yn;
		 hpl += real(h);
		 hcr += -imag(h);
		 }
		 
		 
		 if(lmax < 5){
		 Yp = SpherHarm(4, 4);
		 Yn = SpherHarm(4, -4);
		 h = (hpharm(4, 4, t) - img * hcharm(4, 4, t))*Yp + (hpharm(4, -4, t) - img * hcharm(4, -4, t))*Yn;
		 hpl += real(h);
		 hcr += -imag(h);
		 }
		 
		 
		 if(lmax < 6){
		 Yp = SpherHarm(5, 5);
		 Yn = SpherHarm(5, -5);
		 h = (hpharm(5, 5, t) - img * hcharm(5, 5, t))*Yp + (hpharm(5, -5, t) - img * hcharm(5, -5, t))*Yn;
		 hpl += real(h);
		 hcr += -imag(h);
		 }
		 
		 
		 if(lmax < 7){
		 Yp = SpherHarm(6, 6);
		 Yn = SpherHarm(6, -6);
		 h = (hpharm(6, 6, t) - img * hcharm(6, 6, t))*Yp + (hpharm(6, -6, t) - img * hcharm(6, -6, t))*Yn;
		 hpl += real(h);
		 hcr += -imag(h);
		 }
		 
		 
		 if(lmax < 8){
		 Yp = SpherHarm(7, 7);
		 Yn = SpherHarm(7, -7);
		 h = (hpharm(7, 7, t) - img * hcharm(7, 7, t))*Yp + (hpharm(7, -7, t) - img * hcharm(7, -7, t))*Yn;
		 hpl += real(h);
		 hcr += -imag(h);
		 }
		 
		 */
		
		//!  h+ and hx in SSB are compute in LCGW::ComputehBpc(double t) of LISACODE-GW.cpp
		
		//hBpLast = - Amp * (hpl * c2psi + hcr * s2psi);   // Are they defined before ?
		//hBcLast = - Amp * (-hpl * s2psi + hcr * c2psi);  // Are they defined before ?
		
		if(ApplyTaper)
			AmpT = Amp*GWSBHHH->halfhann(t, tendTaper, tTaper);
		else
			AmpT = Amp;
		
	//	hBpLast = - AmpT * hpl;
	//	hBcLast = - AmpT * hcr;

        
        hBpLast = - Amp * (hpl * c2psi + hcr * s2psi);   // Are they defined before ?
		hBcLast = - Amp * (-hpl * s2psi + hcr * c2psi);  // Are they defined before ?
		
#ifdef _DEBUG_GW_     
        (*DEBUGfCheck) << " " << hpl << " " << hcr << " " << hBpLast << " " << hBcLast  ;
#endif
        
		
	}else{
		hBpLast = 0.0;
		hBcLast = 0.0;
	}
    
	
}
Example 7
QImage QWindowsFontEngineDirectWrite::imageForGlyph(glyph_t t,
        QFixed subPixelPosition,
        int margin,
        const QTransform &xform)
{
    glyph_metrics_t metrics = QFontEngine::boundingBox(t, xform);
    int width = (metrics.width + margin * 2 + 4).ceil().toInt() ;
    int height = (metrics.height + margin * 2 + 4).ceil().toInt();

    UINT16 glyphIndex = t;
    FLOAT glyphAdvance = metrics.xoff.toReal();

    DWRITE_GLYPH_OFFSET glyphOffset;
    glyphOffset.advanceOffset = 0;
    glyphOffset.ascenderOffset = 0;

    DWRITE_GLYPH_RUN glyphRun;
    glyphRun.fontFace = m_directWriteFontFace;
    glyphRun.fontEmSize = fontDef.pixelSize;
    glyphRun.glyphCount = 1;
    glyphRun.glyphIndices = &glyphIndex;
    glyphRun.glyphAdvances = &glyphAdvance;
    glyphRun.isSideways = false;
    glyphRun.bidiLevel = 0;
    glyphRun.glyphOffsets = &glyphOffset;

    QFixed x = margin - metrics.x.floor() + subPixelPosition;
    QFixed y = margin - metrics.y.floor();

    DWRITE_MATRIX transform;
    transform.dx = x.toReal();
    transform.dy = y.toReal();
    transform.m11 = xform.m11();
    transform.m12 = xform.m12();
    transform.m21 = xform.m21();
    transform.m22 = xform.m22();

    IDWriteGlyphRunAnalysis *glyphAnalysis = NULL;
    HRESULT hr = m_fontEngineData->directWriteFactory->CreateGlyphRunAnalysis(
                     &glyphRun,
                     1.0f,
                     &transform,
                     DWRITE_RENDERING_MODE_CLEARTYPE_NATURAL_SYMMETRIC,
                     DWRITE_MEASURING_MODE_NATURAL,
                     0.0, 0.0,
                     &glyphAnalysis
                 );

    if (SUCCEEDED(hr)) {
        RECT rect;
        rect.left = 0;
        rect.top = 0;
        rect.right = width;
        rect.bottom = height;

        int size = width * height * 3;
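        // DWRITE_TEXTURE_CLEARTYPE_3x1 yields three alpha values per pixel
        // (one each for the R, G and B subpixels), hence width * height * 3 bytes.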
        BYTE *alphaValues = new BYTE[size];
        memset(alphaValues, 0, size);

        hr = glyphAnalysis->CreateAlphaTexture(DWRITE_TEXTURE_CLEARTYPE_3x1,
                                               &rect,
                                               alphaValues,
                                               size);

        if (SUCCEEDED(hr)) {
            QImage img(width, height, QImage::Format_RGB32);
            img.fill(0xffffffff);

            for (int y=0; y<height; ++y) {
                uint *dest = reinterpret_cast<uint *>(img.scanLine(y));
                BYTE *src = alphaValues + width * 3 * y;

                for (int x=0; x<width; ++x) {
                    dest[x] = *(src) << 16
                              | *(src + 1) << 8
                              | *(src + 2);

                    src += 3;
                }
            }

            delete[] alphaValues;
            glyphAnalysis->Release();

            return img;
        } else {
            delete[] alphaValues;
            glyphAnalysis->Release();

            qErrnoWarning("%s: CreateAlphaTexture failed", __FUNCTION__);
        }

    } else {
        qErrnoWarning("%s: CreateGlyphRunAnalysis failed", __FUNCTION__);
    }

    return QImage();
}
Example 8
int main()
{
	typedef float channel_type;

	typedef mizuiro::image::format<
		mizuiro::image::dimension<
			3
		>,
		mizuiro::image::interleaved<
			mizuiro::color::homogenous<
				channel_type,
				mizuiro::color::layout::rgba
			>
		>
	> format;

	typedef mizuiro::image::store<
		format,
		mizuiro::access::raw
	> store;

	store img(
		store::dim_type(
			100,
			100,
			100	
		)
	);

	typedef store::view_type view_type;

	typedef view_type::bound_type bound_type;

	// TODO: create an algorithm for this!
	{
		view_type const view(
			img.view()
		);

		typedef view_type::dim_type dim_type;

		typedef dim_type::size_type size_type;

		dim_type const dim(
			img.view().dim()
		);

		for(size_type x = 0; x < dim[0]; ++x)
			for(size_type y = 0; y < dim[1]; ++y)
				for(size_type z = 0; z < dim[2]; ++z)
					view[
						dim_type(
							x,
							y,
							z
						)
					]
					= mizuiro::color::object<
						format::color_format
					>(
						(mizuiro::color::init::red = static_cast<channel_type>(x))
						(mizuiro::color::init::green = static_cast<channel_type>(y))
						(mizuiro::color::init::blue = static_cast<channel_type>(z))
						(mizuiro::color::init::alpha = static_cast<channel_type>(255))
					);
	}

	std::cout << '\n';

	view_type const sub_view(
		mizuiro::image::sub_view(
			img.view(),
			bound_type(
				bound_type::dim_type(
					1,
					1,
					1
				),
				bound_type::dim_type(
					3,
					4,
					3	
				)
			)
		)
	);

	std::cout
		<< "sub image (with pitch "
		<< sub_view.pitch()
		<< ")\n";


	view_type const sub_sub_view(
		mizuiro::image::sub_view(
			sub_view,
			bound_type(
				bound_type::dim_type(
					1,	
					1,
					1
				),
				bound_type::dim_type(
					2,
					3,
					2
				)
			)
		)
	);

	std::cout
		<< "sub sub image (with pitch "
		<< sub_sub_view.pitch()
		<< ")\n";

	mizuiro::image::algorithm::print(
		std::cout,
		sub_sub_view
	);

	std::cout << '\n';

	{
		typedef std::reverse_iterator<
			view_type::iterator
		> reverse_iterator;

		for(
			reverse_iterator it(
				sub_sub_view.end()
			);
			it != reverse_iterator(sub_sub_view.begin());
			++it
		)
			std::cout << *it << ' ';
	}

	std::cout << '\n';
}
Example 9
void QtViewRenderer::initialize()
{
	QOpenGLFunctions* f = QOpenGLContext::currentContext()->functions();

	unsigned char* pixels;
	int width, height;
	m_atlas->getTexDataAsRGBA32(&pixels, &width, &height);
	QImage img(pixels, width, height, QImage::Format::Format_ARGB32);
	m_fontTexture = std::make_unique<QOpenGLTexture>(img);

	m_atlas->setTexID(m_fontTexture->textureId());

	const GLchar* vertex_shader =
		"#version 330\n"
		"uniform mat4 ProjMtx;\n"
		"layout (location = 0) in vec2 Position;\n"
		"layout (location = 1) in vec2 UV;\n"
		"layout (location = 2) in vec4 Color;\n"
		"out vec2 Frag_UV;\n"
		"out vec4 Frag_Color;\n"
		"void main()\n"
		"{\n"
		"	Frag_UV = UV;\n"
		"	Frag_Color = Color;\n"
		"	gl_Position = ProjMtx * vec4(Position.xy,0,1);\n"
		"}\n";

	const GLchar* fragment_shader =
		"#version 330\n"
		"uniform sampler2D Texture;\n"
		"in vec2 Frag_UV;\n"
		"in vec4 Frag_Color;\n"
		"out vec4 Out_Color;\n"
		"void main()\n"
		"{\n"
		"	Out_Color = Frag_Color * texture( Texture, Frag_UV.st);\n"
		"}\n";

	m_shader = std::make_unique<QOpenGLShaderProgram>();
	m_shader->addShaderFromSourceCode(QOpenGLShader::ShaderTypeBit::Vertex, vertex_shader);
	m_shader->addShaderFromSourceCode(QOpenGLShader::ShaderTypeBit::Fragment, fragment_shader);
	m_shader->link();

	m_locationTex = m_shader->uniformLocation("Texture");
	m_locationProjMtx = m_shader->uniformLocation("ProjMtx");

	m_VAO = std::make_unique<QOpenGLVertexArrayObject>();
	m_VAO->create();
	m_VAO->bind();

	m_VBO = std::make_unique<QOpenGLBuffer>(QOpenGLBuffer::VertexBuffer);
	m_VBO->create();
	m_VBO->setUsagePattern(QOpenGLBuffer::StreamDraw);
	m_VBO->bind();
	
	f->glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(DrawList::DrawVert), (GLvoid*)offsetof(DrawList::DrawVert, pos));
	f->glEnableVertexAttribArray(0);

	f->glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(DrawList::DrawVert), (GLvoid*)offsetof(DrawList::DrawVert, uv));
	f->glEnableVertexAttribArray(1);

	f->glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(DrawList::DrawVert), (GLvoid*)offsetof(DrawList::DrawVert, col));
	f->glEnableVertexAttribArray(2);
	
	m_VBO->release();
	
	m_EBO = std::make_unique<QOpenGLBuffer>(QOpenGLBuffer::IndexBuffer);
	m_EBO->create();
	m_EBO->setUsagePattern(QOpenGLBuffer::StreamDraw);
	
	m_VAO->release();
}
Example 10
/**
 * This demonstrates homomorphic operations being done
 * on images. An input image is encrypted pixelwise and
 * homomorphically. Then a homomorphic transformation 
 * is done on it. In this case we convert the RGB values 
 * of the pixels of the image to HSV (Hue, Saturation, Value).
 * Then we rotate the hue by a constant amount, morphing
 * the color of the image. The result is then decrypted
 * and displayed next to the original.
 *
 * Due to the constraints of homomorphic computation the
 * image is scaled down to be of size 100 by 100 pixels.
 * Then we perform batch homomorphic computation on a 
 * vector of pixels of size ~10,000.
 */
int main(int argc, char * argv[]) {
  if (argc < 2) {
    std::cerr << "Usage: " << argv[0] << " ImageFile" << std::endl;
    return 1;
  }

  // Our image input
  cimg_library::CImg<unsigned char> img(argv[1]);

  clock_t start, end;

  start = clock();

  // Generate parameters for the YASHE protocol
  // and create environment.
  YASHE SHE = YASHE::readFromFile("resources/8BitFHE");
  //long t = 257;
  //NTL::ZZ q = NTL::GenPrime_ZZ(400);
  //long d = 22016; // 2^9*43 - 5376 factors
  //long sigma = 8;
  //NTL::ZZ w = NTL::power2_ZZ(70);
  //YASHE SHE(t,q,d,sigma,w);

  end = clock();
  std::cout << "Reading parameters completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;

  std::cout << SHE.getNumFactors() << " factors." << std::endl;

  // Resize the image so we can pack it into a single
  // cipher text
  if (img.width() * img.height() > SHE.getNumFactors()) {
    double scalingFactor = SHE.getNumFactors()/double(img.width() * img.height());
    scalingFactor = sqrt(scalingFactor);
    long newWidth = img.width() * scalingFactor;
    long newHeight = img.height() * scalingFactor;
    img.resize(newWidth,newHeight, 1, 3, 5);
  }

  // Define a color space of 256 colors
  cimg_library::CImg<unsigned char> colorMap =
    cimg_library::CImg<unsigned char>::default_LUT256();

  // Convert the image into a list of integers
  // each integer represents a pixel defined
  // by the color mapping above. 
  std::vector<long> message;
  ImageFunctions::imageToLongs(message, img, colorMap);

  // In order for the output to reflect the
  // input we change img to be exactly the
  // image we are encrypting - quantized
  // to 256 colors.
  ImageFunctions::longsToImage(message, img, colorMap);

  // Resize the message so that it fills the entire
  // message space (the end of it will be junk)
  message.resize(SHE.getNumFactors());

  // Define a function on pixels.
  // This function takes a pixel value (0 - 255),
  // converts it to HSV, rotates the hue, and
  // returns the pixel value of the resulting color.
  std::function<long(long)> invertColors = [colorMap](long input) {
    unsigned char r, g, b;
    double h, s, v;
    ImageFunctions::longToRGB(input, r, g, b, colorMap);
    ImageFunctions::RGBtoHSV(r, g, b, &h, &s, &v);

    // rotate the hue by -75 degrees, keeping it in [0, 360)
    h = fmod(h - 75 + 360, 360);
    //s = pow(s, 4.);

    ImageFunctions::HSVtoRGB(&r, &g, &b, h, s, v);
    ImageFunctions::RGBToLong(input, r, g, b, colorMap);

    return input;
  };
  // The function is converted into
  // a polynomial of degree t = 257
  std::vector<long> poly = Functions::functionToPoly(invertColors, 257);

  start = clock();

  // Generate public, secret and evaluation keys
  NTL::ZZ_pX secretKey = SHE.keyGen();

  end = clock();
  std::cout << "Key generation completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;
  start = clock();

  // encrypt the message
  YASHE_CT ciphertext = SHE.encryptBatch(message);

  end = clock();
  std::cout << "Encryption completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;
  start = clock();

  // evaluate the polynomial
  YASHE_CT::evalPoly(ciphertext, ciphertext, poly);

  end = clock();
  std::cout << "Polynomial evaluation completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;
  start = clock();

  // decrypt the message
  std::vector<long> decryption = SHE.decryptBatch(ciphertext, secretKey);

  end = clock();
  std::cout << "Decryption completed in "
            << double(end - start)/CLOCKS_PER_SEC
            << " seconds"
            << std::endl;

  // turn the message back into an image
  cimg_library::CImg<unsigned char> outputImg(img.width(), img.height(), 1, 3);
  ImageFunctions::longsToImage(decryption, outputImg, colorMap);

  // Display the input next to the output!
  (img, outputImg).display("result!",false);

  return 0;
}
Example 11
int mri(
		float* img, 
		float complex* f, 
		float* mask, 
		float lambda,
		int N1,
		int N2)
{
	int i, j;

	float complex* f0	    = (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dx	    = (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dy	    = (float complex*) calloc(N1*N2,sizeof(float complex));

	float complex* dx_new   = (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dy_new   = (float complex*) calloc(N1*N2,sizeof(float complex));

	float complex* dtildex	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* dtildey	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* u_fft2	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* u		= (float complex*) calloc(N1*N2,sizeof(float complex));

	float complex* fftmul	= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* Lap		= (float complex*) calloc(N1*N2,sizeof(float complex));
	float complex* diff		= (float complex*) calloc(N1*N2,sizeof(float complex));

	float sum = 0;
	float scale		 = sqrtf(N1*N2);
	Lap(N1-1, N2-1)	= 0.f;
	Lap(N1-1, 0)	= 1.f; 
	Lap(N1-1, 1)	= 0.f;
	Lap(0, N2-1)	= 1.f;
	Lap(0, 0)		= -4.f; 
	Lap(0, 1)		= 1.f;
	Lap(1, N2-1)	= 0.f;
	Lap(1, 0)		= 1.f; 
	Lap(1, 1)		= 0.f;

	float complex *w1;
	float complex *w2;
	float complex *buff;
	double lambdaGamma1 = lambda/Gamma1;
	double lambdaGamma1Scale = lambda/Gamma1*scale;
	float Thresh=1.0/Gamma1;
	
	MPI_Datatype mpi_complexf;
	MPI_Type_contiguous(2, MPI_FLOAT, &mpi_complexf);
	MPI_Type_commit(&mpi_complexf);
	
	int np, rank;
    MPI_Comm_size(MPI_COMM_WORLD ,&np);
    MPI_Comm_rank(MPI_COMM_WORLD ,&rank);
    int chunksize = N1/np;
    int start = rank * chunksize;
    int cnt[np];
	int disp[np];
	for (i = 0 ; i < np - 1; i ++) {
		cnt[i] = chunksize * N2 ;
		disp[i] = chunksize * i * N2;
	}
	cnt[i] = (chunksize + N1%np) * N2;
	disp[i] = chunksize * i * N2;

    if (rank == np - 1)
    	chunksize += N1%np;
    int end = start + chunksize;


	for(i=start; i<end; i++)
		for(j=0; j<N2; j++)
			sum += (SQR(crealf(f(i,j))/N1) + SQR(cimagf(f(i,j))/N1));
	MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
	float normFactor = 1.f/sqrtf(sum);
	

	for(i=0; i<N1; i++) {
		for(j=0; j<N2; j++) {
			f(i, j)  = f(i, j)*normFactor;
			f0(i, j) = f(i, j);
		}
	}
	

	dft_init(&w1, &w2, &buff, N1, N2);
	dft(Lap, Lap, w1, w2, buff, N1, N2, start, end, cnt, disp, mpi_complexf, rank);
	MPI_Status *status = MPI_STATUS_IGNORE; // the receive status is never inspected

	for(i=start;i<end;i++)
		for(j=0;j<N2;j++)					
			fftmul(i,j) = 1.0/((lambda/Gamma1)*mask(i,j) - Lap(i,j) + Gamma2);
			
	int OuterIter,iter;
	for(OuterIter= 0; OuterIter<MaxOutIter; OuterIter++) {
		for(iter = 0; iter<MaxIter; iter++) {
			
			for(i=start;i<end;i++)	
				for(j=0;j<N2;j++)
					diff(i,j)  = dtildex(i,j)-dtildex(i,(j-1)>=0?(j-1):0) + dtildey(i,j)- dtildey((i-1)>=0?(i-1):0,j) ;
			
			dft(diff, diff, w1, w2, buff, N1, N2, start, end, cnt, disp, mpi_complexf, rank);

			
			for(i=start;i<end;i++) {
				for(j=0;j<N2;j++) {
					u_fft2(i,j) = fftmul(i,j)*(f(i,j)*lambdaGamma1Scale-diff(i,j)+Gamma2*u_fft2(i,j)) ;
					if (iter == MaxIter - 1)
						f(i,j) += f0(i,j) - mask(i,j)*u_fft2(i,j)/scale; 
				}
			}
			
			idft(u, u_fft2, w1, w2, buff, N1, N2, start, end, cnt, disp, mpi_complexf, rank);
			//MPI_Allgatherv(u + disp[rank], cnt[rank], mpi_complexf, u, cnt, disp, mpi_complexf, MPI_COMM_WORLD);
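			// Halo exchange for u: every rank except rank 0 sends its first local
			// row to rank-1, and every rank except the last receives the first row
			// of rank+1, so the u(i+1, j) access in the update below stays local.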
			if (rank == np - 1)
				MPI_Send(u + disp[rank], N2, mpi_complexf, rank - 1, 0, MPI_COMM_WORLD);
			else if (rank == 0)
				MPI_Recv(u + disp[rank] + cnt[rank], N2, mpi_complexf, rank + 1, 0, MPI_COMM_WORLD, status);
			else {
				MPI_Recv(u + disp[rank] + cnt[rank], N2, mpi_complexf, rank + 1, 0, MPI_COMM_WORLD, status);
				MPI_Send(u + disp[rank], N2, mpi_complexf, rank - 1, 0, MPI_COMM_WORLD);
			}
			
			for(i=start;i<end;i++) {
				for(j=0;j<N2;j++) {
					float tmp;
					dx(i,j)     = u(i,j<(N2-1)?(j+1):j)-u(i,j)+dx(i,j)-dtildex(i,j) ;
					dy(i,j)     = u(i<(N1-1)?(i+1):i,j)-u(i,j)+dy(i,j)-dtildey(i,j) ;

					tmp = sqrtf(SQR(crealf(dx(i,j)))+SQR(cimagf(dx(i,j))) + SQR(crealf(dy(i,j)))+SQR(cimagf(dy(i,j))));
					tmp = max(0,tmp-Thresh)/(tmp+(tmp<Thresh));
					dx_new(i,j) =dx(i,j)*tmp;
					dy_new(i,j) =dy(i,j)*tmp;
					dtildex(i,j) = 2*dx_new(i,j) - dx(i,j);
					dtildey(i,j) = 2*dy_new(i,j) - dy(i,j);
					dx(i,j)      = dx_new(i,j);
					dy(i,j)      = dy_new(i,j);
				}
			}
			//MPI_Allgatherv(dtildey + disp[rank], cnt[rank], mpi_complexf, dtildey, cnt, disp, mpi_complexf, MPI_COMM_WORLD);
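			// Halo exchange for dtildey: every rank except the last sends its last
			// local row to rank+1, and every rank except rank 0 receives the last
			// row of rank-1, so the dtildey(i-1, j) access in diff stays local.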
			if (rank == np - 1)
				MPI_Recv(dtildey + disp[rank] - N2, N2, mpi_complexf, rank - 1, 0, MPI_COMM_WORLD, status);
			else if (rank == 0)
				MPI_Send(dtildey + disp[rank] + cnt[rank] - N2, N2, mpi_complexf, rank + 1, 0, MPI_COMM_WORLD);
			else {
				MPI_Recv(dtildey + disp[rank] - N2, N2, mpi_complexf, rank - 1, 0, MPI_COMM_WORLD, status);
				MPI_Send(dtildey + disp[rank] + cnt[rank] - N2, N2, mpi_complexf, rank + 1, 0, MPI_COMM_WORLD);
			}
            
		}
	}

	for(i=start; i<end; i++) {
		for(j=0; j<N2; j++) {
			img(i, j) = sqrt(SQR(crealf(u(i, j))) + SQR(cimagf(u(i, j))));
		}
	}
	MPI_Gatherv(img + disp[rank], cnt[rank], MPI_FLOAT, img, cnt, disp, MPI_FLOAT, 0, MPI_COMM_WORLD);
	free(f0); free(dx); free(dy);
	free(dx_new); free(dy_new);
	free(dtildex); free(dtildey);
	free(u_fft2); free(u);
	free(fftmul); free(Lap); free(diff);

	free(w1);
	free(w2);
	free(buff);
	MPI_Finalize();
	if (rank > 0)
		exit(0);
	return 0;
}
Example 12
TypedImage LoadPng(std::istream& source)
{
#ifdef HAVE_PNG
    // First, validate the PNG signature of the stream with pango_png_validate
    if (!pango_png_validate(source)) {
        throw std::runtime_error("Not valid PNG header");
    }

    //set up initial png structs
    png_structp png_ptr = png_create_read_struct( PNG_LIBPNG_VER_STRING, (png_voidp)NULL, NULL, &PngWarningsCallback);
    if (!png_ptr) {
        throw std::runtime_error( "PNG Init error 1" );
    }

    png_infop info_ptr = png_create_info_struct(png_ptr);
    if (!info_ptr)  {
        png_destroy_read_struct(&png_ptr, (png_infopp)NULL, (png_infopp)NULL);
        throw std::runtime_error( "PNG Init error 2" );
    }

    png_infop end_info = png_create_info_struct(png_ptr);
    if (!end_info) {
        png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp)NULL);
        throw std::runtime_error( "PNG Init error 3" );
    }

    png_set_read_fn(png_ptr,(png_voidp)&source, pango_png_stream_read);

    png_set_sig_bytes(png_ptr, PNGSIGSIZE);

    // Setup transformation options
    if( png_get_bit_depth(png_ptr, info_ptr) == 1)  {
        //Unpack bools to bytes to ease loading.
        png_set_packing(png_ptr);
    } else if( png_get_bit_depth(png_ptr, info_ptr) < 8) {
        //Expand nonbool colour depths up to 8bpp
        png_set_expand_gray_1_2_4_to_8(png_ptr);
    }

    //Get rid of palette, by transforming it to RGB
    if(png_get_color_type(png_ptr, info_ptr) == PNG_COLOR_TYPE_PALETTE) {
        png_set_palette_to_rgb(png_ptr);
    }

    //read the file
    png_read_png(png_ptr, info_ptr, PNG_TRANSFORM_SWAP_ENDIAN, NULL);

    if( png_get_interlace_type(png_ptr,info_ptr) != PNG_INTERLACE_NONE) {
        throw std::runtime_error( "Interlace not yet supported" );
    }

    const size_t w = png_get_image_width(png_ptr,info_ptr);
    const size_t h = png_get_image_height(png_ptr,info_ptr);
    const size_t pitch = png_get_rowbytes(png_ptr, info_ptr);

    TypedImage img(w, h, PngFormat(png_ptr, info_ptr), pitch);

    png_bytepp rows = png_get_rows(png_ptr, info_ptr);
    for( unsigned int r = 0; r < h; r++) {
        memcpy( img.ptr + pitch*r, rows[r], pitch );
    }
    png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);

    return img;
#else
    PANGOLIN_UNUSED(source);
    throw std::runtime_error("Rebuild Pangolin for PNG support.");
#endif // HAVE_PNG
}
Example 13
MyApp::MyApp()
{
  // debug output

  Painter = new sPainter;

  // geometry 

  CubeGeo = new sGeometry();
  CubeGeo->Init(sGF_TRILIST|sGF_INDEX16,sVertexFormatStandard);

  // load vertices and indices

  sVertexStandard *vp=0;

  CubeGeo->BeginLoadVB(24,sGD_STATIC,&vp);
  vp->Init(-1, 1,-1,  0, 0,-1, 1,0); vp++; // 3
  vp->Init( 1, 1,-1,  0, 0,-1, 1,1); vp++; // 2
  vp->Init( 1,-1,-1,  0, 0,-1, 0,1); vp++; // 1
  vp->Init(-1,-1,-1,  0, 0,-1, 0,0); vp++; // 0

  vp->Init(-1,-1, 1,  0, 0, 1, 1,0); vp++; // 4
  vp->Init( 1,-1, 1,  0, 0, 1, 1,1); vp++; // 5
  vp->Init( 1, 1, 1,  0, 0, 1, 0,1); vp++; // 6
  vp->Init(-1, 1, 1,  0, 0, 1, 0,0); vp++; // 7

  vp->Init(-1,-1,-1,  0,-1, 0, 1,0); vp++; // 0
  vp->Init( 1,-1,-1,  0,-1, 0, 1,1); vp++; // 1
  vp->Init( 1,-1, 1,  0,-1, 0, 0,1); vp++; // 5
  vp->Init(-1,-1, 1,  0,-1, 0, 0,0); vp++; // 4

  vp->Init( 1,-1,-1,  1, 0, 0, 1,0); vp++; // 1
  vp->Init( 1, 1,-1,  1, 0, 0, 1,1); vp++; // 2
  vp->Init( 1, 1, 1,  1, 0, 0, 0,1); vp++; // 6
  vp->Init( 1,-1, 1,  1, 0, 0, 0,0); vp++; // 5

  vp->Init( 1, 1,-1,  0, 1, 0, 1,0); vp++; // 2
  vp->Init(-1, 1,-1,  0, 1, 0, 1,1); vp++; // 3
  vp->Init(-1, 1, 1,  0, 1, 0, 0,1); vp++; // 7
  vp->Init( 1, 1, 1,  0, 1, 0, 0,0); vp++; // 6

  vp->Init(-1, 1,-1, -1, 0, 0, 1,0); vp++; // 3
  vp->Init(-1,-1,-1, -1, 0, 0, 1,1); vp++; // 0
  vp->Init(-1,-1, 1, -1, 0, 0, 0,1); vp++; // 4
  vp->Init(-1, 1, 1, -1, 0, 0, 0,0); vp++; // 7
  CubeGeo->EndLoadVB();

  sU16 *ip=0;
  CubeGeo->BeginLoadIB(6*6,sGD_STATIC,&ip);
  sQuad(ip,0, 0, 1, 2, 3);
  sQuad(ip,0, 4, 5, 6, 7);
  sQuad(ip,0, 8, 9,10,11);
  sQuad(ip,0,12,13,14,15);
  sQuad(ip,0,16,17,18,19);
  sQuad(ip,0,20,21,22,23);
  CubeGeo->EndLoadIB();

  // texture

  sImage img(64,64);
  img.Checker(0xffff8080,0xff80ff80,8,8);
  img.Glow(0.5f,0.5f,0.25f,0.25f,0xffffffff,1.0f,4.0f);
  Tex = sLoadTexture2D(&img,sTEX_2D|sTEX_ARGB8888);

  // material

  CubeMtrl = new MrtRender;
  CubeMtrl->Flags = sMTRL_ZON | sMTRL_CULLON;
  CubeMtrl->Flags |= sMTRL_LIGHTING;
  CubeMtrl->Texture[0] = Tex;
  CubeMtrl->TFlags[0] = sMTF_LEVEL2|sMTF_CLAMP;
  CubeMtrl->Prepare(CubeGeo->GetFormat());

  // light

  sClear(Env);
  Env.AmbientColor  = 0x00202020;
  Env.LightColor[0] = 0x00ffffff;
  Env.LightDir[0].Init(1,-1,1);
  Env.LightDir[0].Unit();
  Env.Fix();

  // rendertargets

  sInt xs,ys;
  sGetScreenSize(xs,ys);

  for(sInt i=0;i<3;i++)
    RT[i] = new sTexture2D(xs,ys,sTEX_2D|sTEX_ARGB16F|sTEX_RENDERTARGET,0);
  sEnlargeZBufferRT(xs,ys);

  // blit

  BlitMtrl = new MrtBlit;
  BlitMtrl->Texture[0] = RT[0];
  BlitMtrl->Texture[1] = RT[1];
  BlitMtrl->Prepare(sVertexFormatSingle);

  sVertexSingle *vp2;
  BlitGeo = new sGeometry(sGF_QUADLIST,sVertexFormatSingle);
  BlitGeo->BeginLoadVB(4,sGD_STATIC,&vp2);
  sF32 fx = 0.5f/xs;
  sF32 fy = 0.5f/ys;
  vp2[0].Init(-1+fx,-1+fy,0,0xffffffff,0,0);
  vp2[1].Init(-1+fx, 1+fy,0,0xffffffff,0,1);
  vp2[2].Init( 1+fx, 1+fy,0,0xffffffff,1,1);
  vp2[3].Init( 1+fx,-1+fy,0,0xffffffff,1,0);
  BlitGeo->EndLoadVB();
}
Example 14
QT_BEGIN_NAMESPACE

QPixmap QPixmap::grabWindow(WId window, int x, int y, int w, int h)
{
    QWidget *widget = QWidget::find(window);
    if (!widget)
        return QPixmap();

    QRect grabRect = widget->frameGeometry();
    if (!widget->isWindow())
        grabRect.translate(widget->parentWidget()->mapToGlobal(QPoint()));
    if (w < 0)
        w = widget->width() - x;
    if (h < 0)
        h = widget->height() - y;
    grabRect &= QRect(x, y, w, h).translated(widget->mapToGlobal(QPoint()));

    QScreen *screen = qt_screen;
    QDesktopWidget *desktop = QApplication::desktop();
    if (!desktop)
        return QPixmap();
    if (desktop->numScreens() > 1) {
        const int screenNo = desktop->screenNumber(widget);
        if (screenNo != -1)
            screen = qt_screen->subScreens().at(screenNo);
        grabRect = grabRect.translated(-screen->region().boundingRect().topLeft());
    }

    if (screen->pixelFormat() == QImage::Format_Invalid) {
        qWarning("QPixmap::grabWindow(): Unable to copy pixels from framebuffer");
        return QPixmap();
    }

    if (screen->isTransformed()) {
        const QSize screenSize(screen->width(), screen->height());
        grabRect = screen->mapToDevice(grabRect, screenSize);
    }

    QWSDisplay::grab(false);
    QPixmap pixmap;
    QImage img(screen->base(),
               screen->deviceWidth(), screen->deviceHeight(),
               screen->linestep(), screen->pixelFormat());
    img = img.copy(grabRect);
    QWSDisplay::ungrab();

    if (screen->isTransformed()) {
        QMatrix matrix;
        switch (screen->transformOrientation()) {
        case 1: matrix.rotate(90); break;
        case 2: matrix.rotate(180); break;
        case 3: matrix.rotate(270); break;
        default: break;
        }
        img = img.transformed(matrix);
    }

    if (screen->pixelType() == QScreen::BGRPixel)
        img = img.rgbSwapped();

    return QPixmap::fromImage(img);
}
Example 15
wxBitmap wxRibbonToolBar::MakeDisabledBitmap(const wxBitmap& original)
{
    wxImage img(original.ConvertToImage());
    return wxBitmap(img.ConvertToGreyscale());
}
Example 16
void PathEvaluator::test_filtering()
{
	/// Export results:
	QString sessionName = QString("session_%1").arg(QDateTime::currentDateTime().toString("dd.MM.yyyy_hh.mm.ss"));
	QString path = "results/" + sessionName;
	QDir d("");	d.mkpath( path ); d.mkpath( path + "/images" );

	int timeLimit = 5000; // ms
	int numInBetweens = 8;
	int numSamplesPerPath = numInBetweens * 3;

	/// Get number of paths to evaluate:
	int numPaths = 0;
	{
		Scheduler defaultSchedule( *b->m_scheduler );

		// Find time it takes for a single path
		QElapsedTimer timer; timer.start();
		{
			defaultSchedule.setSchedule( b->m_scheduler->getSchedule() ); // default
			defaultSchedule.timeStep = 1.0 / numSamplesPerPath;
			defaultSchedule.executeAll();
		}

		// FIXME
		// numPaths = qMax(1.0, double(timeLimit) / timer.elapsed() * omp_get_num_threads());
		numPaths = qMax(1.0, double(timeLimit) / timer.elapsed() * 4);

		// Export source and target images
		QColor inputColor(255,255,255);
		b->renderer->quickRender(defaultSchedule.allGraphs.front(), inputColor).save(path + "/images/source.png");
		b->renderer->quickRender(defaultSchedule.allGraphs.back(), inputColor).save(path + "/images/target.png");
	}

	// Force number of paths
	numPaths = 30;

	// Do this once for input graphs
	QVector<Structure::Graph*> inputGraphs;
	inputGraphs << b->s->inputGraphs[0]->g << b->s->inputGraphs[1]->g;

	ScorerManager r_manager(b->m_gcorr, b->m_scheduler.data(), inputGraphs);
	r_manager.parseConstraintPair();
	r_manager.parseConstraintGroup();
	r_manager.parseGlobalReflectionSymm();

	QVector<ScorerManager::PathScore> ps( numPaths );
	MatrixXd allRanges( numPaths, 3 * 4 );

	QVector<ScheduleType> allPaths = b->m_scheduler->manyRandomSchedules( numPaths );

	QElapsedTimer evalTimer; evalTimer.start();

	//#pragma omp parallel for
	for(int i = 0; i < allPaths.size(); i++)
	{
		// Setup schedule
		Scheduler s( *b->m_scheduler );
		s.setSchedule( allPaths[i] );
		s.timeStep = 1.0 / numSamplesPerPath;

		// Execute blend
		s.executeAll();

		// Compute its score
		ps[i] = r_manager.pathScore( s.allGraphs );

		QVector<QColor> colors;
		colors.push_back(QColor(255,0,0));
		colors.push_back(QColor(0,255,0));
		colors.push_back(QColor(0,0,255));

		// Range of values
		MatrixXd ranges = ps[i].computeRange();

		allRanges.row(i) = VectorXd::Map(ranges.data(), ranges.rows()*ranges.cols());

		//#pragma omp critical
		{
			QImage img(QSize(600,130), QImage::Format_ARGB32);
			img.fill(qRgba(0,0,0,0));

			QPainter painter(&img);
			painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform);

			// Text option
			QFont font("Monospace",8);
			font.setStyleHint(QFont::TypeWriter);
			painter.setFont(font);

			QVector<Structure::Graph*> inBetweens = s.topoVaryingInBetweens( numInBetweens );

			QVector< QVector<double> > scores(numInBetweens);

			int imgWidth = 0;

			for(int k = 0; k < numInBetweens; k++)
			{
				inBetweens[k]->moveCenterTo( AlphaBlend(inBetweens[k]->property["t"].toDouble(), 
					inBetweens[k]->property["sourceGraphCenter"].value<Vector3>(), 
					inBetweens[k]->property["targetGraphCenter"].value<Vector3>()), true);

				// Draw images
				QImage inBetween = b->renderer->quickRender(inBetweens[k], Qt::white);
				imgWidth = inBetween.width();

				// Rendered shape
				int newWidth = inBetween.width() * 0.5;
				int startX = k * newWidth;
				painter.drawImage(startX, 0, inBetween);

				// Store scores
				int idx = inBetweens[k]->property["graphIndex"].toInt();
				QVector<double> vals;
				vals << ps[i].connectivity[idx] << ps[i].localSymmetry[idx] << ps[i].globalSymmetry[idx];

				scores[k] = vals;
			}
			
			// Draw score lines
			for(int u = 0; u < scores.front().size(); u++)
			{
				// Graph
				QPainterPath poly;
				int padding = 0;

				QColor clr = colors[u];
				painter.setPen(QPen(clr, 1));

				// Render path
				for(int k = 0; k < numInBetweens; k++)
				{
					// Score graph
					//double minVal = ranges(u,0);
					//double range = ranges(u,2);

					//double val = (scores[k][u] - minVal) / range;
					double val = scores[k][u];

					// Graph line
					int newWidth = imgWidth * 0.5;
					int startX = k * newWidth;
					int x = startX + newWidth;
					int y = padding + (img.height() - (img.height() * val));

					if(k == 0)
						poly.moveTo(x, y);
					else
						poly.lineTo(x, y);

					// Dots
					painter.drawEllipse(QPoint(x,y), 2, 2);
				}

				painter.setBrush(Qt::NoBrush);
				painter.drawPath(poly);
			}

			// Draw ranges
			QStringList vals;
			for(int r = 0; r < 3; r++){
				QStringList curVals;
				for(int c = 0; c < 4; c++)
					curVals << QString::number(ranges(r,c),'f',2);
				vals << curVals.join("  ");
			}

			if( false )
			{
				painter.setPen(Qt::white);
				painter.drawText(11, img.height() - 9, vals.join("   #   "));

				painter.setPen(Qt::black);
				painter.drawText(10, img.height() - 10, vals.join("   #   "));
			}

			img.save( path + QString("/images/%1.png").arg(i) );
		}
	}

	// Get global average for the measures
	Vector3d globalAvg (allRanges.col(9).mean(), allRanges.col(10).mean(), allRanges.col(11).mean());

	// Sort based on score
	QMap<int,double> scoreMap;
	for(int i = 0; i < numPaths; i++) scoreMap[i] = ps[i].score();

	QVector<int> sortedIndices;
	typedef QPair<double,int> ValIdx;
	foreach(ValIdx d, sortQMapByValue( scoreMap )){
		sortedIndices.push_back( d.second );
	}
Example 17
void Astar::paintImage() {
  int width  = 15;
  int height = 19;
  int s = 19;
  int r = s / 2;
  int o = r + 1;
  cv::Scalar white(255, 255, 255);
  cv::Scalar grey(200, 200, 200);
  cv::Scalar blue(200, 0, 0);
  cv::Scalar purple(200, 0, 200);
  cv::Scalar green(0, 200, 0);
  cv::Scalar red(0, 0, 200);
  cv::Scalar yellow(0, 255, 255);
  cv::Mat img(s*width, s*height, CV_8UC3, white);
  std::vector<Node> allNodes;
  for (std::set<Node>::iterator it = closedList.begin(); it != closedList.end(); it++) {
    allNodes.push_back(*it);
  }
  for (std::set<Node>::iterator it = openList.begin(); it != openList.end(); it++) {
    allNodes.push_back(*it);
  }
  for (std::set<Node>::iterator it = obstacles.begin(); it != obstacles.end(); it++) {
    allNodes.push_back(*it);
  }
  allNodes.push_back(this->goalNode);
  allNodes.push_back(this->startingNode);

  Node par;
  for (std::vector<Node>::iterator it = allNodes.begin(); it != allNodes.end(); it++) {
    Node cur = *it;
    int x = cur.getX();
    int y = cur.getY();
    switch(cur.getPaint()) {
      case OPEN:      cv::circle(img, cv::Point(s*x+o, s*y+o), r, grey, -1);
                      break;
      case CLOSED:    cv::circle(img, cv::Point(s*x+o, s*y+o), r, purple, -1);
                      break;
      case START:     cv::circle(img, cv::Point(s*x+o, s*y+o), r, red, -1);
                      break;
      case PATH:      par = cur.getParent();
                      cv::circle(img, cv::Point(s*x+o, s*y+o), r/2, yellow, -1);
                      break;
      case GOAL:      cv::circle(img, cv::Point(s*x+o, s*y+o), r, blue, -1);
                      break;
      case OBSTACLE:  cv::circle(img, cv::Point(s*x+o, s*y+o), r, green, -1);
                      break;
    }
  }
  for (std::vector<Node>::iterator it = path.begin(); it != path.end(); it++) {
    Node cur = *it;
    int x = cur.getX();
    int y = cur.getY();
    cv::circle(img, cv::Point(s*x+o, s*y+o), r/2, yellow, -1);
  }
  std::set<Node>::iterator it = std::min_element(obstacles.begin(), obstacles.end(), Node::CompareNode());
  Node currentNode = *it;
  std::stringstream title, filepath;
  title << "i=" << (currentNode.getX()-3);
  cv::namedWindow(title.str());
  cv::imshow(title.str(), img);
  filepath << "./img/" << "ue8_t2-" << ((currentNode.getX()-3)+2) << ".png";
  cv::imwrite(filepath.str(), img);
  cv::waitKey(0);
}
Example 18
// ######################################################################
// get gist histogram to visualize the data
Image<float> GistEstimatorGen::getGistImage(int sqSize,
                                            float minO, float maxO,
                                            float minC, float maxC,
                                            float minI, float maxI)
{
  // square size
  int s = sqSize;
  Image<float> img(NUM_GIST_COL * s, NUM_GIST_FEAT * s, ZEROS);
  float range;
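  // itsGistVector is laid out as NUM_GIST_COL columns of NUM_GIST_FEAT values:
  // columns 0-15 hold the orientation channel, 16-27 the color channel, and
  // 28 onward the intensity channel; each channel is normalized separately below.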

  // setup range for orientation channel if necessary
  if(maxO == minO)
    {
      minO = itsGistVector.getVal(0);
      maxO = itsGistVector.getVal(0);
      for(int i = 0; i < 16; i++)
        for(int j = 0; j < NUM_GIST_FEAT; j++)
          {
            float val = itsGistVector.getVal(i*NUM_GIST_FEAT+j);
            if(val < minO)
              minO = val;
            else if(val > maxO)
              maxO = val;
          }
      LDEBUG("Orientation Channel Min: %f, max: %f", minO, maxO);
    }
  range = maxO - minO;

  // orientation channel
  for(int a = 0; a < 4; a++)
    for(int b = 0; b < 4; b++)
      for(int j = 0; j < NUM_GIST_FEAT; j++)
        {
          int i  = b*4 + a;
          int ii = a*4 + b;
          float val = itsGistVector.getVal(ii*NUM_GIST_FEAT+j);
          //float val = log(itsGistVector.getVal(i*NUM_GIST_FEAT+j)+1);
          //val = val * val;
          drawPatch(img, Point2D<int>(i*s+s/2,j*s+s/2),s/2,(val-minO)/range);
          //LINFO("val[%d]: %f",j,val);
        }

  // setup range for color channel if necessary
  if(maxC == minC)
    {
      minC = itsGistVector.getVal(16*NUM_GIST_FEAT);
      maxC = itsGistVector.getVal(16*NUM_GIST_FEAT);
      for(int i = 16; i < 28; i++)
        for(int j = 0; j < NUM_GIST_FEAT; j++)
          {
            float val = itsGistVector.getVal(i*NUM_GIST_FEAT+j);
            if(val < minC)
              minC = val;
            else if(val > maxC)
              maxC = val;
          }
      LDEBUG("Color Channel Min: %f, max: %f", minC, maxC);
    }
  range = maxC - minC;

  // color channel
  for(int i = 16; i < 28; i++)
    for(int j = 0; j < NUM_GIST_FEAT; j++)
      {
        float val = itsGistVector.getVal(i*NUM_GIST_FEAT+j);
        //float val = log(itsGistVector.getVal(i*NUM_GIST_FEAT+j)+1);
        //val = val * val;
        drawPatch(img, Point2D<int>(i*s+s/2,j*s+s/2),s/2,(val-minC)/range);
        //LINFO("val[%d]: %f",j,val);
      }

  // setup range for intensity channel if necessary
  if(maxI == minI)
    {
      minI = itsGistVector.getVal(28*NUM_GIST_FEAT);
      maxI = itsGistVector.getVal(28*NUM_GIST_FEAT);
      for(int i = 28; i < 34; i++)
        for(int j = 0; j < NUM_GIST_FEAT; j++)
          {
            float val = itsGistVector.getVal(i*NUM_GIST_FEAT+j);
            if(val < minI)
              minI = val;
            else if(val > maxI)
              maxI = val;
          }
      LDEBUG("Intensity Channel Min: %f, max: %f", minI, maxI);
    }
  range = maxI - minI;

  // intensity channel
  for(int i = 28; i < NUM_GIST_COL; i++)
    for(int j = 0; j < NUM_GIST_FEAT; j++)
      {
        float val = itsGistVector.getVal(i*NUM_GIST_FEAT+j);
        //float val = log(itsGistVector.getVal(i*NUM_GIST_FEAT+j)+1);
        //val = val * val;
        drawPatch(img, Point2D<int>(i*s+s/2,j*s+s/2),s/2,(val-minI)/range);
        //LINFO("val[%d]: %f",j,val);
      }

  // draw the delineation
  // spatially
  float max = 1.0f;
  drawLine(img, Point2D<int>(0,  1*s), Point2D<int>(NUM_GIST_COL*s,  1*s), max,   1);
  drawLine(img, Point2D<int>(0,  5*s), Point2D<int>(NUM_GIST_COL*s,  5*s), max,   1);
  drawLine(img, Point2D<int>(0,  9*s), Point2D<int>(NUM_GIST_COL*s,  9*s), max/2, 1);
  drawLine(img, Point2D<int>(0, 13*s), Point2D<int>(NUM_GIST_COL*s, 13*s), max/2, 1);
  drawLine(img, Point2D<int>(0, 17*s), Point2D<int>(NUM_GIST_COL*s, 17*s), max/2, 1);

  // channelwise
  drawLine(img, Point2D<int>( 4*s, 0), Point2D<int>( 4*s, NUM_GIST_FEAT*s), max, 1);
  drawLine(img, Point2D<int>( 8*s, 0), Point2D<int>( 8*s, NUM_GIST_FEAT*s), max, 1);
  drawLine(img, Point2D<int>(12*s, 0), Point2D<int>(12*s, NUM_GIST_FEAT*s), max, 1);
  drawLine(img, Point2D<int>(16*s, 0), Point2D<int>(16*s, NUM_GIST_FEAT*s), max, 1);
  drawLine(img, Point2D<int>(22*s, 0), Point2D<int>(22*s, NUM_GIST_FEAT*s), max, 1);
  drawLine(img, Point2D<int>(28*s, 0), Point2D<int>(28*s, NUM_GIST_FEAT*s), max, 1);

  return img;
}
Example 19
bool FontRenderer::append_bitmap(ushort symbol) {
    if (m_rendered.chars[symbol].locked) return false;
    const FT_GlyphSlot  slot = m_ft_face->glyph;
    const FT_Bitmap* bm = &(slot->bitmap);
    int w = bm->width;
    int h = bm->rows;
    QImage img(w,h,QImage::Format_ARGB32);
    img.fill(0x00ffffff);
    const uchar* src = bm->buffer;
    //QColor bg = m_config->bgColor();
    //QColor fg = m_config->fgColor();
    if (bm->pixel_mode==FT_PIXEL_MODE_GRAY) {
        for (int row=0;row<h;row++) {
            QRgb* dst = reinterpret_cast<QRgb*>(img.scanLine(row));
            for (int col=0;col<w;col++) {
                 {
                    uchar s = src[col];

                    *dst = qRgba(m_config->fontColor().red(),
                                 m_config->fontColor().green(),
                                 m_config->fontColor().blue(),
                                 s);
                }
                dst++;
            }
            src+=bm->pitch;
        }
    }else if (bm->pixel_mode==FT_PIXEL_MODE_MONO) {
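        // Monochrome glyphs store one bit per pixel, most significant bit first:
        // unpack eight pixels per source byte, then handle the w%8 remainder below.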
        for (int row=0;row<h;row++) {
            QRgb* dst = reinterpret_cast<QRgb*>(img.scanLine(row));

            for (int col=0;col<w/8;col++) {
                uchar s = src[col];
                *dst++ = qRgba(255,255,255,(s&(1<<7))?255:0);
                *dst++ = qRgba(255,255,255,(s&(1<<6))?255:0);
                *dst++ = qRgba(255,255,255,(s&(1<<5))?255:0);
                *dst++ = qRgba(255,255,255,(s&(1<<4))?255:0);
                *dst++ = qRgba(255,255,255,(s&(1<<3))?255:0);
                *dst++ = qRgba(255,255,255,(s&(1<<2))?255:0);
                *dst++ = qRgba(255,255,255,(s&(1<<1))?255:0);
                *dst++ = qRgba(255,255,255,(s&(1<<0))?255:0);
            }
            {
                uchar s = src[w/8];
                int num = 7;
                switch (w%8) {
                case 7:  *dst++ = qRgba(255,255,255,(s&(1<<(num--)))?255:0);
                case 6:  *dst++ = qRgba(255,255,255,(s&(1<<(num--)))?255:0);
                case 5:  *dst++ = qRgba(255,255,255,(s&(1<<(num--)))?255:0);
                case 4:  *dst++ = qRgba(255,255,255,(s&(1<<(num--)))?255:0);
                case 3:  *dst++ = qRgba(255,255,255,(s&(1<<(num--)))?255:0);
                case 2:  *dst++ = qRgba(255,255,255,(s&(1<<(num--)))?255:0);
                case 1:  *dst++ = qRgba(255,255,255,(s&(1<<(num--)))?255:0);
                case 0:
                    break;
                }
            }

            src+=bm->pitch;
        }
    }

//    for (int row=0;row<h;row++) {
//        QRgb* dst = reinterpret_cast<QRgb*>(img.scanLine(row));

//        for (int col=0;col<w;col++) {
//            uchar s = src[col];
//            //*dst = qRgba(0xff,0xff,0xff, s);
//            *dst = qRgba(0x00,0xff,0xff, s);
//            dst++;
//        }

//        src+=bm->pitch;
//    }

    m_rendered.chars[symbol]=RenderedChar(symbol,slot->bitmap_left,slot->bitmap_top,slot->advance.x/64,img);
    m_chars.push_back(LayoutChar(symbol,slot->bitmap_left,-slot->bitmap_top,w,h));

    return true;
}
Esempio n. 20
0
/* compute the CSS image */
vector<int> ComputeCSSImageMaximas(const vector<double>& contourx_, const vector<double>& contoury_,
	vector<double>& contourx, vector<double>& contoury,
	bool isClosedCurve
	)
{
	ResampleCurve(contourx_, contoury_, contourx, contoury, 200, !isClosedCurve);
	vector<Point2d> pl; PolyLineMerge(pl, contourx, contoury);

	map<int, double> maximas;
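	// For each smoothing scale sigma, mark the curvature zero-crossings in the CSS image;
	// when two crossings meet as sigma grows, record that arc-length index as a maximum.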

	Mat_<Vec3b> img(500, 200, Vec3b(0, 0, 0)), contourimg(350, 350, Vec3b(0, 0, 0));
	bool done = false;
	//#pragma omp parallel for
	for (int i = 0; i<490; i++)
	{
		if (!done)
		{
			double sigma = 1.0 + ((double)i)*0.1;
			vector<double> kappa, smoothx, smoothy;
			ComputeCurveCSS(contourx, contoury, kappa, smoothx, smoothy, sigma);

			//			vector<vector<Point> > contours(1);
			//			PolyLineMerge(contours[0], smoothx, smoothy);
			//			contourimg = Vec3b(0,0,0);
			//			drawContours(contourimg, contours, 0, Scalar(255,255,255), CV_FILLED);

			vector<int> crossings = FindCSSInterestPointsZero(kappa);
			if (crossings.size() > 0)
			{
				for (int c = 0; c<crossings.size(); c++)
				{
					img(i, crossings[c]) = Vec3b(0, 255, 0);
					//					circle(contourimg, contours[0][crossings[c]], 5, Scalar(0,0,255), CV_FILLED);

					if (c < crossings.size() - 1) {
						if (fabs((float)crossings[c] - crossings[c + 1]) < 5.0) // fabs computes the absolute value
						{
							//this is a maxima
							int idx = (crossings[c] + crossings[c + 1]) / 2;
							//#pragma omp critical
							maximas[idx] = (maximas[idx] < sigma) ? sigma : maximas[idx];

							circle(img, Point(idx, i), 3, Scalar(0, 0, 255), CV_FILLED);
						}
					}
				}
				//				char buf[128]; sprintf(buf, "evolution_%05d.png", i);
				//				imwrite(buf, contourimg);
				//				imshow("evolution", contourimg);
				//				waitKey(30);
			}
			else
			{
				done = true;
			}

		}
	}

	//find largest sigma
	double max_sigma = 0.0;
	for (map<int, double>::iterator itr = maximas.begin(); itr != maximas.end(); ++itr)
	{
		if (max_sigma < (*itr).second)
		{
			max_sigma = (*itr).second;
		}
	}
	//get segments with largest sigma
	vector<int> maximasv;
	for (map<int, double>::iterator itr = maximas.begin(); itr != maximas.end(); ++itr)
	{
		if ((*itr).second > max_sigma / 8.0)
		{
			maximasv.push_back((*itr).first);
		}
	}
	//eliminate degenerate segments (of very small length)
	vector<int> maximasvv = EliminateCloseMaximas(maximasv, maximas);	//1st pass
	maximasvv = EliminateCloseMaximas(maximasvv, maximas);				//2nd pass
	maximasv = maximasvv;
	for (vector<int>::iterator itr = maximasv.begin(); itr != maximasv.end(); ++itr) {
		cout << *itr << " - " << maximas[*itr] << endl;
	}
	//	Mat zoom; resize(img,zoom,Size(img.rows*2,img.cols*2));
	imshow("css image", img);
	//waitKey();
	return maximasv;
}
Esempio n. 21
0
void wxTabNavigatorWindow::Create(wxWindow* parent)
{
	long style = 0;
	if(  !wxDialog::Create(parent, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, style) )
		return;

	wxBoxSizer *sz = new wxBoxSizer( wxVERTICAL );
	SetSizer( sz );

	long flags = wxLB_SINGLE | wxNO_BORDER ;
	m_listBox = new wxListBox(this, wxID_ANY, wxDefaultPosition, wxSize(200, 150), 0, NULL, flags);

	static int panelHeight = 0;
	if( panelHeight == 0 )
	{
		wxMemoryDC mem_dc;

		// bitmap must be set before it can be used for anything
		wxBitmap bmp(10, 10);
		mem_dc.SelectObject(bmp);

		wxFont font(wxSystemSettings::GetFont(wxSYS_DEFAULT_GUI_FONT));
		font.SetWeight( wxBOLD );
		mem_dc.SetFont(font);
		int w;
		mem_dc.GetTextExtent(wxT("Tp"), &w, &panelHeight);
		panelHeight += 4; // Leave a 2 pixel spacer above and below the text

		// Our signpost bitmap is 24 pixels
		if( panelHeight < 24 )
			panelHeight = 24;
	}

	m_panel = new wxPanel( this, wxID_ANY, wxDefaultPosition, wxSize(200, panelHeight));

	sz->Add( m_panel );
	sz->Add( m_listBox, 1, wxEXPAND );
	
	SetSizer( sz );

	// Connect events to the list box
	m_listBox->Connect(wxID_ANY, wxEVT_KEY_UP, wxKeyEventHandler(wxTabNavigatorWindow::OnKeyUp), NULL, this); 
	//Connect(wxEVT_CHAR_HOOK, wxCharEventHandler(wxTabNavigatorWindow::OnKeyUp), NULL, this);
	m_listBox->Connect(wxID_ANY, wxEVT_NAVIGATION_KEY, wxNavigationKeyEventHandler(wxTabNavigatorWindow::OnNavigationKey), NULL, this); 
	m_listBox->Connect(wxID_ANY, wxEVT_COMMAND_LISTBOX_DOUBLECLICKED, wxCommandEventHandler(wxTabNavigatorWindow::OnItemSelected), NULL, this);
	
	// Connect paint event to the panel
	m_panel->Connect(wxID_ANY, wxEVT_PAINT, wxPaintEventHandler(wxTabNavigatorWindow::OnPanelPaint), NULL, this);
	m_panel->Connect(wxID_ANY, wxEVT_ERASE_BACKGROUND, wxEraseEventHandler(wxTabNavigatorWindow::OnPanelEraseBg), NULL, this);

	SetBackgroundColour( wxSystemSettings::GetColour(wxSYS_COLOUR_3DFACE) );
	m_listBox->SetBackgroundColour(wxSystemSettings::GetColour(wxSYS_COLOUR_3DFACE));
	PopulateListControl( static_cast<wxFlatNotebook*>( parent ) );

	// Create the bitmap, only once
	if( !m_bmp.Ok() )
	{
		wxImage img(signpost_xpm);
		img.SetAlpha(signpost_alpha, true);
		m_bmp =  wxBitmap(img); 
	}
}
Esempio n. 22
0
void MainWindow::actionChosen()
{
    if (!hasOpenedFile) {
        QMessageBox::about(this, tr("Erreur"), tr("<p>Veuillez ouvrir une image avant !</p>"));
        return;
    }

    QObject* menuItem = sender();
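    // Dispatch on the menu entry that triggered the slot; each numeric code selects one
    // processing operation in the wrapper (morphology, smoothing, edge detection, thresholding, ...).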

    if (menuItem == ui->action3x3)
        wrapper->choix(622);
    else if (menuItem == ui->action5x5)
        wrapper->choix(623);
    else if (menuItem == ui->actionClosing)
        wrapper->choix(614);
    else if (menuItem == ui->actionOpening)
        wrapper->choix(613);
    else if (menuItem == ui->actionDilatation)
        wrapper->choix(611);
    else if (menuItem == ui->actionErosion)
        wrapper->choix(612);
    else if (menuItem == ui->actionLaplacien_4)
        wrapper->choix(64);
    else if (menuItem == ui->actionLaplacien_8)
        wrapper->choix(65);
    else if (menuItem == ui->actionMoyenneur)
        wrapper->choix(621);
    else if (menuItem == ui->actionM_dian)
        wrapper->choix(66);
    else if (menuItem == ui->actionPrewitt)
        wrapper->choix(632);
    else if (menuItem == ui->actionSobel)
        wrapper->choix(633);
    else if (menuItem == ui->actionRoberts)
        wrapper->choix(631);
    else if (menuItem == ui->actionEgalisation_d_histogramme)
        wrapper->choix(73);
    else if (menuItem == ui->actionRectangle_englobant)
        wrapper->choix(74);
    /*else if (menuItem == ui->actionR_duction_de_la_palette) {
        bool ok;
        QString text = QInputDialog::getText(this, tr("Nombre de couleurs "),
                                           tr("Nombre entier positif < 255"), QLineEdit::Normal,
                                           "128", &ok);
        if (ok && !text.isEmpty() && text.toInt() > 0 && text.toInt() < 255) {
            wrapper->doPaletteReduction(text.toInt());
        }
    }*/
    else if (menuItem == ui->actionLancer_toutes_les_actions) {
        QMessageBox::StandardButton reply;
        reply = QMessageBox::question(this, "Projet de traitement d'images", "Lancer toutes les actions possibles ? (quelques secondes)",
                                    QMessageBox::Yes|QMessageBox::No);
        if (reply == QMessageBox::Yes) {
            QTime myTimer;
            myTimer.start();
            wrapper->choix(-1);

            QMessageBox::information(this, tr("Mini-Projet Tdi"), tr("Terminé (%1 s.)").arg((float)(myTimer.elapsed())/1000.0f));
            resetDest();
        }
        return;
    } else if (menuItem == ui->actionWatershed) {
        wrapper->choix(8);
    } else if (menuItem == ui->actionPersonnalis) {
        bool ok;
        QString text = QInputDialog::getText(this, tr("Rayon du filtre gaussien"),
                                           tr("Nombre entier positif impair < 50"), QLineEdit::Normal,
                                           "9", &ok);
        if (ok && !text.isEmpty() && text.toInt() > 0 && text.toInt() < 50 && text.toInt() %2 != 0) {
            wrapper->setTailleGaussien(text.toInt());
            wrapper->choix(624);
        }

    } else if (menuItem == ui->actionSeuil_manuel) {
        bool ok;
        QString text = QInputDialog::getText(this, tr("Valeur du seuil"),
                                           tr("Nombre entier positif [0-255]"), QLineEdit::Normal,
                                           "100", &ok);
        if (ok && !text.isEmpty() && text.toInt() >= 0 && text.toInt() <= 255) {
            wrapper->setManSeuil(text.toInt());
            wrapper->choix(3);
        }
    } else if (menuItem == ui->actionSeuil_automatique) {
        wrapper->choix(5);
    } else {
        std::cerr << "Erreur de signal/slot sur outil" << std::endl;
    }

    QImage img(QString(wrapper->getLatestNewImage()));
    ui->modifiedImage->setPixmap(QPixmap::fromImage(img));
}
Esempio n. 23
0
  void GSCam::publish_stream()
  {
    ROS_INFO_STREAM("Publishing stream...");

    // Pre-roll camera if needed
    if (preroll_) {
      ROS_DEBUG("Performing preroll...");

      //The PAUSE, PLAY, PAUSE, PLAY cycle is to ensure proper pre-roll
      //I am told this is needed and am erring on the side of caution.
      gst_element_set_state(pipeline_, GST_STATE_PLAYING);
      if (gst_element_get_state(pipeline_, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
        ROS_ERROR("Failed to PLAY during preroll.");
        return;
      } else {
        ROS_DEBUG("Stream is PLAYING in preroll.");
      }

      gst_element_set_state(pipeline_, GST_STATE_PAUSED);
      if (gst_element_get_state(pipeline_, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
        ROS_ERROR("Failed to PAUSE.");
        return;
      } else {
        ROS_INFO("Stream is PAUSED in preroll.");
      }
    }

    if(gst_element_set_state(pipeline_, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
      ROS_ERROR("Could not start stream!");
      return;
    }
    ROS_INFO("Started stream.");

    // Poll the data as fast as possible
    while(ros::ok()) {
      // This should block until a new frame is available; this way we'll run at the
      // actual capture framerate of the device.
      ROS_DEBUG("Getting data...");
      GstBuffer* buf = gst_app_sink_pull_buffer(GST_APP_SINK(sink_));


      GstFormat fmt = GST_FORMAT_TIME;
      gint64 current = -1;

      // Query the current position of the stream
      //if (gst_element_query_position(pipeline_, &fmt, &current)) {
        //ROS_INFO_STREAM("Position "<<current);
      //}

      // Stop on end of stream
      if (!buf) {
        ROS_INFO("Stream ended.");
        break;
      }

      ROS_DEBUG("Got data.");

      // Get the image width and height
      GstPad* pad = gst_element_get_static_pad(sink_, "sink");
      const GstCaps *caps = gst_pad_get_negotiated_caps(pad);
      GstStructure *structure = gst_caps_get_structure(caps,0);
      gst_structure_get_int(structure,"width",&width_);
      gst_structure_get_int(structure,"height",&height_);

      // Complain if the returned buffer is smaller than we expect
      const unsigned int expected_frame_size =
          image_encoding_ == sensor_msgs::image_encodings::RGB8
              ? width_ * height_ * 3
              : width_ * height_;

      if (buf->size < expected_frame_size) {
        ROS_WARN_STREAM( "GStreamer image buffer underflow: Expected frame to be "
            << expected_frame_size << " bytes but got only "
            << (buf->size) << " bytes. (make sure frames are correctly encoded)");
      }

      // Construct Image message
      sensor_msgs::ImagePtr img(new sensor_msgs::Image());
      sensor_msgs::CameraInfoPtr cinfo;

      // Update header information
      cinfo.reset(new sensor_msgs::CameraInfo(camera_info_manager_.getCameraInfo()));
      cinfo->header.stamp = ros::Time::now();
      cinfo->header.frame_id = frame_id_;
      img->header = cinfo->header;

      // Image data and metadata
      img->width = width_;
      img->height = height_;
      img->encoding = image_encoding_;
      img->is_bigendian = false;
      img->data.resize(expected_frame_size);

      // Copy only the data we received
      // Since we're publishing shared pointers, we need to copy the image so
      // we can free the buffer allocated by gstreamer
      if (image_encoding_ == sensor_msgs::image_encodings::RGB8) {
        img->step = width_ * 3;
      } else {
        img->step = width_;
      }
      std::copy(
          buf->data,
          (buf->data)+(buf->size),
          img->data.begin());

      // Publish the image/info
      camera_pub_.publish(img, cinfo);

      // Release the buffer
      gst_buffer_unref(buf);

      ros::spinOnce();
    }
  }
Esempio n. 24
0
void setup_and_render()
{

	Image img(WIDTH, HEIGHT);
	img.addRef();

	//Set up the scene
	GeometryGroup scene;

	// load scene
	LWObject objects;
	objects.read("models/cube.obj", true);
	objects.addReferencesToScene(scene.primitives);	
	scene.rebuildIndex();
	
	//apply custom shaders
	BumpTexturePhongShader as;
	as.addRef();
	Image  grass;
	grass.addRef();
	grass.readPNG("models/mat.png");
	Texture textureGrass;
	textureGrass.addRef();
	textureGrass.image = &grass;
	as.diffTexture = &textureGrass;
	as.amibientTexture = &textureGrass;
	as.specularCoef = float4::rep(0);
	as.specularExponent = 10000.f;
	as.transparency = float4::rep(0.9);
	FractalLandscape f(Point(-4419,-8000,-569), Point(3581,0, -569),9, 0.1, &as, 5.0f);
	f.addReferencesToScene(scene.primitives);
	scene.rebuildIndex();
	
	// Reflective/refractive Phong shader for the glass material (n1 = air, n2 = glass)
	RRPhongShader glass;
	glass.n1 = 1.0f;
	glass.n2 = 1.5f;
	glass.diffuseCoef = float4(0.1, 0.1, 0.1, 0);
	glass.ambientCoef = glass.diffuseCoef;
	glass.specularCoef = float4::rep(0.9);
	glass.specularExponent = 10000;
	glass.transparency = float4::rep(0.9);
	glass.addRef();
	Sphere sphere(Point(-78,1318,40), 25, &glass);
	scene.primitives.push_back(&sphere);
	scene.rebuildIndex();	
	objects.materials[objects.materialMap["Glass"]].shader = &glass;

	
	//sample shader for noise
	ProceduralPhongShader skyShader;
	skyShader.addRef();
	CloudTexture nt;
	nt.addRef();
	skyShader.amibientNoiseTexture = &nt;
	skyShader.diffuseCoef = float4::rep(0.0f);
	skyShader.specularCoef = float4::rep(0.0f);

// 	float w = skyShader.amibientNoiseTexture->perlin->width;
 objects.materials[objects.materialMap["Sky"]].shader = &skyShader;

	//Set up the cameras
	PerspectiveCamera cam1(Point(-23, 1483, 30 ), forwardForCamera((0.0)*PI/180.0), Vector(0, 0, 1), 45,
		std::make_pair(img.width(), img.height()));
	
	cam1.addRef();

	//Set up the integrator
	IntegratorImpl integrator;
	integrator.addRef();
	integrator.scene = &scene;

	PointLightSource pls3;

	pls3.falloff = float4(0, 0, 1, 0);

	pls3.intensity  = float4::rep(0.9f);
	pls3.position = Point(299.5, 99, 518);
	integrator.lightSources.push_back(pls3);

// 	PointLightSource pls4;
// 
// 	pls4.falloff = float4(0, 0, 1, 0);
// 
// 	pls4.intensity  = float4::rep(0.9f);
// 	pls4.position = Point(1289.5, 99, 518);
// 	integrator.lightSources.push_back(pls4);
	
	areaLightSource(integrator, 0.9, 2, Point(-1180, -3860, -1718), 1000);
	integrator.ambientLight = float4::rep(0.1f);

	StratifiedSampler samp;
	samp.addRef();
 	samp.samplesX = 3;
	samp.samplesY = 3;

	//Render
	Renderer r;
	r.integrator = &integrator;
	r.target = &img;
	r.sampler = &samp;

	r.camera = &cam1;
	r.render();
	img.writePNG("result.png");
	
}
Esempio n. 25
0
void NotifyWindow::updateNotifyDisplay() {
	if (!item) return;

	int32 w = st::notifyWidth, h = st::notifyHeight;
	QImage img(w * cIntRetinaFactor(), h * cIntRetinaFactor(), QImage::Format_ARGB32_Premultiplied);
	if (cRetina()) img.setDevicePixelRatio(cRetinaFactor());
	img.fill(st::notifyBG->c);

	{
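		// Paint the notification: one-pixel border, peer photo (or the application icon),
		// the name line with a timestamp, then the message preview, all gated by the
		// passcode state and the configured notification privacy level.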
		QPainter p(&img);
		p.fillRect(0, 0, w - st::notifyBorderWidth, st::notifyBorderWidth, st::notifyBorder->b);
		p.fillRect(w - st::notifyBorderWidth, 0, st::notifyBorderWidth, h - st::notifyBorderWidth, st::notifyBorder->b);
		p.fillRect(st::notifyBorderWidth, h - st::notifyBorderWidth, w - st::notifyBorderWidth, st::notifyBorderWidth, st::notifyBorder->b);
		p.fillRect(0, st::notifyBorderWidth, st::notifyBorderWidth, h - st::notifyBorderWidth, st::notifyBorder->b);

		if (!App::passcoded() && cNotifyView() <= dbinvShowName) {
			if (history->peer->photo->loaded()) {
				p.drawPixmap(st::notifyPhotoPos.x(), st::notifyPhotoPos.y(), history->peer->photo->pix(st::notifyPhotoSize));
			} else {
				MTP::clearLoaderPriorities();
				peerPhoto = history->peer->photo;
				peerPhoto->load(true, true);
			}
		} else {
			static QPixmap icon = QPixmap::fromImage(App::wnd()->iconLarge().scaled(st::notifyPhotoSize, st::notifyPhotoSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), Qt::ColorOnly);
			p.drawPixmap(st::notifyPhotoPos.x(), st::notifyPhotoPos.y(), icon);
		}

		int32 itemWidth = w - st::notifyPhotoPos.x() - st::notifyPhotoSize - st::notifyTextLeft - st::notifyClosePos.x() - st::notifyClose.width;

		QRect rectForName(st::notifyPhotoPos.x() + st::notifyPhotoSize + st::notifyTextLeft, st::notifyTextTop, itemWidth, st::msgNameFont->height);
		if (!App::passcoded() && cNotifyView() <= dbinvShowName) {
			if (history->peer->chat) {
				p.drawPixmap(QPoint(rectForName.left() + st::dlgChatImgLeft, rectForName.top() + st::dlgChatImgTop), App::sprite(), st::dlgChatImg);
				rectForName.setLeft(rectForName.left() + st::dlgChatImgSkip);
			}
		}

		QDateTime now(QDateTime::currentDateTime()), lastTime(item->date);
		QDate nowDate(now.date()), lastDate(lastTime.date());
		QString dt = lastTime.toString(cTimeFormat());
		int32 dtWidth = st::dlgHistFont->m.width(dt);
		rectForName.setWidth(rectForName.width() - dtWidth - st::dlgDateSkip);
		p.setFont(st::dlgDateFont->f);
		p.setPen(st::dlgDateColor->p);
		p.drawText(rectForName.left() + rectForName.width() + st::dlgDateSkip, rectForName.top() + st::dlgHistFont->ascent, dt);

		if (!App::passcoded() && cNotifyView() <= dbinvShowPreview) {
			const HistoryItem *textCachedFor = 0;
			Text itemTextCache(itemWidth);
			QRect r(st::notifyPhotoPos.x() + st::notifyPhotoSize + st::notifyTextLeft, st::notifyItemTop + st::msgNameFont->height, itemWidth, 2 * st::dlgFont->height);
			if (fwdCount < 2) {
				bool active = false;
				item->drawInDialog(p, r, active, textCachedFor, itemTextCache);
			} else {
				p.setFont(st::dlgHistFont->f);
				if (history->peer->chat) {
					itemTextCache.setText(st::dlgHistFont, item->from()->name);
					p.setPen(st::dlgSystemColor->p);
					itemTextCache.drawElided(p, r.left(), r.top(), r.width(), st::dlgHistFont->height);
					r.setTop(r.top() + st::dlgHistFont->height);
				}
				p.setPen(st::dlgTextColor->p);
				p.drawText(r.left(), r.top() + st::dlgHistFont->ascent, lng_forward_messages(lt_count, fwdCount));
			}
		} else {
			static QString notifyText = st::dlgHistFont->m.elidedText(lang(lng_notification_preview), Qt::ElideRight, itemWidth);
			p.setPen(st::dlgSystemColor->p);
			p.drawText(st::notifyPhotoPos.x() + st::notifyPhotoSize + st::notifyTextLeft, st::notifyItemTop + st::msgNameFont->height + st::dlgHistFont->ascent, notifyText);
		}

		p.setPen(st::dlgNameColor->p);
		if (!App::passcoded() && cNotifyView() <= dbinvShowName) {
			history->nameText.drawElided(p, rectForName.left(), rectForName.top(), rectForName.width());
		} else {
			p.setFont(st::msgNameFont->f);
			static QString notifyTitle = st::msgNameFont->m.elidedText(qsl("Telegram Desktop"), Qt::ElideRight, rectForName.width());
			p.drawText(rectForName.left(), rectForName.top() + st::msgNameFont->ascent, notifyTitle);
		}
	}

	pm = QPixmap::fromImage(img, Qt::ColorOnly);
	update();
}
Esempio n. 26
0
int main() {

    const unsigned oversample = 2;
    const unsigned w = 2048*oversample, h = 2048*oversample;
    cout << "allocating memory for a " << w << " x " << h << " image buffer..." << flush;
    simg img( w, h, {0, 0, 0, 255} );
    cout << " done." << endl;

    // list of points
    const int numpoints = 5;
    uniform_int_distribution<> dist_int( 0, numpoints-1 );
    // std::vector cannot hold raw C arrays (they are not copyable); std::array keeps the [p][0] indexing (requires <array>)
    vector< array<double, 2> > points( numpoints );

    // radius: 0.5 will just touch the edges of the image
    const double rad = 0.99 * 0.5;
    // initial rotation: not important but makes each image unique
    const double rot = rand01();

    // distribute points evenly around a circle (not necessary, but makes it easy to compare)
    for( int p = 0; p < numpoints; p++ ) {
        double angle = (rot + ((double)p / numpoints)) * 2. * 3.14159265358979323846;
        points[p][0] = 0.5 + rad*cos(angle);
        points[p][1] = 0.5 + rad*sin(angle);
    }

    // additive RGBA (may be negative, usually want to leave alpha = 0)
    const srgba_light light = {10, 10, 10, 0};

    // a touchy constant: too big and the render appears fuzzy
    const double beta = 1. / 2.;


    // note: may need to try many different starting points to get a good image
    // * note: so far has worked with a single starting point
    double point[2] = { rand01(), rand01() };

    // how many times to plot the point: too big = slow
    const unsigned long numplots = 100000000;

    // do a render
    cout << "rendering " << numplots << " points..." << flush;
    for( unsigned long i = 0; i < numplots; i++ ) {
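        // Chaos-game iteration: jump a fixed fraction beta of the way toward a randomly
        // chosen attractor point and accumulate light at the pixel we land on.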
        
        // pick a point to move toward
        int p = dist_int( rand_gen );

        // compute new coordinates
        point[0] = points[p][0] + beta * ( point[0] - points[p][0] );
        point[1] = points[p][1] + beta * ( point[1] - points[p][1] );

        // plot point
        if( point[0] >= 0. && point[0] < 1. && point[1] >= 0. && point[1] < 1. ) {
            unsigned x = floor(point[0] * w);
            unsigned y = floor(point[1] * h);
            img.add( x, y, light );
        }
    }
    cout << " done." << endl;

    char filename[1024];
    sprintf( filename, "%d-points_%ux%u_%lu-samples.png", numpoints, w, h, numplots );

    img.save( string(filename) );

    return 0;
}
Esempio n. 27
0
/*!
    \internal
*/
void QDeclarativePaintedItem::paint(QPainter *p, const QStyleOptionGraphicsItem *, QWidget *)
{
    Q_D(QDeclarativePaintedItem);
    const QRect content = boundingRect().toRect();
    if (content.width() <= 0 || content.height() <= 0)
        return;

    ++inpaint;

    const QTransform &x = p->deviceTransform();
    QTransform xinv = x.inverted();
    QRegion effectiveClip;
    QRegion sysClip = p->paintEngine()->systemClip();
    if (xinv.type() <= QTransform::TxScale && sysClip.numRects() < 5) {
        // simple transform, region gets no more complicated...
        effectiveClip = xinv.map(sysClip);
    } else {
        // do not make complicated regions...
        effectiveClip = xinv.mapRect(sysClip.boundingRect());
    }

    QRegion topaint = p->clipRegion();
    if (topaint.isEmpty()) {
        if (effectiveClip.isEmpty())
            topaint = QRect(0,0,p->device()->width(),p->device()->height());
        else
            topaint = effectiveClip;
    } else if (!effectiveClip.isEmpty()) {
        topaint &= effectiveClip;
    }

    topaint &= content;
    QRegion uncached(content);
    p->setRenderHints(QPainter::SmoothPixmapTransform, d->smooth);

    int cachesize=0;
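    // Walk the tile cache: repaint any cached tile whose dirty sub-rect is visible, draw the
    // tiles that fall inside the paint region, age the unused ones, and track what stays uncached.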
    for (int i=0; i<d->imagecache.count(); ++i) {
        QRect area = d->imagecache[i]->area;
        if (topaint.contains(area)) {
            QRectF target(area.x(), area.y(), area.width(), area.height());
            if (!d->cachefrozen) {
                if (!d->imagecache[i]->dirty.isNull() && topaint.contains(d->imagecache[i]->dirty)) {
#ifdef Q_WS_MAC
                    bool oldSmooth = qt_applefontsmoothing_enabled;
                    qt_applefontsmoothing_enabled = false;
#endif
                    QPainter qp(&d->imagecache[i]->image);
#ifdef Q_WS_MAC
                    qt_applefontsmoothing_enabled = oldSmooth;
#endif
                    qp.setRenderHints(QPainter::HighQualityAntialiasing | QPainter::TextAntialiasing | QPainter::SmoothPixmapTransform, d->smoothCache);
                    qp.translate(-area.x(), -area.y());
                    qp.scale(d->contentsScale,d->contentsScale);
                    QRect clip = d->imagecache[i]->dirty;
                    QRect sclip(qFloor(clip.x()/d->contentsScale),
                            qFloor(clip.y()/d->contentsScale),
                            qCeil(clip.width()/d->contentsScale+clip.x()/d->contentsScale-qFloor(clip.x()/d->contentsScale)),
                            qCeil(clip.height()/d->contentsScale+clip.y()/d->contentsScale-qFloor(clip.y()/d->contentsScale)));
                    qp.setClipRect(sclip);
                    if (d->fillColor.isValid()){
                        if(d->fillColor.alpha() < 255){
                            // ### Might not work outside of raster paintengine
                            QPainter::CompositionMode prev = qp.compositionMode();
                            qp.setCompositionMode(QPainter::CompositionMode_Source);
                            qp.fillRect(sclip,d->fillColor);
                            qp.setCompositionMode(prev);
                        }else{
                            qp.fillRect(sclip,d->fillColor);
                        }
                    }
                    drawContents(&qp, sclip);
                    d->imagecache[i]->dirty = QRect();
                }
            }
            p->drawPixmap(target.toRect(), d->imagecache[i]->image);
            topaint -= area;
            d->imagecache[i]->age=0;
        } else {
            d->imagecache[i]->age++;
        }
        cachesize += area.width()*area.height();
        uncached -= area;
    }

    if (!topaint.isEmpty()) {
        if (!d->cachefrozen) {
            // Find a sensible larger area, otherwise will paint lots of tiny images.
            QRect biggerrect = topaint.boundingRect().adjusted(-64,-64,128,128);
            cachesize += biggerrect.width() * biggerrect.height();
            while (d->imagecache.count() && cachesize > d->max_imagecache_size) {
                int oldest=-1;
                int age=-1;
                for (int i=0; i<d->imagecache.count(); ++i) {
                    int a = d->imagecache[i]->age;
                    if (a > age) {
                        oldest = i;
                        age = a;
                    }
                }
                cachesize -= d->imagecache[oldest]->area.width()*d->imagecache[oldest]->area.height();
                uncached += d->imagecache[oldest]->area;
                delete d->imagecache.takeAt(oldest);
            }
            const QRegion bigger = QRegion(biggerrect) & uncached;
            const QVector<QRect> rects = bigger.rects();
            for (int i = 0; i < rects.count(); ++i) {
                const QRect &r = rects.at(i);
                QPixmap img(r.size());
                if (d->fillColor.isValid())
                    img.fill(d->fillColor);
                {
#ifdef Q_WS_MAC
                    bool oldSmooth = qt_applefontsmoothing_enabled;
                    qt_applefontsmoothing_enabled = false;
#endif
                    QPainter qp(&img);
#ifdef Q_WS_MAC
                    qt_applefontsmoothing_enabled = oldSmooth;
#endif
                    qp.setRenderHints(QPainter::HighQualityAntialiasing | QPainter::TextAntialiasing | QPainter::SmoothPixmapTransform, d->smoothCache);

                    qp.translate(-r.x(),-r.y());
                    qp.scale(d->contentsScale,d->contentsScale);
                    QRect sclip(qFloor(r.x()/d->contentsScale),
                            qFloor(r.y()/d->contentsScale),
                            qCeil(r.width()/d->contentsScale+r.x()/d->contentsScale-qFloor(r.x()/d->contentsScale)),
                            qCeil(r.height()/d->contentsScale+r.y()/d->contentsScale-qFloor(r.y()/d->contentsScale)));
                    drawContents(&qp, sclip);
                }
                QDeclarativePaintedItemPrivate::ImageCacheItem *newitem = new QDeclarativePaintedItemPrivate::ImageCacheItem;
                newitem->area = r;
                newitem->image = img;
                d->imagecache.append(newitem);
                p->drawPixmap(r, newitem->image);
            }
        } else {
            const QVector<QRect> rects = uncached.rects();
            for (int i = 0; i < rects.count(); ++i)
                p->fillRect(rects.at(i), Qt::lightGray);
        }
    }

    if (inpaint_clearcache) {
        clearCache();
        inpaint_clearcache = 0;
    }

    --inpaint;
}
Esempio n. 28
0
int main (int argc, char *argv[]) {
    if(argc<3)
    {
      //  printf("Usage: ./executables/mandelbrot <rows> <cols> (filename)\n");
        return 0;
    }

    int rank, numprocs, err=0, i, j;
    std::vector<std::string> v;
    rows = atoi(argv[1]);
    cols = atoi(argv[2]);
    Matrix<double> img(rows, cols);
    std::string filename;
    if(argc > 3)
    {
        filename = argv[3];
        filename += ".ppm";
    }

    std::chrono::high_resolution_clock::time_point start, end;
    std::chrono::duration<double> time_span;
 
    start = std::chrono::high_resolution_clock::now();
    // Init MPI & get numprocs and rank
    MPI_Init(&argc,&argv);
    err = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    err = MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    if(err){
        fprintf(stderr,"Catastrophic MPI problem.\n");
        MPI_Abort(MPI_COMM_WORLD,1);
    }

    Construct_MPI_Datatypes<double>(rows, cols);
   
    if(rank == 0)
    {
        master<double>(numprocs, img);
    }
    slave<double>(numprocs);

    std::cout<<"rank " <<rank <<" waiting at barrier\n";
    MPI_Barrier(MPI_COMM_WORLD);

    if(rank == 0)
    {
        end = std::chrono::high_resolution_clock::now();
        time_span = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
        printf("\nProgram exec time %dx%d image: %f seconds \n\n",rows, cols, (double)time_span.count());

        if(argc > 3)
        {
            std::ofstream fout;
            fout.open(filename, std::ofstream::out | std::ofstream::binary);
            fout << "P3\n # Mandelbrot\n " << rows << " " <<cols << "\n 255\n";
             double val;
            int r, g, b;
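            // The loops below write the top half of the image and then the same rows mirrored,
            // exploiting the Mandelbrot set's symmetry about the real axis.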
            for(i=0; i<=std::floor(rows/2.0); i++)
            {
                for(j=0; j<cols; j++)
                {
                    val = std::abs(img[i][j]);
                    r = val * Palette[(int)val % 5][0]; 
                    g = val * Palette[(int)val % 5][1]; 
                    b = val * Palette[(int)val % 5][2]; 
                    fout << r << " " << g <<" " << b << " ";
                }
                fout <<"\n";
            }
            for(i=std::floor(rows/2.0); i>=0; i--)
            {
                for(j=0; j<cols; j++)
                {
                    val = std::abs(img[i][j]);
                    r = val * Palette[(int)val % 5][0]; 
                    g = val * Palette[(int)val % 5][1]; 
                    b = val * Palette[(int)val % 5][2]; 
                    fout << r << " " << g <<" " << b << " ";
                }
                fout <<"\n";
            }
            fout.close();
        }
    }

    // Free memory
    MPI_Type_free(&MPI_Vector);

    MPI_Finalize();
    return 0;
}
Esempio n. 29
0
int main(int argc, char* argv[]){

    std::string paramFileName = "param.txt";
    if(argc > 1){
        paramFileName = std::string(argv[1]);
    }

    cv::namedWindow("track", CV_WINDOW_NORMAL);
    cv::namedWindow("skin", CV_WINDOW_NORMAL);
    cv::VideoCapture vc(0);
    int h = 600;
    int w = 600;

    HandDetectorHist myHandDetector("track");
    //HandDetector myHandDetector;
    Tracker tracker(paramFileName);

    std::vector<std::pair<float, float>> samples;
    cv::Mat img, subImg, binimg;
    while(cv::waitKey(5) != 'q'){
        vc >> img ;
        subImg = img(cv::Range(0,h), cv::Range(0,w));
        cv::flip(subImg, subImg, 1);

        binimg = myHandDetector.findHand(subImg);

        samples.clear();
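        // Collect the coordinates of every skin-classified pixel (mask value above 128)
        // as (x, y) samples for the tracker.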

        for(int i=0 ; i<binimg.rows ; ++i){
            for(int j=0 ; j<binimg.cols ; ++j){
                if(binimg.at<unsigned char>(i,j) > 128)
                {
                    samples.push_back(std::make_pair(float(j),float(i)));
                }
            }
        }
        tracker.newFrame(samples.begin(), samples.end());
        tracker.draw(subImg, Display::MESH);

        cv::imshow("track", subImg);
        cv::imshow("skin", binimg);
    }
    std::vector<float> x= tracker.retvalue();
    for (int d=0; d<x.size(); d++) {
        printf("%f ",x[d]);
    }
    printf("\n");
    std::vector<float> y= tracker.retvalue1();
    for (int d=0; d<y.size(); d++) {
        printf("%f ",y[d]);
    }
    std::ofstream myfile;
    myfile.open("ab.csv",std::ios_base::app);
    for (int d=0; d<x.size(); d++)
    {
        myfile<<x[d]<<","<<y[d]<<",";
    }
    myfile<<"\n";
    myfile.close();
    return 0;
}
Esempio n. 30
0
IplImage* getTone(const char *filename)
{
	// Build the target histogram that, according to the paper, a typical pencil drawing should have
	double p1, p2, p3, p[256];
	double temp = sqrt(2 * CV_PI * 10);
	for (int i = 0; i < 256; i++) {
		p1 = 1 / 9.0 * exp(-(256 - i) / 9.0);
		p2 = (i >= 105 && i <= 225) / (225 - 105.0);
		p3 = exp(-(i - 80)*(i - 80) / (2.0 * 10 * 10)) / temp;
		p[i] = 0.52 * p1 + 0.37 * p2 + 0.11 * p3;
	}
	smooth(p, 256);
	smooth(p, 256);
	double sum = 0;
	for (int i = 0; i < 256; i++) sum += p[i];
	for (int i = 0; i < 256; i++) p[i] /= sum;
	double G[256];
	G[0] = p[0];
	for (int i = 1; i < 256; i++) G[i] = G[i - 1] + p[i];

	// Compute the histogram of the source image
	IplImage *pToneImage = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE);
	Image img(pToneImage);
	int h = pToneImage->height;
	int w = pToneImage->width;

	CvHistogram *pHis = CreateGrayImageHist(&pToneImage);
	double S[256];
	S[0] = cvQueryHistValue_1D(pHis, 0) / (h*w);
	for (int i = 1; i < 256; i++)
		S[i] = S[i - 1] + cvQueryHistValue_1D(pHis, i) / (h*w);

	// Perform histogram matching
	int index[256];
	for (int i = 0; i < 256; i++) {
		int k = 0;
		for (int j = 1; j < 256; j++)
			if (abs(G[k] - S[i]) > abs(G[j] - S[i])) k = j;
		index[i] = k;
	}

	for (int i = 0; i < h; i++)
		for (int j = 0; j < w; j++) img[i][j] = index[img[i][j]];

	//Pencil Texture Rendering
	IplImage *pRender = cvLoadImage("Tonal Texture.png", CV_LOAD_IMAGE_GRAYSCALE);
	Image pR(pRender);
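	// Per pixel, solve for the exponent b that makes the tiled pencil texture H match the
	// target tone J (H^b ≈ J), with a quadratic penalty tying b to its left and upper
	// neighbours; b = -B / (2A) is the closed-form minimiser of that local quadratic.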
	double **b = new double*[h];
	for (int i = 0; i < h; i++) 
	{
		b[i] = new double[w];
		for (int j = 0; j < w; j++)
		{
			double H = pR[i % pR.getH()][j % pR.getW()] / 256.0;
			double J = img[i][j] / 256.0;
			double bx = (i) ? b[i - 1][j] : 0;
			double by = (j) ? b[i][j - 1] : 0;
			double A = 0.2 * 2 + log(H)*log(H);
			double B = -2*(0.2*(bx + by) + log(H)*log(J));
			b[i][j] = -B / (2*A);
			img[i][j] = pow(H, b[i][j]) * 256;
		}
	}
	return pToneImage;
}