Example #1
0
void UBMagnifier::setSize(qreal percentFromScene) 
{
    if (gView == NULL || mView == NULL) return;

    // Remember the requested size (a percentage of the view width) and derive
    // the magnifier's pixel size from it.
    params.sizePercentFromScene = percentFromScene;
    const QSize viewSize = mView->size();
    const qreal newSize = params.sizePercentFromScene * viewSize.width() / 100;

    // A square current geometry means the magnifier should stay centered on
    // the last pointer position (updPointMove); otherwise anchor at origin.
    const QRect oldGeometry = geometry();
    if (oldGeometry.width() == oldGeometry.height())
    {
        const QPoint anchor = mView->mapFromGlobal(updPointMove);
        setGeometry(anchor.x() - newSize / 2, anchor.y() - newSize / 2, newSize, newSize);
    }
    else
    {
        setGeometry(0, 0, newSize, newSize);
    }

    // Build a 1-bit mask: white (transparent) background with a black
    // (opaque) circle inset by the close-button pixmap width.
    QImage maskImage(width(), height(), QImage::Format_Mono);
    maskImage.fill(0xff);
    QPainter maskPainter(&maskImage);
    maskPainter.setBrush(QBrush(QColor(0, 0, 0)));
    const qreal radius = newSize / 2 - sClosePixmap->width();
    maskPainter.drawEllipse(QPointF(newSize / 2, newSize / 2), radius, radius);
    bmpMask = QBitmap::fromImage(maskImage);

    // Reset the backing pixmap and clip it with the freshly built mask.
    pMap = QPixmap(width(), height());
    pMap.fill(Qt::transparent);
    pMap.setMask(bmpMask);
}
void UBGraphicsDelegateFrame::setCursorFromAngle(QString angle)
{
    // While the Rotate tool is active, replace the control-view cursor with a
    // small rounded badge showing the current rotation angle in degrees.
    if (mCurrentTool != Rotate)
        return;

    QWidget *controlViewport = UBApplication::boardController->controlView()->viewport();

    const QSize cursorSize(45, 30);

    // 1-bit mask: white (transparent) background, black rounded rectangle
    // marking the opaque area of the cursor.
    QImage maskImage(cursorSize, QImage::Format_Mono);
    maskImage.fill(0xff);
    QPainter maskPainter(&maskImage);
    maskPainter.setBrush(QBrush(QColor(0, 0, 0)));
    maskPainter.drawRoundedRect(0, 0, cursorSize.width() - 1, cursorSize.height() - 1, 6, 6);
    QBitmap cursorMask = QBitmap::fromImage(maskImage);

    // Paint the badge: white rounded rectangle with a black outline and the
    // angle text (with a trailing degree sign, U+00B0) centered inside.
    QPixmap cursorPixmap(cursorSize);
    cursorPixmap.fill(QColor(Qt::white));

    QPainter painter(&cursorPixmap);
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform);
    painter.setBrush(QBrush(Qt::white));
    painter.setPen(QPen(QColor(Qt::black)));
    painter.drawRoundedRect(1, 1, cursorSize.width() - 2, cursorSize.height() - 2, 6, 6);
    painter.setFont(QFont("Arial", 10));
    painter.drawText(1, 1, cursorSize.width(), cursorSize.height(), Qt::AlignCenter, angle.append(QChar(176)));
    painter.end();

    cursorPixmap.setMask(cursorMask);
    controlViewport->setCursor(cursorPixmap);
}
int main (int argc, char **argv)
{
    // Detect faces in a hard-coded input image and build a single-channel
    // binary mask: white inside each detected face's bounding square, black
    // elsewhere. Both the input and the mask are shown in windows.
    cv::Mat input, mask_img, not_masked;

    // Load the Haar cascade classifier for frontal faces.
    std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if(!cascade.load(cascadeName)){
        printf("ERROR: cascadefile見つからん!\n");
        return -1;
    }
    
    // Load the source image (faces to detect).
    input = cv::imread("/Users/naoto/git/opencv_gl/opencv/minematsu1.png", 1);
    if(input.empty()){
        printf("ERROR: image not found!\n");
        return 0;
    }
    
    // Detection runs on a downscaled, histogram-equalized grayscale copy.
    double scale = 4.0;
    cv::Mat gray, smallImg(cv::saturate_cast<int>(input.rows/scale), cv::saturate_cast<int>(input.cols/scale), CV_8UC1);
    cv::cvtColor(input, gray, CV_BGR2GRAY);
    cv::resize(gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR);
    cv::equalizeHist(smallImg, smallImg); // normalize the histogram before detection
    
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(smallImg, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, cv::Size(20,20));
    
    printf("deteced faces:%d\n", (int)faces.size());

    // BUG FIX: the mask used to be (re)created inside the loop, so with zero
    // detections it stayed empty and cv::imshow below failed. Create it once.
    mask_img.create(input.size(), CV_8UC1);
    mask_img = cv::Scalar(0, 0, 0);

    for (size_t i = 0; i < faces.size(); i++) {
        cv::Point center;
        // saturate_cast: http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        // Note: multiplying by `scale` maps back to full-resolution coordinates.
        center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);
        center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
        int radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);
        // ROI is (top-left x, top-left y, width, height); clip it to the image
        // so a face near the border cannot make the sub-matrix throw.
        // (The unused p1/p2 corner points were removed.)
        cv::Rect roi_rect(center.x - radius, center.y - radius, radius*2, radius*2);
        roi_rect &= cv::Rect(0, 0, input.cols, input.rows);
        not_masked = mask_img(roi_rect);
        not_masked = cv::Scalar(255, 255, 255);
    }
    
    cv::namedWindow("result", 1);
    cv::namedWindow("masked", 1);
    cv::imshow("result", input);
    cv::imshow("masked", mask_img);
    cv::waitKey(0);
    return 0;
}
Example #4
0
void UBMagnifier::createMask()
{
    if(gView == NULL || mView == NULL) return;

    // calculate object size
    QSize sceneSize = mView->size();
    qreal isize = params.sizePercentFromScene * sceneSize.width() / 100;

    QImage mask_img(width(), height(), QImage::Format_Mono);
    mask_img.fill(0xff);
    QPainter mask_ptr(&mask_img);
    mask_ptr.setBrush( QBrush( QColor(0, 0, 0) ) );

    if (circular == mDrawingMode)
        mask_ptr.drawEllipse(QPointF(isize/2, isize/2), isize / 2 - sClosePixmap->width(), isize / 2 - sClosePixmap->width());
    else if (rectangular == mDrawingMode)
        mask_ptr.drawRoundedRect(QRect(sClosePixmap->width(), sClosePixmap->width(), size().width() - 2*sClosePixmap->width(), size().height() - 2*sClosePixmap->width()), sClosePixmap->width()/2, sClosePixmap->width()/2);

    bmpMask = QBitmap::fromImage(mask_img);

    pMap = QPixmap(width(), height());
    pMap.fill(Qt::transparent);
    pMap.setMask(bmpMask);
}
Example #5
0
// usage: poisson.exe [source image (color)] [destination image (color)] [mask image (gray)] [output image (color)] [offset y] [offset x]
int main(int argc, char** argv){
    int i;
    int offset[2];
    IplImage *im_src = NULL, *im_dst = NULL, *im_mask = NULL;
    
    if(argc != 7){
        fprintf(stderr,"usage: poisson.exe [source image (color)] [destination image (color)] [mask image (gray)] [outout image (color)] [offset y] [offset x]\n");
        exit(0);
    }
//////loading source image(cv::Mat input) & creating mask(cv::Mat mask_img)
    cv::Mat input,mask_img,not_masked;
    
    //loading haar classifier
    std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if(!cascade.load(cascadeName)){
        printf("ERROR: cascadefile見つからん!\n");
        return -1;
    }
    
    //loading resource file(for face)
    input=cv::imread(argv[1],1);
    if(input.empty()){
        printf("ERROR: image not found!\n");
        return 0;
    }
    
    double scale = 4.0;
    cv::Mat gray, smallImg(cv::saturate_cast<int>(input.rows/scale),cv::saturate_cast<int>(input.cols/scale),CV_8UC1);
    cv::cvtColor(input, gray, CV_BGR2GRAY);
    cv::resize(gray, smallImg, smallImg.size(),0,0,cv::INTER_LINEAR);
    cv::equalizeHist(smallImg, smallImg);//ヒストグラムビンの合計値が 255 になるようヒストグラムを正規化
    
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(smallImg, faces,1.1,2,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
    
    int i;
    printf("deteced faces:%d\n",(int)faces.size());
    for (i=0; i<faces.size(); i++) {
        cv::Point center;
        int radius;
        //saturate_castについては http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);//scaleはここで戻していることに注意!
        center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
        radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);
        cv::Point p1,p2;
        p1.x=center.x-radius;p1.y=center.y-radius;
        p2.x=center.x+radius;p2.y=center.y+radius;
        cv::Rect roi_rect(center.x-radius,center.y-radius,radius*2,radius*2);//左上のx座標,y座標,width,depthというふうに格納していく
        mask_img.create(input.size(), CV_8UC1);
        mask_img=cv::Scalar(0,0,0);
        not_masked=mask_img(roi_rect);
        not_masked=cv::Scalar(255,255,255);
    }
    // for debug(checking masks)
//    cv::namedWindow("result",1);
//    cv::namedWindow("masked",1);
//    cv::imshow("result", input);
//    cv::imshow("masked", mask_img);
//    cv::waitKey(0);
    
    //loading destination image() & find position and size
    //resizing source image & calc offset
    if( (im_src = cvLoadImage( argv[1], CV_LOAD_IMAGE_COLOR)) == 0 ){
        fprintf(stderr,"No such file %s", argv[1]);
        exit(0);
    }
    if( (im_dst = cvLoadImage( argv[2], CV_LOAD_IMAGE_COLOR)) == 0 ){
        fprintf(stderr,"No such file %s", argv[2]);
        exit(0);
    }
    if( (im_mask = cvLoadImage( argv[3], CV_LOAD_IMAGE_GRAYSCALE)) == 0 ){
        fprintf(stderr,"No such file %s", argv[3]);
        exit(0);
    }
    offset[0]=atoi(argv[5]);
    offset[1]=atoi(argv[6]);
    
    for(i=0;i<3;i++){// i:channnels
        quasi_poisson_solver(im_src, im_dst, im_mask, i, offset);
        //poisson_solver(im_src, im_dst, im_mask, i, offset);
    }
    cvSaveImage(argv[4],im_dst);
    cvReleaseImage(&im_src);
    cvReleaseImage(&im_dst);
    cvReleaseImage(&im_mask);
    return 0;
}
Example #6
0
// usage: poisson.exe [source image (color)] [destination image (color)] [mask image (gray)] [output image (color)] [offset y] [offset x]
int main(int argc, char** argv){
    int offset[2];
    IplImage *im_src =NULL, *im_dst = NULL, *im_mask = NULL;
    cv::Mat input,dst_img,mask_img,not_masked,mask_img_gray;
    
    if(argc != 4){
        fprintf(stderr,"usage: poisson.exe [source image (color)] [destination image (color)] [outout image (color)] [offset y] [offset x]\n");
        exit(0);
    }
    //////loading source image(cv::Mat input) & creating mask(cv::Mat mask_img)
    //loading haar classifier
    std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if(!cascade.load(cascadeName)){
        printf("ERROR: cascadefile見つからん!\n");
        return -1;
    }
    //////loading destination image() & find position and size
    //loading destination file(for face)
    dst_img=cv::imread(argv[2],1);
    if(dst_img.empty()){
        printf("ERROR:distination image not found!\n");
        return 0;
    }
    //preprocessing
    double scale = 4.0;
    cv::Mat gray1, smallImg1(cv::saturate_cast<int>(dst_img.rows/scale),cv::saturate_cast<int>(dst_img.cols/scale),CV_8UC1);
    cv::cvtColor(dst_img, gray1, CV_BGR2GRAY);
    cv::resize(gray1, smallImg1, smallImg1.size(),0,0,cv::INTER_LINEAR);
    cv::equalizeHist(smallImg1, smallImg1);//ヒストグラムビンの合計値が 255 になるようヒストグラムを正規化
    std::vector<cv::Rect> faces,faces1,faces2;
    cv::Point center2,center1;
    int radius,radius1,radius2;
    cv::Point p1,p2;
    //find face size
    cascade.detectMultiScale(smallImg1, faces1,1.1,2,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
    printf("deteced faces:%d\n",(int)faces1.size());
    for (int i=0; i<faces1.size(); i++) {
        //saturate_castについては http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        center1.x = cv::saturate_cast<int>((faces1[i].x + faces1[i].width*0.5)*scale);//scaleはここで戻していることに注意!
        center1.y = cv::saturate_cast<int>((faces1[i].y + faces1[i].height*0.5)*scale);
        radius1 = cv::saturate_cast<int>((faces1[i].width + faces1[i].height)*0.25*scale);
    }
    
    //loading resource file(for face)
    input=cv::imread(argv[1],1);
    if(input.empty()){
        printf("ERROR:resource image not found!\n");
        return 0;
    }
    //preprocessing
    cv::Mat gray, smallImg(cv::saturate_cast<int>(input.rows/scale),cv::saturate_cast<int>(input.cols/scale),CV_8UC1);
    cv::cvtColor(input, gray, CV_BGR2GRAY);
    cv::resize(gray, smallImg, smallImg.size(),0,0,cv::INTER_LINEAR);
    cv::equalizeHist(smallImg, smallImg);//ヒストグラムビンの合計値が 255 になるようヒストグラムを正規化
    //find face size and generating mask
    cascade.detectMultiScale(smallImg, faces2,1.1,2,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
    printf("deteced faces:%d\n",(int)faces2.size());
    for (int i=0; i<1; i++) {
        //saturate_castについては http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        center2.x = cv::saturate_cast<int>((faces2[i].x + faces2[i].width*0.5)*scale);//scaleはここで戻していることに注意!
        center2.y = cv::saturate_cast<int>((faces2[i].y + faces2[i].height*0.5)*scale);
        radius2 = cv::saturate_cast<int>((faces2[i].width + faces2[i].height)*0.25*scale);
        p1.x=center2.x-radius;p1.y=center2.y-radius;
        p2.x=center2.x+radius;p2.y=center2.y+radius;
        cv::Rect roi_rect(center2.x-radius2,center2.y-radius2,radius2*2,radius2*2);//左上のx座標,y座標,width,depthというふうに格納していく
        mask_img.create(input.size(), CV_8UC3);
        mask_img=cv::Scalar(0,0,0);//真っ黒に
        not_masked=mask_img(roi_rect);
        not_masked=cv::Scalar(255,255,255);//真っ白に
        cvtColor(mask_img,mask_img_gray,CV_RGB2GRAY);
        input(roi_rect).copyTo(not_masked);
    }
    double ratio=(double)radius2/(double)radius1;//
    int difx=center1.x*ratio-center2.x;
    int dify=center1.y*ratio-center2.y;
    printf("%f%d%d\n",ratio,difx,dify);
    cv::Mat expanded_output(cv::saturate_cast<int>(dst_img.rows*ratio),cv::saturate_cast<int>(dst_img.cols*ratio),CV_8UC1);
    cv::resize(dst_img,expanded_output,expanded_output.size(),0,0,cv::INTER_LINEAR);
    //resizing source image & calc offset
    
    //    //for debug(checking masks)
    //        cv::namedWindow("input",1);
    //        cv::namedWindow("result",1);
    //        cv::namedWindow("masked",1);
    //        cv::imshow("input", input);
    //        cv::imshow("result", expanded_output);
    //        cv::imshow("masked", mask_img);
    //ref:http://bicycle.life.coocan.jp/takamints/index.php/doc/opencv/doc/Mat_conversion//
    //http://d.hatena.ne.jp/kamekamekame877/20110621
    IplImage buf1=input;//特殊なコピーコンストラクタが呼ばれてるかららしい
    IplImage buf2=mask_img_gray;//同じく
    IplImage buf3=expanded_output;
    im_src=&buf1;
    im_mask=&buf2;
    im_dst=&buf3;
    
    offset[0]=dify;
    offset[1]=difx;
    
    for(int i=0;i<3;i++){// i:channnels
        quasi_poisson_solver(im_src, im_dst, im_mask, i, offset);
        //poisson_solver(im_src, im_dst, im_mask, i, offset);
    }
    cvSaveImage(argv[3],im_dst);
    //    cvReleaseImage(&im_src);
    //    cvReleaseImage(&im_dst);
    //    cvReleaseImage(&im_mask);
    return 0;
}
Example #7
0
	void RegionGrow<ImageSigT>::getBlock( int y, int x, Matrix<unsigned char>& image )
	{
		// Region growing with a 4- or 8-neighbourhood (m_neib_num selects how
		// many of the candidate offsets are used). Scanning from (y, x), find
		// the first seed pixel, flood-fill its connected component, and record
		// the component's bounding rectangle and binary mask. Only ONE region
		// is grown per call (see break_flag below). Pixels absorbed into the
		// region are zeroed in `image` and labelled in m_label_image.
		int i, j, pt_x, pt_y;
		int min_x, max_x, min_y, max_y;
		int m=0, n=0, pos=0;	
		unsigned char pix_value;
		std::vector<int> mask_x;
		std::vector<int> mask_y;
		bool break_flag = 0;

		int width = image.cols();
		int height = image.rows();

		for( i=y; i<height; ++i )
		{
			for( j=x; j<width; ++j )
			{
				pix_value = image(i,j);
				// NOTE(review): pix_value was just read from image(i,j), so this
				// comparison is always true and the very first scanned pixel is
				// taken as the seed. The intent was presumably a test against a
				// target value (e.g. pix_value != 0) -- confirm with the caller.
				if( pix_value == image(i,j) )
				{
					// Seed found: clear it in the source image, push it as the
					// first element of the BFS queue (mask_x/mask_y double as
					// queue and region-pixel record), and label it.
					image(i,j) = 0;
					mask_y.push_back(i);
					mask_x.push_back(j);
					min_y = i;
					max_y = i;
					min_x = j;
					max_x = j;
					m_pixel_num_vector.push_back(1);
					m_label_image(i,j) = (float)m_region_counter;
					// m = queue head, n = queue tail, pos = index where this
					// region's pixels start.
					m = n;
					pos = n;
					n++;
					while( m < n )
					{
						pt_x = mask_x[m];
						pt_y = mask_y[m];
						// Candidate neighbours: the first 4 entries form the
						// 4-neighbourhood, all 8 the 8-neighbourhood; m_neib_num
						// (4 or 8) chooses which set is visited.
						int index_x[8] = {pt_x,pt_x,(pt_x-1),(pt_x+1),(pt_x-1),(pt_x+1),(pt_x-1),(pt_x+1)};
						int index_y[8] = {(pt_y-1),(pt_y+1),pt_y,pt_y,(pt_y-1),(pt_y-1),(pt_y+1),(pt_y+1)};
						for( int k=0; k<m_neib_num; ++k )
						{
							// Skip neighbours that fall outside the image.
							if( index_x[k]<0 || index_y[k]<0 || index_x[k]>width-1 || index_y[k]>height-1 )
								continue;
							if( image(index_y[k],index_x[k]) == pix_value )
							{
								// Absorb the neighbour: clear it, enqueue it,
								// grow the bounding box, and label it.
								image(index_y[k],index_x[k]) = 0;
								mask_x.push_back( index_x[k] );
								mask_y.push_back( index_y[k] );
								if( index_x[k]>max_x )
									max_x = index_x[k];
								if( index_x[k]<min_x )
									min_x = index_x[k];
								if( index_y[k]>max_y )
									max_y = index_y[k];
								if( index_y[k]<min_y )
									min_y = index_y[k];
								m_label_image( index_y[k], index_x[k] ) = (float)m_region_counter;
								m_pixel_num_vector[ m_region_counter - 1 ] ++;
								n++;
								// The whole image has been consumed: force the
								// enclosing while loop to terminate.
								if( height*width == n )
								{
									m = n;
									break;
								}
							}
						}
						m++;
					}
					// Keep the region only if it is non-empty and smaller than
					// the whole image...
					if( (n-pos)>0 && (n-pos)<width*height )
					{
						// ...and larger than the configured minimum area.
						if( mask_x.size() > (unsigned int)m_min_area )
						{
							// Add a 1-pixel margin around the mask for possible
							// morphological operations later.
							int margin = 1;
							RECT region_rect;
							region_rect.left =		EAGLEEYE_MAX( 0, min_x-margin );
							region_rect.top  =		EAGLEEYE_MAX( 0, min_y-margin );
							region_rect.right =		EAGLEEYE_MIN( width-1, max_x+margin );
							region_rect.bottom =	EAGLEEYE_MIN( height-1, max_y+margin );
							m_rects.push_back( region_rect );
							// Binary mask of the region, cropped to its padded
							// bounding rectangle: 255 inside, 0 outside.
							Matrix<unsigned char> mask_img( region_rect.bottom-region_rect.top+1,
															region_rect.right-region_rect.left+1, (unsigned char)0 );
							for( unsigned int k=0; k<mask_x.size(); ++k )
							{
								mask_img( mask_y[k]-region_rect.top, mask_x[k]-region_rect.left ) = (unsigned char)255;
							}
							m_masks.push_back( mask_img );
							m_region_counter_area_limited++;
						}
					}
					break_flag = 1;
					break;	// Do not keep looking for disconnected regions with the same value.
				}
			}
			if( break_flag )
				break;
		}
		mask_x.clear();
		mask_y.clear();
	}