void BrowserUtils::generateIconFromFile(const QString inFile, const QString outFile, const QSize imageSize)
{
	QImage inImage(inFile);
	if (inImage.isNull()) {
		qWarning() << "generateIconFromFile - failed to open source file";
		Q_EMIT iconGenerated(false, outFile);
		return;
	}
	const int nMargin = 4;// Must agree with pixel data in image files
	const int nIconSize = 64;// Width & height of output image
	const int nIconWidth = nIconSize-2*nMargin;// Width of icon image within file
	const int nIconHeight = nIconSize-2*nMargin;
	QImage outImage(nIconSize, nIconSize, QImage::Format_ARGB32_Premultiplied);
	outImage.fill(0);
	QPainter painter(&outImage);
	painter.setRenderHint(QPainter::SmoothPixmapTransform);
	QRectF source(0.0, 0.0, imageSize.width(), imageSize.height());
	QRectF target(nMargin, nMargin, nIconWidth, nIconHeight);
	QRectF size(0.0, 0.0, nIconSize, nIconSize);
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	painter.drawImage(target, inImage, source);
	painter.setCompositionMode(QPainter::CompositionMode_DestinationIn);
	QImage maskImage(kIconMaskFile);
	painter.drawImage(target, maskImage, target);
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	QImage overlayImage(kIconOverlayFile);
	painter.drawImage(size, overlayImage, size);

	QFileInfo imageInfo(outFile);
	QDir imageDir(imageInfo.path());
	if (!imageDir.exists()) {
		imageDir.mkpath(".");
	}

	bool saved = outImage.save(outFile);
	Q_EMIT iconGenerated(saved, outFile);
}
// Ejemplo n.º 2
// 0
/**
 * Processes a single camera frame in place.
 *
 * Depending on the enabled options this detects markers and/or faces,
 * draws feedback onto the frame, optionally crops to the first detected
 * face, drives a servo to follow a face, and blurs out detected mouths.
 *
 * @param frame BGR frame from the camera; modified in place.
 */
void Scene::process( Mat &frame )
{
    if( detectMarkersOption )
    {
        std::vector< Marker > detectedMarkersVector;
        cameraParameters->resize( frame.size() );
        // 0.08f is the marker size passed to the detector (presumably meters
        // for pose estimation — confirm against the MarkerDetector API).
        markerDetector->detect( frame, detectedMarkersVector, *cameraParameters, 0.08f );
        detectedMarkers = QVector< Marker >::fromStdVector( detectedMarkersVector );

        if( descriptionOption )
            for( int i = 0; i < detectedMarkers.size(); i++ )
                detectedMarkers.at( i ).draw( frame, Scalar( 255, 0, 255 ), 1 );
    }
    else detectedMarkers.clear();

    if( detectFacesOption )
    {
        std::vector< Rect > detectedFaces;
        frontalFaceClassifier.detectMultiScale( frame, detectedFaces,
                                                1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size( 150, 150 ) );

        if( detectedFaces.size() > 0 )  {
            emit message( "<div style=\"color:green;\">Cantidad de caras detectadas: " +
                          QString::number( detectedFaces.size() ) + "</div>" );
            if (seguirRostroOption)
                actualizaServoSiCorresponde(detectedFaces.at( 0 ).x + detectedFaces.at( 0 ).width/2);
        }
        else
            emit message( "Cantidad de caras detectadas: " + QString::number( detectedFaces.size() ) );

        if( firstFaceOption )
        {
            if( detectedFaces.size() > 0 )
            {
                emit message( "Recortando una cara" );
                // Round the crop width down to a multiple of 4 (avoids
                // row-alignment problems downstream).
                detectedFaces.at( 0 ).width = detectedFaces.at( 0 ).width - detectedFaces.at( 0 ).width % 4;
                Mat faceMat = Mat( frame, detectedFaces.at( 0 ) );
                frame = faceMat.clone();
            }
        }

        else if( !eraseMouthOption )
            for( unsigned int i = 0; i < detectedFaces.size(); i++ )
                circle( frame, Point( detectedFaces.at( i ).x + ( detectedFaces.at( i ).width / 2 ),
                                      detectedFaces.at( i ).y + ( detectedFaces.at( i ).height / 2 ) ),
                        ( detectedFaces.at( i ).width / 2 ), Scalar( 255, 0, 0 ), linesWidthOption );

        if( eraseMouthOption )
        for( unsigned int i = 0; i < detectedFaces.size(); i ++ )
        {
            // Mouth search zone: lower-middle portion of the face rectangle.
            int xMouthZone = detectedFaces.at( i ).x + detectedFaces.at( i ).width * 0.15;
            int yMouthZone = detectedFaces.at( i ).y + ( detectedFaces.at( i ).height / 2 ) + ( detectedFaces.at( i ).height * 0.15 );
            int wMouthZone = detectedFaces.at( i ).width * 0.70;
            int hMouthZone = detectedFaces.at( i ).height / 2;
            Rect mouthZoneRect( xMouthZone, yMouthZone, wMouthZone, hMouthZone );

            // BUGFIX: was 'return' — a zone falling outside the frame must
            // only skip this face, not abort processing of the remaining ones.
            if( xMouthZone + wMouthZone > frame.cols ) continue;
            if( yMouthZone + hMouthZone > frame.rows ) continue;

            Mat mouthZoneMat( frame, mouthZoneRect );
            std::vector<Rect> mouthsDetected;
            mouthClassifier.detectMultiScale( mouthZoneMat, mouthsDetected,
                                               1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size( 50, 50 ) );

            if( mouthsDetected.size() > 0 )
            {
                emit message( "Borrando la boca" );

                // Grow the detected mouth rectangle a little in every direction
                // so the blur covers the whole mouth area.
                Rect correctedMouthRect( mouthsDetected.at( 0 ).x - mouthsDetected.at( 0 ).width * 0.15,
                                         mouthsDetected.at( 0 ).y - mouthsDetected.at( 0 ).height * 0.28,
                                         mouthsDetected.at( 0 ).width * 1.3, mouthsDetected.at( 0 ).height * 1.4 );

                Rect correctedMouthRectToGlobal( mouthZoneRect.x + correctedMouthRect.x,
                                                 mouthZoneRect.y + correctedMouthRect.y,
                                                 correctedMouthRect.width, correctedMouthRect.height );

                // ROBUSTNESS: the expanded rectangle can leave the frame (it can
                // even get a negative origin), which would make the sub-Mat
                // constructor throw; skip this face if it is not fully inside.
                if( ( correctedMouthRectToGlobal & Rect( 0, 0, frame.cols, frame.rows ) )
                        != correctedMouthRectToGlobal )
                    continue;

                Mat mouthMat( frame, correctedMouthRectToGlobal );
                Mat mouthMatCopy = mouthMat.clone();
                blur( mouthMatCopy, mouthMatCopy, Size( blurMouthOption, blurMouthOption ) );
                toFourChannels( mouthMatCopy, mouthMatCopy );
                blurBorders( mouthMatCopy );
                overlayImage( frame, mouthMatCopy, frame,
                              Point( correctedMouthRectToGlobal.x, correctedMouthRectToGlobal.y ) );
            }
        }
    }
}
// Ejemplo n.º 3
// 0
/**
 * Corrects one camera frame: isolates the head with a skin-color mask,
 * compensates the head leaning angle (smoothed over SMOOTHER_SIZE frames),
 * optionally overlays the captured "best pose" image, and crops/resizes
 * the result back to CAMERA_RESOLUTION.
 *
 * @param inputFrame    incoming BGR frame; resized in place to CAMERA_RESOLUTION
 * @param outputFrame   receives the corrected frame, or a side-by-side debug
 *                      mosaic (corrected | input | mask, with bestImg inset)
 *                      when developerMode is true
 * @param developerMode enables the debug mosaic output
 */
void VideoCorrect::correctImage(Mat& inputFrame, Mat& outputFrame, bool developerMode){
	
	resize(inputFrame, inputFrame, CAMERA_RESOLUTION);
	inputFrame.copyTo(img);

	//Convert to YCbCr color space
	cvtColor(img, ycbcr, CV_BGR2YCrCb);

	//Skin color thresholding (Cr/Cb are tolerances around the skin chroma center)
	inRange(ycbcr, Scalar(0, 150 - Cr, 100 - Cb), Scalar(255, 150 + Cr, 100 + Cb), bw);

	if(IS_INITIAL_FRAME){
		face = detectFaces(img);
		if(face.x != 0){
			lastFace = face;
		}
		else{
			//No face yet: pass the frame through untouched.
			outputFrame = img;
			return;
		}
		prevSize = Size(face.width/2, face.height/2);
		//Elliptical mask around the detected face to isolate the head region.
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x + face.width/2, face.y + face.height/2), prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);
		if(face.x > 0 && face.y > 0 && face.width > 0 && face.height > 0 
			&& (face.x + face.width) < img.cols && (face.y + face.height) < img.rows){
			img(face).copyTo(bestImg);
		}
		putText(img, "Give your best pose!", Point(face.x, face.y), CV_FONT_HERSHEY_SIMPLEX, 0.4, Scalar(255,255,255,0), 1, CV_AA);
	}

	firstFrameCounter--;

	if(face.x == 0) //missing face prevention
		face = lastFace;

	//Mask the background out
	bw &= head;

	//Compute more accurate image moments after background removal
	m = moments(bw, true);
	angle = (atan((2*m.nu11)/(m.nu20-m.nu02))/2)*180/PI;
	//BUGFIX: guard against an empty mask (m00 == 0) — 0/0 yields NaN and the
	//NaN->int conversion is undefined; keep the previous centroid instead.
	if(m.m00 > 0)
		center = Point(m.m10/m.m00,m.m01/m.m00);

	//Smooth rotation (running average over the last SMOOTHER_SIZE angles)
	bufferCounter++;
	rotationBuffer[ bufferCounter % SMOOTHER_SIZE ] = angle;
	smoothAngle += (angle - rotationBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;

	//Expand borders so rotation/cropping never runs out of pixels
	copyMakeBorder( img, img, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND, 
					BORDER_REPLICATE, Scalar(255,255,255,0));

	if(!IS_INITIAL_FRAME){
		//Rotate the image to correct the leaning angle
		rotateImage(img, smoothAngle);
	
		//After rotation detect faces
		face = detectFaces(img);
		if(face.x != 0)
			lastFace = face;

		//Create background mask around the face
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x - BORDER_EXPAND + face.width/2, face.y -BORDER_EXPAND + face.height/2),
					  prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);

		//Draw a rectangle around the face
		//rectangle(img, face, Scalar(255,255,255,0), 1, 8, 0);

		//Overlay the ideal pose
		if(replaceFace && center.x > 0 && center.y > 0){
			//BUGFIX: the y offset used face.width/2 (copy-paste slip). Haar
			//frontal-face rectangles are square, so this is equivalent in
			//practice, but height is the correct term.
			center = Point(face.x + face.width/2, face.y + face.height/2);
			overlayImage(img, bestImg, center, smoothSize);
		}

	} else{
		face.x += BORDER_EXPAND; //position alignment after border expansion (not necessary if we detect the face after expansion)
		face.y += BORDER_EXPAND;
	}
	
	//Smooth ideal image size (running average)
	sizeBuffer[ bufferCounter % SMOOTHER_SIZE ] = face.width;
	smoothSize += (face.width - sizeBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;

	//Get ROI centered on the face (BUGFIX: y offset used face.width/2; see above)
	center = Point(face.x + face.width/2, face.y + face.height/2);
	roi = getROI(img, center);
	if(roi.x > 0 && roi.y > 0 && roi.width > 0 && roi.height > 0 
		&& (roi.x + roi.width) < img.cols && (roi.y + roi.height) < img.rows){
		img = img(roi);
	}

	//Resize the final image
	resize(img, img, CAMERA_RESOLUTION);

	if(developerMode){

		//Debug mosaic: corrected image | input frame | skin mask, with the
		//best-pose snapshot inset at the far right edge.
		Mat developerScreen(img.rows, 
							img.cols + 
							inputFrame.cols +
							bw.cols, CV_8UC3);

		Mat left(developerScreen, Rect(0, 0, img.size().width, img.size().height));
		img.copyTo(left);

		Mat center(developerScreen, Rect(img.cols, 0, inputFrame.cols, inputFrame.rows));
		inputFrame.copyTo(center);

		cvtColor(bw, bw, CV_GRAY2BGR);
		Mat right(developerScreen, Rect(img.size().width + inputFrame.size().width, 0, bw.size().width, bw.size().height));
		bw.copyTo(right);

		Mat rightmost(developerScreen, Rect(img.size().width + inputFrame.size().width + bw.size().width - bestImg.size().width, 0,
											bestImg.size().width, bestImg.size().height));
		bestImg.copyTo(rightmost);

		outputFrame = developerScreen;
	}
	else{
		outputFrame = img;
	}
}