Example #1
0
// Colored non-local-means denoising: works in CIE Lab space so the luminance
// plane (strength h) and the chroma planes (strength hForColorComponents)
// can be filtered independently, then converts back to BGR.
void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
                                      float h, float hForColorComponents,
                                      int templateWindowSize, int searchWindowSize)
{
    Mat src = _src.getMat();

    // Validate the input BEFORE touching the destination: the previous code
    // allocated _dst first, which modified the caller's output even when the
    // input type was rejected.  CV_Error throws, so no explicit return is
    // needed after it.
    if (src.type() != CV_8UC3)
        CV_Error(CV_StsBadArg, "Type of input image should be CV_8UC3!");

    _dst.create(src.size(), src.type());
    Mat dst = _dst.getMat();

    Mat src_lab;
    cvtColor(src, src_lab, CV_LBGR2Lab);

    // Split into L (1 channel) and ab (2 interleaved channels).
    Mat l(src.size(), CV_8U);
    Mat ab(src.size(), CV_8UC2);
    Mat l_ab[] = { l, ab };
    int from_to[] = { 0,0, 1,1, 2,2 };
    mixChannels(&src_lab, 1, l_ab, 2, from_to, 3);

    // In-place denoising of each plane set.
    fastNlMeansDenoising(l, l, h, templateWindowSize, searchWindowSize);
    fastNlMeansDenoising(ab, ab, hForColorComponents, templateWindowSize, searchWindowSize);

    // Recombine the planes and convert back to BGR.
    Mat l_ab_denoised[] = { l, ab };
    Mat dst_lab(src.size(), src.type());
    mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3);

    cvtColor(dst_lab, dst, CV_Lab2LBGR);
}
// OpenCL path of colored NLM denoising: same Lab-space pipeline as the CPU
// version, operating on UMats so the work can stay on the device.
static bool ocl_fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
                                      float h, float hForColorComponents,
                                      int templateWindowSize, int searchWindowSize)
{
    UMat src = _src.getUMat();
    _dst.create(src.size(), src.type());
    UMat dst = _dst.getUMat();

    UMat lab;
    cvtColor(src, lab, COLOR_LBGR2Lab);

    // Plane pair [0]=L (1 channel), [1]=ab (2 channels), plus matching
    // pre-allocated destinations for the denoised results.
    std::vector<UMat> planes(2), denoised(2);
    planes[0] = UMat(src.size(), CV_8U);
    planes[1] = UMat(src.size(), CV_8UC2);
    denoised[0].create(src.size(), CV_8U);
    denoised[1].create(src.size(), CV_8UC2);

    int pairs[] = { 0,0, 1,1, 2,2 };
    mixChannels(std::vector<UMat>(1, lab), planes, pairs, 3);

    // Luminance and chroma get independent filtering strengths.
    fastNlMeansDenoising(planes[0], denoised[0], h, templateWindowSize, searchWindowSize);
    fastNlMeansDenoising(planes[1], denoised[1], hForColorComponents, templateWindowSize, searchWindowSize);

    // Merge the planes and convert back to BGR with the original channel count.
    UMat denoised_lab(src.size(), CV_8UC3);
    mixChannels(denoised, std::vector<UMat>(1, denoised_lab), pairs, 3);

    cvtColor(denoised_lab, dst, COLOR_Lab2LBGR, src.channels());
    return true;
}
// Alpha-blend `over` on top of `base` (SDL_Color keeps alpha in `.unused`).
static SDL_Color addColors(const SDL_Color & base, const SDL_Color & over)
{
	// Blend each colour channel, weighted by the two alpha values.
	const auto r = mixChannels(over.r, base.r, over.unused, base.unused);
	const auto g = mixChannels(over.g, base.g, over.unused, base.unused);
	const auto b = mixChannels(over.b, base.b, over.unused, base.unused);
	// Composite alpha: over's alpha plus base's alpha scaled by what `over` lets through.
	const auto a = ui8(over.unused + base.unused * (255 - over.unused) / 256);
	return CSDL_Ext::makeColor(r, g, b, a);
}
Example #4
0
// Temporal colored NLM denoising: denoises frame `imgToDenoiseIndex` using a
// window of neighbouring frames, in Lab space so luminance (h) and chroma
// (hForColorComponents) use independent strengths.
void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputArray _dst,
                                           int imgToDenoiseIndex, int temporalWindowSize,
                                           float h, float hForColorComponents,
                                           int templateWindowSize, int searchWindowSize)
{
    std::vector<Mat> srcImgs;
    _srcImgs.getMatVector(srcImgs);

    fastNlMeansDenoisingMultiCheckPreconditions(
        srcImgs, imgToDenoiseIndex,
        temporalWindowSize, templateWindowSize, searchWindowSize);

    // Validate the input type BEFORE allocating the destination; the previous
    // code created _dst first, mutating the caller's output on invalid input.
    // CV_Error throws, so no explicit return is needed.
    if (srcImgs[0].type() != CV_8UC3)
        CV_Error(Error::StsBadArg, "Type of input images should be CV_8UC3!");

    _dst.create(srcImgs[0].size(), srcImgs[0].type());
    Mat dst = _dst.getMat();

    int src_imgs_size = static_cast<int>(srcImgs.size());

    int from_to[] = { 0,0, 1,1, 2,2 };

    // TODO convert only required images
    // Split every frame into an L plane and an interleaved ab plane.
    std::vector<Mat> src_lab(src_imgs_size);
    std::vector<Mat> l(src_imgs_size);
    std::vector<Mat> ab(src_imgs_size);
    for (int i = 0; i < src_imgs_size; i++)
    {
        src_lab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC3);
        l[i] = Mat::zeros(srcImgs[0].size(), CV_8UC1);
        ab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC2);
        cvtColor(srcImgs[i], src_lab[i], COLOR_LBGR2Lab);

        Mat l_ab[] = { l[i], ab[i] };
        mixChannels(&src_lab[i], 1, l_ab, 2, from_to, 3);
    }

    Mat dst_l;
    Mat dst_ab;

    // Temporal denoising of luminance and chroma, independently weighted.
    fastNlMeansDenoisingMulti(
        l, dst_l, imgToDenoiseIndex, temporalWindowSize,
        h, templateWindowSize, searchWindowSize);

    fastNlMeansDenoisingMulti(
        ab, dst_ab, imgToDenoiseIndex, temporalWindowSize,
        hForColorComponents, templateWindowSize, searchWindowSize);

    // Recombine the denoised planes and convert back to BGR.
    Mat l_ab_denoised[] = { dst_l, dst_ab };
    Mat dst_lab(srcImgs[0].size(), srcImgs[0].type());
    mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3);

    cvtColor(dst_lab, dst, COLOR_Lab2LBGR);
}
//-----------------------------------------------------------------------------------------------------
// Porter-Duff "over" composition of two RGBA images: rgba1 over rgba2.
void FrameAnalyzerHistogram::alphaCompose(cv::Mat &rgba1, cv::Mat &rgba2, cv::Mat &rgba_dest) {
    // Broadcast each image's alpha channel (index 3) into all four channels.
    int broadcast_alpha[] = { 3, 0, 3, 1, 3, 2, 3, 3 };
    Mat w1(rgba1.size(), rgba1.type());
    Mat w2(rgba2.size(), rgba2.type());
    mixChannels(&rgba1, 1, &w1, 1, broadcast_alpha, 4);
    mixChannels(&rgba2, 1, &w2, 1, broadcast_alpha, 4);
    // inv_w1 = 255 - alpha1 (how much of the background shows through).
    Mat inv_w1;
    subtract(Scalar::all(255), w1, inv_w1);
    // Keep the alpha component of each weight image saturated at 255.
    bitwise_or(w1, Scalar(0, 0, 0, 255), w1);
    bitwise_or(w2, Scalar(0, 0, 0, 255), w2);
    // Background weight = alpha2 * (255 - alpha1) / 255, then apply weights.
    multiply(w2, inv_w1, w2, 1. / 255);
    multiply(w1, rgba1, w1, 1. / 255);
    multiply(w2, rgba2, w2, 1. / 255);
    add(w1, w2, rgba_dest);
}
// Compute per-channel colour statistics (mean, std-dev, skewness, kurtosis)
// over the masked ROI and store them in the red/gray/saturation members.
void Feature::calcColorFeature()
{
    // TODO: optimize this part, reduce extra work
    
    // HSV with the _FULL variant so hue spans 0..255 instead of 0..180.
    Mat hsv;
    cvtColor(mROI, hsv, CV_BGR2HSV_FULL);
    
    // Build a 3-channel image from the concatenated source array
    // { mROI(ch 0-2), mGray(ch 3), hsv(ch 4-6) }:
    //   2->0 : red (BGR channel 2), 3->1 : gray, 5->2 : saturation (HSV ch 1).
    Mat temp(mROI.size(), CV_8UC3), mixed;
    Mat src[] = { mROI, mGray, hsv };
    int fromTo[] = { 2,0, 3,1, 5,2 };
    mixChannels(src, 3, &temp, 1, fromTo, 3);
    temp.convertTo(mixed, CV_64F);
    
    // First two moments over the masked region.
    Scalar avg, stdDev;
    meanStdDev(mixed, avg, stdDev, mMask);
    Scalar var = stdDev.mul(stdDev);
    Mat temp1 = mixed - avg;
    Mat temp2 = temp1.mul(temp1);
    // Skewness = E[(x-mu)^3] / sigma^3, kurtosis = E[(x-mu)^4] / sigma^4.
    Scalar sk = mean(temp1.mul(temp2), mMask) / (var.mul(stdDev));
    Scalar ku = mean(temp2.mul(temp2), mMask) / (var.mul(var));
    
    // Fan the four statistics out into the per-channel member arrays.
    Scalar stat[] = { avg, stdDev, sk, ku };
    for (int i = 0; i < 4; i++) {
        red[i] = stat[i][0];
        gray[i] = stat[i][1];
        saturation[i] = stat[i][2];
    }
}
Example #7
0
// Capture frames from camera `num`, extract the hue plane of each frame and
// display it until ESC is pressed.
void camera_HUE_display(int num) {
	int c;
	IplImage* color_img;
	CvCapture* cv_cap = cvCaptureFromCAM(num);
	cvNamedWindow("Video", 0); // create window
	for(;;) {
		color_img = cvQueryFrame(cv_cap); // get frame
		if(color_img != 0) {
			Mat cam_mat(color_img);
			Mat frameBGR;
			cam_mat.copyTo(frameBGR);
			Mat frameHSV;
			cvtColor(frameBGR, frameHSV, CV_BGR2HSV);
			Mat Hue = Mat(frameHSV.rows, frameHSV.cols, CV_8UC1);
			//Hue.create(frameHSV.size(), frameHSV.depth());
			// Hue is channel 0 of an HSV image; the previous pair { 1, 0 }
			// extracted saturation (channel 1) despite the function's name.
			int ch[] = { 0, 0 };
			mixChannels( &frameHSV, 1, &Hue, 1, ch, 1 );
			imshow("Video", Hue);
			c = cvWaitKey(10); // wait 10 ms or for key stroke
			if(c == 27) {
				break; // if ESC, break and quit
			}
		}
		c = cvWaitKey(10); // wait 10 ms or for key stroke
		if(c == 27)
			break; // if ESC, break and quit
	}
	/* clean up */
	cvReleaseCapture( &cv_cap );
	cvDestroyWindow("Video");
}
Example #8
0
// Insert the one-channel image `_src` as channel `coi` of `_dst`, in place.
void cv::insertChannel(InputArray _src, InputOutputArray _dst, int coi)
{
    Mat src = _src.getMat();
    Mat dst = _dst.getMat();
    // Geometry and element depth must agree; the source must be one channel.
    CV_Assert( src.size == dst.size && src.depth() == dst.depth() );
    CV_Assert( 0 <= coi && coi < dst.channels() && src.channels() == 1 );
    // Single pair: source channel 0 -> destination channel coi.
    int pairs[] = { 0, coi };
    mixChannels(&src, 1, &dst, 1, pairs, 1);
}
 // Back-project the tracked histogram onto the current frame's hue plane and
 // return the resulting probability image.
 // NOTE(review): relies on file-scope state (hsv, ch, hist, phranges) being
 // prepared by the caller — presumably `hsv` holds the current HSV frame and
 // `ch` selects the hue channel; confirm against the setup code.
 Mat calcProbability()
 {
     Mat hue,backproj;
     hue.create(hsv.size(), hsv.depth());
     // Copy the channel selected by `ch` out of the HSV image.
     mixChannels(&hsv, 1, &hue, 1, ch, 1);
     calcBackProject(&hue, 1, 0, hist, backproj, (const float**)phranges);
     return backproj;
 }
Example #10
0
// Wrap an IplImage in a cv::Mat header.  With copyData == false the Mat
// aliases the IplImage's pixel buffer (honouring any ROI/COI); with
// copyData == true the pixels — or just the selected COI channel — are
// copied into freshly allocated storage owned by the returned Mat.
static Mat iplImageToMat(const IplImage* img, bool copyData)
{
    Mat m;

    if( !img )
        return m;

    m.dims = 2;
    CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);

    // Map the IPL depth constant to the corresponding OpenCV depth.
    int imgdepth = IPL2CV_DEPTH(img->depth);
    size_t esz;
    m.step[0] = img->widthStep;

    if(!img->roi)
    {
        // No ROI: the Mat covers the whole interleaved image.
        CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL);
        m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, img->nChannels);
        m.rows = img->height;
        m.cols = img->width;
        m.datastart = m.data = (uchar*)img->imageData;
        esz = CV_ELEM_SIZE(m.flags);
    }
    else
    {
        // ROI present: either an interleaved sub-rectangle, or a single
        // selected plane (COI) of a planar image.
        CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0);
        bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE;
        m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, selectedPlane ? 1 : img->nChannels);
        m.rows = img->roi->height;
        m.cols = img->roi->width;
        esz = CV_ELEM_SIZE(m.flags);
        // Skip to the selected plane (planar layout only), then offset by
        // the ROI origin within that plane.
        m.datastart = m.data = (uchar*)img->imageData +
            (selectedPlane ? (img->roi->coi - 1)*m.step*img->height : 0) +
            img->roi->yOffset*m.step[0] + img->roi->xOffset*esz;
    }
    m.datalimit = m.datastart + m.step.p[0]*m.rows;
    m.dataend = m.datastart + m.step.p[0]*(m.rows-1) + esz*m.cols;
    m.step[1] = esz;
    m.updateContinuityFlag();

    if( copyData )
    {
        // Re-create m as an owning deep copy of the aliased data.
        Mat m2 = m;
        m.release();
        if( !img->roi || !img->roi->coi ||
            img->dataOrder == IPL_DATA_ORDER_PLANE)
            m2.copyTo(m);
        else
        {
            // Interleaved image with a COI set: extract just that channel.
            int ch[] = {img->roi->coi - 1, 0};
            m.create(m2.rows, m2.cols, m2.type());
            mixChannels(&m2, 1, &m, 1, ch, 1);
        }
    }

    return m;
}
Example #11
0
// Extract channel `coi` of `_src` into the single-channel image `_dst`.
void cv::extractChannel(InputArray _src, OutputArray _dst, int coi)
{
    Mat src = _src.getMat();
    CV_Assert( 0 <= coi && coi < src.channels() );
    // Allocate a one-channel destination with src's geometry and depth.
    _dst.create(src.dims, &src.size[0], src.depth());
    Mat dst = _dst.getMat();
    // Single pair: source channel coi -> destination channel 0.
    int pairs[] = { coi, 0 };
    mixChannels(&src, 1, &dst, 1, pairs, 1);
}
    // Build the hue histogram of the currently selected region; the result is
    // later back-projected for tracking.
    // NOTE(review): depends on surrounding state (hsv, ch, selection, hist,
    // hsize, phranges, trackObject) being initialized elsewhere — confirm the
    // caller sets up `hsv` and `selection` before invoking this.
    void buildHistogram()
    {

        // Copy the channel selected by `ch` (presumably hue) out of HSV.
        Mat hue;
        hue.create(hsv.size(), hsv.depth());
        mixChannels(&hsv, 1, &hue, 1, ch, 1);
        // Histogram over the selected rectangle only, scaled to 0..255.
        Mat roi(hue, selection);
        calcHist(&roi, 1, 0, Mat(), hist, 1, &hsize,(const float **) phranges);
        normalize(hist, hist, 0, 255, CV_MINMAX);
        trackObject=true;
    }
Example #13
0
// Write the single-channel image `_ch` into channel `coi` of the CvArr `arr`.
// A negative `coi` means "use the COI recorded on the IplImage itself".
void insertImageCOI(InputArray _ch, CvArr* arr, int coi)
{
    Mat ch = _ch.getMat();
    Mat mat = cvarrToMat(arr, false, true, 1);
    if(coi < 0)
    {
        // Only IplImages carry a channel-of-interest; read it (1-based).
        CV_Assert( CV_IS_IMAGE(arr) );
        coi = cvGetImageCOI((const IplImage*)arr)-1;
    }
    CV_Assert(ch.size == mat.size && ch.depth() == mat.depth() && 0 <= coi && coi < mat.channels());
    // Single pair: source channel 0 -> destination channel coi.
    int pairs[] = { 0, coi };
    mixChannels( &ch, 1, &mat, 1, pairs, 1 );
}
Example #14
0
//--------------------------------------------------------------
// Per-frame game bot loop: grab the 400x400 board area below the window,
// convert ARGB -> RGB, detect the game state, then move/click/drag the mouse
// according to the found answer.
void testApp::update() {
  if (origin.y > 0) {
    // NOTE(review): ownership of `data` is unclear — if pixelsBelowWindow
    // allocates a fresh buffer, this leaks once per frame; confirm and free.
    unsigned char *data = pixelsBelowWindow(origin.x, origin.y, 400, 400);
    cv::Mat argb(400, 400, CV_8UC4, data);
    cv::Mat rgb(400, 400, CV_8UC3);
    // Drop the alpha channel: ARGB channels 1..3 -> RGB channels 0..2.
    int fromTo[] = {1,0, 2,1, 3,2};
    mixChannels(&argb, 1, &rgb, 1, fromTo, 3);
    ofxCv::toOf(rgb, image);
    image.reloadTexture();
    
    getStateFromImage();
    findAnswer();
    
    // A selected tile: jiggle the cursor near it (board cells are 50px,
    // +23 centers the cursor in a cell).
    if (selected.size()) {
      mouseThread.move(origin.x + (selected[0] + (ofRandom(1) < 0.5 ? -1 : 1)) * 50 + 23,
                       origin.y + selected[1] * 50 + 23);
      return;
    }
    
    // A wildcard tile: click it directly.
    if (wildcard.size()) {
        mouseThread.click(origin.x + wildcard[0] * 50 + 23, origin.y + wildcard[1] * 50 + 23);
        return;
//      int max_count = 0;
//      unsigned int max_id = 0;
//      for (map<unsigned int, int>::iterator it = count.begin(); it != count.end(); it++) {
//        if (it->second > max_count) {
//          max_count = it->second;
//          max_id = it->first;
//        }
//      }
//      if (max_count > 0) {
//        cout << "max_id: " << max_id << ", count: " << max_count << endl;
//        for (int i = 0; i < wildcard.size(); i += 2) {
//          int x = wildcard[i];
//          int y = wildcard[i + 1];
//          if (state[x][y].id == max_id) {
//            mouseThread.click(origin.x + x * 50 + 23, origin.y + y * 50 + 23);
//            return;
//          }
//        }
//      }
    }
    
    // Otherwise drag one of the top-3 candidate moves, chosen at random.
    if (answers.size() > 0) {
      Answer ans = answers.at(ofRandom(MIN(answers.size(), 3)));
      mouseThread.drag(origin.x + ans.x1 * 50 + 23,
                       origin.y + ans.y1 * 50 + 23,
                       origin.x + ans.x2 * 50 + 23,
                       origin.y + ans.y2 * 50 + 23);
    }
  }
}
Example #15
0
// Colored non-local-means denoising (CPU + optional OpenCL dispatch):
// converts to CIE Lab so luminance (h) and chroma (hForColorComponents) can
// be denoised with independent strengths, then converts back to BGR(A).
void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
                                      float h, float hForColorComponents,
                                      int templateWindowSize, int searchWindowSize)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    // Both 3- and 4-channel 8-bit images are accepted; the previous message
    // only mentioned CV_8UC3, which misled CV_8UC4 callers.
    if (type != CV_8UC3 && type != CV_8UC4)
    {
        CV_Error(Error::StsBadArg, "Type of input image should be CV_8UC3 or CV_8UC4!");
        return;
    }

    // Dispatch to the OpenCL implementation when UMats are in play.
    CV_OCL_RUN(_src.dims() <= 2 && (_dst.isUMat() || _src.isUMat()),
                ocl_fastNlMeansDenoisingColored(_src, _dst, h, hForColorComponents,
                                                templateWindowSize, searchWindowSize))

    Mat src = _src.getMat();
    _dst.create(src.size(), type);
    Mat dst = _dst.getMat();

    Mat src_lab;
    cvtColor(src, src_lab, COLOR_LBGR2Lab);

    // Split into L (1 channel) and ab (2 interleaved channels).
    Mat l(src.size(), CV_8U);
    Mat ab(src.size(), CV_8UC2);
    Mat l_ab[] = { l, ab };
    int from_to[] = { 0,0, 1,1, 2,2 };
    mixChannels(&src_lab, 1, l_ab, 2, from_to, 3);

    // In-place denoising of the L plane and the ab planes.
    fastNlMeansDenoising(l, l, h, templateWindowSize, searchWindowSize);
    fastNlMeansDenoising(ab, ab, hForColorComponents, templateWindowSize, searchWindowSize);

    Mat l_ab_denoised[] = { l, ab };
    Mat dst_lab(src.size(), CV_MAKE_TYPE(depth, 3));
    mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3);

    // Convert back, restoring the original channel count (3 or 4).
    cvtColor(dst_lab, dst, COLOR_Lab2LBGR, cn);
}
Example #16
0
// Extract channel `coi` of the CvArr `arr` into the single-channel `_ch`.
// A negative `coi` means "use the COI recorded on the IplImage itself".
void extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
{
    Mat mat = cvarrToMat(arr, false, true, 1);
    // Allocate a one-channel destination matching the source geometry/depth.
    _ch.create(mat.dims, mat.size, mat.depth());
    Mat ch = _ch.getMat();
    if(coi < 0)
    {
        // Only IplImages carry a channel-of-interest; read it (1-based).
        CV_Assert( CV_IS_IMAGE(arr) );
        coi = cvGetImageCOI((const IplImage*)arr)-1;
    }
    CV_Assert(0 <= coi && coi < mat.channels());
    // Single pair: source channel coi -> destination channel 0.
    int pairs[] = { coi, 0 };
    mixChannels( &mat, 1, &ch, 1, pairs, 1 );
}
Example #17
0
// vector<int> convenience overload of mixChannels: gathers the source and
// destination matrices into one buffer and forwards to the pointer overload.
void cv::mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst,
                     const vector<int>& fromTo)
{
    if(fromTo.empty())
        return;
    // A plain Mat (or matrix expression) passed here is ONE array, not a
    // collection; total() on a Mat counts its elements, so using it as the
    // number of arrays — as the previous code did — misreads the input.
    // This matches the corrected vector overload of this function.
    bool src_is_mat = src.kind() != _InputArray::STD_VECTOR_MAT &&
                      src.kind() != _InputArray::STD_VECTOR_VECTOR;
    bool dst_is_mat = dst.kind() != _InputArray::STD_VECTOR_MAT &&
                      dst.kind() != _InputArray::STD_VECTOR_VECTOR;
    int i;
    int nsrc = src_is_mat ? 1 : (int)src.total();
    int ndst = dst_is_mat ? 1 : (int)dst.total();

    CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    Mat* buf = _buf;
    // getMat(-1) fetches the whole single matrix; getMat(i) indexes a vector.
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(src_is_mat ? -1 : i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(dst_is_mat ? -1 : i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, &fromTo[0], fromTo.size()/2);
}
Example #18
0
//--------------------------------------------------------------
//--------------------------------------------------------------
// Locate the game board on screen by template-matching the banner image
// against a full-screen capture, then position this app's window next to it.
void testApp::findOrigin() {
  int width = ofGetScreenWidth();
  int height = ofGetScreenHeight();
  // NOTE(review): if pixelsBelowWindow allocates, `data` is never freed here.
  unsigned char *data = pixelsBelowWindow(0, 0, width, height);
  ofImage banner;
  banner.loadImage("banner.png");
  float start = ofGetElapsedTimef();
  cv::Mat captured(height, width, CV_8UC4, data, 0);
  cv::Mat screen(height, width, CV_8UC3);
  // Drop the alpha channel: ARGB channels 1..3 -> RGB channels 0..2.
  int fromTo[] = {1,0, 2,1, 3,2};
  mixChannels(&captured, 1, &screen, 1, fromTo, 3);
  cv::Mat temp_img = ofxCv::toCv(banner);
  cv::Mat result_img;
  // Normalized cross-correlation; best match is the global maximum.
  cv::matchTemplate(screen, temp_img, result_img, CV_TM_CCOEFF_NORMED);
  cv::Point max_pt;
  double maxVal;
  cv::minMaxLoc(result_img, NULL, &maxVal, NULL, &max_pt);
  cout << "(" << max_pt.x << ", " << max_pt.y << "), score=" << maxVal << ", time=" << (ofGetElapsedTimef() - start) << endl;
  // Accept the match only above a 0.9 confidence threshold; the offsets
  // place the board origin relative to the matched banner corner.
  if (maxVal > 0.9) {
    origin.x = max_pt.x + 20;
    origin.y = max_pt.y + 115 + 63;
    
#if 0
    // Debug dump: save each 46x46 board cell as a PNG named by a probe pixel.
    for (int y = 0; y < 8; y++) {
      for (int x = 0; x < 8; x++) {
        cv::Mat name = screen(cv::Rect(origin.x + x * 50, origin.y + y * 50, 46, 46)).clone();
        ofImage nameko;
        ofxCv::toOf(name, nameko);
        nameko.reloadTexture();
        char filename[256];
        sprintf(filename, "nameko/%06x.png", nameko.getColor(23, 2).getHex());
        ofFile file(filename);
        if (!file.exists()) {
          nameko.saveImage(file);
        }
      }
    }
#endif

    ofSetWindowPosition(origin.x - ofGetWindowWidth() - 25, origin.y - 1);
  }
}
Example #19
0
// Apply the globally selected filter (`filterType`) in place to the shared
// Flash ARGB pixel buffer and return a neutral AS3 value.
static AS3_Val applyFilter(void* self, AS3_Val args)
{	
	// Wrap the Flash image data (ARGB) without copying it.
	cv::Mat img (frameHeight, frameWidth, CV_8UC4, (void*)buffer);
	// C process duration
	/*struct timeval start, end;
    long mtime, seconds, useconds;
    gettimeofday(&start, NULL);*/
	
	if (strcmp (filterType, "grayScale") == 0) {
		// Collapse to one channel, then fan the gray plane back out to the
		// three colour channels of the ARGB output (alpha left untouched).
		// NOTE(review): CV_RGBA2GRAY on ARGB-ordered data weighs the wrong
		// channels — an ARGB-aware conversion may be intended here.
		cv::Mat grayImg;
		cv::cvtColor (img, grayImg, CV_RGBA2GRAY);
		int gs_to_argb[] = { 0,1,  0,2,  0,3 };
		mixChannels (&grayImg, 1, &img, 1, gs_to_argb, 3);
	} else if (strcmp (filterType, "medianBlur") == 0) {
		// 5x5 median filter, in place.
		cv::medianBlur (img, img, 5);
	} else if (strcmp (filterType, "verticalMirror") == 0) {
		// Flip around the x-axis.
		cv::flip (img, img, 0);
	} else if (strcmp (filterType, "horizontalMirror") == 0) {
		// Flip around the y-axis.
		cv::flip (img, img, 1);
	}
	// C process duration
	/*gettimeofday(&end, NULL);
    seconds  = end.tv_sec  - start.tv_sec;
    useconds = end.tv_usec - start.tv_usec;
    mtime = ((seconds) * 1000 + useconds/1000.0) + 0.5;
	fprintf(stderr, "[OPENCV] applyFilter: %ld", mtime);*/
	
	return 0;
}
Example #20
0
// vector<int> convenience overload of mixChannels: gathers the source and
// destination matrices into one buffer and forwards to the pointer overload.
void cv::mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst,
                     const vector<int>& fromTo)
{
    // Empty pair list: nothing to copy.
    if(fromTo.empty())
        return;
    // Any kind other than vector<Mat> / vector<vector> means the argument is
    // a single matrix rather than a collection of them.
    const bool single_src = src.kind() != _InputArray::STD_VECTOR_MAT &&
                            src.kind() != _InputArray::STD_VECTOR_VECTOR;
    const bool single_dst = dst.kind() != _InputArray::STD_VECTOR_MAT &&
                            dst.kind() != _InputArray::STD_VECTOR_VECTOR;
    const int nsrc = single_src ? 1 : (int)src.total();
    const int ndst = single_dst ? 1 : (int)dst.total();
    
    CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
    // Sources first, then destinations, in one contiguous buffer.
    cv::AutoBuffer<Mat> storage(nsrc + ndst);
    Mat* mats = storage;
    for( int i = 0; i < nsrc; i++ )
        mats[i] = src.getMat(single_src ? -1 : i);
    for( int i = 0; i < ndst; i++ )
        mats[nsrc + i] = dst.getMat(single_dst ? -1 : i);
    mixChannels(&mats[0], nsrc, &mats[nsrc], ndst, &fromTo[0], fromTo.size()/2);
}
Example #21
0
// Extract one model image per user-drawn polygon from `origframe`:
// background-subtract the frame, mask it by the polygon, encode markers
// (center/head/inside/outside/foreground) into an alpha channel, then crop,
// rotate and save the result as a PNG.
void ModelMaker::extractModel(cv::Mat origframe)
{
    //       qDebug()<<"Frame is"<<frame.empty();
    for(int i = 0; i < centroids.size(); i++) {
        
        // Background-subtracted copy of the frame; `polymask` starts as a
        // clone only to inherit size/type and is cleared below.
        Mat subtractedframe(origframe);
        subtractedframe = subtractBack(origframe);
        Mat polymask = subtractedframe.clone();
        
        //                cv::cvtColor(frameMat, frameMat, CV_BGR2BGRA);
        
        //                cv::cvtColor(polymask, polymask, CV_BGR2BGRA);
        
        
        //cv::rectangle(mask, Point( 0, 0 ), Point( mask.cols, mask.rows), Scalar( 0, 255,0,255 ),-1, 8 ); //Fill all mask in
        
        polymask.setTo(Scalar(0,0,0,0));
        //Polgon Masking
        polygon = paintCanvas->polygons.at(i);
        
        // NOTE(review): variable-length arrays are a compiler extension, not
        // standard C++ — consider std::vector<Point> here.
        Point poly_points[polygon.size()];
        
        //Find point furthest from center
        Point furthest = Point(paintCanvas->centerPoint.x()*xscale,paintCanvas->centerPoint.y()*yscale);  //set to center
        
        // Canvas coordinates are scaled into frame coordinates.
        int scaledcenterx = paintCanvas->centerPoint.x()*xscale;
        int scaledcentery = paintCanvas->centerPoint.y()*yscale;
        int scaledheadx= paintCanvas->headPoint.x()*xscale;
        int scaledheady=paintCanvas->headPoint.y()*yscale;
        
        
        float biggestdistancesquared=0;
        
        
        // Build the polygon vertex array and track the vertex farthest from
        // the center (used later to size the crop window).
        for(int j=0;j<polygon.size();j++)
        {
            poly_points[j]=Point(xscale*polygon.at(j).x(), yscale*polygon.at(j).y());
            
            Point candidate = Point(xscale*polygon.at(j).x(), yscale*polygon.at(j).y());
            float distancecandidatesquared;
            //Find furthest
            distancecandidatesquared= (candidate.x - scaledcenterx)*(candidate.x - scaledcenterx) + (candidate.y - scaledcentery)*(candidate.y - scaledcentery);
            if(distancecandidatesquared>biggestdistancesquared){
                biggestdistancesquared=distancecandidatesquared;
                qDebug()<<"biggcandidate x "<<candidate.x <<"  y "<<candidate.y << "    distance ="<<biggestdistancesquared;
                
            }
            
            
            
        }
        
        const Point* ppt[1] = { poly_points };
        // NOTE(review): size_t -> int narrowing in this initializer.
        int npt[] = { polygon.size() };
        
        
        
        // Fill the user polygon with opaque white to form the mask.
        fillPoly( polymask,
                  ppt,
                  npt,
                  1,
                  Scalar( 255, 255,255,255 ),
                  
                  8,
                  0);
        
        
        
        //Debug
        //                                cv::circle(origframe,cv::Point(scaledcenterx,scaledcentery),1,Scalar(255,255,255,255),2);
        //                                cv::circle(origframe,cv::Point(scaledheadx,scaledheady),1,Scalar(255,0,255, 254),2);
        
        //cv::circle(polymask,cv::Point(scaledcenterx,scaledcentery),1,Scalar(255,255,255,255),2);
        //  cv::circle(polymask,cv::Point(scaledheadx,scaledheady),1,Scalar(255,0,255, 254),2);
        
        //  cv::circle(subtractedframe,cv::Point(scaledcenterx,scaledcentery),1,Scalar(255,255,255,255),2);
        //  cv::circle(subtractedframe,cv::Point(scaledheadx,scaledheady),1,Scalar(255,0,255, 254),2);
        
        
        
        //background subtraction: take original image, apply background as a mask, save over original
        //bitwise_and(subtractedframe, polymask, subtractedframe);
        
        qDebug()<<"Roi "<<x1<<"  "<<y1<<"  "<<x2<<"  "<<y2<<"  ";
        
        
        cv::cvtColor(polymask,polymask, CV_RGB2GRAY);
        
        
        //Full alpha mask = polygon selection (a =200) + BG subtracted organism (a= 255) + Center Mark ( a = 250) + head mark (a = 240)
        //Set Head to alpha=240
        //Set Center to Alpha = 250
        //Everything inside mask == alpha 200
        //Everything outside alpha=100;
        //BG subtracted ant = 255
        
        // Encode the marker values described above into a single gray image
        // that will become the alpha channel.
        Mat maskedsubtraction;
        subtractedframe.copyTo(maskedsubtraction,polymask); // note that m.copyTo(m,mask) will have no masking effect
        cvtColor(maskedsubtraction, maskedsubtraction,CV_BGR2GRAY);
        polymask = polymask+155;   //255 moves to 255, 0 moves to 155
        polymask = polymask - 55;  //255 moves to 200, 155 moves to 100
        maskedsubtraction = polymask+maskedsubtraction;
        
        cv::circle(maskedsubtraction,cv::Point(scaledcenterx,scaledcentery),1,Scalar(250),2); //Encode the Center
        cv::circle(maskedsubtraction,cv::Point(scaledheadx,scaledheady),1,Scalar(240),2); //encode the head
        
        Mat bgr;
        bgr=origframe.clone();
        
        Mat alpha;
        maskedsubtraction.copyTo(alpha);
        Mat bgra;
        cvtColor(origframe, bgra,CV_BGR2BGRA); //Copy the origframe, we'll write over it next
        
        
        
        
        // forming array of matrices is quite efficient operations,
        // because the matrix data is not copied, only the headers
        Mat in[] = { bgr, alpha };
        // BGRa[0] -> bgr[0], BGRa[1] -> bgr[1],
        // BGRa[2] -> bgr[2], BGRa[3] -> alpha[0]
        int from_to[] = { 0,0,  1,1,  2,2,  3,3 };
        mixChannels( in, 2, &bgra, 1, from_to, 4 ); // input array, number of files in input array, destination, number of files in destination, from-to array, number of pairs in from-to
        
        
        
        QString ext = ".png";
        // QString fullframe = savepath+paintCanvas->polyNames.at(i)+"_"+QString::number(centroids[i].x())+"_"+QString::number(centroids[i].y())+"_"+QString::number(currentFrame)+ext;
        QString modelfilename = savepath+paintCanvas->polyNames.at(i)+"_f"+QString::number(currentFrame)+ext;
        
        //DEBUG IMAGES
        /*
              imwrite(modelfilename.toStdString()+"_subtraction",subtractedframe);
                imwrite(modelfilename.toStdString()+"_polymask",polymask);
                imwrite(modelfilename.toStdString()+"_alpha",alpha);
        */
        
        
        
        //save out Model
        
        //Full Keyframe
//        imwrite(modelfilename.toStdString()+"_keyframe",bgra); //Disabled for now
        
        qDebug()<<"Saved out: "<<modelfilename;
        
        //***Crop and Rotate ***//
        qDebug()<<"crop centered on  "<<scaledcenterx<<"  "<<scaledcentery;
        //crop the frame based on ROI
        Point2f src_center(scaledcenterx, scaledcentery);
        //To do this correctly use getRectSubPix instead of frameMat(MYROI) method
        // getRectSubPix only works with certain image formats (this is undocumented in opencv : P
        // so we have to do that cutting and mixing again!
        // Crop side = 2 * distance to the farthest polygon vertex.
        getRectSubPix(bgr, cv::Size(sqrt(biggestdistancesquared)*2,sqrt(biggestdistancesquared)*2), src_center, bgr);
        getRectSubPix(alpha, cv::Size(sqrt(biggestdistancesquared)*2,sqrt(biggestdistancesquared)*2), src_center, alpha);
        
        // Re-merge the cropped BGR and alpha planes into a BGRA image.
        Mat bgracropped;
        cvtColor(bgr, bgracropped,CV_BGR2BGRA); //Copy the origframe, we'll write over it next
        Mat inagain[] = { bgr, alpha };
        int from_to2[] = { 0,0,  1,1,  2,2,  3,3 };
        
        //Note: the height and width dimensions have to be the same for the inputs and outputs
        mixChannels( inagain, 2, &bgracropped, 1, from_to2, 4 ); // input array, number of files in input array, destination, number of files in destination, from-to array, number of pairs in from-to
        
        
        //rotate the cropped frame about the center of the cropped frame.
        qDebug()<<"Rotate that image  "<<angle;
        bgracropped = rotateImage(bgracropped, angle);//Rotate full image about this center
        
        //after I rotate clear the global angle
        angle =0;
        //debug
        // NOTE(review): this overwrites the reset above, so `angle` leaves
        // this function as -1, not 0 — confirm which value is intended.
        angle=-1;
        
        

        // Save the Nicely Rotated and Cropped Model File
        imwrite(modelfilename.toStdString(),bgracropped);
        
        
        // Reset all per-extraction state on the canvas and capture.
        centroids.clear();
        maxXY.clear();
        minXY.clear();
        paintCanvas->polyNames.clear();
        paintCanvas->polygons.clear();
        paintCanvas->masks.pop_back();
        polyinfo = "Polygon cleared";
        paintCanvas->temp.clear();
        ui->statusBar->showMessage(polyinfo,2000);
        paintCanvas->replyMask = replyNull;
        capture.set(CV_CAP_PROP_POS_FRAMES,(double)currentFrame);
    }
    
    
}
// CamShift-based object tracking: build (or reuse) a hue histogram of the
// object layer, back-project it onto the scene layer, run CamShift, and store
// the rotated bounding box corners in border_x1..border_y4.
void FindObjectMain::process_camshift()
{
// Some user defined parameters
	int vmin = config.vmin;
	int vmax = config.vmax;
	int smin = config.smin;
	float hranges[] = { 0, 180 };
	const float* phranges = hranges;


// Create aligned, RGB images
	if(!object_image)
	{
		object_image = cvCreateImage( 
			cvSize(object_image_w, object_image_h), 
			8, 
			3);
	}

	if(!scene_image)
	{
		scene_image = cvCreateImage( 
			cvSize(scene_image_w, scene_image_h), 
			8, 
			3);
	}

// Temporary row pointers
	// NOTE(review): row pointers assume tightly packed rows (w * 3 bytes);
	// IplImage widthStep may include padding — confirm for these sizes.
	unsigned char **object_rows = new unsigned char*[object_image_h];
	unsigned char **scene_rows = new unsigned char*[scene_image_h];
	for(int i = 0; i < object_image_h; i++)
	{
		object_rows[i] = (unsigned char*)(object_image->imageData + i * object_image_w * 3);
	}
	for(int i = 0; i < scene_image_h; i++)
	{
		scene_rows[i] = (unsigned char*)(scene_image->imageData + i * scene_image_w * 3);
	}

// Transfer object & scene to RGB images for OpenCV
	if(!prev_object) prev_object = new unsigned char[object_image_w * object_image_h * 3];
// Back up old object image
	memcpy(prev_object, object_image->imageData, object_image_w * object_image_h * 3);

	BC_CModels::transfer(object_rows,
		get_input(object_layer)->get_rows(),
		0,
		0,
		0,
		0,
		0,
		0,
		object_x1,
		object_y1,
		object_w,
		object_h,
		0,
		0,
		object_w,
		object_h,
		get_input(object_layer)->get_color_model(),
		BC_RGB888,
		0,
		0,
		0);
	BC_CModels::transfer(scene_rows,
		get_input(scene_layer)->get_rows(),
		0,
		0,
		0,
		0,
		0,
		0,
		scene_x1,
		scene_y1,
		scene_w,
		scene_h,
		0,
		0,
		scene_w,
		scene_h,
		get_input(scene_layer)->get_color_model(),
		BC_RGB888,
		0,
		0,
		0);

	delete [] object_rows;
	delete [] scene_rows;

// from camshiftdemo.cpp
// Compute new object	
	// Rebuild the histogram only when the object image changed or no
	// histogram exists yet.
	if(memcmp(prev_object, 
		object_image->imageData, 
		object_image_w * object_image_h * 3) ||
		!hist.dims)
	{
		Mat image(object_image);
		Mat hsv, hue, mask;
		cvtColor(image, hsv, CV_RGB2HSV);
    	int _vmin = vmin, _vmax = vmax;
//printf("FindObjectMain::process_camshift %d\n", __LINE__);

		// Mask out pixels outside the configured saturation/value range.
    	inRange(hsv, 
			Scalar(0, smin, MIN(_vmin,_vmax)),
        	Scalar(180, 256, MAX(_vmin, _vmax)), 
			mask);
		// Extract the hue plane (channel 0).
    	int ch[] = { 0, 0 };
    	hue.create(hsv.size(), hsv.depth());
    	mixChannels(&hsv, 1, &hue, 1, ch, 1);

		// 16-bin hue histogram over the whole object rectangle.
		Rect selection = Rect(0, 0, object_w, object_h);
		trackWindow = selection;
		int hsize = 16;
		Mat roi(hue, selection), maskroi(mask, selection);
		calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
		normalize(hist, hist, 0, 255, CV_MINMAX);
	}


// compute scene
	Mat image(scene_image);
	Mat hsv, hue, mask, backproj;
	cvtColor(image, hsv, CV_RGB2HSV);
    int _vmin = vmin, _vmax = vmax;

	// Same saturation/value gating and hue extraction for the scene.
    inRange(hsv, 
		Scalar(0, smin, MIN(_vmin,_vmax)),
        Scalar(180, 256, MAX(_vmin, _vmax)), 
		mask);
    int ch[] = {0, 0};
    hue.create(hsv.size(), hsv.depth());
    mixChannels(&hsv, 1, &hue, 1, ch, 1);
	
//printf("FindObjectMain::process_camshift %d %d %d\n", __LINE__, hist.dims, hist.size[1]);
	// Default track box: centered on the object rectangle, unrotated.
	RotatedRect trackBox = RotatedRect(
		Point2f((object_x1 + object_x2) / 2, (object_y1 + object_y2) / 2), 
		Size2f(object_w, object_h), 
		0);
	trackWindow = Rect(0, 
		0,
        scene_w, 
		scene_h);
	if(hist.dims > 0)
	{
		

		// Probability map of the object's hue distribution in the scene.
		calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
		backproj &= mask;
//printf("FindObjectMain::process_camshift %d\n", __LINE__);
// 		if(trackWindow.width <= 0 ||
// 			trackWindow.height <= 0)
// 		{
// 			trackWindow.width = object_w;
// 			trackWindow.height = object_h;
// 		}

		trackBox = CamShift(backproj, 
			trackWindow,
        	TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
//printf("FindObjectMain::process_camshift %d\n", __LINE__);


//     	if( trackWindow.area() <= 1 )
//     	{
//         	int cols = backproj.cols;
// 			int rows = backproj.rows;
// 			int r = (MIN(cols, rows) + 5) / 6;
//         	trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
//                         	   trackWindow.x + r, trackWindow.y + r) &
//                     	  Rect(0, 0, cols, rows);
//     	}
	}
// printf("FindObjectMain::process_camshift %d %d %d %d %d\n", 
// __LINE__,
// trackWindow.x,
// trackWindow.y,
// trackWindow.width,
// trackWindow.height);


// Draw mask over scene
	if(config.draw_keypoints)
	{
		// Write the back-projection as luma into the YUV scene rows.
		// NOTE(review): indexing backproj.data with i * scene_image_w assumes
		// continuous rows — confirm backproj.step equals scene_image_w.
		for(int i = 0; i < scene_h; i++)
		{
			switch(get_input(scene_layer)->get_color_model())
			{
				case BC_YUV888:
				{
					unsigned char *input = backproj.data + i * scene_image_w;
					unsigned char *output = get_input(scene_layer)->get_rows()[i + scene_y1] + scene_x1 * 3;
					for(int j = 0; j < scene_w; j++)
					{
						output[0] = *input;
						output[1] = 0x80;
						output[2] = 0x80;
						output += 3;
						input++;
					}
					break;
				}
			}
		}
	}

// Get object outline in the scene layer
// printf("FindObjectMain::process_camshift %d %d %d %d %d %d\n", 
// __LINE__,
// (int)trackBox.center.x,
// (int)trackBox.center.y,
// (int)trackBox.size.width,
// (int)trackBox.size.height,
// (int)trackBox.angle);
	// Convert the rotated rect to its four corner points: each corner is at
	// the same radius from the center, at the rect's corner angles plus the
	// box rotation (degrees -> radians first).
	double angle = trackBox.angle * 2 * M_PI / 360;
	double angle1 = atan2(-(double)trackBox.size.height / 2, -(double)trackBox.size.width / 2) + angle;
	double angle2 = atan2(-(double)trackBox.size.height / 2, (double)trackBox.size.width / 2) + angle;
	double angle3 = atan2((double)trackBox.size.height / 2, (double)trackBox.size.width / 2) + angle;
	double angle4 = atan2((double)trackBox.size.height / 2, -(double)trackBox.size.width / 2) + angle;
	double radius = sqrt(SQR(trackBox.size.height / 2) + SQR(trackBox.size.width / 2));
	border_x1 = (int)(trackBox.center.x + cos(angle1) * radius) + scene_x1;
	border_y1 = (int)(trackBox.center.y + sin(angle1) * radius) + scene_y1;
	border_x2 = (int)(trackBox.center.x + cos(angle2) * radius) + scene_x1;
	border_y2 = (int)(trackBox.center.y + sin(angle2) * radius) + scene_y1;
	border_x3 = (int)(trackBox.center.x + cos(angle3) * radius) + scene_x1;
	border_y3 = (int)(trackBox.center.y + sin(angle3) * radius) + scene_y1;
	border_x4 = (int)(trackBox.center.x + cos(angle4) * radius) + scene_x1;
	border_y4 = (int)(trackBox.center.y + sin(angle4) * radius) + scene_y1;

}
void SquareOcl::find_squares_cpu( const Mat& image, vector<vector<Point> >& squares )
{
    squares.clear();

    Mat pyr, timg, gray0(image.size(), CV_8U), gray;

    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;

    // find squares in every color plane of the image
    for( int c = 0; c < 3; c++ )
    {
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        for( int l = 0; l < SQUARE_OCL_THRESH_LEVEL_H; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, SQUARE_OCL_EDGE_THRESH_H, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l+1)*255/SQUARE_OCL_THRESH_LEVEL_H;
            }

            // find contours and store them all as a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;

            // test each contour
            for( size_t i = 0; i < contours.size(); i++ )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( approx.size() == 4 &&
                        fabs(contourArea(Mat(approx))) > 1000 &&
                        isContourConvex(Mat(approx)) )
                {
                    double maxCosine = 0;

                    for( int j = 2; j < 5; j++ )
                    {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degree) then write quandrange
                    // vertices to resultant sequence
                    if( maxCosine < 0.3 )
                        squares.push_back(approx);
                }
            }
        }
    }
}
Example #24
0
// Cross-correlate `templ` against `img` using block-wise DFTs
// (overlap-save style) and write the result into `corr` of size
// `corrsize` and type `ctype`. `anchor` and `borderType` control how
// pixels outside `img` are sampled; `delta` is added to the output
// (single-channel output only).
void crossCorr( const Mat& img, const Mat& _templ, Mat& corr,
                Size corrsize, int ctype,
                Point anchor, double delta, int borderType )
{
    const double blockScale = 4.5;   // block size relative to template size
    const int minBlockSize = 256;    // lower bound on block edge length
    std::vector<uchar> buf;          // shared scratch for plane extraction/conversion

    Mat templ = _templ;
    int depth = img.depth(), cn = img.channels();
    int tdepth = templ.depth(), tcn = templ.channels();
    int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype);

    CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 );

    // Promote the template to at least CV_32F when its depth differs from
    // the image's, so spectrum products happen in floating point.
    if( depth != tdepth && tdepth != std::max(CV_32F, depth) )
    {
        _templ.convertTo(templ, std::max(CV_32F, depth));
        tdepth = templ.depth();
    }

    CV_Assert( depth == tdepth || tdepth == CV_32F);
    // Full linear correlation extent is img + templ - 1 per dimension.
    CV_Assert( corrsize.height <= img.rows + templ.rows - 1 &&
               corrsize.width <= img.cols + templ.cols - 1 );

    // `delta` is only meaningful for single-channel output.
    CV_Assert( ccn == 1 || delta == 0 );

    corr.create(corrsize, ctype);

    // Working depth for DFTs: CV_64F for deep sources, otherwise the widest
    // of CV_32F, template depth, and output depth.
    int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth);
    Size blocksize, dftsize;

    // Initial block size: proportional to the template, clamped so small
    // templates still get an efficient DFT and nothing exceeds the output.
    blocksize.width = cvRound(templ.cols*blockScale);
    blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 );
    blocksize.width = std::min( blocksize.width, corr.cols );
    blocksize.height = cvRound(templ.rows*blockScale);
    blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 );
    blocksize.height = std::min( blocksize.height, corr.rows );

    // DFT must cover block + template - 1 so the convolution is linear,
    // not circular.
    dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2);
    dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size so the padded DFT is fully utilized
    blocksize.width = dftsize.width - templ.cols + 1;
    blocksize.width = MIN( blocksize.width, corr.cols );
    blocksize.height = dftsize.height - templ.rows + 1;
    blocksize.height = MIN( blocksize.height, corr.rows );

    // One DFT plane per template channel, stacked vertically in dftTempl.
    Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth );
    Mat dftImg( dftsize, maxDepth );

    // Size the scratch buffer for the largest intermediate it must hold.
    int i, k, bufSize = 0;
    if( tcn > 1 && tdepth != maxDepth )
        bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth);

    if( cn > 1 && depth != maxDepth )
        bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)*
            (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth));

    if( (ccn > 1 || cn > 1) && cdepth != maxDepth )
        bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth));

    buf.resize(bufSize);

    // compute DFT of each template plane
    for( k = 0; k < tcn; k++ )
    {
        int yofs = k*dftsize.height;
        Mat src = templ;
        Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));
        Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));

        if( tcn > 1 )
        {
            // Extract channel k (via buf when a depth conversion follows).
            src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]);
            int pairs[] = {k, 0};
            mixChannels(&templ, 1, &src, 1, pairs, 1);
        }

        if( dst1.data != src.data )
            src.convertTo(dst1, dst1.depth());

        // Zero the right-hand padding; the bottom padding is handled by
        // dft's nonzero-rows argument below.
        if( dst.cols > templ.cols )
        {
            Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));
            part = Scalar::all(0);
        }
        dft(dst, dst, 0, templ.rows);
    }

    int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;
    int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;
    int tileCount = tileCountX * tileCountY;

    Size wholeSize = img.size();
    Point roiofs(0,0);
    Mat img0 = img;

    // Unless the caller asked for isolation, grow the ROI back to the
    // parent image so border pixels come from real data where possible.
    if( !(borderType & BORDER_ISOLATED) )
    {
        img.locateROI(wholeSize, roiofs);
        img0.adjustROI(roiofs.y, wholeSize.height-img.rows-roiofs.y,
                       roiofs.x, wholeSize.width-img.cols-roiofs.x);
    }
    borderType |= BORDER_ISOLATED;

    // calculate correlation by blocks
    for( i = 0; i < tileCount; i++ )
    {
        int x = (i%tileCountX)*blocksize.width;
        int y = (i/tileCountX)*blocksize.height;

        // bsz: output tile size; dsz: input patch needed to produce it.
        Size bsz(std::min(blocksize.width, corr.cols - x),
                 std::min(blocksize.height, corr.rows - y));
        Size dsz(bsz.width + templ.cols - 1, bsz.height + templ.rows - 1);
        // Clip the source patch against the (possibly expanded) image.
        int x0 = x - anchor.x + roiofs.x, y0 = y - anchor.y + roiofs.y;
        int x1 = std::max(0, x0), y1 = std::max(0, y0);
        int x2 = std::min(img0.cols, x0 + dsz.width);
        int y2 = std::min(img0.rows, y0 + dsz.height);
        Mat src0(img0, Range(y1, y2), Range(x1, x2));
        Mat dst(dftImg, Rect(0, 0, dsz.width, dsz.height));
        Mat dst1(dftImg, Rect(x1-x0, y1-y0, x2-x1, y2-y1));
        Mat cdst(corr, Rect(x, y, bsz.width, bsz.height));

        for( k = 0; k < cn; k++ )
        {
            Mat src = src0;
            dftImg = Scalar::all(0);

            if( cn > 1 )
            {
                // Extract channel k of the image patch (via buf if needed).
                src = depth == maxDepth ? dst1 : Mat(y2-y1, x2-x1, depth, &buf[0]);
                int pairs[] = {k, 0};
                mixChannels(&src0, 1, &src, 1, pairs, 1);
            }

            if( dst1.data != src.data )
                src.convertTo(dst1, dst1.depth());

            // Synthesize border pixels when the patch ran off the image.
            if( x2 - x1 < dsz.width || y2 - y1 < dsz.height )
                copyMakeBorder(dst1, dst, y1-y0, dst.rows-dst1.rows-(y1-y0),
                               x1-x0, dst.cols-dst1.cols-(x1-x0), borderType);

            // Correlation = IDFT( DFT(patch) * conj(DFT(template)) ).
            dft( dftImg, dftImg, 0, dsz.height );
            Mat dftTempl1(dftTempl, Rect(0, tcn > 1 ? k*dftsize.height : 0,
                                         dftsize.width, dftsize.height));
            mulSpectrums(dftImg, dftTempl1, dftImg, 0, true);
            dft( dftImg, dftImg, DFT_INVERSE + DFT_SCALE, bsz.height );

            src = dftImg(Rect(0, 0, bsz.width, bsz.height));

            if( ccn > 1 )
            {
                // Multi-channel output: route this plane into channel k.
                if( cdepth != maxDepth )
                {
                    Mat plane(bsz, cdepth, &buf[0]);
                    src.convertTo(plane, cdepth, 1, delta);
                    src = plane;
                }
                int pairs[] = {0, k};
                mixChannels(&src, 1, &cdst, 1, pairs, 1);
            }
            else
            {
                // Single-channel output: sum contributions over channels;
                // `delta` is applied exactly once, with the first channel.
                if( k == 0 )
                    src.convertTo(cdst, cdepth, 1, delta);
                else
                {
                    if( maxDepth != cdepth )
                    {
                        Mat plane(bsz, cdepth, &buf[0]);
                        src.convertTo(plane, cdepth);
                        src = plane;
                    }
                    add(src, cdst, cdst);
                }
            }
        }
    }
}
Example #25
0
// Realtime audio callback: mixes all active engine channels into the
// master and headphone buffers for one block of `iBufferSize` samples.
// `pOut` is cast to the output buffer array but is unused here —
// master/headphone interleaving happens in SoundManager (see note at the
// bottom).
void EngineMaster::process(const CSAMPLE *, const CSAMPLE *pOut, const int iBufferSize) {
    // Name the engine thread once, on the first callback invocation.
    static bool haveSetName = false;
    if (!haveSetName) {
        QThread::currentThread()->setObjectName("Engine");
        haveSetName = true;
    }
    ScopedTimer t("EngineMaster::process");

    CSAMPLE **pOutput = (CSAMPLE**)pOut;
    Q_UNUSED(pOutput);

    // Prepare each channel for output

    // Bitvector of enabled channels
    const unsigned int maxChannels = 32;
    unsigned int masterOutput = 0;
    unsigned int headphoneOutput = 0;

    // Compute headphone mix
    // Head phone left/right mix
    // cf_val crossfades the headphone output between cue and master
    // (presumably in [-1, 1] so both gains stay in [0, 1] — confirm
    // against the head_mix control's range).
    float cf_val = head_mix->get();
    float chead_gain = 0.5*(-cf_val+1.);
    float cmaster_gain = 0.5*(cf_val+1.);
    // qDebug() << "head val " << cf_val << ", head " << chead_gain
    //          << ", master " << cmaster_gain;

    Timer timer("EngineMaster::process channels");
    QList<ChannelInfo*>::iterator it = m_channels.begin();
    for (unsigned int channel_number = 0;
            it != m_channels.end(); ++it, ++channel_number) {
        ChannelInfo* pChannelInfo = *it;
        EngineChannel* pChannel = pChannelInfo->m_pChannel;

        // Inactive channels are neither processed nor mixed.
        if (!pChannel->isActive()) {
            continue;
        }

        // Flag the channel for the master mix if it is master-enabled.
        bool needsProcessing = false;
        if (pChannel->isMaster()) {
            masterOutput |= (1 << channel_number);
            needsProcessing = true;
        }

        // If the channel is enabled for previewing in headphones, copy it
        // over to the headphone buffer
        if (pChannel->isPFL()) {
            headphoneOutput |= (1 << channel_number);
            needsProcessing = true;
        }

        // Process the buffer if necessary
        if (needsProcessing) {
            pChannel->process(NULL, pChannelInfo->m_pBuffer, iBufferSize);
        }
    }
    timer.elapsed(true);

    // Mix all the enabled headphone channels together.
    m_headphoneGain.setGain(chead_gain);
    mixChannels(headphoneOutput, maxChannels, m_pHead, iBufferSize, &m_headphoneGain);

    // Calculate the crossfader gains for left and right side of the crossfader
    float c1_gain, c2_gain;
    EngineXfader::getXfadeGains(c1_gain, c2_gain,
                                crossfader->get(), xFaderCurve->get(),
                                xFaderCalibration->get(),
                                xFaderMode->get()==MIXXX_XFADER_CONSTPWR,
                                xFaderReverse->get()==1.0);

    // Now set the gains for overall volume and the left, center, right gains.
    m_masterGain.setGains(m_pMasterVolume->get(), c1_gain, 1.0, c2_gain);

    // Perform the master mix
    mixChannels(masterOutput, maxChannels, m_pMaster, iBufferSize, &m_masterGain);

#ifdef __LADSPA__
    // LADPSA master effects
    ladspa->process(m_pMaster, m_pMaster, iBufferSize);
#endif

    // Clipping
    clipping->process(m_pMaster, m_pMaster, iBufferSize);

    // Balance values
    // Positive bal attenuates the left channel, negative bal attenuates
    // the right (bal is negative, so += reduces the gain); the louder
    // side keeps unity gain.
    float balright = 1.;
    float balleft = 1.;
    float bal = m_pBalance->get();
    if (bal>0.)
        balleft -= bal;
    else if (bal<0.)
        balright += bal;

    // Perform balancing on main out
    SampleUtil::applyAlternatingGain(m_pMaster, balleft, balright, iBufferSize);

    // Update VU meter (it does not return anything). Needs to be here so that
    // master balance is reflected in the VU meter.
    if (vumeter != NULL)
        vumeter->process(m_pMaster, m_pMaster, iBufferSize);

    //Submit master samples to the side chain to do shoutcasting, recording,
    //etc.  (cpu intensive non-realtime tasks)
    if (m_pSideChain != NULL) {
        m_pSideChain->writeSamples(m_pMaster, iBufferSize);
    }

    // Add master to headphone with appropriate gain
    SampleUtil::addWithGain(m_pHead, m_pMaster, cmaster_gain, iBufferSize);

    // Head volume and clipping
    SampleUtil::applyGain(m_pHead, m_pHeadVolume->get(), iBufferSize);
    head_clipping->process(m_pHead, m_pHead, iBufferSize);

    //Master/headphones interleaving is now done in
    //SoundManager::requestBuffer() - Albert Nov 18/07

    // We're close to the end of the callback. Wake up the engine worker
    // scheduler so that it runs the workers.
    m_pWorkerScheduler->runWorkers();
}
Example #26
0
// Rebuild the "flip map" from the current contours (when the input is
// dirty) and remap the "to_flip" image through it so that every pixel is
// reflected across the nearest contour segment. Produces the images
// "dist", "mapx", "mapy" (debug views) and "flipped".
bool ContourFlip::update()
{
  //if (!ImageNode::update()) return false;
  if (!Contour::update()) return false;

  // TBD get dirtiness of in to see if flip map needs to be recomputed
  
  if (!isDirty(this, 23)) { return true;}

  cv::Mat to_flip = getImage("to_flip");
  if (to_flip.empty()) {
    VLOG(2) << name << " in is empty";
    return false;
  }
  
  cv::Mat flipped = cv::Mat(to_flip.size(), to_flip.type());
 
  bool valid;
  bool is_dirty;
  cv::Mat in = getImage("in", valid, is_dirty, 51);
  
  // Only rebuild the (expensive) offset maps when the input changed.
  if (is_dirty) 
  {
  
  LOG(INFO) << "contour flip updating " << is_dirty;
  cv::Mat dist = cv::Mat(to_flip.size(), to_flip.type());

  const int wd = dist.cols;
  const int ht = dist.rows;

  // This is very slow for dense contours, maybe make
  // scale option that will process the image at a lower resolution 
  // then upscale the off_x,off_y for the remap
  for (int y = 0; y < ht; y++) {
  for (int x = 0; x < wd; x++) {

  // Nearest contour point to this pixel, over all segments.
  float min_dist = 1e9;
  cv::Point2f min_closest;

  int count = 0;
  // TBD just find the nearest contour point for now, don't worry about long segment
  // or the actual normal of the segment - just flip the pixel on the nearest point
  // NOTE(review): signed i/j compared against size_t sizes — harmless
  // here but triggers sign-compare warnings.
  for (int i = 0; i < contours0.size(); i++) {  
  for (int j = 0; j < contours0[i].size(); j++) { 
  
    // Segment v->w (wrapping back to the first point); find the closest
    // point on it to the pixel (x, y).
    cv::Point2f v = contours0[i][j]; 
    cv::Point2f w = contours0[i][ (j+1) % contours0[i].size() ]; 
    //const float dx = (contours0[i][j].x - x); 
    //const float dy = (contours0[i][j].y - y);
    //const float cur_dist = fabs(dx) + fabs(dy); 
    //const float cur_dist = sqrt(dx*dx + dy*dy);
    cv::Point2f closest;
    const float cur_dist = minimum_distance( v, w, cv::Point2f(x, y), closest ); 
    if (cur_dist < min_dist) {
      min_dist = cur_dist;
      min_closest = closest;
    }
    count++;
  }}

  // Publish the number of segments examined once per frame (pixel 0,0).
  if ( (x == 0) && ( y == 0) ) setSignal("count", count);

  // TBD make a reflection effect instead of straight rolling over the edges?
  // Reflect the pixel across the closest contour point, wrapping at the
  // image borders via the modulo.
  const int src_x = ((x + (int) ( 2 * (min_closest.x - x) ) ) + wd) % wd; 
  const int src_y = ((y + (int) ( 2 * (min_closest.y - y) ) ) + ht) % ht;
 
  // TBD this could be a map for remap and if the in image doesn't change it will
  // be more efficient
  //flipped.at<cv::Vec4b>(y, x) = to_flip.at<cv::Vec4b>(src_y, src_x);
  off_x.at<float>(y, x) = src_x - x;
  off_y.at<float>(y, x) = src_y - y;
  //LOG_FIRST_N(INFO,20) << src_x << " " << x << ", " << src_y << " " << y;
  dist.at<cv::Vec4b>(y, x) = cv::Scalar::all(min_dist); // % 255);
  }}

  // Absolute destination coordinates for remap (base_x/base_y presumably
  // hold the identity coordinate grid — TODO confirm), converted to the
  // fixed-point CV_16SC2 format remap() consumes.
  cv::Mat dist_x = base_x + (off_x); //_scaled - offsetx * scalex);
  cv::Mat dist_y = base_y + (off_y); //_scaled - offsety * scaley);

  cv::convertMaps(dist_x, dist_y, dist_xy16, dist_int, CV_16SC2, true);
  
  setImage("dist", dist);

  // Debug outputs: visualize the raw x/y offsets as grayscale images.
  {
    cv::Mat dist_xy8;
    cv::Mat dist_xy16_temp;
    cv::convertMaps(off_x, off_y, dist_xy16_temp, dist_int, CV_16SC2, true);
    dist_xy16_temp.convertTo(dist_xy8, CV_8UC2, getSignal("map_scale"));
    cv::Mat mapx = cv::Mat( Config::inst()->getImSize(), CV_8UC4, cv::Scalar(0,0,0,0));
    cv::Mat mapy = cv::Mat( Config::inst()->getImSize(), CV_8UC4, cv::Scalar(0,0,0,0));

    // Broadcast channel 0 (x offsets) / channel 1 (y offsets) into the
    // first three channels of the display images.
    int chx[] = {0,0, 0,1, 0,2};
    mixChannels(&dist_xy8, 1, &mapx, 1, chx, 3 );
    int chy[] = {1,0, 1,1, 1,2};
    mixChannels(&dist_xy8, 1, &mapy, 1, chy, 3 );

    setImage("mapx", mapx);
    setImage("mapy", mapy);
  }

  }
  
 
  // Remap through the cached map (may have been built on a prior update).
  if (!dist_xy16.empty()) 
    cv::remap(to_flip, flipped, dist_xy16, cv::Mat(), getModeType(), getBorderType());

  setImage("flipped", flipped);

  return true;
}
Example #27
0
// Convert a CoreGraphics-sourced image (`in`, 4 channels whose order is
// described by `srcAlpha`) into the requested RGB layout `destFmt`,
// writing into `out` and returning it. Throws invalid_argument on
// unsupported combinations.
static cv::Mat &_cvtRGBColor(
        const cv::Mat &in,
        cv::Mat &out,
        CGImageAlphaInfo srcAlpha,
        osx::disp::RGBType destFmt
        ) {
    // mixChannels/cvtColor below cannot run in place.
    if (in.data == out.data) {
        throw invalid_argument("`in` must be different from `out`");
    }

    // Classify the source layout: alpha-last (RGBA) vs alpha-first (ARGB).
    bool alphaLast;
    switch (srcAlpha) {
        case kCGImageAlphaPremultipliedLast:
        case kCGImageAlphaLast:
        case kCGImageAlphaNoneSkipLast:
            alphaLast = true;
            break;
        case kCGImageAlphaPremultipliedFirst:
        case kCGImageAlphaFirst:
        case kCGImageAlphaNoneSkipFirst:
            alphaLast = false;
            break;
        default:
            throw invalid_argument("invalid srcAlpha");
    }

    if (alphaLast) {
        // Source is RGBA.
        if (destFmt == osx::disp::COLOR_BGR) {
            cvtColor(in, out, cv::COLOR_RGBA2BGR);
        } else if (destFmt == osx::disp::COLOR_ARGB) {
            // RGBA -> ARGB: rotate channels right by one.
            out.create(in.size(), CV_8UC4);
            const int fromTo[] = {0,1, 1,2, 2,3, 3,0};
            mixChannels(&in, 1, &out, 1, fromTo, 4);
        } else if (destFmt == osx::disp::COLOR_RGBA) {
            // Already in the requested layout (shallow copy).
            out = in;
        } else {
            throw invalid_argument("invalid destFmt");
        }
    } else {
        // Source is ARGB.
        if (destFmt == osx::disp::COLOR_BGR) {
            // ARGB -> BGR: drop alpha, reverse R/G/B.
            out.create(in.size(), CV_8UC3);
            const int fromTo[] = {1,2, 2,1, 3,0};
            mixChannels(&in, 1, &out, 1, fromTo, 3);
        } else if (destFmt == osx::disp::COLOR_ARGB) {
            // Already in the requested layout (shallow copy).
            out = in;
        } else if (destFmt == osx::disp::COLOR_RGBA) {
            // ARGB -> RGBA: rotate channels left by one.
            out.create(in.size(), CV_8UC4);
            const int fromTo[] = {0,3, 1,0, 2,1, 3,2};
            mixChannels(&in, 1, &out, 1, fromTo, 4);
        } else {
            throw invalid_argument("invalid destFmt");
        }
    }

    return out;
}
Example #28
0
// Inspection worker thread: acquires frames from the cameras (PTGREY
// builds), runs the pretzel inspection pipeline on each camera buffer,
// and updates the display. Loops until QS->EventEndProcess is raised,
// then clears the flag and returns 0.
// Result code `pass`: 0/1/2 from the hole count, 3 = no pretzel on belt,
// -1 = error (multiple large contours); interpretation is up to
// QSSysPrintResult.
long QSProcessThreadFunc(CTCSys *QS)
{
	int i;
	int	pass = -1;
	while (QS->EventEndProcess == FALSE) {

#ifdef PTGREY
		// Acquisition: pull one frame per camera, then copy every frame out
		// of the driver buffer before processing touches it.
		if (QS->IR.Acquisition == TRUE) {
			for (i = 0; i < QS->IR.NumCameras; i++) {
				if (QS->IR.pgrCamera[i]->RetrieveBuffer(&QS->IR.PtGBuf[i]) == PGRERROR_OK) {
					QS->QSSysConvertToOpenCV(&QS->IR.AcqBuf[i], QS->IR.PtGBuf[i]);
				}
			}
			for (i = 0; i < QS->IR.NumCameras; i++) {
#ifdef PTG_COLOR
				mixChannels(&QS->IR.AcqBuf[i], 1, &QS->IR.ProcBuf[i], 1, QS->IR.from_to, 3); // Swap B and R channels anc=d copy out the image at the same time.
#else
				QS->IR.AcqBuf[i].copyTo(QS->IR.ProcBuf[i][BufID]);	// Has to copy out of acquisition buffer before processing
#endif
			}
		}
#else
		Sleep(200);
#endif
		// Process Image ProcBuf
		if (QS->IR.Inspection) {
			// Images are acquired into ProcBuf{0] 
			// May need to create child image for processing to exclude background and speed up processing
			
			for (i = 0; i < QS->IR.NumCameras; i++)
			{
				//Obtain image
				Mat imgFull = QS->IR.ProcBuf[i];

				//Crop image
				// NOTE(review): roi and th below are function-local statics
				// computed from the first frame only, then reused for every
				// camera and frame — confirm this is intended.
				static Rect roi;
				if (roi.width == 0)
					calcAndOrDraw(imgFull, roi, true);
				Mat imgCropped(imgFull, roi);
				Mat img = imgCropped;

				// Threshold
				// Blend the inverted hue-distance (channelDist, presumably
				// distance from hue 16 — confirm) with the value channel,
				// blur, then binarize (Otsu on the first frame, fixed
				// threshold afterwards).
				Mat imgHsv, d0, d2;
				cvtColor(img, imgHsv, CV_BGR2HSV);
				channelDist(imgHsv, d0, 16, 0);
				bitwise_not(d0, d0);
				getChannel(imgHsv, d2, 2);
				addWeighted(d0, .5, d2, .5, 0, img);
				GaussianBlur(img, img, Size(5, 5), 0);
				static int th = 0;
				if (th == 0)
					th = cv::threshold(img, img, 60, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
				else
					cv::threshold(img, img, th, 255, CV_THRESH_BINARY);

				//Find contours
				vector<vector<Point> > contours;
				vector<Vec4i> hierarchy;
				findContours(img, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
				img = Mat::zeros(img.size(), CV_8UC3);
				// NOTE(review): this loop variable shadows the outer camera
				// index `i` (works, but is easy to misread).
				for (int i = 0; i < contours.size(); i++)
					drawContours(img, contours, i, Scalar(255, 255, 255));

				// Find parent contour
				// The pretzel is the single largest top-level contour
				// (hierarchy[i][3] == -1); more than one large top-level
				// contour is flagged as -2 (error).
				int pretzelContourIndex = -1;
				int minPretzelSize = 1000;
				int largestContourSize = minPretzelSize;
				for (int i = 0; i < hierarchy.size(); i++)
				{
					if (hierarchy[i][3] == -1)
					{
						Moments mm = moments((Mat)contours[i]);
						if (mm.m00 > largestContourSize)
						{
							if (pretzelContourIndex != -1) // if multiple pretzels
							{
								pretzelContourIndex = -2;
								break;
							}
							pretzelContourIndex = i;
							largestContourSize = mm.m00;
							printf("Size: %d\n", (int)mm.m00);
						}
					}
				}
				int pretzelSize = largestContourSize;

				// Evaluate pretzel based on contour children
				// Count child contours (holes) longer than minHoleSize points
				// and map the count to a result code: 3 holes -> 0,
				// 2 -> 1, <=1 -> 2.
				int minHoleSize = 10;
				if (pretzelContourIndex != -1 && pretzelContourIndex != -2)
				{
					// Find center of mass
					Moments mm = moments((Mat)contours[pretzelContourIndex]);
					double centerX = (mm.m10 / mm.m00);
					double centerY = (mm.m01 / mm.m00);
					circle(img, Point(centerX, centerY), 4, Scalar(0, 255, 0));

					// Only grade pretzels whose center sits inside the
					// vertical band; ones near the top/bottom edge are
					// treated as "no pretzel".
					int borderSize = 100;
					if (centerY > borderSize && centerY < img.size().height - borderSize)
					{
						int numberOfHoles = 0;
						int child = hierarchy[pretzelContourIndex][2];
						while (child != -1)
						{
							if (contours[child].size() > minHoleSize)
								numberOfHoles++;
							child = hierarchy[child][0];
						}
						if (numberOfHoles <= 1)
							pass = 2;
						else if (numberOfHoles == 2)
							pass = 1;
						else if (numberOfHoles == 3)
							pass = 0;
					}
					else
						pass = 3; //no pretzel on belt
				}
				else if (pretzelContourIndex == -1)
					pass = 3; //no pretzel on belt
				else //if (pretzelContourIndex == -2)
					pass = -1; //error

				//Output Image
				// Paint the annotated crop back into the full frame and hand
				// a grayscale copy to the output buffer.
				if (img.channels() != 3)
					cvtColor(img, img, CV_GRAY2BGR);
				img.copyTo(imgCropped);

				if (imgFull.channels() == 3)
					cvtColor(imgFull, imgFull, CV_BGR2GRAY);
				imgFull.copyTo(QS->IR.OutBuf1[i]);
			}

		}
		// Display Image
		if (QS->IR.UpdateImage) {
			for (i = 0; i<QS->IR.NumCameras; i++) {
				if (!QS->IR.Inspection) {
					// Example of displaying color buffer ProcBuf
					QS->IR.ProcBuf[i].copyTo(QS->IR.DispBuf[i]);
				}
				else {
					// Example of displaying B/W buffer OutBuf1
					QS->IR.OutBuf[0] = QS->IR.OutBuf[1] = QS->IR.OutBuf[2] = QS->IR.OutROI1[i];
					merge(QS->IR.OutBuf, 3, QS->IR.DispROI[i]);
					// Example to show inspection result, print result after the image is copied
					QS->QSSysPrintResult(pass);
				}
			}
			QS->QSSysDisplayImage();
		}
	}
	// Acknowledge shutdown by clearing the end-process flag.
	QS->EventEndProcess = FALSE;
	return 0;
}
Example #29
0
// Merge `n` arrays `mv` (same size and depth, any channel counts) into a
// single interleaved array `_dst` whose channel count is the sum of the
// inputs' channels.
void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
{
    CV_Assert( mv && n > 0 );
    
    int depth = mv[0].depth();
    bool allch1 = true;
    int k, cn = 0;
    size_t i;
    
    // Validate that all inputs agree in size and depth; count channels.
    for( i = 0; i < n; i++ )
    {
        CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
        allch1 = allch1 && mv[i].channels() == 1;
        cn += mv[i].channels();
    }
    
    CV_Assert( 0 < cn && cn <= CV_CN_MAX );
    _dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
    Mat dst = _dst.getMat();
    
    // Trivial case: a single input is just a copy.
    if( n == 1 )
    {
        mv[0].copyTo(dst);
        return;
    }
    
    // General case (some input has >1 channel): delegate to mixChannels
    // with an identity source-channel -> destination-channel pairing.
    if( !allch1 )
    {
        AutoBuffer<int> pairs(cn*2);
        int j, ni=0;
        
        for( i = 0, j = 0; i < n; i++, j += ni )
        {
            ni = mv[i].channels();
            for( k = 0; k < ni; k++ )
            {
                pairs[(j+k)*2] = j + k;
                pairs[(j+k)*2+1] = j + k;
            }
        }
        mixChannels( mv, n, &dst, 1, &pairs[0], cn );
        return;
    }
        
    // Fast path: all inputs single-channel. Interleave plane by plane with
    // the per-depth merge kernel, processing ~BLOCK_SIZE bytes at a time.
    size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
    int blocksize0 = (int)((BLOCK_SIZE + esz-1)/esz);
    // One raw buffer holds both the array-pointer table and the (16-byte
    // aligned) per-array data pointers for the iterator.
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);
    
    // arrays[0] is the destination; arrays[1..cn] are the sources.
    arrays[0] = &dst;
    for( k = 0; k < cn; k++ )
        arrays[k+1] = &mv[k];
    
    NAryMatIterator it(arrays, ptrs, cn+1);
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
    MergeFunc func = mergeTab[depth];
    
    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < total; j += blocksize )
        {
            int bsz = std::min(total - j, blocksize);
            func( (const uchar**)&ptrs[1], ptrs[0], bsz, cn );
            
            // Advance pointers unless this was the plane's last block
            // (++it resets them for the next plane).
            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( int k = 0; k < cn; k++ )
                    ptrs[k+1] += bsz*esz1;
            }
        }
    }
}
Example #30
0
void cv::mixChannels(const vector<Mat>& src, vector<Mat>& dst,
                 const int* fromTo, size_t npairs)
{
    mixChannels(!src.empty() ? &src[0] : 0, src.size(),
                !dst.empty() ? &dst[0] : 0, dst.size(), fromTo, npairs);
}