Example #1
void TRop::over(const TRasterP &rout, const TRasterP &rup, const TPoint &pos)
{
	TRect outRect(rout->getBounds());
	TRect upRect(rup->getBounds() + pos);
	TRect intersection = outRect * upRect;
	if (intersection.isEmpty())
		return;

	TRasterP cRout = rout->extract(intersection);
	TRect r = intersection - pos;
	TRasterP cRup = rup->extract(r);

	TRaster32P rout32 = cRout, rup32 = cRup;
	TRaster64P rout64 = cRout, rup64 = cRup;

	TRasterGR8P rout8 = cRout, rup8 = cRup;

	TRasterCM32P routCM32 = cRout, rupCM32 = cRup;

	rout->lock();
	rup->lock();

	// TRaster64P rout64 = rout, rin64 = rin;
	if (rout32 && rup32) {
#ifdef _WIN32
		if (TSystem::getCPUExtensions() & TSystem::CpuSupportsSse2)
			do_over_SSE2(rout32, rup32);
		else
#endif
			do_overT2<TPixel32, UCHAR>(rout32, rup32);
	} else if (rout64) {
		if (!rup64) {
			TRaster64P raux(cRup->getSize());
			TRop::convert(raux, cRup);
			rup64 = raux;
		}
		do_overT2<TPixel64, USHORT>(rout64, rup64);
	} else if (rout32 && rup8)
		do_over(rout32, rup8);
	else if (rout8 && rup32)
		do_over(rout8, rup32);
	else if (rout8 && rup8)
		TRop::copy(rout8, rup8);
	else if (routCM32 && rupCM32)
		do_over(routCM32, rupCM32);
	else {
		rout->unlock();
		rup->unlock();
		throw TRopException("unsupported pixel type");
	}

	rout->unlock();
	rup->unlock();
}
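For orientation, a minimal usage sketch of the overload above. The raster constructors, fill() and TPixel32 are the standard OpenToonz raster API, but the sizes, colors and offset here are made up for illustration:

// Hypothetical call: composite a 64x64 semi-transparent overlay onto a
// 256x256 opaque background, with the overlay's lower-left corner at (100, 50).
// Both rasters are 32-bit, so the TPixel32 branch (or SSE2 on Windows) runs.
TRaster32P out(256, 256), up(64, 64);
out->fill(TPixel32(0, 0, 0, 255));
up->fill(TPixel32(255, 0, 0, 128));
TRop::over(out, up, TPoint(100, 50)); // only the intersecting region is written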
Example #2
TPoint computeCentroid(const TRaster32P &r) {
  TPoint ret(1, 1);

  TRasterGR8P raux(r->getLx() + 2, r->getLy() + 2);

  if (fillByteRaster(r, raux)) doComputeCentroid(raux, ret);

  ret.x--;
  ret.y--; /* for the added border */
  return ret;
}
Example #3
void Convert2Tlv::buildToonzRaster(TRasterCM32P &rout, const TRasterP &rin1, const TRasterP &rin2)
{
	if (rin2)
		assert(rin1->getSize() == rin2->getSize());

	rout->clear();

	std::cout << "      computing inks...\n";
	TRaster32P r1 = (TRaster32P)rin1;
	TRasterGR8P r1gr = (TRasterGR8P)rin1;
	TRaster32P r2 = (TRaster32P)rin2;
	TRasterGR8P r2gr = (TRasterGR8P)rin2;
	TRasterP rU, rP;

	if (r1gr) {
		rU = r1gr;
		rP = r2;
	} else if (r2gr) {
		rU = r2gr;
		rP = r1;
	} else if (!r1)
		rU = r2;
	else if (!r2)
		rU = r1;
	else if (firstIsUnpainted(r1, r2)) {
		rU = r1;
		rP = r2;
	} else {
		rU = r2;
		rP = r1;
	}

	TRasterCM32P r;
	if (rout->getSize() != rU->getSize()) {
		int dx = rout->getLx() - rU->getLx();
		int dy = rout->getLy() - rU->getLy();
		assert(dx >= 0 && dy >= 0);

		r = rout->extract(dx / 2, dy / 2, dx / 2 + rU->getLx() - 1, dy / 2 + rU->getLy() - 1);
	} else
		r = rout;

	if ((TRasterGR8P)rU)
		buildInksFromGrayTones(r, rU);
	else if (m_isUnpaintedFromNAA)
		buildInksForNAAImage(r, (TRaster32P)rU);
	else {
		int maxMatte = getMaxMatte((TRaster32P)rU);
		if (maxMatte == -1)
			buildInksFromGrayTones(r, rU);
		else {
			if (maxMatte < 255)
				normalize(rU, maxMatte);
			buildInks(r, (TRaster32P)rU /*rP,*/);
		}
	}

	if (m_autoclose)
		TAutocloser(r, AutocloseDistance, AutocloseAngle, 1, AutocloseOpacity).exec();

	if (rP) {
		std::cout << "      computing paints...\n";
		doFill(r, rP);
	}
	if (m_antialiasType == 2) //remove antialias
		removeAntialias(r);
	else if (m_antialiasType == 1) //add antialias
	{
		TRasterCM32P raux(r->getSize());
		TRop::antialias(r, raux, 10, m_antialiasValue);
		rout = raux;
	}
}
Example #4
TImageP TImageReader::load0()
{
	if (!m_reader && !m_vectorReader)
		open();

	if (m_reader) {
		TImageInfo info = m_reader->getImageInfo();
		if (info.m_lx <= 0 || info.m_ly <= 0)
			return TImageP();

		// Initialize raster info
		assert(m_shrink > 0);

		// Build loading rect
		int x0 = 0;
		int x1 = info.m_lx - 1;
		int y0 = 0;
		int y1 = info.m_ly - 1;

		if (!m_region.isEmpty()) {
			// Intersect with the externally specified loading region

			x0 = tmax(x0, m_region.x0);
			y0 = tmax(y0, m_region.y0);
			x1 = tmin(x1, m_region.x1);
			y1 = tmin(y1, m_region.y1);

			if (x0 >= x1 || y0 >= y1)
				return TImageP();
		}

		if (m_shrink > 1) {
			// Crop to shrink multiples
			x1 -= (x1 - x0) % m_shrink;
			y1 -= (y1 - y0) % m_shrink;
		}
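		// Illustrative figures (not taken from any actual file): with x0 = 0,
		// x1 = 99 and m_shrink = 4, the crop above removes (99 - 0) % 4 = 3
		// columns, so x1 becomes 96 and the width computed below is
		// (96 - 0) / 4 + 1 = 25 pixels.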

		assert(x0 <= x1 && y0 <= y1);

		TDimension imageDimension = TDimension((x1 - x0) / m_shrink + 1, (y1 - y0) / m_shrink + 1);

		if (m_path.getType() == "tzp" || m_path.getType() == "tzu") {
			// Colormap case

			TRasterCM32P ras(imageDimension);
			readRaster(ras, m_reader, x0, y0, x1, y1, info.m_lx, info.m_ly, m_shrink);

			// Build the savebox
			TRect saveBox(info.m_x0, info.m_y0, info.m_x1, info.m_y1);
			if (!m_region.isEmpty()) {
				// Intersect with the loading rect
				if (m_region.overlaps(saveBox)) {
					saveBox *= m_region;

					int sbx0 = saveBox.x0 - m_region.x0;
					int sby0 = saveBox.y0 - m_region.y0;
					int sbx1 = sbx0 + saveBox.getLx() - 1;
					int sby1 = sby0 + saveBox.getLy() - 1;
					assert(sbx0 >= 0);
					assert(sby0 >= 0);
					assert(sbx1 >= 0);
					assert(sby1 >= 0);

					saveBox = TRect(sbx0, sby0, sbx1, sby1);
				} else
					saveBox = TRect(0, 0, 1, 1);
			}

			if (m_shrink > 1) {
				saveBox.x0 = saveBox.x0 / m_shrink;
				saveBox.y0 = saveBox.y0 / m_shrink;
				saveBox.x1 = saveBox.x1 / m_shrink;
				saveBox.y1 = saveBox.y1 / m_shrink;
			}

			TToonzImageP ti(ras, ras->getBounds() * saveBox);
			ti->setDpi(info.m_dpix, info.m_dpiy);

			return ti;
		} else if (info.m_bitsPerSample >= 8) {
			// Common byte-based rasters (see below, we have black-and-white bit-based images too)

			if (info.m_samplePerPixel == 1 && m_readGreytones) {
				//  Greymap case
				// NOTE: Uses a full 32-bit raster first, and then copies down to the GR8

				// Observe that GR16 file images get immediately down-cast to GR8...
				// Should we implement that too?

				TRasterGR8P ras(imageDimension);
				readRaster_copyLines(ras, m_reader, x0, y0, x1, y1, info.m_lx, info.m_ly, m_shrink);

				TRasterImageP ri(ras);
				ri->setDpi(info.m_dpix, info.m_dpiy);

				return ri;
			}

			// assert(info.m_samplePerPixel == 3 || info.m_samplePerPixel == 4);

			TRasterP _ras;
			if (info.m_bitsPerSample == 16) {
				if (m_is64BitEnabled || m_path.getType() != "tif") {
					//  Standard 64-bit case.

					// Also covers down-casting to 32-bit from a 64-bit image file whenever
					// not a tif file (see below).

					TRaster64P ras(imageDimension);
					readRaster(ras, m_reader, x0, y0, x1, y1, info.m_lx, info.m_ly, m_shrink);

					_ras = ras;
				} else {
					// The Tif reader has got an automatically down-casting readLine(char*, ...)
					// in case the input file is 64-bit. The advantage is that no intermediate
					// 64-bit raster is required in this case.

					TRaster32P ras(imageDimension);
					readRaster(ras, m_reader, x0, y0, x1, y1, info.m_lx, info.m_ly, m_shrink);

					_ras = ras;
				}
			} else if (info.m_bitsPerSample == 8) {
				//  Standard 32-bit case
				TRaster32P ras(imageDimension);
				readRaster(ras, m_reader, x0, y0, x1, y1, info.m_lx, info.m_ly, m_shrink);

				_ras = ras;
			} else
				throw TImageException(m_path, "Image format not supported");

			// 64-bit to 32-bit conversions if necessary  (64 bit images not allowed)
			if (!m_is64BitEnabled && (TRaster64P)_ras) {
				TRaster32P raux(_ras->getLx(), _ras->getLy());
				TRop::convert(raux, _ras);
				_ras = raux;
			}

			// Return the image
			TRasterImageP ri(_ras);
			ri->setDpi(info.m_dpix, info.m_dpiy);

			return ri;
		} else if (info.m_samplePerPixel == 1 && info.m_valid == true) {
			//  Previously dubbed as 'Palette cases'. No clue about what this is... :|

			TRaster32P ras(imageDimension);
			readRaster(ras, m_reader, x0, y0, x1, y1, info.m_lx, info.m_ly, m_shrink);

			TRasterImageP ri(ras);
			ri->setDpi(info.m_dpix, info.m_dpiy);

			return ri;
		} else if (info.m_samplePerPixel == 1 && m_readGreytones) {
			//  Black-and-White case, I guess. Standard greymaps were considered above...

			TRasterGR8P ras(imageDimension);
			readRaster_copyLines(ras, m_reader, x0, y0, x1, y1, info.m_lx, info.m_ly, m_shrink);

			TRasterImageP ri(ras);
			ri->setDpi(info.m_dpix, info.m_dpiy);

			if (info.m_bitsPerSample == 1) // I suspect this always evaluates true...
				ri->setScanBWFlag(true);

			return ri;
		} else
			return TImageP();
	} else if (m_vectorReader) {
		TVectorImage *vi = m_vectorReader->read();
		return TVectorImageP(vi);
	} else
		return TImageP();
}
bool Homography::extract(cv::Mat &H, irr::core::vector2di *corners, irr::core::vector3df *position, irr::core::vector3df *angles, int refine) {
    
    if ( matches.size() > 3 && objectKeyPoints.size() < sceneKeyPoints.size() )
    {
        std::vector<cv::Point2f> objectPoints;
        std::vector<cv::Point2f> scenePoints;
        
        // get the keypoints from the goodmatches
        for( int i = 0; i < matches.size(); i++ )
        {
            objectPoints.push_back( objectKeyPoints[ matches[i].queryIdx ].pt );
            scenePoints.push_back( sceneKeyPoints[ matches[i].trainIdx ].pt );
        }
        
        // find the homography of the keypoints.
        H = cv::findHomography( objectPoints, scenePoints, CV_RANSAC );
        
        std::vector<cv::Point2f> obj_corners(4);
        std::vector<cv::Point2f> scene_corners(4);
        obj_corners[0] = cvPoint( 0,          0 );
        obj_corners[1] = cvPoint( objectSize.width, 0 );
        obj_corners[2] = cvPoint( objectSize.width, objectSize.height );
        obj_corners[3] = cvPoint( 0,          objectSize.height );
        
        // get the 2D points for the homography corners
        perspectiveTransform( obj_corners, scene_corners, H);
        
        if (refine > 0) {
            cv::Mat sceneCopyCopy = sceneCopy.clone();
            cv::warpPerspective(sceneCopy, sceneCopy, H, objectSize, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
            cv::Mat H2;
            analyze(sceneCopy);
            if (extract(H2, NULL, NULL, NULL, refine - 1)) {
                H *= H2;
                perspectiveTransform( obj_corners, scene_corners, H);
            }
        }
        
        // give the caller the corners of the 2D plane
        if (corners != NULL)
            for (int i = 0; i < 4; i++) {
                corners[i] = irr::core::vector2di(scene_corners[i].x, scene_corners[i].y);
            }
        
        // init the rotation and translation vectors
        cv::Mat raux(3, 1, CV_64F), taux(3, 1, CV_64F);
        
        // calculating 3D points
        float maxSize = std::max(objectSize.width, objectSize.height);
        float unitW = objectSize.width / maxSize;
        float unitH = objectSize.height / maxSize;
        
        // get the rotation and translation vectors
        std::vector<cv::Point3f> scene_3d_corners(4);
        scene_3d_corners[0] = cv::Point3f(-unitW, -unitH, 0);
        scene_3d_corners[1] = cv::Point3f( unitW, -unitH, 0);
        scene_3d_corners[2] = cv::Point3f( unitW,  unitH, 0);
        scene_3d_corners[3] = cv::Point3f(-unitW,  unitH, 0);
        cv::solvePnP(scene_3d_corners, scene_corners, getCamIntrinsic(), cv::Mat(), raux, taux);
        
        // give the caller the 3D plane position and angle
        if (position != NULL)
            position->set(taux.at<double>(0, 0), -taux.at<double>(1, 0), taux.at<double>(2, 0));
        if (angles != NULL)
            angles->set(-raux.at<double>(0, 0) * irr::core::RADTODEG, raux.at<double>(1, 0) * irr::core::RADTODEG, -raux.at<double>(2, 0) * irr::core::RADTODEG);
        
        return true;
    }
    
    return false;
}
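The corner projection above follows the standard cv::perspectiveTransform pattern. A self-contained sketch of just that step, with an identity matrix standing in for the homography that cv::findHomography would normally return:

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <vector>

int main() {
    cv::Size objectSize(320, 240);
    cv::Mat H = cv::Mat::eye(3, 3, CV_64F); // stand-in for the estimated homography

    std::vector<cv::Point2f> obj_corners = {
        {0.f, 0.f},
        {(float)objectSize.width, 0.f},
        {(float)objectSize.width, (float)objectSize.height},
        {0.f, (float)objectSize.height}
    };
    std::vector<cv::Point2f> scene_corners(4);

    // Map the object's corners into the scene through H.
    cv::perspectiveTransform(obj_corners, scene_corners, H);

    for (const cv::Point2f &p : scene_corners)
        std::printf("%.1f, %.1f\n", p.x, p.y);
    return 0;
}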
void regionalSegmentationModule::completeBlobs(std::vector<Blob> &blobs,
                                               QImage *fg, QImage *current) {

    int nbins = 256/m_bins;
    int i, j, w = fg->width(), h = fg->height();
    int i0, j0, w0, h0;
    double maxVal;

    QImage curr888 = current->convertToFormat(QImage::Format_RGB888);
    cv::Mat c(h, w, CV_8UC3), c_yuv(h, w, CV_8UC3),
            f(h, w, CV_8UC1), f0(h, w, CV_8UC1), r(h, w, CV_8UC3);

    int bl = fg->bytesPerLine(), bl2 = curr888.bytesPerLine();
    std::vector<Blob>::iterator it, it_end = blobs.end();
    uchar d1, d2, d3,
          *fg_p = fg->bits(),
          *c_p = curr888.bits();

    memcpy(c.data, c_p, h*bl2);
    memcpy(f.data, fg_p, h*bl);
    memset(c_yuv.data, 0, h*bl2);
    memset(r.data, 0, h*bl2);

    f.copyTo(f0);

    cv::Rect roi;
    //Histogram parameters
    int channels[] = {1, 2};
    int histSize[] = {nbins, nbins};
    float pranges[] = { 0, 256 };
    const float* ranges[] = { pranges, pranges };

    //Rectangular structuring element
    cv::Mat element = cv::getStructuringElement( cv::MORPH_RECT,
                                           cv::Size( 3, 3 ),
                                           cv::Point( 1, 1 ) );

    //Set local window pixel histogram for comparison
    cv::Rect wroi;
    wroi.width = wroi.height = w_size/2;

    m_data->hulls.clear();

    //Start blobs processing for hulls calculation
    for(it=blobs.begin(); it!=it_end; it++) {
        Blob &b = (*it);
        i0 = b.bbox.ytop, j0 = b.bbox.xleft,
        h0 = b.bbox.height, w0 = b.bbox.width;
        //With the same Mat f .....

        //Opening on the blob
        roi.x = j0;
        roi.y = i0;
        roi.width = w0;
        roi.height = h0;

        //Restrict operations to blob zone
        cv::Mat aux(f, roi);
        cv::Mat aux0(f0, roi);

        //Reduce bad detections, in general near borders
        cv::erode(aux, aux, element, cv::Point(-1,-1), 1);

        //Reduce bad detections, in general near borders and recover shape
        cv::erode(aux0, aux0, element, cv::Point(-1,-1), 1);
        cv::dilate(aux0, aux0, element, cv::Point(-1,-1), 1);

        //Border detection
        cv::Mat border_aux(aux.size(), CV_8UC1);
        cv::Canny(aux,border_aux, 50,100, 3);

#ifdef RSEG_DEBUG
        cv::namedWindow( "Canny", 1 );
        cv::imshow( "Canny", border_aux );
#endif

        //Find confining convex hull (Note: used border_copy as findContours modifies the image)
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        cv::Mat border_copy(border_aux.size(), CV_8UC1);
        border_aux.copyTo(border_copy);
        cv::findContours(border_copy, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );

#ifdef RSEG_DEBUG
        cv::Scalar color = cv::Scalar( 255, 255, 255);
        cv::Mat drawing = cv::Mat::zeros( border_aux.size(), CV_8UC3);
        for(i = 0; i< contours.size(); i++ )
            cv::drawContours( drawing, contours, i, color, 1, 8, hierarchy, 0, cv::Point() );
        cv::namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
        cv::imshow( "Contours", drawing );
#endif

        //One contour to confine all detected contours
        std::vector<cv::Point> big_contour;
        std::vector<cv::Point> hull;
        if(contours.size() > 0) {
            //Group found contours in one big contour
            for(i=0;i<contours.size(); i++) {
                if(hierarchy[i][2] < 0) { // No child contour: merge it into the big contour
                    if(big_contour.empty())
                        big_contour = contours[i];
                    else
                        big_contour.insert( big_contour.end(), contours[i].begin(), contours[i].end());
                }
            }
            //Get initial convex hull
            cv::convexHull( big_contour, hull, false );

#ifdef RSEG_DEBUG
            //Print contour and hull
            /*std::cout << "Hull" << std::endl;
            for(i=0; i<hull.size(); i++)
                std::cout << hull[i].x << "," << hull[i].y << std::endl;*/
            cv::Mat drawing2 = cv::Mat::zeros( border_aux.size(), CV_8UC3);
            cv::Scalar color = cv::Scalar( 255, 0, 255 );
            std::vector<std::vector<cv::Point> > drawc, drawh;
            drawc.push_back(big_contour);
            drawh.push_back(hull);
            color = cv::Scalar( 0, 0, 255 );
            cv::drawContours( drawing2, drawh, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
            color = cv::Scalar( 255, 0, 255 );
            cv::drawContours( drawing2, drawc, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
            cv::namedWindow( "Contour and Hull", CV_WINDOW_AUTOSIZE );
            cv::imshow( "Contour and Hull", drawing2 );
#endif
        } else
            continue;
        if(hull.size() == 0)
            continue;

        //Confine current image to blob, and get inverted foreground mask
        cv::Mat caux(c, roi), aux2 = 255 - aux;

        //COLOR HISTOGRAM
        //Get YCrCb image
        cv::Mat c_yuvaux(c_yuv, roi);
        cv::cvtColor(caux, c_yuvaux, CV_BGR2YCrCb);

        //Calculate foreground and background chroma histograms
        cv::MatND hist, hist2;
        //Foreground
        cv::calcHist( &c_yuvaux, 1, channels, aux, // use the foreground mask
                  hist, 2, histSize, ranges,
                 true, // the histogram is uniform
                 false );
        maxVal=0;
        cv::minMaxLoc(hist, 0, &maxVal, 0, 0);
        hist = hist/maxVal;
        //Background
        cv::calcHist( &c_yuvaux, 1, channels, aux2, // use the inverted (background) mask
                    hist2, 2, histSize, ranges,
                   true, // the histogram is uniform
                   false );
        maxVal=0;
        cv::minMaxLoc(hist2, 0, &maxVal, 0, 0);
        hist2 = hist2/maxVal;

        //Check correlation between color histograms:
        cv::MatND pixhist;
        for(i = i0; i < i0 + h0; i++ ) {
            if(i==710)
                i=710;
            for(j = j0; j < j0 + w0; j++ ) {
                //Just for points inside the convex hull and a little offset
                if(j==257)
                    j=257;
                if(cv::pointPolygonTest(hull, cv::Point2f(j-j0,i-i0), true) > - m_hullOffset) {
                    if(f.data[i*bl+j]) { //Movement point
                       //Set augmented segmentation image
                       r.data[i*bl2+3*j] = r.data[i*bl2+3*j+1] = r.data[i*bl2+3*j+2] = 255; //White
                    } else { //Non-movement
                        //Check neighborhood for movement.
                       if(    j + w_size/2 >= w || i + w_size/2 >= h
                           || j - w_size/2 < 0  || i - w_size/2 < 0 )
                            continue;
                        wroi.x = j - w_size/2;
                        wroi.y = i - w_size/2;
                        if(movementFound(f, w_size, i, j, roi)) {
                            //Generate local histogram for comparison
                            cv::Mat c_yuvpix(c_yuv, wroi);
                            cv::calcHist( &c_yuvpix, 1, channels, cv::Mat(), // do not use mask
                                        pixhist, 2, histSize, ranges,
                                        true, // the histogram is uniform
                                        false );
                            maxVal = 0;
                            cv::minMaxLoc(pixhist, 0, &maxVal, 0, 0);
                            pixhist = pixhist/maxVal;

                            //Decide if background or foreground, comparing histograms
                            if(histogramDistance(hist,pixhist) < histogramDistance(hist2,pixhist)) {
                                r.data[i*bl2+3*j] = 255; //Red
                            }
                        }
                    }
                }
            }
        }
        //Integrate results with original mask
        for(i = i0; i < i0 + h0; i++ )
            for(j = j0; j < j0 + w0; j++ )
                if(f0.data[i*bl+j] != 0 || r.data[i*bl2+3*j] != 0 || r.data[i*bl2+3*j+1] != 0 || r.data[i*bl2+3*j+2] != 0) {
                    f.data[i*bl+j] = 255;
                    if(f0.data[i*bl+j] != 0)
                        r.data[i*bl2+3*j] = r.data[i*bl2+3*j+1] = r.data[i*bl2+3*j+2] = 255;
                }
        //Opening and Closing
        cv::erode(aux, aux, element);
        cv::dilate(aux, aux, element,cv::Point(-1,-1),2);
        cv::erode(aux, aux, element);

        //Recalculate Convex Hull
        cv::Canny(aux,border_aux, 50,100, 3);
        contours.clear();
        hierarchy.clear();
        big_contour.clear();
        hull.clear();
        cv::findContours(border_aux, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
        for(i=0;i<contours.size(); i++) {
            if(hierarchy[i][2] < 0) { // No child contour: merge it into the big contour
                if(big_contour.empty())
                    big_contour = contours[i];
                else
                    big_contour.insert( big_contour.end(), contours[i].begin(), contours[i].end());
            }
        }
        cv::convexHull( big_contour, hull, false );

        //Get main/minor axis
        std::vector<cv::Point2f> data_aux(h0*w0);
        float mean_x = 0, mean_y = 0;
        int count = 0;

        for(i=0; i<h0; i++)
            for(j=0; j<w0; j++)
                if(cv::pointPolygonTest(hull, cv::Point2f(j, i), true) > - m_hullOffset) {
                    data_aux[count++] = cv::Point2f(j, i);
                    mean_x += j;
                    mean_y += i;
                }
        //data_aux.resize(count);
        //cv::Mat data(2, count, CV_32FC1, &data_aux.front());
        cv::Mat data(2, count, CV_32FC1);
        cv::Point2f x;
        for(i=0; i<count; i++) {
            data.at<float>(0,i) = data_aux[i].x;
            data.at<float>(1,i) = data_aux[i].y;
        }

        //cv::Mat data();
        mean_x /= count;
        mean_y /= count;
        cv::Mat mean(2, 1, CV_32FC1);
        mean.at<float>(0) = mean_x;
        mean.at<float>(1) = mean_y;

        //2. perform PCA
        cv::PCA pca(data, mean, CV_PCA_DATA_AS_COL, maxComponents);
        //result is contained in pca.eigenvectors (as row vectors)
        //std::cout << pca.eigenvectors << std::endl;

        //3. get angle of principal axis
        float dx = pca.eigenvectors.at<float>(0, 0),
              dy = pca.eigenvectors.at<float>(0, 1),
              scale = 40.0;
        cv::Point3f rline;
        cv::Point2f r1, r2;

        //Get line general form from principal component
        getGeneralLineForm(cv::Point2f(mean_x, mean_y),
                           cv::Point2f(mean_x + dx*scale, mean_y + dy*scale),
                           rline);
        //Get segment from line
        int n1, n2;
        //getContourToLineIntersection(hull, rline, r1, r2, &n1, &n2);

        //Get pixel intersections for normals
        std::vector< segment2D<float> > segs;
        //getNormalIntersections(aux, roi, hull, r1, r2, n1, n2, dx, dy, segs);

        //Get the pixel distance function
        std::vector<float> dfunction;
        //dfunction.resize((int)D_axis + 1); //

                //First and last are zero for sure (axis intersects contour).
        //dfunction[0] = 0.0;
        //dfunction[(int)D_axis] = 0.0;
        //for

#ifdef RSEG_DEBUG
        std::cout << "Final Hull" << std::endl;
        for(i=0; i<hull.size(); i++)
            std::cout << i << " : " << hull[i].x << " ; " << hull[i].y << std::endl;

/*        std::cout << "Distances" << std::endl;
        for(i=0; i<segs.size(); i++) {
            double dx = segs[i].first.x - segs[i].last.x;
            double dy = segs[i].first.y - segs[i].last.y;
            std::cout << i << " : " << sqrt(dx*dx+dy*dy) << std::endl;

        }*/
        color = cv::Scalar( 0, 255, 255 );
        std::vector<std::vector<cv::Point> > drawc;
        drawc.push_back(hull);
        cv::Mat raux(r, roi);
        cv::drawContours( raux, drawc, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
        color = cv::Scalar( 0, 255, 0 );
        cv::line(raux, r1, r2, color);
        //cv::line(raux, cv::Point(mean_x - dx*scale, mean_y - dy*scale),
        //               cv::Point(mean_x + dx*scale, mean_y + dy*scale), color);
        cv::namedWindow( "Final", CV_WINDOW_AUTOSIZE );
        cv::imshow( "Final", raux );

        /*std::cout << "Background:" << std::endl;
        std::cout << "\tRows:" << hist2.rows << ", Cols:" << hist2.cols << std::endl;
        std::cout << "\tChannels:" << hist2.channels() << ", Depth:" << hist2.depth() << std::endl;
        for(j=0; j<hist2.cols; j++)
          std::cout << "\t" << j;
        std::cout << std::endl;
        for(i=0; i<hist2.rows; i++) {
            for(j=0; j<hist2.cols; j++) {
                std::cout << "\t" << hist2.at<float>(i,j);
            }
            std::cout << std::endl;
        }*/
#endif
    }
    //Set datapool images
    memcpy(fg_p, f.data, h*bl);
    memcpy(m_data->rFgImage->bits(), r.data, h*bl2);

}
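The blob chroma histograms above are built with the usual cv::calcHist call. A stripped-down sketch of the same two-channel (Cr, Cb) histogram and its normalization by the maximum bin, run on a synthetic patch rather than real blob data:

#include <opencv2/opencv.hpp>

int main() {
    // Synthetic BGR patch standing in for a blob region.
    cv::Mat patch(64, 64, CV_8UC3, cv::Scalar(40, 80, 200)), yuv;
    cv::cvtColor(patch, yuv, cv::COLOR_BGR2YCrCb);

    int channels[] = {1, 2};               // Cr and Cb planes only
    int histSize[] = {16, 16};
    float pranges[] = {0, 256};
    const float *ranges[] = {pranges, pranges};

    cv::Mat hist;
    cv::calcHist(&yuv, 1, channels, cv::Mat(), // no mask
                 hist, 2, histSize, ranges,
                 true,    // uniform histogram
                 false);  // do not accumulate

    // Normalize by the histogram's maximum, as completeBlobs() does.
    double maxVal = 0;
    cv::minMaxLoc(hist, 0, &maxVal, 0, 0);
    hist /= maxVal;
    return 0;
}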