Datum htg_distance(PG_FUNCTION_ARGS) {
    Histogram *htg1 = (Histogram *) PG_GETARG_POINTER(0);
    Histogram *htg2 = (Histogram *) PG_GETARG_POINTER(1);
    double result = histogramDistance(htg1, htg2);
    PG_RETURN_FLOAT8(result);
}
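/* htg_distance follows the PostgreSQL V1 calling convention and relies on a
   histogramDistance() helper defined elsewhere in the extension. A minimal
   sketch of the declarations it assumes (the helper's exact signature is an
   assumption, not the extension's confirmed API; PG_FUNCTION_INFO_V1 is
   conventionally placed just above the function definition): */
PG_FUNCTION_INFO_V1(htg_distance);                 /* V1 registration, required for SQL-callable functions */
extern double histogramDistance(Histogram *htg1,   /* assumed helper: returns the      */
                                Histogram *htg2);  /* distance between two histograms  */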
void HullToObjectModule::completeBlobs(std::deque<SpRMMMobileObject> &objects,
                                       QImage *fg, QImage *current) {
    if(objects.size() == 0)
        return;

    int nbins = 256/m_bins;
    int i, j, w = fg->width(), h = fg->height(), k = 0;
    int i0, j0, w0, h0;
    double maxVal;
    QImage curr888 = current->convertToFormat(QImage::Format_RGB888);
    cv::Mat c(h, w, CV_8UC3), c_yuv(h, w, CV_8UC3),
            f(h, w, CV_8UC1), f0(h, w, CV_8UC1), r(h, w, CV_8UC3);
    int bl = fg->bytesPerLine(), bl2 = curr888.bytesPerLine();
    std::deque<SpRMMMobileObject>::iterator it, it_end = objects.end();
    uchar d1, d2, d3, *fg_p = fg->bits(), *c_p = curr888.bits();

    memcpy(c.data, c_p, h*bl2);
    memcpy(f.data, fg_p, h*bl);
    memset(c_yuv.data, 0, h*bl2);
    memset(r.data, 0, h*bl2);
    f.copyTo(f0);

    cv::Rect roi;

    //Histogram parameters
    int channels[] = {1, 2};
    int histSize[] = {nbins, nbins};
    float pranges[] = { 0, 256 };
    const float* ranges[] = { pranges, pranges };

    //Rectangular structuring element
    cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT,
                                                cv::Size(3, 3),
                                                cv::Point(1, 1));

    //Set local window pixel histogram for comparison
    cv::Rect wroi;
    wroi.width = wroi.height = w_size/2;

    //Start blob processing for hull calculation
    for(it = objects.begin(); it != it_end; it++) {
        k++;
        SpRMMMobileObject obj = (*it);
        SpHullModel newHull(new HullModel());
        Blob &b = obj->multiModel.binterface;
        i0 = b.bbox.ytop, j0 = b.bbox.xleft,
        h0 = b.bbox.height, w0 = b.bbox.width;

        if(j0 >= f.cols || i0 >= f.rows || h0 <= 0 || w0 <= 0) {
            m_data->hulls.push_back(newHull);
            continue;
        }
        if(j0 < 0) {
            w0 += j0;
            j0 = 0;
            if(w0 <= 0) w0 = 1;
        }
        if(i0 < 0) {
            h0 += i0;
            i0 = 0;
            if(h0 <= 0) h0 = 1;
        }
        if(j0 + w0 > f.cols) w0 = f.cols - j0;
        if(i0 + h0 > f.rows) h0 = f.rows - i0;

        //Using the same Mat f .....
        //Opening on the blob
        roi.x = j0;
        roi.y = i0;
        roi.width = w0;
        roi.height = h0;

        /* if(m_data->frameNumber == 962) {
            std::cout << "Error frame " << m_data->frameNumber << std::endl;
            std::cout << "Mobile id " << obj->mobile_id << std::endl;
            std::cout << "\troi.x: " << roi.x;
            std::cout << "\troi.y: " << roi.y;
            std::cout << ";\troi.width: " << roi.width;
            std::cout << ";\troi.height: " << roi.height;
            std::cout << ";\tf.cols: " << f.cols << std::endl;
        } */

        //Restrict operations to blob zone
        if(!(0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= f.cols)) {
            std::cout << "Error frame " << m_data->frameNumber << std::endl;
            std::cout << "\troi.x: " << roi.x;
            std::cout << ";\troi.width: " << roi.width;
            std::cout << ";\tf.cols: " << f.cols << std::endl;
        }
        if(!(0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= f.rows)) {
            std::cout << "Error frame " << m_data->frameNumber << std::endl;
            std::cout << "\troi.y: " << roi.y;
            std::cout << ";\troi.height: " << roi.height;
            std::cout << ";\tf.rows: " << f.rows << std::endl;
        }

        cv::Mat aux(f, roi);
        cv::Mat aux0(f0, roi);

        //Reduce bad detections, in general near borders
        cv::erode(aux, aux, element, cv::Point(-1,-1), 1);
        //Reduce bad detections, in general near borders, and recover shape
        cv::erode(aux0, aux0, element, cv::Point(-1,-1), 1);
        cv::dilate(aux0, aux0, element, cv::Point(-1,-1), 1);

        //Border detection
        cv::Mat border_aux(aux.size(), CV_8UC1);
        cv::Canny(aux, border_aux, 50, 100, 3);

#ifdef RSEG_DEBUG
//        cv::namedWindow( "Canny", 1 );
//        cv::imshow( "Canny", border_aux );
#endif

        //Find confining convex hull (note: use border_copy, as findContours modifies the image)
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        cv::Mat border_copy(border_aux.size(), CV_8UC1);
        border_aux.copyTo(border_copy);
#ifdef __OPENCV3__
        cv::findContours(border_copy, contours, hierarchy,
                         cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
#else
        cv::findContours(border_copy, contours, hierarchy,
                         CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
#endif

#ifdef RSEG_DEBUG
        /* cv::Scalar color = cv::Scalar( 255, 255, 255);
        cv::Mat drawing = cv::Mat::zeros( border_aux.size(), CV_8UC3);
        for(i = 0; i < contours.size(); i++ )
            cv::drawContours( drawing, contours, i, color, 1, 8, hierarchy, 0, cv::Point() );
        cv::namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
        cv::imshow( "Contours", drawing ); */
#endif

        //One contour to confine all detected contours
        std::vector<cv::Point> big_contour;
        std::vector<cv::Point> hull;
        if(contours.size() > 0) {
            //Group found contours in one big contour
            for(i = 0; i < contours.size(); i++) {
                if(hierarchy[i][2] < 0) { //Contour with no child
                    if(big_contour.empty())
                        big_contour = contours[i];
                    else
                        big_contour.insert(big_contour.end(),
                                           contours[i].begin(), contours[i].end());
                }
            }
            //Get initial convex hull
            cv::convexHull(big_contour, hull, false);

#ifdef RSEG_DEBUG
            //Print contour and hull
            /* std::cout << "Hull" << std::endl;
            for(i=0; i<hull.size(); i++)
                std::cout << hull[i].x << "," << hull[i].y << std::endl;
            cv::Mat drawing2 = cv::Mat::zeros( border_aux.size(), CV_8UC3);
            cv::Scalar color = cv::Scalar( 255, 0, 255 );
            std::vector<std::vector<cv::Point> > drawc, drawh;
            drawc.push_back(big_contour);
            drawh.push_back(hull);
            color = cv::Scalar( 0, 0, 255 );
            cv::drawContours( drawing2, drawh, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
            color = cv::Scalar( 255, 0, 255 );
            cv::drawContours( drawing2, drawc, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
            cv::namedWindow( "Contour and Hull", CV_WINDOW_AUTOSIZE );
            cv::imshow( "Contour and Hull", drawing2 ); */
#endif
        } else {
            m_data->hulls.push_back(newHull);
            continue;
        }

        if(hull.size() == 0) {
            m_data->hulls.push_back(newHull);
            continue;
        }

        //Confine current image to blob, and get inverted foreground mask
        cv::Mat caux(c, roi), aux2 = 255 - aux;

        //COLOR HISTOGRAM
        //Get YCrCb image
        cv::Mat c_yuvaux(c_yuv, roi);
#ifdef __OPENCV3__
        cv::cvtColor(caux, c_yuvaux, cv::COLOR_BGR2YCrCb);
#else
        cv::cvtColor(caux, c_yuvaux, CV_BGR2YCrCb);
#endif

        //Calculate foreground and background chroma histograms
        cv::MatND hist, hist2;
        //Foreground
        cv::calcHist(&c_yuvaux, 1, channels,
                     aux,       //foreground mask
                     hist, 2, histSize, ranges,
                     true,      //the histogram is uniform
                     false);
        maxVal = 0;
        cv::minMaxLoc(hist, 0, &maxVal, 0, 0);
        hist = hist/maxVal;
        //Background
        cv::calcHist(&c_yuvaux, 1, channels,
                     aux2,      //inverted (background) mask
                     hist2, 2, histSize, ranges,
                     true,      //the histogram is uniform
                     false);
        maxVal = 0;
        cv::minMaxLoc(hist2, 0, &maxVal, 0, 0);
        hist2 = hist2/maxVal;

        //Check correlation between color histograms:
        cv::MatND pixhist;
        for(i = i0; i < i0 + h0; i++) {
            for(j = j0; j < j0 + w0; j++) {
                //Just for points inside the convex hull, plus a small offset
                if(cv::pointPolygonTest(hull, cv::Point2f(j-j0, i-i0), true) > -m_hullOffset) {
                    if(f.data[i*bl+j]) { //Movement point
                        //Set augmented segmentation image
                        r.data[i*bl2+3*j] = r.data[i*bl2+3*j+1] = r.data[i*bl2+3*j+2] = 255; //White
                    } else { //Non-movement
                        //Check neighborhood for movement
                        if(j + w_size/2 >= w || i + w_size/2 >= h ||
                           j - w_size/2 < 0 || i - w_size/2 < 0)
                            continue;
                        wroi.x = j - w_size/2;
                        wroi.y = i - w_size/2;
                        if(movementFound(f, w_size, i, j, roi)) {
                            //Generate local histogram for comparison
                            cv::Mat c_yuvpix(c_yuv, wroi);
                            cv::calcHist(&c_yuvpix, 1, channels,
                                         cv::Mat(), //do not use mask
                                         pixhist, 2, histSize, ranges,
                                         true,      //the histogram is uniform
                                         false);
                            maxVal = 0;
                            cv::minMaxLoc(pixhist, 0, &maxVal, 0, 0);
                            pixhist = pixhist/maxVal;
                            //Decide if background or foreground, comparing histograms
                            if(histogramDistance(hist, pixhist) < histogramDistance(hist2, pixhist)) {
                                r.data[i*bl2+3*j] = 255; //Red
                            }
                        }
                    }
                }
            }
        }

        //Integrate results with original mask
        for(i = i0; i < i0 + h0; i++)
            for(j = j0; j < j0 + w0; j++)
                if(f0.data[i*bl+j] != 0 || r.data[i*bl2+3*j] != 0 ||
                   r.data[i*bl2+3*j+1] != 0 || r.data[i*bl2+3*j+2] != 0) {
                    f.data[i*bl+j] = 255;
                    if(f0.data[i*bl+j] != 0)
                        r.data[i*bl2+3*j] = r.data[i*bl2+3*j+1] = r.data[i*bl2+3*j+2] = 255;
                }

        //Opening and closing
        cv::erode(aux, aux, element);
        cv::dilate(aux, aux, element, cv::Point(-1,-1), 2);
        cv::erode(aux, aux, element);

        //Recalculate convex hull
        cv::Canny(aux, border_aux, 50, 100, 3);
        contours.clear();
        hierarchy.clear();
        big_contour.clear();
        hull.clear();
#ifdef __OPENCV3__
        cv::findContours(border_aux, contours, hierarchy,
                         cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
#else
        cv::findContours(border_aux, contours, hierarchy,
                         CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
#endif
        for(i = 0; i < contours.size(); i++) {
            if(hierarchy[i][2] < 0) { //Contour with no child
                if(big_contour.empty())
                    big_contour = contours[i];
                else
                    big_contour.insert(big_contour.end(),
                                       contours[i].begin(), contours[i].end());
            }
        }
        cv::convexHull(big_contour, hull, false);

        newHull->local_hull = hull;
        newHull->off_x = j0;
        newHull->off_y = i0;
        newHull->id = (*it)->mobile_id;

        //Get principal/minor axis
        std::vector<cv::Point2f> data_aux(h0*w0);
        float mean_x = 0, mean_y = 0;
        int count = 0;
        for(i = 0; i < h0; i++)
            for(j = 0; j < w0; j++)
                if(cv::pointPolygonTest(hull, cv::Point2f(j, i), true) > -m_hullOffset) {
                    data_aux[count++] = cv::Point2f(j, i);
                    mean_x += j;
                    mean_y += i;
                }
        //data_aux.resize(count);
        //cv::Mat data(2, count, CV_32FC1, &data_aux.front());
        cv::Mat data(2, count, CV_32FC1);
        cv::Point2f x;
        for(i = 0; i < count; i++) {
            data.at<float>(0,i) = data_aux[i].x;
            data.at<float>(1,i) = data_aux[i].y;
        }
        //cv::Mat data();
        mean_x /= count;
        mean_y /= count;
        cv::Mat mean(2, 1, CV_32FC1);
        mean.at<float>(0) = mean_x;
        mean.at<float>(1) = mean_y;

        //2. Perform PCA
#ifdef __OPENCV3__
        cv::PCA pca(data, mean, cv::PCA::DATA_AS_COL, maxComponents);
#else
        cv::PCA pca(data, mean, CV_PCA_DATA_AS_COL, maxComponents);
#endif
        //Result is contained in pca.eigenvectors (as row vectors)
        //std::cout << pca.eigenvectors << std::endl;

        //3. Get angle of principal axis
        float dx = pca.eigenvectors.at<float>(0, 0),
              dy = pca.eigenvectors.at<float>(0, 1),
              scale = 40.0;
        cv::Point3f rline;
        cv::Point2f r1, r2;

        //Get line general form from principal component
        getGeneralLineForm(cv::Point2f(mean_x, mean_y),
                           cv::Point2f(mean_x + dx*scale, mean_y + dy*scale),
                           rline);
        //Get segment from line
        int n1, n2;
        getContourToLineIntersection(hull, rline, r1, r2, &n1, &n2);

        //Get pixel intersections for normals
        std::vector< segment2D<float> > &segs = newHull->segs;
        std::vector< segment2D<float> > &hull_segs = newHull->hull_segs;

        //Get segments of movement normal to the principal axis.
        //Also reorders r1 and r2 in coherence with segment order.
        getNormalIntersections(aux, roi, hull, r1, r2, n1, n2, dx, dy, segs, hull_segs);

        newHull->axis1 = r1;
        newHull->axis2 = r2;

        //Set new representation
        m_data->hulls.push_back(newHull);

        //Get the pixel distance function
        std::vector<float> dfunction;
        //dfunction.resize((int)D_axis + 1);
        //First and last are zero for sure (axis intersects contour).
        //dfunction[0] = 0.0;
        //dfunction[(int)D_axis] = 0.0;
        //for

#ifdef RSEG_DEBUG
        /* std::cout << "Final Hull" << std::endl;
        for(i=0; i<hull.size(); i++)
            std::cout << i << " : " << hull[i].x << " ; " << hull[i].y << std::endl; */
        /* std::cout << "Distances" << std::endl;
        for(i=0; i<segs.size(); i++) {
            double dx = segs[i].first.x - segs[i].last.x;
            double dy = segs[i].first.y - segs[i].last.y;
            std::cout << i << " : " << sqrt(dx*dx+dy*dy) << std::endl;
        }
        color = cv::Scalar( 0, 255, 255 );
        std::vector<std::vector<cv::Point> > drawc;
        drawc.push_back(hull);
        cv::Mat raux(r, roi);
        cv::drawContours( raux, drawc, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
        color = cv::Scalar( 0, 255, 0 );
        cv::line(raux, r1, r2, color);
        cv::line(raux, cv::Point(mean_x - dx*scale, mean_y - dy*scale),
                 cv::Point(mean_x + dx*scale, mean_y + dy*scale), color);
        cv::namedWindow( "Final", CV_WINDOW_AUTOSIZE );
        cv::imshow( "Final", raux ); */
#endif
    }

    //Set datapool images
    memcpy(fg_p, f.data, h*bl);
    memcpy(m_data->rFgImage->bits(), r.data, h*bl2);
}
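//completeBlobs() compares chroma histograms through a histogramDistance()
//helper that is not part of this listing. A minimal sketch of one plausible
//implementation, assuming a Bhattacharyya metric via cv::compareHist (the
//actual module may use a different measure or signature):
static double histogramDistance(const cv::MatND &hist1, const cv::MatND &hist2) {
#ifdef __OPENCV3__
    return cv::compareHist(hist1, hist2, cv::HISTCMP_BHATTACHARYYA); //0 = identical, 1 = no overlap
#else
    return cv::compareHist(hist1, hist2, CV_COMP_BHATTACHARYYA);
#endif
}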
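//getGeneralLineForm() is likewise external to this listing. A minimal sketch,
//assuming rline packs the coefficients (a, b, c) of the general line form
//a*x + b*y + c = 0 through the two given points (the real helper may differ,
//e.g. it could also return a success flag):
static void getGeneralLineForm(cv::Point2f p1, cv::Point2f p2, cv::Point3f &rline) {
    rline.x = p2.y - p1.y;               //a: normal to the direction vector
    rline.y = p1.x - p2.x;               //b
    rline.z = p2.x*p1.y - p1.x*p2.y;     //c, chosen so that a*p1.x + b*p1.y + c = 0
}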