void final_magic_crop_rotate ( Mat& mat, vector<Point>& points4 ) { Size size_mat = mat.size(); corners_magick_do ( points4 /*ref*/ ); /*sorts corner points4*/ vector<Point2f> points4f; // this here is probably closest to the size of the original invoice... well, let's try... tension :) RotatedRect rect_minAreaRect = minAreaRect ( points4 ); // RNG rng(12345); Point2f rect_points[4]; rect_minAreaRect.points( rect_points ); for ( int i=0; i<(int)points4.size(/*4*/); ++i ) { points4f.push_back(points4[i]); } bool is_mat_width = size_mat.width>size_mat.height; /*is width larger*/ int small = min(rect_minAreaRect.size.width, rect_minAreaRect.size.height); int large = max(rect_minAreaRect.size.width, rect_minAreaRect.size.height); !is_mat_width && (small=small^large) && (large=small^large) && (small=small^large); /*XOR swap*/ // Mat quad = Mat::zeros ( small, large, CV_8UC3 ); Mat quad = Mat::zeros ( small, large, CV_8U ); vector<Point2f> quad_pts; quad_pts.push_back(Point2f(0, 0)); quad_pts.push_back(Point2f(quad.cols, 0)); quad_pts.push_back(Point2f(quad.cols, quad.rows)); quad_pts.push_back(Point2f(0, quad.rows)); if ( points4f.size()==4 ) { outfile << "ok, doing pers transform and warp..." << points4f << endl; Mat transmtx = getPerspectiveTransform ( points4f, quad_pts ); warpPerspective ( mat, quad, transmtx, quad.size() ); imwrite ( IMG_PATH, quad ) ; ocr_doit ( quad ); } else { outfile << "checking points4f... not 4 of number " << points4f << endl; // TODO rotate if ( mat.cols>mat.rows ){ rot90 ( mat, 1 ); } imwrite ( IMG_PATH, mat ) ; outfile << "DISPLAY_IMG" << endl; } }
/**
 * Builds an inverse-perspective-mapped (bird's-eye) view of the input frame.
 *
 * @param image          source frame
 * @param vanishingPoint vanishing point of the road in image coordinates
 * @return 200x300 single-channel IPM image
 *
 * Camera geometry (focal length, half opening angles) is hard-coded for the
 * target camera -- TODO confirm against the current calibration.
 */
Mat InversePerspectiveMapper::MapInversePerspectiveFromVanishingPoint(const Mat &image, const Point2f &vanishingPoint) {
    // Vertical offset ("Abstand") of the vanishing point from the image centre row.
    float abstand = abs(vanishingPoint.y - (image.rows / 2.0f));
    float focalLength = 575;                // in pixels
    float alphaKlein = 23 * CV_PI_F / 180;  // half vertical opening angle (rad)
    float alphaGross = 29 * CV_PI_F / 180;  // half horizontal opening angle (rad)
    float cameraTilt = atan(abstand / focalLength);

    // Sample a 2x2 grid of points in the region below the vanishing point and
    // compute, for each, its position on the ground plane.
    float remainingHeight = image.rows - cvRound(vanishingPoint.y);
    float xDistance = cvCeil(image.cols / 3.0f);
    float yDistance = cvCeil(remainingHeight / 3.0f);
    pair<vector<cv::Point2f>, vector<cv::Point2f> > sourceDestinationPairs;
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            float x = xDistance + i * xDistance;
            float y = vanishingPoint.y + yDistance + j * yDistance;
            // Flat-world IPM equations (angles interpolated across the image).
            float u = 22 * HTWKMath::cotf((cameraTilt - alphaKlein) + y * (2 * alphaKlein / (image.rows - 1))) * sinf(-alphaGross + x * (2 * alphaGross / (image.cols - 1))) - 3;
            float v = 22 * HTWKMath::cotf((cameraTilt - alphaKlein) + y * (2 * alphaKlein / (image.rows - 1))) * cosf(-alphaGross + x * (2 * alphaGross / (image.cols - 1))) - 50; // set from 50 to 55 remove hood for car 20 after crash
            // Moved coordinates to IPM pixel frame (300 wide, 200 tall, y flipped).
            u = u + 150;
            v = 200 - v;
            sourceDestinationPairs.first.push_back(Point2f(x, y));
            sourceDestinationPairs.second.push_back(Point2f(u, v));
        }
    }
    Mat lambda = getPerspectiveTransform(sourceDestinationPairs.first, sourceDestinationPairs.second);
    Mat output(200, 300, CV_8UC1, CV_RGB(0, 0, 0));
    warpPerspective(image, output, lambda, output.size(), INTER_LINEAR, BORDER_CONSTANT, CV_RGB(5, 5, 5));
    // Return by value directly; the previous `output.clone()` copied the whole
    // image for nothing -- `output` is already a local, exclusively-owned Mat.
    return output;
}
// Builds the 3x3 perspective matrix that maps the four supplied source
// corners onto a fixed upright rectangle spanning the full height of the
// global `image` (x from 192/5 to 3008/5).
//
// @param srcq pointer to four source corner points (TL, TR, BR, BL order
//             presumed to match the destination quad -- confirm at call site)
// @return the perspective transform matrix
Mat getTransformMat(Point2f* srcq ) {
    const float leftX   = 192.0 / 5.0;
    const float rightX  = 3008.0 / 5.0;
    const float bottomY = float(image.rows);   // `image` is defined at file scope

    Point2f srcQuad[4];
    for ( int k = 0; k < 4; ++k ) {
        srcQuad[k] = srcq[k];
    }

    Point2f dstQuad[4] = {
        Point2f( leftX,  0.0 ),
        Point2f( rightX, 0.0 ),
        Point2f( rightX, bottomY ),
        Point2f( leftX,  bottomY )
    };

    return getPerspectiveTransform( srcQuad, dstQuad );
}
void Calibrador::calcHomography() { if(ptsSrc.size() >= 4) { vector<Point2f> srcPoints, dstPoints; for(int i = 0; i < ptsSrc.size(); i++) { srcPoints.push_back(Point2f(ptsSrc[i].x, ptsSrc[i].x)); dstPoints.push_back(Point2f(ptsDst[i].x, ptsDst[i].y)); } // generate a homography from the two sets of points homography = findHomography(Mat(srcPoints), Mat(dstPoints)); homographyReady = true; cv::invert(homography, homography_inv); // getPerspective // esto necesita 4 puntos (ni mas ni menos) srcPoints.erase(srcPoints.begin()+4,srcPoints.end()); dstPoints.erase(dstPoints.begin()+4,dstPoints.end()); map_matrix = getPerspectiveTransform(srcPoints, dstPoints); } }
// Computes the perspective matrix that maps the detected screen quadrilateral
// onto the full interface rectangle (interfaceWidth x interfaceHeight).
//
// @param rect the four detected screen corners, in any order
// @return the perspective transform matrix (use with warpPerspective)
//
// NOTE(review): the destination quad below is TL, TR, BR, BL, so
// getOrderedPoints() is presumed to return the corners in that same order --
// confirm (the old comment claimed TL, TR, BL, BR, contradicting the code).
Mat ScreenDetector::transformImage(std::vector<Point> rect) {
    rect = getOrderedPoints(rect);

    // Source: the ordered screen corners. (The former tl/tr/br/bl and
    // width/height locals were never used and have been removed.)
    Point2f src[4];
    for (int i = 0; i < 4; ++i) {
        src[i] = rect.at(i);
    }

    // Destination: the full interface rectangle, TL, TR, BR, BL.
    Point2f dst[4];
    dst[0] = Point(0, 0);
    dst[1] = Point(interfaceWidth-1, 0);
    dst[2] = Point(interfaceWidth-1, interfaceHeight-1);
    dst[3] = Point(0, interfaceHeight-1);

    Mat transformMatrix = getPerspectiveTransform(src, dst);

    if(DEBUG) {
        // Visual check: warp the current frame and show it.
        Mat warped;
        namedWindow("warped", WINDOW_KEEPRATIO);
        warpPerspective(img, warped, transformMatrix, Size(interfaceWidth, interfaceHeight));
        imshow("warped", warped);
    }
    return transformMatrix;
}
// Computes the perspective transform mapping the first four src points onto
// the first four dst points.  Returns an empty cv::Mat when either list has
// fewer than four points or the list sizes differ, so callers can detect the
// failure with Mat::empty().
cv::Mat PerspectiveTransform::getTransformationMatrix(vector<ofVec2f> src, vector<ofVec2f> dst) {
    // Guard: need two matching quads of at least four points each.
    if (src.size() < 4 || dst.size() < 4 || src.size() != dst.size()) {
        return cv::Mat();
    }

    // Convert the first four openFrameworks vectors to OpenCV points.
    cv::Point2f srcQuad[4];
    cv::Point2f dstQuad[4];
    for (int idx = 0; idx < 4; ++idx) {
        srcQuad[idx] = cv::Point2f(src[idx].x, src[idx].y);
        dstQuad[idx] = cv::Point2f(dst[idx].x, dst[idx].y);
    }

    return getPerspectiveTransform(srcQuad, dstQuad);
}
/* * *
 **************************************************/
/// Projects this quad's vertices through the accumulated scene transform,
/// cuts the covered background region out via a perspective warp, and uploads
/// the result as this quad's GL texture.  Finally resets texCoords to the
/// full [0,1] unit square and caches a QPixmap preview.
///
/// NOTE(review): `textpoint` is used below but its declaration and the
/// UvtoCvCoordinate() call that would fill it are commented out, so this
/// function cannot compile as written -- confirm which variant should
/// populate it before re-enabling.
void QuadObj::takeTexture() {
    // Calculate Matrix using the parent matrixes too
    std::stack<QMatrix4x4> mats;
    QMatrix4x4 cmat;

    // Store previous matrixes in stack: walk from this node up to the scene
    // root; the stack reverses the order so the root matrix is applied first.
    QuadObj *po = this;
    bool notroot = true;
    while ( notroot ) {
        if ( po != pro.objectRoot ) {
            mats.push( po->getMatrix() );
            // qDebug() << po->m_itemData;
            po = (QuadObj * ) po->parentItem();
        } else {
            mats.push( po->getMatrix() );
            //qDebug() << po->m_itemData;
            notroot = false;
        }
    }

    // cmat = manager->fixCamera->getMatrix(); TODO
    // Accumulate root -> ... -> this into a single model matrix.
    for ( int i=0 , e = mats.size(); i < e ; i ++ ) {
        cmat *= mats.top();
        mats.pop();
        // qDebug() << "mat";
    }
    cmat.scale(scale);

    // Map each transformed vertex from [-1,1] (y up) to [0,1] (y down) and
    // store it as that vertex's texture coordinate.
    QVector3D vect;
    qDebug() << "verticies " << vertices.size();
    qDebug() << "TexCoords " << texCoords.size();
    for ( int i = 0; i < vertices.size(); i++) { // verticies.size
        vect = cmat * vertices[i];
        texCoords[i] = QVector2D ( ( vect.x() + 1 ) / 2 , ( 1 - vect.y() ) / 2 );
    }

    // The output texture image coordinate and size
    int w,h;
    w = textureMat->cols;
    h = textureMat->rows;
    cv::Point2f ocoords[4];
    ocoords[0] = cv::Point2f (0, 0);
    ocoords[1] = cv::Point2f (0, h);
    ocoords[2] = cv::Point2f (w, h);
    ocoords[3] = cv::Point2f (w, 0);

    // cv::Point2f textpoint[4];
    // UvtoCvCoordinate(manager->mainproject->actualBackground, textpoint);
    /* for ( int i = 0; i < 4; i++) { qDebug() << textpoint[i].x << " : " << textpoint[i].y; cv::circle( manager->mainproject->actualBackground, textpoint[i],5, cv::Scalar(1,255,1), 3 ); }*/
    // sortCorners(textpoint);
    // manager->mainwindow->glWidget->UploadTexture(manager->mainproject->actualBackground.cols ,
    // manager->mainproject->actualBackground.rows,
    // manager->mainproject->actualBackground.data );

    // Homography from the background region onto the texture rectangle.
    cv::Mat ptransform = getPerspectiveTransform ( textpoint, ocoords);
    // cv::Mat transmtx = cv::getPerspectiveTransform(corners, quad_pts);
    // cv::warpPerspective( *manager->mainproject->actualBackground, *textureMat, ptransform, textureMat->size() );

    // Upload textureMat to the GL texture object (created lazily on first use).
    GLWidget & gl = pro.getManger().getMainGLW();
    if ( textureID == 0 ) {
        // NOTE(review): glGenBuffers generates buffer object names; texture
        // ids are normally produced by glGenTextures -- confirm intent.
        gl.glGenBuffers( 1, &textureID);
    }
    gl.glBindTexture ( GL_TEXTURE_2D , textureID);
    gl.glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, textureMat->cols, textureMat->rows, 0, GL_RGB, GL_UNSIGNED_BYTE, ( GLvoid * ) textureMat->data );
    gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    //qDebug() << "Background ID:" << manager->mainproject->backtextID;
    qDebug() << "TextureId:" << textureID;

    //texCoords.clear();
    // The captured texture now covers the whole quad, so texture coordinates
    // become the plain unit square.
    texCoords[0] = QVector2D ( 0,0); // Set Texture coordinate
    texCoords[1] = QVector2D ( 0,1);
    texCoords[2] = QVector2D ( 1,1);
    texCoords[3] = QVector2D ( 1,0);
    textPixmap = QPixmap::fromImage( this->cvMatToQImage( *textureMat) );
}
/* * *
 **************************************************/
/// Cube variant of takeTexture(): projects all cube vertices through the
/// accumulated scene transform and, per face (6 faces x 4 corners = 24
/// points), builds a perspective warp into the face texture and uploads one
/// GL texture per face.
///
/// NOTE(review): `textpoints` is declared but never filled -- the
/// UvtoCvCoordinate() call that should populate it is commented out (TODO),
/// so getPerspectiveTransform below currently reads indeterminate values.
/// Confirm before enabling this path.
void CubeObj::takeTexture() {
    // Calculate Matrix using the parent matrixes too
    std::stack<QMatrix4x4> mats;
    QMatrix4x4 cmat;

    // Store previous matrixes in stack (this node up to the scene root).
    CubeObj *po = this;
    bool notroot = true;
    while ( notroot ) {
        if ( po != pro.objectRoot ) {
            mats.push( po->getMatrix() );
            // qDebug() << po->m_itemData;
            po = (CubeObj * ) po->parentItem();
        } else {
            mats.push( po->getMatrix() );
            //qDebug() << po->m_itemData;
            notroot = false;
        }
    }

    // camera mat * parent mat * parent mat ..... * parent mat * this mat * this scale
    // cmat = pro.getManger().fixCamera->getMatrix(); // TODO
    for ( int i=0 , e = mats.size(); i < e ; i ++ ) {
        cmat *= mats.top();
        mats.pop();
        // qDebug() << "mat";
    }
    cmat.scale(scale);

    // Project points to screen: [-1,1] (y up) -> unit square [0,1] (y down).
    QVector3D vect;
    qDebug() << "verticies " << vertices.size();
    qDebug() << "TexCoords " << texCoords.size();
    QVector<QVector2D> projectedPoints;
    for ( int i = 0; i < vertices.size(); i++) { // verticies.size
        vect = cmat * vertices[i];
        projectedPoints.append( QVector2D ( ( vect.x() + 1 ) / 2 , ( 1 - vect.y() ) / 2 ) );
    }

    // The output texture image coordinate and size
    int w,h;
    w = textureMat->cols;
    h = textureMat->rows;
    cv::Point2f ocoords[ 4 ];
    ocoords[0] = cv::Point2f (0, 0);
    ocoords[1] = cv::Point2f (0, h);
    ocoords[2] = cv::Point2f (w, h);
    ocoords[3] = cv::Point2f (w, 0);

    cv::Point2f textpoints[24]; // TODO
    // UvtoCvCoordinate( *pro.actualBackground, projectedPoints, textpoints ); TODO
    /* // for test for ( int i = 0; i < 24 ; i++) { //qDebug() << textpoints[i].x << " : " << textpoints[i].y; cv::circle( pro.actualBackground, textpoints[i],3, cv::Scalar(1,255,1), 2 ); pro.reLoadback(); } // -------------------*/

    GLWidget & gl = pro.getManger().getMainGLW();
    // One homography + one GL texture per cube face (4 consecutive corners).
    for ( int i =0; i < 6; i++) {
        cv::Mat ptransform = getPerspectiveTransform ( &textpoints[i*4], ocoords);
        // cv::warpPerspective( *pro.actualBackground, *textureMat, ptransform, textureMat->size() );
        if ( textureIDs[i] == 0 ) {
            // NOTE(review): glGenBuffers generates buffer object names;
            // texture ids are normally produced by glGenTextures -- confirm.
            gl.glGenBuffers( 1, &textureIDs[i]);
            qDebug() << "TextureId:" << textureIDs[i];
        }
        gl.glBindTexture ( GL_TEXTURE_2D , textureIDs[i]);
        gl.glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, textureMat->cols, textureMat->rows, 0, GL_RGB, GL_UNSIGNED_BYTE, ( GLvoid * ) textureMat->data );
        gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
        gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    }
    /* texCoords[0] = QVector2D ( 0,0); // Set Texture coordinate texCoords[1] = QVector2D ( 0,1); texCoords[2] = QVector2D ( 1,1); texCoords[3] = QVector2D ( 1,0);*/
    /* textPixmap = QPixmap::fromImage( this->cvMatToQImage( *textureMat) );*/
}
/**
 * Find a list of candidate markers in a given scene.
 *
 * Pipeline: binary threshold -> contour extraction -> polygon approximation
 * -> perspective-rectify each candidate onto a markerWidth x markerHeight
 * cell grid -> reject non-marker patterns (all black, or border not black)
 * -> decode the cell grid under all four rotations and canonicalise to the
 * rotation with the smallest code.
 *
 * @param frame current frame, in grayscale 8UC1 format
 * @return a list of marker candidates
 **/
vector<Marker> MarkerDetector::findMarkerCandidates( Mat& frame ) {
    vector<Marker> candidates;

    /* Do some thresholding, in fact you should tune the parameters here a bit */
    Mat thresholded;
    threshold( frame, thresholded, 50.0, 255.0, CV_THRESH_BINARY );

    /* Find contours (clone because findContours modifies its input image) */
    vector<vector<Point>> contours;
    findContours( thresholded.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE );

    for( vector<Point> contour: contours ) {
        /* Approximate polygons out of these contours; the epsilon scales with
           the contour length so the tolerance is size-independent */
        vector<Point> approxed;
        approxPolyDP( contour, approxed, contour.size() * 0.05, true );

        /* Make sure it passes our first candidate check */
        if( !checkPoints( approxed ) ) continue;

        /* Do some perspective transformation on the candidate marker to a predetermined square */
        Marker marker;
        marker.matrix = Mat( markerHeight, markerWidth, CV_8UC1 );
        std::copy( approxed.begin(), approxed.end(), back_inserter( marker.poly ) );

        /* Apply sub pixel search to refine the four corner locations */
        cornerSubPix( thresholded, marker.poly, Size(5, 5), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001) );

        /* Projection target: fixed quad with a half-pixel outset (-0.5 .. +5.5) */
        const static vector<Point2f> target_corners = {
            Point2f( -0.5f, -0.5f ),
            Point2f( +5.5f, -0.5f ),
            Point2f( +5.5f, +5.5f ),
            Point2f( -0.5f, +5.5f ),
        };

        /* Apply perspective transformation, to project our 3D marker to a predefined 2D coords */
        Mat projection = getPerspectiveTransform( marker.poly, target_corners );
        warpPerspective( thresholded, marker.matrix, projection, marker.matrix.size() );

        /* Ignore those region that's fully black, or not surrounded by black bars */
        if( sum(marker.matrix) == Scalar(0) ||
            countNonZero( marker.matrix.row(0)) != 0 ||
            countNonZero( marker.matrix.row(markerHeight - 1)) != 0 ||
            countNonZero( marker.matrix.col(0)) != 0 ||
            countNonZero( marker.matrix.col(markerWidth - 1)) != 0 ) {
            continue;
        }

        /* Find the rotation that has the smallest hex value */
        pair<unsigned int, unsigned int> minimum = { numeric_limits<unsigned int>::max(), 0 };
        /* NOTE(review): codes is indexed by rotation (0..3); sized
           markerHeight here -- a size of 4 would suffice. Confirm. */
        vector<unsigned int> codes(markerHeight);
        unsigned int power = 1 << (markerWidth - 3);

        /* Rotate the marker 4 times, store the hex code upon each rotation */
        for( int rotation = 0; rotation < 4; rotation++ ) {
            stringstream ss;
            codes[rotation] = 0;
            for( int i = 1; i < markerHeight - 1; i++ ) {
                unsigned int code = 0;
                for ( int j = 1; j < markerWidth - 1; j++ ){
                    /* black cell => set the bit for this column */
                    int value = static_cast<int>(marker.matrix.at<uchar>(i, j));
                    if( value == 0 ) code = code + ( power >> j );
                }
                ss << hex << code;
            }
            /* NOTE(review): the row codes are written as hex digits but read
               back with the default (decimal) extractor, which stops at the
               first a-f character -- confirm this encoding is intended. */
            ss >> codes[rotation];
            if( minimum.first > codes[rotation] ) {
                minimum.first = codes[rotation];
                minimum.second = rotation;
            }
            /* horizontal flip followed by transpose = rotate cells 90 degrees */
            flip( marker.matrix, marker.matrix, 1 );
            marker.matrix = marker.matrix.t();
        }

        /* Canonicalise: rotate the polygon corners to match the chosen
           rotation, then re-apply that rotation to the cell matrix (the loop
           above left it back at its original orientation after 4 turns) */
        rotate( marker.poly.begin(), marker.poly.begin() + ((minimum.second + 2) % 4), marker.poly.end() );
        for( int i = 0; i < minimum.second; i++ ) {
            flip( marker.matrix, marker.matrix, 1 );
            marker.matrix = marker.matrix.t();
        }
        marker.code = minimum.first;
        candidates.push_back( marker );
    }
    return candidates;
}
int Img2IPM::process(Point pt[4], string save_path, int img_cols, int img_rows) { int pointNum = 4; Point2f * srcPoint = new Point2f[pointNum]; Point2f * dstPoint = new Point2f[pointNum]; CvSize ipmSize; //v2 0514 ipmSize.width = 260; ipmSize.height = 300; //sample4 °üº¬³µÍ· { /// sample1 and sample3 srcPoint[0] = cvPoint(pt[0].x, pt[0].y); srcPoint[1] = cvPoint(pt[1].x, pt[1].y); srcPoint[2] = cvPoint(pt[2].x, pt[2].y); srcPoint[3] = cvPoint(pt[3].x, pt[3].y); //v2 0514 dstPoint[0] = cvPoint(105, 284); dstPoint[1] = cvPoint(154, 284); dstPoint[2] = cvPoint(154, 11); dstPoint[3] = cvPoint(105, 11); } Mat img2ipmMatrix = getPerspectiveTransform(srcPoint, dstPoint); Mat ipm2imgMatrix = getPerspectiveTransform(dstPoint, srcPoint); ofstream ipm2iniFile((save_path + "ipm2ini.txt").c_str()); for (int i = 0; i < ipm2imgMatrix.rows; i++) { double * aa = ipm2imgMatrix.ptr<double>(i); for (int j = 0; j < ipm2imgMatrix.cols; j++) { ipm2iniFile<<aa[j]<<" "; } ipm2iniFile<<endl; } ipm2iniFile.close(); ofstream ini2ipmFile((save_path + "ini2ipm.txt").c_str()); for (int i = 0; i < img2ipmMatrix.rows; i++) { double * aa = img2ipmMatrix.ptr<double>(i); for (int j = 0; j < img2ipmMatrix.cols; j++) { ini2ipmFile<<aa[j]<<" "; } ini2ipmFile<<endl; } ini2ipmFile.close(); ofstream outlierFile((save_path +"outlier.txt").c_str()); vector<Point2f> allIpmPoints; vector<Point2f> allIpmPoints2IniImg; int num2 = 0; for (int m = 0; m < ipmSize.height; m++) { for (int n = 0; n < ipmSize.width; n++) { allIpmPoints.push_back(cvPoint(n,m)); num2++; } } perspectiveTransform(allIpmPoints, allIpmPoints2IniImg, ipm2imgMatrix); num2 = 0; for (int m = 0; m < ipmSize.height; m++) { for (int n = 0; n < ipmSize.width; n++) { Point2f tmpPt = allIpmPoints2IniImg[num2]; if (tmpPt.x < 5 || tmpPt.x > img_cols - 5 || tmpPt.y < 5 || tmpPt.y > img_rows - 5) { // outlier outlierFile<<n<<" "<<m<<endl; } num2++; } } outlierFile.close(); delete [] srcPoint; delete [] dstPoint; return 0; }
void Train::warpMatrix(Size sz, double yaw, double pitch, double roll, double scale, double fovy, Mat &M, vector<Point2f>* corners) { double st=sin(deg2Rad(roll)); double ct=cos(deg2Rad(roll)); double sp=sin(deg2Rad(pitch)); double cp=cos(deg2Rad(pitch)); double sg=sin(deg2Rad(yaw)); double cg=cos(deg2Rad(yaw)); double halfFovy=fovy*0.5; double d=hypot(sz.width,sz.height); double sideLength=scale*d/cos(deg2Rad(halfFovy)); double h=d/(2.0*sin(deg2Rad(halfFovy))); double n=h-(d/2.0); double f=h+(d/2.0); Mat F=Mat(4,4,CV_64FC1); Mat Rroll=Mat::eye(4,4,CV_64FC1); Mat Rpitch=Mat::eye(4,4,CV_64FC1); Mat Ryaw=Mat::eye(4,4,CV_64FC1); Mat T=Mat::eye(4,4,CV_64FC1); Mat P=Mat::zeros(4,4,CV_64FC1); Rroll.at<double>(0,0)=Rroll.at<double>(1,1)=ct; Rroll.at<double>(0,1)=-st;Rroll.at<double>(1,0)=st; Rpitch.at<double>(1,1)=Rpitch.at<double>(2,2)=cp; Rpitch.at<double>(1,2)=-sp;Rpitch.at<double>(2,1)=sp; Ryaw.at<double>(0,0)=Ryaw.at<double>(2,2)=cg; Ryaw.at<double>(0,2)=sg;Ryaw.at<double>(2,0)=sg; T.at<double>(2,3)=-h; P.at<double>(0,0)=P.at<double>(1,1)=1.0/tan(deg2Rad(halfFovy)); P.at<double>(2,2)=-(f+n)/(f-n); P.at<double>(2,3)=-(2.0*f*n)/(f-n); P.at<double>(3,2)=-1.0; F=P*T*Rpitch*Rroll*Ryaw; double ptsIn [4*3]; double ptsOut[4*3]; double halfW=sz.width/2, halfH=sz.height/2; ptsIn[0]=-halfW;ptsIn[ 1]= halfH; ptsIn[3]= halfW;ptsIn[ 4]= halfH; ptsIn[6]= halfW;ptsIn[ 7]=-halfH; ptsIn[9]=-halfW;ptsIn[10]=-halfH; ptsIn[2]=ptsIn[5]=ptsIn[8]=ptsIn[11]=0; Mat ptsInMat(1,4,CV_64FC3,ptsIn);Mat ptsOutMat(1,4,CV_64FC3,ptsOut); perspectiveTransform(ptsInMat,ptsOutMat,F); Point2f ptsInPt2f[4];Point2f ptsOutPt2f[4]; for(int i=0;i<4;i++) { Point2f ptIn(ptsIn[i*3+0],ptsIn[i*3+1]); Point2f ptOut(ptsOut[i*3+0],ptsOut[i*3+1]); ptsInPt2f[i]=ptIn+Point2f(halfW,halfH); ptsOutPt2f[i]=(ptOut+Point2f(1,1))*(sideLength*0.5); } M=getPerspectiveTransform(ptsInPt2f,ptsOutPt2f); if(corners!=NULL) { corners->clear(); corners->push_back(ptsOutPt2f[0]); corners->push_back(ptsOutPt2f[1]); 
corners->push_back(ptsOutPt2f[2]); corners->push_back(ptsOutPt2f[3]); } }