/**
 * @brief Mouse-press handler that starts a drag-and-drop of a pattern bar.
 *
 * When the user presses the mouse over one of the pattern bars, the bar's
 * background colour and pattern length are serialised into a QMimeData
 * payload and a QDrag is started so the pattern can be dropped onto the
 * timeline.
 *
 * @param[in] event the mouse press event.
 */
void BARPatternBarScrollAreaContents::mousePressEvent(QMouseEvent *event)
{
    // childAt() returns the child widget under the cursor; cast it to the
    // concrete pattern-bar type.  (The previously computed, unused x
    // coordinate has been removed.)
    BARPatternBar *pBar = static_cast<BARPatternBar*>(childAt(event->pos()));
    if (!pBar) {
        return; // the press did not hit a pattern bar
    }

    // Pixmap displayed under the cursor while dragging.
    QSize patternSize(100, 60);
    QPixmap pixmap(patternSize);
    pixmap.fill(pBar->getBgColor());

    // Pack up the data sent through the drag-and-drop: the bar's colour and
    // the duration of its associated pattern.
    QByteArray itemData;
    QDataStream dataStream(&itemData, QIODevice::WriteOnly);
    dataStream << pBar->getBgColor();
    dataStream << pBar->getPatternLength();

    QMimeData *mimeData = new QMimeData;
    mimeData->setData("application/x-dnditemdata", itemData);
    // Name of the pattern associated with the dragged bar.  Use the already
    // null-checked pBar instead of calling childAt() a second time.
    mimeData->setText(pBar->accessibleName());

    // QDrag takes ownership of mimeData and is deleted by Qt after exec().
    QDrag *drag = new QDrag(this);
    drag->setMimeData(mimeData);
    drag->setPixmap(pixmap);
    // Keep the pixmap anchored where the user grabbed the bar.
    drag->setHotSpot(event->pos() - pBar->pos());

    // A completed move removes the source bar; any other outcome keeps it.
    if (drag->exec(Qt::CopyAction | Qt::MoveAction, Qt::CopyAction) == Qt::MoveAction) {
        pBar->close();
    } else {
        pBar->show();
    }
}
/**
 * Interactive tool that collects 2D/3D point correspondences for extrinsic
 * calibration.  For every captured image/cloud pair it detects a chessboard
 * in the image and a planar board marker in the cloud, shows both to the
 * user, and (optionally after confirmation) stores the matched points into
 * the calibration storage.
 */
int main(int argc, char **argv) {

    /*** command line arguments ***/
    BasicAppOptions appopt(argc, argv);
    if (!appopt.gotCalibStorageDir) {
        pcl::console::print_error(
                "No calibration storage provided. --calibstorage <path>\n");
        exit(1);
    }
    if (!appopt.gotRigConfigFile) {
        pcl::console::print_error(
                "No rig config provided. --rigconfig <file>\n");
        exit(1);
    }

    /* 3d marker (board) dimensions -- required */
    float boardWidth, boardHeight;
    if (pcl::console::parse(argc, argv, "-bw", boardWidth) == -1) {
        print_usage();
        exit(1);
    }
    if (pcl::console::parse(argc, argv, "-bh", boardHeight) == -1) {
        print_usage();
        exit(1);
    }

    /* 2d chessboard properties: corner counts and square size -- required */
    int patternWidth, patternHeight;
    float squareSize;
    if (pcl::console::parse(argc, argv, "-pw", patternWidth) == -1) {
        print_usage();
        exit(1);
    }
    if (pcl::console::parse(argc, argv, "-ph", patternHeight) == -1) {
        print_usage();
        exit(1);
    }
    if (pcl::console::parse(argc, argv, "-ps", squareSize) == -1) {
        print_usage();
        exit(1);
    }

    /* tolerance for the 3d marker (optional, default 0.05) */
    float boardSigma = 0.05;
    pcl::console::parse(argc, argv, "-bs", boardSigma);

    /* -nc disables the per-pair confirmation prompts */
    bool doConfirm = true;
    doConfirm = !pcl::console::find_switch(argc, argv, "-nc");
    /* -nosave disables writing results to the calibration storage */
    bool storeResults = true;
    storeResults = !pcl::console::find_switch(argc, argv, "-nosave");

    /* help */
    if (pcl::console::find_switch(argc, argv, "-h") ||
            pcl::console::find_switch(argc, argv, "--help")) {
        print_usage();
        exit(0);
    }

    /*****************************/

    /* the calibration storage (captured files and stored point pairs) */
    CalibStorageContract calibStorage(appopt.calibStorageDir);

    /* the rig config supplies the camera intrinsics used below */
    RigConfig rigConfig;
    rigConfig.loadFromFile(appopt.rigConfigFile);

    /* setup the cloud visualizer */
    CalibVisualizer visualizer;
    visualizer.start();

    /* image windows */
    cv::namedWindow("camera", CV_WINDOW_NORMAL|CV_GUI_EXPANDED);

    /* the captured image/cloud file pairs */
    std::vector<CalibStorageContract::FilePair> pairPaths;
    pairPaths = calibStorage.getExtrinsicFiles();

    int pairCounter = 0;
    for (auto fpair : pairPaths) {

        /* print loop information */
        std::stringstream ss;
        ss << "[Find Pairs] ==== PROCESSING PAIR " << ++pairCounter
            << "/" << pairPaths.size() << " ====" << std::endl;
        printSimpleInfo(ss.str());

        /* load image from file */
        cv::Mat img = cv::imread(fpair.first, CV_LOAD_IMAGE_COLOR);
        /* load cloud from file */
        Cloud::Ptr cloud( new Cloud);
        pcl::io::loadPCDFile<PointT>(fpair.second, *cloud);

        /* display image */
        cv::imshow("camera", img);
        // spin once so the HighGUI window repaints
        cv::waitKey(1);

        /* display cloud */
        visualizer.setMainCloud(cloud);
        visualizer.setDrawMarker(false);

        /*** extract points from the 2d chessboard pattern ***/
        cv::Size patternSize(patternWidth, patternHeight);
        std::vector<cv::Point2f> patternCorners;
        bool found2d = cv::findChessboardCorners (img, patternSize,
                patternCorners, CV_CALIB_CB_ADAPTIVE_THRESH
                | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);

        /* display results on image */
        if (found2d) {
            printSimpleInfo("[Chessboard] ", "found.\n");
            /* draw the middle corner of the pattern as a filled circle */
            cv::Point2f patternCenter = patternCorners[patternCorners.size()/2];
            cv::circle(img, patternCenter, 15.0, cv::Scalar(0, 255, 0), -1);
        } else {
            printWarning("[Chessboard] ", "not found.\n");
        }

        /* draw the 2d pattern */
        cv::drawChessboardCorners(img, patternSize, patternCorners, found2d);
        cv::imshow("camera", img);
        cv::waitKey(1);

        /*** extract the 3d board marker from the cloud ***/
        PointT bPoint;
        PlaneMarker<PointT> planeMarker(boardWidth, boardHeight, boardSigma);
        bool found3d = planeMarker.computeMarkerCenter(cloud, bPoint);
        pcl::PointXYZ boardPoint = pointRGBAtoXYZ(bPoint);

        /* display results in cloud viewer */
        if (found3d) {
            printSimpleInfo("[BoardMarker] ", "found.\n");
            /* draw the 3d marker */
            visualizer.setMarkerCenter(boardPoint, true);
            visualizer.setDrawMarker(true);
        } else {
            printWarning("[BoardMarker] ", "not found.\n");
            visualizer.setMarkerCenter(boardPoint, false);
        }

        /* a marker pair was found */
        if (found2d && found3d) {
            printBrightInfo("[Marker Pair] ", "found.\n");

            /* set the center point of the 3d marker */
            cv::Point3f point3d;
            point3d.x = boardPoint.x;
            point3d.y = boardPoint.y;
            point3d.z = boardPoint.z;

            /* get the middle point from the 2d pattern */
            cv::Point2f point2d = patternCorners[patternCorners.size()/2];

            /* get the transformation of the pattern
             * and its middle point in 3d */
            cv::Mat patternTvec, patternRvec;
            getPatternTransform(patternSize, squareSize, patternCorners,
                    rigConfig.cameraMatrix,
                    rigConfig.cameraDistortionCoefficients,
                    patternTvec, patternRvec);

            //FIXME transform point properly
            // center point of the marker in object space
            cv::Point3f objectPoint(0.0, 0.0, 0.0);
            // point in camera space
            cv::Point3f patternPoint3d;
            cv::Point3f t;
            t.x = patternTvec.at<double>(0,0);
            t.y = patternTvec.at<double>(1,0);
            t.z = patternTvec.at<double>(2,0);
            cv::Mat rmat;
            cv::Rodrigues(patternRvec, rmat);
            cv::Matx33f rotation_matrix = rmat;

            // pre rotation -> FIXME check again
            // at the moment this looks correct,
            // and the float/double conversion is correct as well
            patternPoint3d = (rotation_matrix * objectPoint) + t;

            std::stringstream ss;
            ss << "Chessboard at tvec:" << patternTvec << " rvec: ";
            ss << patternRvec << std::endl;
            printSimpleInfo("[Chessboard 3D] ", ss.str());

            /* query the user if the sample should be stored */
            bool doAddPointPair = true;
            if (doConfirm) {
                printBrightInfo("[Add Marker Pair] ", "store? [y]/[n]\n");
                for (;;) {
                    int key = cv::waitKey(1);
                    if (key == KEY_y) {
                        doAddPointPair = true;
                        break;
                    } else if (key == KEY_n) {
                        doAddPointPair = false;
                        break;
                    } else if (key > 0) {
                        // any other key: repeat the prompt
                        printBrightInfo("[Add Marker Pair] ",
                                "store? [y]/[n]\n");
                    }
                }
            }

            /* store point pair */
            if (doAddPointPair) {
                calibStorage.addExtrinsicPointPair(point3d, point2d);
                calibStorage.addExtrinsicPointPair3d(
                        point3d, patternPoint3d);
                //FIXME don't store every time, only on exit
                if (storeResults) {
                    calibStorage.saveExtrinsicPointPairs();
                    calibStorage.saveExtrinsicPointPairs3d();
                }
            }

        } else { // no marker pair
            printWarning("[Marker Pair] ", "not found.\n");
            cv::waitKey(1);

            /* confirm before next iteration */
            if (doConfirm) {
                printWarning("[Continue] ", "Hit any key.\n");
                while (true) {
                    int key = cv::waitKey(1);
                    if (key > 0) {
                        break;
                    }
                }
            }
        }

    } // for all pairs

    /* finally save to calibration storage */
    if (storeResults) {
        calibStorage.saveExtrinsicPointPairs();
        calibStorage.saveExtrinsicPointPairs3d();
    }
}
/**
 * @brief Calibrates camera and projector using per-corner local homographies.
 *
 * For every recorded frame sequence the structured-light codes are decoded,
 * chessboard corners are extracted from the shading image, and each camera
 * corner is mapped into projector coordinates through a homography estimated
 * from the decoded up/vp values in a small window around the corner.  The
 * collected camera/projector/world correspondences then feed the OpenCV
 * camera, projector and stereo calibrations.
 *
 * @return the calibration result; a default-constructed CalibrationData if
 *         no sequence yielded usable correspondences.
 */
CalibrationData CalibratorLocHom::calibrate() {
    QSettings settings("SLStudio");

    // Checkerboard parameters (persisted application settings).
    float checkerSize = settings.value("calibration/checkerSize").toFloat();
    std::cout << "checkerSize== " << checkerSize << std::endl;
    unsigned int checkerRows = settings.value("calibration/checkerRows").toInt();
    unsigned int checkerCols = settings.value("calibration/checkerCols").toInt();

    // Number of saddle points on the calibration pattern.
    cv::Size patternSize(checkerCols, checkerRows);

    // Number of calibration sequences.
    unsigned nFrameSeq = frameSeqsFromFile.size();

    // Guard: up[0] is dereferenced below, so bail out early with an empty
    // result when there is nothing to decode.
    if (nFrameSeq == 0) {
        std::cerr << "Error: not enough calibration sequences!" << std::endl;
        return CalibrationData();
    }

    vector<cv::Mat> up(nFrameSeq), vp(nFrameSeq), shading(nFrameSeq), mask(nFrameSeq);

    // Decode each frame sequence into projector coordinates (up/vp), a
    // validity mask and a shading image.
    std::cout << "Decode frames: Num=" << nFrameSeq << ", and begin.....>> ";
    for (unsigned int i = 0; i < nFrameSeq; i++) {
        vector<cv::Mat> frames;
        vector<std::string> framesFromFile = frameSeqsFromFile[i];
        for (unsigned int m = 0; m < framesFromFile.size(); m++) {
            cv::Mat curFrame = cv::imread(framesFromFile[m], CV_LOAD_IMAGE_GRAYSCALE);
            curFrame = curFrame.clone();
            frames.push_back(curFrame);
        }
        for (unsigned int f = 0; f < frames.size(); f++) {
            decoder->setFrame(f, frames[f]);
        }
        decoder->decodeFrames(up[i], vp[i], mask[i], shading[i]);
        std::cout << i << ",";
    }
    std::cout << "-->end||." << std::endl;

    unsigned int frameWidth = up[0].cols;
    unsigned int frameHeight = up[0].rows;

    // Generate local calibration object coordinates [mm].
    std::cout << "Generate local calibration object coordinates" << std::endl;
    vector<cv::Point3f> Qi;
    for (int h = 0; h < patternSize.height; h++)
        for (int w = 0; w < patternSize.width; w++)
            Qi.push_back(cv::Point3f(checkerSize * w, checkerSize * h, 0.0));

    // Find calibration point coordinates for camera and projector.
    std::cout << "Find calibration point coordinates for camera and projector!" << std::endl;
    vector< vector<cv::Point2f> > qc, qp;
    vector< vector<cv::Point3f> > Q;
    for (unsigned int i = 0; i < nFrameSeq; i++) {
        vector<cv::Point2f> qci;

        // Extract checker corners from the shading image.
        std::cout << i << ": findChessboardCorners......" << std::endl;
        bool success = cv::findChessboardCorners(shading[i], patternSize, qci,
                cv::CALIB_CB_ADAPTIVE_THRESH + CV_CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK);
        std::cout << " cv::findChessboardCorners: sucess = " << success << std::endl;
        if (!success)
            std::cout << "Calibrator: could not extract chess board corners on frame seqence " << i << std::endl << std::flush;
        else {
            std::cout << i << ": cornerSubPix......" << std::endl;
            // Refine corner locations to sub-pixel accuracy.
            cv::cornerSubPix(shading[i], qci, cv::Size(5, 5), cv::Size(1, 1),
                    cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 20, 0.01));
        }

        // Draw colored chessboard for user feedback.
        cv::Mat shadingColor;
        cv::cvtColor(shading[i], shadingColor, cv::COLOR_GRAY2RGB);
        cv::drawChessboardCorners(shadingColor, patternSize, qci, success);
#if 1
        QString filename = QString("am_shadingColor%1.bmp").arg(i, 2, 10, QChar('0'));
        cv::imwrite(filename.toStdString(), shadingColor);
#endif
        // Emit chessboard results to the GUI.
        emit newSequenceResult(shadingColor, i, success);

        if (success) {
            // Vectors of accepted points for the current view.
            vector<cv::Point2f> qpi_a;
            vector<cv::Point2f> qci_a;
            vector<cv::Point3f> Qi_a;

            // Loop through checkerboard corners.
            for (unsigned int j = 0; j < qci.size(); j++) {
                const cv::Point2f &qcij = qci[j];

                // Collect decoded projector coordinates in a window around
                // the corner.  FIX: the window bounds are computed in
                // *signed* ints -- the original `int - unsigned` arithmetic
                // wrapped around for corners closer than WINDOW_SIZE to the
                // top/left border, producing huge start indices.
                const int WINDOW_SIZE = 10;
                std::vector<cv::Point2f> N_qcij, N_qpij;
                unsigned int starth = max(int(qcij.y + 0.5) - WINDOW_SIZE, 0);
                unsigned int stoph = min(int(qcij.y + 0.5) + WINDOW_SIZE, int(frameHeight) - 1);
                unsigned int startw = max(int(qcij.x + 0.5) - WINDOW_SIZE, 0);
                unsigned int stopw = min(int(qcij.x + 0.5) + WINDOW_SIZE, int(frameWidth) - 1);

                for (unsigned int h = starth; h <= stoph; h++) {
                    for (unsigned int w = startw; w <= stopw; w++) {
                        // Stay within the decoder's validity mask.
                        // NOTE(review): at<bool> assumes a 1-byte mask
                        // element -- confirm against decodeFrames().
                        if (mask[i].at<bool>(h, w)) {
                            N_qcij.push_back(cv::Point2f(w, h));
                            float upijwh = up[i].at<float>(h, w);
                            float vpijwh = vp[i].at<float>(h, w);
                            N_qpij.push_back(cv::Point2f(upijwh, vpijwh));
                        }
                    }
                }

                // Only build a homography from a reasonably dense window.
                if (N_qpij.size() >= 50) {
                    // Translate qcij into projector space via the local homography.
                    cv::Mat H = cv::findHomography(N_qcij, N_qpij, cv::LMEDS);
                    if (!H.empty()) {
                        // Renamed from `Q` to avoid shadowing the outer
                        // world-coordinate list.
                        cv::Point3d Qh = cv::Point3d(cv::Mat(H * cv::Mat(cv::Point3d(qcij.x, qcij.y, 1.0))));
                        cv::Point2f qpij = cv::Point2f(Qh.x / Qh.z, Qh.y / Qh.z);
                        qpi_a.push_back(qpij);
                        qci_a.push_back(qci[j]);
                        Qi_a.push_back(Qi[j]);
                    }
                }
            }

            if (!Qi_a.empty()) {
                // Store projector, camera and world corner coordinates.
                qp.push_back(qpi_a);
                qc.push_back(qci_a);
                Q.push_back(Qi_a);
            }
        }
    }

    if (Q.size() < 1) {
        std::cerr << "Error: not enough calibration sequences!" << std::endl;
        CalibrationData nanData;
        return nanData;
    }

    // Calibrate the camera.
    std::cout << "calibrate the camera!" << std::endl;
    cv::Mat Kc, kc;
    std::vector<cv::Mat> cam_rvecs, cam_tvecs;
    cv::Size frameSize(frameWidth, frameHeight);
    double cam_error = cv::calibrateCamera(Q, qc, frameSize, Kc, kc, cam_rvecs, cam_tvecs,
            cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_PRINCIPAL_POINT + cv::CALIB_FIX_K2
            + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST,
            cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, DBL_EPSILON));

    // Calibrate the projector.
    std::cout << "calibrate the projector!" << std::endl;
    cv::Mat Kp, kp;
    std::vector<cv::Mat> proj_rvecs, proj_tvecs;
    cv::Size screenSize(screenCols, screenRows);
    double proj_error = cv::calibrateCamera(Q, qp, screenSize, Kp, kp, proj_rvecs, proj_tvecs,
            cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_K2 + cv::CALIB_FIX_K3
            + cv::CALIB_ZERO_TANGENT_DIST,
            cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, DBL_EPSILON));

    // Stereo calibration with fixed intrinsics (OpenCV 3.x argument order).
    std::cout << "stereo calibrate!" << std::endl;
    cv::Mat Rp, Tp, E, F;
    double stereo_error = cv::stereoCalibrate(Q, qc, qp, Kc, kc, Kp, kp, frameSize, Rp, Tp, E, F,
            cv::CALIB_FIX_INTRINSIC,
            cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON));

    CalibrationData calData(Kc, kc, cam_error, Kp, kp, proj_error, Rp, Tp, stereo_error);
    calData.print(std::cout);

    // Determine per-view reprojection errors.
    std::vector<float> cam_error_per_view(Q.size());
    std::vector<float> proj_error_per_view(Q.size());
    for (unsigned int i = 0; i < (unsigned int)Q.size(); ++i) {
        int n = (int)Q[i].size();

        vector<cv::Point2f> qc_proj;
        cv::projectPoints(cv::Mat(Q[i]), cam_rvecs[i], cam_tvecs[i], Kc, kc, qc_proj);
        float err = 0;
        for (unsigned int j = 0; j < qc_proj.size(); j++) {
            cv::Point2f d = qc[i][j] - qc_proj[j];
            err += cv::sqrt(d.x * d.x + d.y * d.y);
        }
        cam_error_per_view[i] = (float)err / n;

        vector<cv::Point2f> qp_proj;
        cv::projectPoints(cv::Mat(Q[i]), proj_rvecs[i], proj_tvecs[i], Kp, kp, qp_proj);
        err = 0;
        // FIX: iterate the projector reprojections (was qc_proj.size()).
        for (unsigned int j = 0; j < qp_proj.size(); j++) {
            cv::Point2f d = qp[i][j] - qp_proj[j];
            err += cv::sqrt(d.x * d.x + d.y * d.y);
        }
        proj_error_per_view[i] = (float)err / n;

        std::cout << "Seq error " << i + 1 << " cam:" << cam_error_per_view[i]
                << " proj:" << proj_error_per_view[i] << std::endl;
    }

    return calData;
}
void CCalibrateKinect::locate2DCorners ( const std::vector< cv::Mat >& vImages_, const int& nX_, const int& nY_, std::vector< std::vector<cv::Point2f> >* pvv2DCorners_, int nPatternType_ ) const //nPatternType_ = SQUARE { CHECK ( !vImages_.empty(), "locate2DCorners(): no images.\n" ); if ( SQUARE == nPatternType_ ) { std::cout << " locate chessboard corners.\n "; pvv2DCorners_->clear(); cv::Size patternSize ( nX_, nY_ ); for ( unsigned int i = 0; i < vImages_.size(); i++ ) { const cv::Mat& cvFrame = vImages_[i] ; std::vector<cv::Point2f> vCurrentCorners;//float 2d point is required by the OpenCV API. //locate corners roughly bool _bChessBoardCornersFoundThisFrame = cv::findChessboardCorners ( cvFrame, patternSize, vCurrentCorners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS ); CHECK ( _bChessBoardCornersFoundThisFrame, " No corners are found.\n" ); PRINT( vCurrentCorners.size() ); //locate corners in sub-pixel level cv::Mat cvFrameGrey; cv::cvtColor ( cvFrame, cvFrameGrey, CV_BGR2GRAY ); cv::cornerSubPix ( cvFrameGrey, vCurrentCorners, cv::Size ( 9, 9 ), cv::Size ( -1, -1 ), cv::TermCriteria ( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1 ) ); //store the corners inpto a std::vector pvv2DCorners_->push_back ( vCurrentCorners ); } } else if ( CIRCLE == nPatternType_ ) { std::cout << "locate circle centers.\n" ; pvv2DCorners_->clear(); PRINT( nX_ ); PRINT( nY_ ); cv::Size patternSize ( nX_, nY_ ); for ( unsigned int i = 0; i < vImages_.size(); i++ ) { const cv::Mat& cvFrame = vImages_[i] ; cv::Mat cvmTmp; cv::cvtColor ( cvFrame, cvmTmp, CV_RGB2GRAY ); std::vector<cv::Point2f> vCurrentCorners;//float 2d point is required by the OpenCV API. 
//locate corners roughly bool _bChessBoardCornersFoundThisFrame = cv::findCirclesGrid ( cvmTmp, patternSize, vCurrentCorners, cv::CALIB_CB_ASYMMETRIC_GRID ); PRINT( vCurrentCorners.size() ); //CHECK ( _bChessBoardCornersFoundThisFrame, " No corners are found.\n" ); //store the corners inpto a std::vector pvv2DCorners_->push_back ( vCurrentCorners ); } } return; }