int main(int argc, char * argv[]) { int width = 640; int height = 480; parse_argument(argc, argv, "-w", width); parse_argument(argc, argv, "-h", height); Resolution::getInstance(width, height); Bytef * decompressionBuffer = new Bytef[Resolution::getInstance().numPixels() * 2]; IplImage * deCompImage = 0; std::string logFile; assert(parse_argument(argc, argv, "-l", logFile) > 0 && "Please provide a log file"); RawLogReader logReader(decompressionBuffer, deCompImage, logFile, find_argument(argc, argv, "-f") != -1); cv::Mat1b tmp(height, width); cv::Mat3b depthImg(height, width); while(logReader.hasMore()) { logReader.getNext(); cv::Mat3b rgbImg(height, width, (cv::Vec<unsigned char, 3> *)logReader.deCompImage->imageData); cv::Mat1w depth(height, width, (unsigned short *)&decompressionBuffer[0]); cv::normalize(depth, tmp, 0, 255, cv::NORM_MINMAX, 0); cv::cvtColor(tmp, depthImg, CV_GRAY2RGB); cv::imshow("RGB", rgbImg); cv::imshow("Depth", depthImg); char key = cv::waitKey(1); if(key == 'q') break; else if(key == ' ') key = cv::waitKey(0); if(key == 'q') break; } delete [] decompressionBuffer; if(deCompImage) { cvReleaseImage(&deCompImage); } return 0; }
// Grabs the next depth frame from the Kinect depth stream into `image`
// (CV_16U). Returns false when no frame is available within the timeout or
// any SDK call fails; `image` is zeroed first so callers always get a
// well-defined Mat.
bool KinectCapture::GetDepthImage(cv::Mat & image)
{
    NUI_IMAGE_FRAME imageFrame;
    NUI_LOCKED_RECT lockedRect;
    HRESULT hr;

    image = cv::Mat::zeros(m_ColorHeight, m_ColorWidth, CV_16U);

    // Get a depth frame from Kinect (30 ms timeout).
    hr = m_NuiSensor->NuiImageStreamGetNextFrame(m_DepthStreamHandle, 30, &imageFrame);
    if (FAILED(hr))
    {
        if (hr != E_NUI_FRAME_NO_DATA)  // no-data within the timeout is an expected, silent miss
        {
            std::cout << "Kinect NuiImageStreamGetNextFrame call failed." << std::endl;
        }
        return false;
    }

    INuiFrameTexture* depthTexture = imageFrame.pFrameTexture;

    // Lock the frame data to access depth data.
    hr = depthTexture->LockRect(0, &lockedRect, NULL, 0);
    if (FAILED(hr) || lockedRect.Pitch == 0)
    {
        std::cout << "Error getting depth texture pixels." << std::endl;
        // FIX: the original leaked the frame on this path; the stream has a
        // finite buffer pool, so unreleased frames eventually starve capture.
        m_NuiSensor->NuiImageStreamReleaseFrame(m_DepthStreamHandle, &imageFrame);
        return false;
    }

    // Copy the depth pixels so we can return the image frame.
    errno_t err = memcpy_s(m_depthImagePixelBuffer,
                           m_DepthImagePixels * sizeof(USHORT),
                           lockedRect.pBits,
                           depthTexture->BufferLen());

    depthTexture->UnlockRect(0);

    // Release the Kinect camera frame before any further early return.
    // FIX: the original discarded this HRESULT and re-tested the stale `hr`
    // from LockRect, so a release failure could never be detected.
    hr = m_NuiSensor->NuiImageStreamReleaseFrame(m_DepthStreamHandle, &imageFrame);

    if (0 != err)
    {
        std::cout << "Error copying depth texture pixels." << std::endl;
        return false;
    }

    if (FAILED(hr))
    {
        std::cout << "Kinect Depth stream NuiImageStreamReleaseFrame call failed." << std::endl;
        return false;
    }

    // Wrap the member buffer, then clone: the original returned a Mat that
    // aliased m_depthImagePixelBuffer, which the next call overwrites.
    cv::Mat depthImg(m_DepthHeight, m_DepthWidth, CV_16U, m_depthImagePixelBuffer);
    image = depthImg.clone();

    return true;
}
//---------------------------------------------------------------------------------------
// Interactive Kinect capture tool: grabs depth/color/skeleton frames (selected
// via the USE_*_IMAGES compile-time switches), maps depth to color space for
// display, lets the user pick an ROI with the mouse ("Depth" window), and
// optionally dumps frames/joints to per-stream directories.
int main(int argc, char** argv)
{
    // Setup the kinect
    ethos::cpr::Kinect kinect;
    if (!kinect.Initialize())
    {
        return -1;
    }

    bClicked = false;  // set by the mouse callback when the user finishes an ROI selection

    // The image holders
    cv::Mat depthImg, colorImg, skeletalImg, maskOverColorImg, maskImg, maskedDepthImg, labelImg;
    bool foundSkeleton;
    cv::Mat CroppedDepth, CroppedColor;
    cv::Mat CaliDepth;
    cv::Size dSize = cv::Size(320, 240);

    // 3-channel versions of image holders for depth and color
    // (skeleton already 3-channels)
    cv::Mat depthImg3, colorImg3;

    // Save images?
    const bool saveDebugImages = true;
    // NOTE(review): 1 MB stack buffer for a filename — 1024 bytes would do and
    // this risks stack exhaustion on some platforms. Left as-is.
    char imgFilename[1024*1024];
    int imgFrameCounter = 0;
    if (saveDebugImages)
    {
#ifdef USE_DEPTH_IMAGES
        _mkdir("depth");
#endif
#ifdef USE_COLOR_IMAGES
        _mkdir("color");
#endif
#ifdef USE_SKELETAL_IMAGES
        _mkdir("skeleton");
#endif
    }

    // Save joint values?
    const bool saveJoints = true;
    char jointFilename[1024];
    int jointFrameCounter = 0;
    if (saveJoints)
    {
#ifdef USE_SKELETAL_IMAGES
        _mkdir("joints");
#endif
    }

    // Grab images until user says to stop
    while (true)
    {
#ifdef USE_DEPTH_IMAGES
        // Grab next frames from the kinect
        if (!kinect.GetDepthImage(depthImg)) { break; }
        cv::imshow("Depth", depthImg);
#endif
#ifdef USE_COLOR_IMAGES
        if (!kinect.GetColorImage(colorImg)) { break; }
        cv::imshow("Color", colorImg);
#endif
#ifdef USE_SKELETAL_IMAGES
        if (!kinect.GetSkeletalImage(skeletalImg, &foundSkeleton)) { break; }
        //const ethos::cpr::SkeletonJoints& joints = kinect.GetSkeletalJoints();
        cv::imshow("Skeleton", skeletalImg);
#endif
#ifdef USE_COLOR_AND_DEPTH_IMAGES
        if (!kinect.GetDepthImage(depthImg)) { break; }
        cv::imshow("Depth", depthImg);
        if (!kinect.GetColorImage(colorImg)) { break; }
        cv::imshow("Color", colorImg);
#endif

        // NOTE(review): registered every iteration although the comment further
        // down says "only call once per run" — harmless but should be hoisted
        // out of the loop (after the first imshow creates the window).
        cvSetMouseCallback( "Depth", on_mouse, 0 );

        /* ***************************************************************************************************
        //Calibration
        {
            cv::Point3f depthPt;
            cv::Point3f colorPt;
            CaliDepth = depthImg;
            CaliDepth.setTo(0);
            for (int j = 0; j < depthImg.rows; j ++)
            {
                uchar* data_depth= depthImg.ptr<uchar>(j);
                for (int i = 0; i < depthImg.cols; i ++)
                {
                    depthPt.z = data_depth[i] * 5.0f; //z, according to the previous convert factor: depthF /= 1300.0f; depthF *= 255.0f. Revert back;
                    depthPt.x = depthPt.z * (i - 324.3) / 526.7; //x
                    depthPt.y = depthPt.z * (i - 247.8) / 525.8; //y
                    //convert to color camera 2D
                    colorPt.x = 509.98f * depthPt.x - 18.124f * depthPt.y + 349.569f * depthPt.z - 11661.2f; //x
                    colorPt.y = -11.765f * depthPt.x + 512.72f * depthPt.y + 273.624f * depthPt.z + 153.29f; //y
                    colorPt.z = -0.0496f * depthPt.x - 0.0502f * depthPt.y + 0.9975f * depthPt.z + 7.4660f; //w
                    colorPt.x = (int)(colorPt.x / colorPt.z); //normalize by w
                    colorPt.y = (int)(colorPt.y / colorPt.z); //normalize by w
                    //check boundary
                    if (colorPt.x > depthImg.cols || colorPt.x < 0 || colorPt.y > depthImg.cols || colorPt.y < 0)
                        continue;
                    int ii = colorPt.y; //ii is the transformed x in color image
                    int jj = colorPt.x; //jj is the transformed y in color image
                    CaliDepth.ptr<uchar>(jj)[ii] = data_depth[i]; //assign the same depth value from the original depth image with a new position (ii,jj)
                }
            }
            cv::imshow("Cali_Depth", CaliDepth );
        }
        *************************************************************************************************** */

        // Depth-to-color registration using the SDK-provided coordinate map.
        LONG m_depthWidth = 640;
        LONG m_depthHeight = 480;
        LONG m_colorWidth = 640;
        LONG m_colorHeight = 480;
        // m_colorCoordinates holds interleaved (x, y) color coordinates, one
        // pair per depth pixel.
        LONG* m_colorCoordinates = kinect.GetColorCoordinates();

        cv::Mat display(480,640,CV_8UC3,cv::Scalar(0));

        // loop over each row and column of the color
        for (LONG y = 0; y < m_colorHeight; ++y)
        {
            // NOTE(review): row pitch computed as cols * y (bytes) although
            // pDest is a LONG* into a multi-byte-per-pixel image — this looks
            // like it should be colorImg.step * y. It also writes back into
            // colorImg while colorImg is simultaneously the copy source below.
            // Verify against the original intent before touching.
            LONG* pDest = (LONG*)((BYTE*)colorImg.data + colorImg.cols * y);
            unsigned char* pColor = display.ptr<unsigned char>(y);

            for (LONG x = 0; x < m_colorWidth; ++x, pColor += 3)
            {
                // calculate index into depth array
                int depthIndex = x + y * m_depthWidth;

                // retrieve the depth to color mapping for the current depth pixel
                LONG colorInDepthX = m_colorCoordinates[depthIndex * 2];
                LONG colorInDepthY = m_colorCoordinates[depthIndex * 2 + 1];

                // make sure the depth pixel maps to a valid point in color space
                if ( colorInDepthX >= 0 && colorInDepthX < m_colorWidth && colorInDepthY >= 0 && colorInDepthY < m_colorHeight )
                {
                    // calculate index into color array
                    LONG colorIndex = colorInDepthX + colorInDepthY * m_colorWidth;

                    // set source for copy to the color pixel
                    LONG* pSrc = (LONG *)(BYTE*)colorImg.data + colorIndex;
                    *pDest = *pSrc;
                }
                else
                {
                    *pDest = 0;
                }

                // Fill-in color image: unpack the 32-bit pixel into the BGR
                // display image.
                {
                    LONG val = *pDest;
                    unsigned char* pVal = (unsigned char*)&val;
                    pColor[0] = *pVal++;
                    pColor[1] = *pVal++;
                    pColor[2] = *pVal++;
                }

                pDest++;
            }
        }

        cv::imshow("Display", display);
        cv::waitKey();

        //set Callback function, only call once per run
        if (bClicked == true)
        {
            //We start to process the image
            cv::Point leftUpCorner = ROI_Vertices[0];
            cv::Point rightDownCorner = ROI_Vertices[1];
            cv::Rect myROI(leftUpCorner.x, leftUpCorner.y, rightDownCorner.x - leftUpCorner.x, rightDownCorner.y - leftUpCorner.y);

            // Crop both streams to the user-selected rectangle (shares data
            // with the full frames — no copy).
            CroppedDepth = depthImg(myROI);
            CroppedColor = colorImg(myROI);

            //cv::bilateralFilter(CroppedDepth, CroppedDepth, CV_BILATERAL, 3, 0);
            // Two passes of light Gaussian smoothing on the cropped depth.
            for (int i = 0; i < 2; i ++)
            {
                cv::GaussianBlur(CroppedDepth, CroppedDepth, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT);
            }

            cv::imshow("new_Depth", CroppedDepth );
            cv::imshow("new_Color", CroppedColor);

            /*
            std::vector<cv::Point> ROI_Poly;
            cv::approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, true);
            cv::fillConvexPoly(maskImg, &ROI_Poly[0], ROI_Poly.size(), cv::Scalar(255,255,255));
            colorImg.copyTo(maskOverColorImg, maskImg);
            inRange(maskOverColorImg, cv::Scalar(15, 15, 15), cv::Scalar(124, 154, 95), maskedDepthImg);
            //inRange(maskOverColorImg, cv::Scalar(50, 65, 40), cv::Scalar(124, 154, 95), maskedDepthImg);
            cv::imshow("Mask", maskOverColorImg);
            int erosion_size = 1;
            cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1), cv::Point(erosion_size, erosion_size) );
            for (int j=0; j<maskedDepthImg.rows; j++)
            {
                uchar* data_depth= maskedDepthImg.ptr<uchar>(j);
                uchar* data_mask = maskImg.ptr<uchar>(j);
                for (int i=0; i<maskedDepthImg.cols; i++)
                {
                    if (data_depth[i] == data_mask[i]) { data_depth[i] = 0; }
                    else { data_depth[i] = 255; }
                }
            }
            for (int i = 0 ; i < 4; i ++) cv::erode(maskedDepthImg, maskedDepthImg, element);
            for (int i = 0; i < 5; i ++) cv::dilate(maskedDepthImg, maskedDepthImg, element);
            std::vector<std::vector<cv::Point> > contours;
            std::vector<cv::Vec4i> hierarchy;
            findContours( maskedDepthImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
            if( !contours.empty() && !hierarchy.empty() )
            {
                for (int idx=0;idx < contours.size();idx++)
                {
                    drawContours(maskedDepthImg,contours,idx,cv::Scalar::all(255),CV_FILLED,8);
                }
            }
            cv::imshow("Depth_Mask", maskedDepthImg);
            maskImg.setTo(0);
            depthImg.copyTo(maskImg, maskedDepthImg);
            cv::imshow("Final", maskImg);
            resize(maskImg, maskImg, dSize);
            */
        }

        // (Optionally) save output debugging images
        if (saveDebugImages)
        {
#ifdef USE_DEPTH_IMAGES
            sprintf(imgFilename, "depth/depthFrame_%04d.png", imgFrameCounter);
            prepareToSave(depthImg, depthImg3);
            cv::imwrite(imgFilename, depthImg3);
#endif
#ifdef USE_COLOR_IMAGES
            sprintf(imgFilename, "color/colorFrame_%04d.png", imgFrameCounter);
            prepareToSave(colorImg, colorImg3);
            cv::imwrite(imgFilename, colorImg3);
#endif
#ifdef USE_SKELETAL_IMAGES
            sprintf(imgFilename, "skeleton/skeletonFrame_%04d.png", imgFrameCounter);
            cv::imwrite(imgFilename, skeletalImg);
#endif
#ifdef USE_COLOR_AND_DEPTH_IMAGES
            // 's' saves the current cropped pair.
            // NOTE(review): "depth_mask"/"color" dirs are not created by the
            // _mkdir block above in this configuration — confirm they exist.
            int key = cv::waitKey(1);
            if (key == 's')
            //if (bClicked == true)
            {
                // sprintf(imgFilename, "depth/depthFrame_%04d.png", imgFrameCounter);
                // prepareToSave(depthImg, depthImg3);
                // cv::imwrite(imgFilename, depthImg3);
                sprintf(imgFilename, "depth_mask/depthFrame_%04d.jpg", imgFrameCounter);
                prepareToSave(CroppedDepth, depthImg3);
                cv::imwrite(imgFilename, depthImg3);
                sprintf(imgFilename, "color/colorFrame_%04d.png", imgFrameCounter);
                prepareToSave(CroppedColor, colorImg3);
                cv::imwrite(imgFilename, colorImg3);
            }
#endif
            ++imgFrameCounter;
        }

        // (Optionally) save joint values
        //if (saveJoints && foundSkeleton)
        //{
#ifdef USE_SKELETAL_IMAGES
            // NOTE(review): jointFrameCounter is never incremented (the ++ is
            // commented out below), so this overwrites jointFrame_0000.txt
            // every iteration — confirm whether that is intended.
            sprintf(jointFilename, "joints/jointFrame_%04d.txt", jointFrameCounter);
            std::ofstream ofs(jointFilename);
            kinect.SaveSkeletalJoints(ofs);
            ofs.close();
#endif
            // ++jointFrameCounter;
        // }

        // Check for user keyboard input to quit early
        int key = cv::waitKey(1);
        if (key == 'q')
        {
            break;
        }
    }

    // Exit application
    return 0;
}
// Periodic UI refresh: pulls the newest frame out of the logger's ring of 10
// buffers, renders depth (normalized) and RGB into the Qt labels, and overlays
// the recording state plus a smoothed fps estimate.
void MainWindow::timerCallback()
{
    // Index of the most recently captured depth frame; -1 means none yet.
    int lastDepth = logger->latestDepthIndex.getValue();

    if(lastDepth == -1)
    {
        return;
    }

    // The logger keeps a ring of 10 frame buffers.
    int bufferIndex = lastDepth % 10;

    // Already drawn this slot — nothing new to show.
    // NOTE(review): lastDrawn is never updated in this function; presumably a
    // member maintained elsewhere — confirm.
    if(bufferIndex == lastDrawn)
    {
        return;
    }

    // Same capture timestamp as last redraw — frame unchanged.
    if(lastFrameTime == logger->frameBuffers[bufferIndex].second)
    {
        return;
    }

    // first.first  = 16-bit depth  (640*480*2 bytes)
    // first.second = RGB888 pixels (640*480*3 bytes)
    memcpy(&depthBuffer[0], logger->frameBuffers[bufferIndex].first.first, 640 * 480 * 2);
    memcpy(rgbImage.bits(), logger->frameBuffers[bufferIndex].first.second, 640 * 480 * 3);

    // Normalize depth to 8-bit and expand to 3 channels, writing straight
    // into the QImage's pixel buffer (no extra copy).
    cv::Mat1w depth(480, 640, (unsigned short *)&depthBuffer[0]);
    normalize(depth, tmp, 0, 255, cv::NORM_MINMAX, 0);
    cv::Mat3b depthImg(480, 640, (cv::Vec<unsigned char, 3> *)depthImage.bits());
    cv::cvtColor(tmp, depthImg, CV_GRAY2RGB);

    // Recording-state banner: red while recording, green while viewing.
    painter->setPen(recording ? Qt::red : Qt::green);
    painter->setFont(QFont("Arial", 30));
    painter->drawText(10, 50, recording ? "Recording" : "Viewing");

    // Sliding window (max 15 samples) of inter-frame intervals; the frame
    // timestamps appear to be in microseconds (divided by 1e6 below).
    frameStats.push_back(abs(logger->frameBuffers[bufferIndex].second - lastFrameTime));

    if(frameStats.size() > 15)
    {
        frameStats.erase(frameStats.begin());
    }

    int64_t speedSum = 0;

    for(unsigned int i = 0; i < frameStats.size(); i++)
    {
        speedSum += frameStats[i];
    }

    // Average interval; computed in float then truncated back to int64.
    int64_t avgSpeed = (float)speedSum / (float)frameStats.size();

    // fps, truncated to one decimal place for display.
    float fps = 1.0f / ((float)avgSpeed / 1000000.0f);
    fps = floor(fps * 10.0f);
    fps /= 10.0f;

    std::stringstream str;
    str << fps << "fps";

    lastFrameTime = logger->frameBuffers[bufferIndex].second;

    painter->setFont(QFont("Arial", 24));
    painter->drawText(10, 455, QString::fromStdString(str.str()));

    // Push the composited images to the UI.
    depthLabel->setPixmap(QPixmap::fromImage(depthImage));
    imageLabel->setPixmap(QPixmap::fromImage(rgbImage));
}
// Logger2 UI tick: updates the memory-usage status line, lazily constructs the
// logger (after ~15 ticks of settling), then renders the newest depth/RGB
// frame, the fps estimate, and (in TCP mode) a remotely received model image.
// Also auto-stops recording when memory fills up or frame drops are detected.
void MainWindow::timerCallback()
{
    int64_t usedMemory = MemoryBuffer::getUsedSystemMemory();
    int64_t totalMemory = MemoryBuffer::getTotalSystemMemory();
    int64_t processMemory = MemoryBuffer::getProcessMemory();

    // 1073741824 = bytes per GiB, 1048576 = bytes per MiB — the per-platform
    // split reflects the units getProcessMemory() returns on each OS.
    float usedGB = (usedMemory / (float)1073741824);
    float totalGB = (totalMemory / (float)1073741824);

#ifdef __APPLE__
    float processGB = (processMemory / (float)1073741824);
#else
    float processGB = (processMemory / (float)1048576);
#endif

    QString memoryInfo = QString::number(usedGB, 'f', 2) + "/" + QString::number(totalGB, 'f', 2) + "GB memory used, " + QString::number(processGB, 'f', 2) + "GB by Logger2";

    memoryStatus->setText(memoryInfo);

    // Deferred logger construction: wait 15 ticks (frameStats used as a simple
    // counter here), then try to bring up OpenNI2.
    if(!logger)
    {
        if(frameStats.size() >= 15)
        {
            logger = new Logger2(width, height, fps, tcp);

            if(!logger->getOpenNI2Interface()->ok())
            {
                // OpenNI2 failed to start: paint the failure banner, show the
                // detailed error, and bail out of the process.
                memset(depthImage.bits(), 0, width * height * 3);
                painter->setPen(Qt::red);
                painter->setFont(QFont("Arial", 30));
                painter->drawText(10, 50, "Attempting to start OpenNI2... failed!");
                depthLabel->setPixmap(QPixmap::fromImage(depthImage));

                QMessageBox msgBox;
                msgBox.setText("Sorry, OpenNI2 is having trouble (it's still in beta). Please try running Logger2 again.");
                msgBox.setDetailedText(QString::fromStdString(logger->getOpenNI2Interface()->error()));
                msgBox.exec();
                exit(-1);
            }
            else
            {
                memset(depthImage.bits(), 0, width * height * 3);
                painter->setPen(Qt::green);
                painter->setFont(QFont("Arial", 30));
                painter->drawText(10, 50, "Starting stream...");
                depthLabel->setPixmap(QPixmap::fromImage(depthImage));
            }

            return;
        }
        else
        {
            frameStats.push_back(0);
            return;
        }
    }

    // Index of the most recently captured depth frame; -1 means none yet.
    int lastDepth = logger->getOpenNI2Interface()->latestDepthIndex.getValue();

    if(lastDepth == -1)
    {
        return;
    }

    int bufferIndex = lastDepth % OpenNI2Interface::numBuffers;

    // Skip if this ring slot was already drawn, or the timestamp is unchanged.
    if(bufferIndex == lastDrawn)
    {
        return;
    }

    if(lastFrameTime == logger->getOpenNI2Interface()->frameBuffers[bufferIndex].second)
    {
        return;
    }

    // first.first = 16-bit depth, first.second = RGB888.
    memcpy(&depthBuffer[0], logger->getOpenNI2Interface()->frameBuffers[bufferIndex].first.first, width * height * 2);

    // In TCP+recording mode the RGB label shows the remote model image
    // instead (filled in below), so skip the local RGB copy.
    if(!(tcp && recording))
    {
        memcpy(rgbImage.bits(), logger->getOpenNI2Interface()->frameBuffers[bufferIndex].first.second, width * height * 3);
    }

    // Normalize depth to 8-bit, 3 channels, straight into the QImage buffer.
    cv::Mat1w depth(height, width, (unsigned short *)&depthBuffer[0]);
    normalize(depth, tmp, 0, 255, cv::NORM_MINMAX, 0);
    cv::Mat3b depthImg(height, width, (cv::Vec<unsigned char, 3> *)depthImage.bits());
    cv::cvtColor(tmp, depthImg, CV_GRAY2RGB);

    // State banner.
    painter->setPen(recording ? Qt::red : Qt::green);
    painter->setFont(QFont("Arial", 30));
    painter->drawText(10, 50, recording ? (tcp ? "Streaming & Recording" : "Recording") : "Viewing");

    // Sliding window (max 15) of inter-frame intervals, in microseconds.
    frameStats.push_back(abs(logger->getOpenNI2Interface()->frameBuffers[bufferIndex].second - lastFrameTime));

    if(frameStats.size() > 15)
    {
        frameStats.erase(frameStats.begin());
    }

    int64_t speedSum = 0;

    for(unsigned int i = 0; i < frameStats.size(); i++)
    {
        speedSum += frameStats[i];
    }

    int64_t avgSpeed = (float)speedSum / (float)frameStats.size();

    float fps = 1.0f / ((float)avgSpeed / 1000000.0f);
    fps = floor(fps * 10.0f);
    fps /= 10.0f;

    std::stringstream str;
    str << (int)ceil(fps) << "fps";

    lastFrameTime = logger->getOpenNI2Interface()->frameBuffers[bufferIndex].second;

    painter->setFont(QFont("Arial", 24));

#ifdef __APPLE__
    int offset = 20;
#else
    int offset = 10;
#endif

    painter->drawText(10, height - offset, QString::fromStdString(str.str()));

    // TCP mode: decode a JPEG/PNG model image received from the remote end
    // and scale it up into the RGB QImage buffer.
    if(tcp)
    {
        cv::Mat3b modelImg(height / 4, width / 4);
        cv::Mat3b modelImgBig(height, width, (cv::Vec<unsigned char, 3> *)rgbImage.bits());

        std::string dataStr = comms->tryRecv();

        if(dataStr.length())
        {
            std::vector<char> data(dataStr.begin(), dataStr.end());
            modelImg = cv::imdecode(cv::Mat(data), 1);

            cv::Size bigSize(width, height);
            cv::resize(modelImg, modelImgBig, bigSize, 0, 0);
        }
    }

    depthLabel->setPixmap(QPixmap::fromImage(depthImage));
    imageLabel->setPixmap(QPixmap::fromImage(rgbImage));

    // Safety stop: RAM buffer full.
    if(logger->getMemoryBuffer().memoryFull.getValue())
    {
        assert(recording);
        recordToggle();

        QMessageBox msgBox;
        msgBox.setText("Recording has been automatically stopped to prevent running out of system memory.");
        msgBox.exec();
    }

    // Safety stop: logging fell below 10fps (only meaningful off-TCP).
    std::pair<bool, int64_t> dropping = logger->dropping.getValue();

    if(!tcp && dropping.first)
    {
        assert(recording);
        recordToggle();

        std::stringstream strs;
        strs << "Recording has been automatically stopped. Logging experienced a jump of " << dropping.second / 1000 << "ms, indicating a drop below 10fps. Please try enabling compression or recording to RAM to prevent this.";

        QMessageBox msgBox;
        msgBox.setText(QString::fromStdString(strs.str()));
        msgBox.exec();
    }
}
// Offline SLAM pipeline over a prerecorded .klg log: DVO visual odometry per
// frame, keyframe insertion whenever motion exceeds a 0.1 threshold, loop
// detection via PlaceRecognition, and iSAM pose-graph optimization. Writes
// per-stage pose/constraint dumps to a set of hard-coded text files.
int main(int argc, char * argv[])
{
    int width = 640;
    int height = 480;

    Resolution::getInstance(width, height);
    // fx, fy, cx, cy for the depth camera.
    Intrinsics::getInstance(528, 528, 320, 240);

    // 3x3 intrinsic matrix K assembled from the singleton values.
    cv::Mat intrinsicMatrix = cv::Mat(3,3,CV_64F);
    intrinsicMatrix.at<double>(0,0) = Intrinsics::getInstance().fx();
    intrinsicMatrix.at<double>(1,1) = Intrinsics::getInstance().fy();
    intrinsicMatrix.at<double>(0,2) = Intrinsics::getInstance().cx();
    intrinsicMatrix.at<double>(1,2) = Intrinsics::getInstance().cy();
    intrinsicMatrix.at<double>(0,1) =0;
    intrinsicMatrix.at<double>(1,0) =0;
    intrinsicMatrix.at<double>(2,0) =0;
    intrinsicMatrix.at<double>(2,1) =0;
    intrinsicMatrix.at<double>(2,2) =1;

    // Scratch space for decompressed 16-bit depth frames (2 bytes/pixel).
    Bytef * decompressionBuffer = new Bytef[Resolution::getInstance().numPixels() * 2];

    IplImage * deCompImage = 0;

    // NOTE(review): hard-coded input path; the -l parsing is commented out.
    std::string logFile="/home/lili/Kinect_Logs/2015-11-05.00.klg";
    // assert(pcl::console::parse_argument(argc, argv, "-l", logFile) > 0 && "Please provide a log file");

    RawLogReader logReader(decompressionBuffer, deCompImage, logFile, true);

    cv::Mat1b tmp(height, width);       // 8-bit normalized depth for display
    cv::Mat3b depthImg(height, width);  // 3-channel depth visualization

    PlaceRecognition placeRecognition(&intrinsicMatrix);

    iSAMInterface iSAM;

    //Keyframes
    KeyframeMap map(true);

    // Pose at the last keyframe, used to measure motion since then.
    Eigen::Vector3f lastPlaceRecognitionTrans = Eigen::Vector3f::Zero();
    Eigen::Matrix3f lastPlaceRecognitionRot = Eigen::Matrix3f::Identity();
    int64_t lastTime = 0;

    // NOTE(review): odom is new'd below and never deleted; deCompImage is
    // never released either (cf. the other main in this file).
    OdometryProvider * odom = 0;

    //int frame_index = 0;
    // uint64_t timestamp;

    /*if(true)
    {
        odom = new FOVISOdometry;
        if(logReader.hasMore())
        {
            logReader.getNext();
            Eigen::Matrix3f Rcurr = Eigen::Matrix3f::Identity();
            Eigen::Vector3f tcurr = Eigen::Vector3f::Zero();
            odom->getIncrementalTransformation(tcurr, Rcurr, logReader.timestamp, (unsigned char *)logReader.deCompImage->imageData, (unsigned short *)&decompressionBuffer[0]);
        }
    }*/
    //else
    // {
        // Bootstrap DVO with the first frame.
        odom = new DVOdometry;
        if(logReader.hasMore())
        {
            logReader.getNext();
            DVOdometry * dvo = static_cast<DVOdometry *>(odom);
            dvo->firstRun((unsigned char *)logReader.deCompImage->imageData, (unsigned short *)&decompressionBuffer[0]);
        }
    //}

    // Per-stage debug dumps (poses, keyframes, loop closures, covariances).
    ofstream fout1("camera_pose_DVOMarch28.txt");
    ofstream fout2("camera_pose_KeyframeMotionMetric0.1March28.txt");
    ofstream fout3("loop_closure_transformationMarch28.txt");
    ofstream fout4("camera_pose_after_optimizationMarch28.txt");
    ofstream fout5("camera_pose_after_optimizationMarch28DVOCov.txt");
    ofstream fout6("camera_pose_after_optimizationMarch28DVOLoopTransCov.txt");

    /*
    pcl::visualization::PCLVisualizer cloudViewer;
    cloudViewer.setBackgroundColor(1, 1, 1);
    cloudViewer.initCameraParameters();
    cloudViewer.addCoordinateSystem(0.1, 0, 0, 0);
    */
    //pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> color(cloud->makeShared());
    //cloudViewer.addPointCloud<pcl::PointXYZRGB>(cloud->makeShared(), color, "Cloud Viewer");

    int loopClosureCount=0;

    // Main processing loop: one log frame per iteration; exits on 'q', end of
    // log, or after the first loop closure (see the break at the bottom).
    while(logReader.hasMore())
    {
        logReader.getNext();

        cv::Mat3b rgbImg(height, width, (cv::Vec<unsigned char, 3> *)logReader.deCompImage->imageData);
        cv::Mat1w depth(height, width, (unsigned short *)&decompressionBuffer[0]);

        cv::normalize(depth, tmp, 0, 255, cv::NORM_MINMAX, 0);
        cv::cvtColor(tmp, depthImg, CV_GRAY2RGB);

        cv::imshow("RGB", rgbImg);
        cv::imshow("Depth", depthImg);

        char key = cv::waitKey(1);

        if(key == 'q')
        {
            break;
        }
        else if(key == ' ')
        {
            // Pause until the next keypress.
            key = cv::waitKey(0);
        }
        if(key == 'q')
        {
            break;
        }

        Eigen::Matrix3f Rcurr = Eigen::Matrix3f::Identity();
        Eigen::Vector3f tcurr = Eigen::Vector3f::Zero();

        // #1: incremental DVO pose estimate for this frame.
        odom->getIncrementalTransformation(tcurr, Rcurr, logReader.timestamp, (unsigned char *)logReader.deCompImage->imageData, (unsigned short *)&decompressionBuffer[0]);

        fout1<<tcurr[0]<<" "<<tcurr[1]<<" "<<tcurr[2]<<" "<<Rcurr(0,0)<<" "<<Rcurr(0,1)<<" "<<Rcurr(0,2)<<" "<<Rcurr(1,0)<<" "<<Rcurr(1,1)<<" "<<Rcurr(1,2)<<" "<<Rcurr(2,0)<<" "<<Rcurr(2,1)<<" "<<Rcurr(2,2)<<endl;

        // Motion since the last keyframe (rotation via Rodrigues angle +
        // translation norm).
        Eigen::Matrix3f Rdelta = Rcurr.inverse() * lastPlaceRecognitionRot;
        Eigen::Vector3f tdelta = tcurr - lastPlaceRecognitionTrans;

        //Eigen::MatrixXd covariance = odom->getCovariance();
        //Eigen::MatrixXd covariance=Eigen::Matrix<double, 6, 6>::Identity()* 1e-3;

        // Keyframe criterion: combined motion metric >= 0.1.
        if((Projection::rodrigues2(Rdelta).norm() + tdelta.norm()) >= 0.1)
        {
            Eigen::MatrixXd covariance = odom->getCovariance();

            // Odometry edge between the previous keyframe pose and this one.
            iSAM.addCameraCameraConstraint(lastTime, logReader.timestamp, lastPlaceRecognitionRot, lastPlaceRecognitionTrans, Rcurr, tcurr); //covariance);

            printCovariance(fout5, covariance);

            lastTime = logReader.timestamp;
            lastPlaceRecognitionRot = Rcurr;
            lastPlaceRecognitionTrans = tcurr;

            cout<<"before add keyframe"<<endl;

            // #2: store the frame as a keyframe in the map.
            map.addKeyframe((unsigned char *)logReader.deCompImage->imageData, (unsigned short *)&decompressionBuffer[0], Rcurr, tcurr, logReader.timestamp);

            fout2<<tcurr[0]<<" "<<tcurr[1]<<" "<<tcurr[2]<<" "<<Rcurr(0,0)<<" "<<Rcurr(0,1)<<" "<<Rcurr(0,2)<<" "<<Rcurr(1,0)<<" "<<Rcurr(1,1)<<" "<<Rcurr(1,2)<<" "<<Rcurr(2,0)<<" "<<Rcurr(2,1)<<" "<<Rcurr(2,2)<<endl;

            /*
            //Save keyframe
            {
                cv::Mat3b rgbImgKeyframe(height, width, (cv::Vec<unsigned char, 3> *)logReader.deCompImage->imageData);
                cv::Mat1w depthImgKeyframe(height, width, (unsigned short *)&decompressionBuffer[0]);
                //save keyframe depth
                char fileName[1024] = {NULL};
                sprintf(fileName, "keyframe_depth_%06d.png", frame_index);
                cv::imwrite(fileName, depthImgKeyframe);
                //save keyframe rgb
                sprintf(fileName, "keyframe_rgb_%06d.png", frame_index);
                cv::imwrite(fileName, rgbImgKeyframe);
                frame_index ++;
            }
            */

            int64_t matchTime;
            Eigen::Matrix4d transformation;
            // Eigen::MatrixXd cov(6,6);
            //isam::Covariance(0.001 * Eigen::Matrix<double, 6, 6>::Identity()))
            Eigen::MatrixXd cov=0.001 * Eigen::Matrix<double, 6, 6>::Identity();

            cout<<"map.addKeyframe is OK"<<endl;

            // #3: loop-closure detection against previous keyframes.
            if(placeRecognition.detectLoop((unsigned char *)logReader.deCompImage->imageData, (unsigned short *)&decompressionBuffer[0], logReader.timestamp, matchTime, transformation, cov, loopClosureCount))
            {
                //printCovariance(fout6, cov);
                cout<<"logReader.timestamp "<<logReader.timestamp<<endl;
                cout<<"matchTime "<<matchTime<<endl;

                /*
                transformation << -0.2913457145219732, 0.228056050293173, -0.9290361201559172, 2.799184934345601,
                                  0.6790194052589797, 0.7333821627861707, -0.03291277242681545, 1.310438143604587,
                                  0.673832562222562, -0.6404225489719699, -0.3685222338703895, 6.988973505496276,
                                  0, 0, 0, 0.999999999999998;
                */
                /*
                transformation << 0.9998996846969838, 0.003948215234314986, -0.01360265192291004, 0.05847011404293689,
                                  -0.004032877285312574, 0.9999726343121815, -0.006202138950136233, 0.04528938486109094,
                                  0.01357779229749574, 0.006256374606648019, 0.9998882444218992, 0.02203456132723125,
                                  0, 0, 0, 1;
                */

                // Loop-closure edge between the two matched keyframe times.
                iSAM.addLoopConstraint(logReader.timestamp, matchTime, transformation);//, cov);

                fout3<<transformation(0,0)<<" "<<transformation(0,1)<<" "<<transformation(0,2)<<" "<<transformation(0,3)<<" "<<transformation(1,0)<<" "<<transformation(1,1)<<" "<<transformation(1,2)<<" "<<transformation(1,3)<<" "<<transformation(2,0)<<" "<<transformation(2,1)<<" "<<transformation(2,2)<<" "<<transformation(2,3)<<" "<<transformation(3,0)<<" "<<transformation(3,1)<<" "<<transformation(3,2)<<" "<<transformation(3,3)<<endl;

                loopClosureCount++;
            }
        }

        // Stop after the first loop closure — the rest of the pipeline
        // optimizes the graph built so far.
        if(loopClosureCount>=1)
        {
            break;
        }
    }

    /*
    for(int i=0; i<loopClosureCount;i++)
    {
        iSAM.addLoopConstraint(placeRecognition.loopClosureConstraints.at(i)->time1, placeRecognition.loopClosureConstraints.at(i)->time2, placeRecognition.loopClosureConstraints.at(i)->constraint);
    }*/

    std::vector<std::pair<uint64_t, Eigen::Matrix4f> > posesBefore;
    iSAM.getCameraPoses(posesBefore);

    cout<<"It works good before optimization"<<endl;

    // #4: pose-graph optimization.
    // NOTE(review): residual is written but never read afterwards.
    double residual =iSAM.optimise();

    cout<<"It works good after optimize and before map.applyPoses"<<endl;

    // map.applyPoses(isam);
    //cout<<"It works good before *cloud=map.getMap and after map.applyPoses(isam)"<<endl;

    /*
    pcl::PointCloud<pcl::PointXYZRGB> *cloud = map.getMap();

    // Write it back to disk under a different name.
    // Another possibility would be "savePCDFileBinary()".
    cout<<"before storing the point cloud map"<<endl;
    pcl::io::savePCDFileASCII ("outputCloudMap03DVODensity005.pcd", *cloud);
    cout << "Saved data points to outputMap.pcd." << std::endl;

    cout<<"copy data into octomap..."<<endl;
    octomap::ColorOcTree tree( 0.05 );
    for (size_t i=0; i<(*cloud).points.size(); i++)
    {
        // Insert the cloud points into the octomap.
        tree.updateNode( octomap::point3d((*cloud).points[i].x, (*cloud).points[i].y, (*cloud).points[i].z), true );
    }
    for (size_t i=0; i<(*cloud).points.size(); i++)
    {
        tree.integrateNodeColor( (*cloud).points[i].x, (*cloud).points[i].y, (*cloud).points[i].z, (*cloud).points[i].r, (*cloud).points[i].g, (*cloud).points[i].b);
    }
    tree.updateInnerOccupancy();
    tree.write("OctomapColorLab03DVODensity005.ot");
    cout<<"please see the done."<<endl;
    */

    //pcl::visualization::PCLVisualizer cloudViewer;
    // cloudViewer.setBackgroundColor(1, 1, 1);
    //cloudViewer.initCameraParameters();
    // cloudViewer.addCoordinateSystem(0.1, 0, 0, 0);
    //pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> color(cloud->makeShared());
    //cloudViewer.addPointCloud<pcl::PointXYZRGB>(cloud->makeShared(), color, "Cloud Viewer");

    // Export optimized poses as translation + quaternion (w x y z) rows.
    std::vector<std::pair<uint64_t, Eigen::Matrix4f> > newPoseGraph;
    iSAM.getCameraPoses(newPoseGraph);

    /*
    for(unsigned int i = 0; i < newPoseGraph.size(); i++)
    {
        // file << std::setprecision(6) << std::fixed << (double)newPoseGraph.at(i).first / 1000000.0 << " ";
        Eigen::Vector3f trans = newPoseGraph.at(i).second.topRightCorner(3, 1);
        Eigen::Matrix3f rot = newPoseGraph.at(i).second.topLeftCorner(3, 3);
        fout4 << trans(0) << " " << trans(1) << " " << trans(2) << " ";
        Eigen::Quaternionf currentCameraRotation(rot);
        //file << currentCameraRotation.x() << " " << currentCameraRotation.y() << " " << currentCameraRotation.z() << " " << currentCameraRotation.w() << "\n";
    }*/

    for(std::vector<std::pair<uint64_t, Eigen::Matrix4f> >::iterator ite=newPoseGraph.begin(); ite!=newPoseGraph.end(); ite++)
    {
        Eigen::Matrix3f Roptimized;
        Roptimized<<ite->second(0,0), ite->second(0,1), ite->second(0,2),
                    ite->second(1,0), ite->second(1,1), ite->second(1,2),
                    ite->second(2,0), ite->second(2,1), ite->second(2,2);

        Eigen::Quaternionf quatOptimized(Roptimized);

        fout4<<ite->second(0,3)<<" "<<ite->second(1,3)<<" "<<ite->second(2,3)<<" "<<quatOptimized.w()<<" "<<quatOptimized.x()<<" "<<quatOptimized.y()<<" "<<quatOptimized.z()<<endl;
    }

    cout<<"The number of optimized poses"<<newPoseGraph.size()<<endl;

    // drawPoses(poses, cloudViewer, 1.0, 0, 0);
    //drawPoses(posesBefore, cloudViewer, 0, 0, 1.0);
    //cloudViewer.spin();

    delete [] decompressionBuffer;

    return 0;
}