///Connect Signals and Slots only relevant for the graphical interface void gui_connections(Graphical_UI* gui, GraphManager* graph_mgr, OpenNIListener* listener) { QObject::connect(listener, SIGNAL(newVisualImage(QImage)), gui, SLOT(setVisualImage(QImage))); QObject::connect(listener, SIGNAL(newFeatureFlowImage(QImage)), gui, SLOT(setFeatureFlowImage(QImage))); QObject::connect(listener, SIGNAL(newDepthImage(QImage)), gui, SLOT(setDepthImage(QImage))); QObject::connect(graph_mgr, SIGNAL(sendFinished()), gui, SLOT(sendFinished())); QObject::connect(graph_mgr, SIGNAL(iamBusy(int, const char*, int)), gui, SLOT(showBusy(int, const char*, int))); QObject::connect(graph_mgr, SIGNAL(progress(int, const char*, int)), gui, SLOT(setBusy(int, const char*, int))); QObject::connect(graph_mgr, SIGNAL(setGUIInfo(QString)), gui, SLOT(setInfo(QString))); QObject::connect(graph_mgr, SIGNAL(setGUIStatus(QString)), gui, SLOT(setStatus(QString))); QObject::connect(gui, SIGNAL(printEdgeErrors(QString)), graph_mgr, SLOT(printEdgeErrors(QString))); QObject::connect(gui, SIGNAL(pruneEdgesWithErrorAbove(float)), graph_mgr, SLOT(pruneEdgesWithErrorAbove(float))); QObject::connect(gui, SIGNAL(clearClouds()), graph_mgr, SLOT(clearPointClouds())); if (ParameterServer::instance()->get<bool>("use_glwidget") && gui->getGLViewer() != NULL) { GLViewer* glv = gui->getGLViewer(); QObject::connect(graph_mgr, SIGNAL(setPointCloud(pointcloud_type *, QMatrix4x4)), glv, SLOT(addPointCloud(pointcloud_type *, QMatrix4x4)), Qt::BlockingQueuedConnection ); //Needs to block, otherwise the opengl list compilation makes the app unresponsive. 
This effectively throttles the processing rate though typedef const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >* cnst_ft_vectors; QObject::connect(graph_mgr, SIGNAL(setFeatures(const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >*)), glv, SLOT(addFeatures(const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >*))); //, Qt::DirectConnection); QObject::connect(graph_mgr, SIGNAL(setGraphEdges(const QList<QPair<int, int> >*)), glv, SLOT(setEdges(const QList<QPair<int, int> >*))); QObject::connect(graph_mgr, SIGNAL(updateTransforms(QList<QMatrix4x4>*)), glv, SLOT(updateTransforms(QList<QMatrix4x4>*))); QObject::connect(graph_mgr, SIGNAL(deleteLastNode()), glv, SLOT(deleteLastNode())); QObject::connect(graph_mgr, SIGNAL(resetGLViewer()), glv, SLOT(reset())); if(!ParameterServer::instance()->get<bool>("store_pointclouds")) { QObject::connect(glv, SIGNAL(cloudRendered(pointcloud_type const *)), graph_mgr, SLOT(clearPointCloud(pointcloud_type const *))); // } else if(ParameterServer::instance()->get<double>("voxelfilter_size") > 0.0) { QObject::connect(glv, SIGNAL(cloudRendered(pointcloud_type const *)), graph_mgr, SLOT(reducePointCloud(pointcloud_type const *))); // } } QObject::connect(listener, SIGNAL(setGUIInfo(QString)), gui, SLOT(setInfo(QString))); QObject::connect(listener, SIGNAL(setGUIStatus(QString)), gui, SLOT(setStatus(QString))); QObject::connect(graph_mgr, SIGNAL(setGUIInfo2(QString)), gui, SLOT(setInfo2(QString))); }
/// Constructs the desktop server UI and primes both image views with a
/// 1x1 all-zero placeholder pixel so they have valid content before the
/// first real frame arrives.
inline DesktopServerUI::DesktopServerUI() : impl(new Impl(this))
{
  // 1x1 zeroed color pixel (4 bytes — presumably RGBA; confirm against setColorImage).
  const uint8 placeholderColor[] = { 0, 0, 0, 0 };
  setColorImage(placeholderColor, 1, 1);

  // 1x1 zero-depth pixel.
  const uint16 placeholderDepth[] = { 0 };
  setDepthImage(placeholderDepth, 1, 1);
}
void storeNuiDepth(void) { NUI_IMAGE_FRAME depthFrame; if(WAIT_OBJECT_0 != WaitForSingleObject(hNextDepthFrameEvent, 0)) return; HRESULT hr = pNuiSensor->NuiImageStreamGetNextFrame( pDepthStreamHandle, 0, &depthFrame ); if( FAILED( hr ) ){ return; } if(depthFrame.eImageType != NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX) STDERR("Depth type is not match with the depth and players\r\n"); INuiFrameTexture *pTexture = depthFrame.pFrameTexture; NUI_LOCKED_RECT LockedRect; pTexture->LockRect( 0, &LockedRect, NULL, 0 ); if( LockedRect.Pitch != 0 ){ unsigned short *pBuffer = (unsigned short *)LockedRect.pBits; memcpy(depth, LockedRect.pBits, pTexture->BufferLen()); #if defined(USE_FACETRACKER) setDepthImage(LockedRect.pBits, LockedRect.size); #endif NUI_SURFACE_DESC pDesc; pTexture->GetLevelDesc(0, &pDesc); //printf("w: %d, h: %d, byte/pixel: %d\r\n", pDesc.Width, pDesc.Height, LockedRect.Pitch/pDesc.Width); unsigned short *p = (unsigned short *)pBuffer; for(int i=0;i<pTexture->BufferLen()/2;i++){ //*p = (unsigned short)((*p & 0xff00)>>8) | ((*p & 0x00ff)<<8); // for test //*p = (unsigned short)((*p & 0xfff8)>>3); *p = (unsigned short)(NuiDepthPixelToDepth(*pBuffer)); p++; } glBindTexture(GL_TEXTURE_2D, bg_texture[DEPTH_TEXTURE]); glPixelStorei(GL_UNPACK_ALIGNMENT, 2); glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, pDesc.Width, pDesc.Height, 0, GL_LUMINANCE, GL_UNSIGNED_SHORT, pBuffer); pTexture->UnlockRect(0); } else{ STDERR("Buffer length of received texture is bogus\r\n"); } pNuiSensor->NuiImageStreamReleaseFrame( pDepthStreamHandle, &depthFrame ); }
/// Runs the full edge-detection pipeline on a depth image and returns the
/// binarized angle (edge) image.
///
/// @param depthImage input depth image
/// @param filter     true: median-filter depthImage first; false: treat it
///                   as already filtered and use it directly
/// @param average    enable normal averaging in the normal calculation
/// @param gauss      enable gaussian smoothing in the normal calculation
/// @return reference to the internally stored binarized angle image
const Img8u &ObjectEdgeDetectorCPU::calculate(const Img32f &depthImage,
                                              bool filter, bool average,
                                              bool gauss) {
	if (filter) {
		// Raw input: median-filter it before computing normals.
		setDepthImage(depthImage);
		applyMedianFilter();
	} else {
		// Input is used as the filtered image without further smoothing.
		setFilteredDepthImage(depthImage);
	}
	m_data->params.useNormalAveraging = average;
	m_data->params.useGaussSmoothing = gauss;
	applyNormalCalculation();
	applyAngleImageCalculation();
	applyImageBinarization();
	return getBinarizedAngleImage();
}
/// Constructs the main window: builds the UI, wires both camera feeds and
/// the depth-map builder to their frame converters, starts one worker thread
/// per converter, and embeds a PCL visualizer in the VTK widget.
/// @param parent optional parent widget (standard Qt ownership).
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow),
    scaleFactor(1.0),
    currentX(0),
    currentY(0),
    colorViewType(COLOR_RGB),
    currentCamera{-1,-1},            // presumably "no camera selected" sentinels — confirm
    workingDir(QDir::currentPath()),
    dmapSettingsModel(0, depthMapBuilder)
{
    ui->setupUi(this);

    // Status-bar widgets for the current scale and cursor coordinates.
    scaleStatusLabel = new QLabel(this);
    coordsStatusLabel = new QLabel(this);
    ui->statusbar->addWidget(scaleStatusLabel);
    ui->statusbar->addWidget(coordsStatusLabel);

    // Intercept events on the image views; mouse tracking delivers move
    // events even when no button is pressed.
    ui->imageLabel1->installEventFilter(this);
    ui->imageLabel1->setMouseTracking(true);
    ui->imageLabel2->installEventFilter(this);
    ui->imageLabel2->setMouseTracking(true);
    ui->depthMapLabel->installEventFilter(this);

    // Initial window size: 3/5 of the available screen area.
    resize(QGuiApplication::primaryScreen()->availableSize() * 3 / 5);
    updateActions();

    // Initialize camera connections: camera -> frame processor -> converter
    // -> image slot, one chain per camera.
    camera[0].setFrameCallback(std::bind(&FrameProcessor::setFrame, &frameProcessor[0], std::placeholders::_1));
    converter[0].setFrameSource(frameProcessor[0]);
    connect(&converter[0], SIGNAL(imageReady(QImage)), this, SLOT(setImage1(QImage)));
    camera[1].setFrameCallback(std::bind(&FrameProcessor::setFrame, &frameProcessor[1], std::placeholders::_1));
    converter[1].setFrameSource(frameProcessor[1]);
    connect(&converter[1], SIGNAL(imageReady(QImage)), this, SLOT(setImage2(QImage)));

    // The depth-map builder consumes both processed streams; a third
    // converter turns its output into displayable images.
    depthMapBuilder.setLeftSource(frameProcessor[0]);
    depthMapBuilder.setRightSource(frameProcessor[1]);
    converter[2].setFrameSource(depthMapBuilder);
    connect(&converter[2], SIGNAL(imageReady(QImage)), this, SLOT(setDepthImage(QImage)));

    // Run each converter in its own thread (queued signal delivery to the
    // GUI thread via the connections above).
    converterThread[0].start();
    converter[0].moveToThread(&converterThread[0]);
    converterThread[1].start();
    converter[1].moveToThread(&converterThread[1]);
    converterThread[2].start();
    converter[2].moveToThread(&converterThread[2]);

    ui->depthSettingsTableView->setModel(&dmapSettingsModel);
    ui->actionCameraView->setChecked(true);

    // Camera converters run immediately; the depth converter starts paused
    // and the builder is stopped.
    this->converter[0].pause(false);
    this->converter[1].pause(false);
    this->converter[2].pause(true);
    depthMapBuilder.stopProcessing();

    ui->viewStackedWidget->setCurrentIndex(0);

    // PCL visualizer embedded in the VTK widget: created without its own
    // interactor, then attached to the widget's render window/interactor.
    cloud.reset (new PointCloudT);
    viewer.reset (new pcl::visualization::PCLVisualizer ("viewer", false));
    viewer->setBackgroundColor (0.1, 0.1, 0.1);
    viewer->addCoordinateSystem (1.0);
    ui->pc3dVtk->SetRenderWindow (viewer->getRenderWindow ());
    viewer->setupInteractor (ui->pc3dVtk->GetInteractor (), ui->pc3dVtk->GetRenderWindow ());
    ui->pc3dVtk->update ();
    //viewer->addPointCloud (cloud, "cloud");
    //viewer->resetCamera ();
    //ui->pc3dVtk->update ();
}
void DenseMap::processEvoSlamData(std::string inKFFileName, std::string ImageFileName, std::string outKFFileName) { // prepare rof instance ROF rofFilter(width, height); // get image name file list std::vector<std::string> files; getdir(ImageFileName.c_str(), files); // load KF track data std::vector<FrameInfo> KFInfo; loadKFData(inKFFileName, KFInfo); // open in and output file std::ifstream inFileKF(inKFFileName.c_str(), std::ios::binary); std::ofstream outFileKF(outKFFileName.c_str(), std::ios::binary); float* iDepthKF = new float[width*height]; float* iDepthVarKF = new float[width*height]; float* imageRGBKF = new float[width*height * 3]; float* iDepthKFFilled = new float[width * height]; float* iDepthDenoised = new float[width * height]; bool* idepthMask = new bool[width*height]; float* idepthWeight = new float[width * height]; float* pixelGradientWeight = new float[width*height]; FrameInfo Kftmp; int count = 0; while (!inFileKF.eof()) { Kftmp.id = -1; inFileKF.read((char*)&Kftmp, sizeof(FrameInfo)); printf("loading data from kf id: %d ... \n", Kftmp.id); if (Kftmp.id == -1) break; inFileKF.read((char*)iDepthKF, sizeof(float)* width * height); inFileKF.read((char*)iDepthVarKF, sizeof(float)* width * height); inFileKF.read((char*)imageRGBKF, sizeof(float)* width * height * 3); outFileKF.write((char*)&Kftmp, sizeof(FrameInfo)); // load scaled rotmat and transvec Eigen::Matrix3f KF2W_rotMat = KFInfo[count].rotMatUnscaled * KFInfo[count].scale; Eigen::Vector3f KF2W_transVec = KFInfo[count].transVec; // load kf image cv::Mat imageKF = cv::Mat(height, width, CV_8U); imageKF = cv::imread(files[KFInfo[count].id], CV_LOAD_IMAGE_GRAYSCALE); // printf("image loaded ... 
\n"); cv::Mat imageKF_filtered = cv::Mat(height, width, CV_8U); // load image for volume computation cv::Mat image = cv::Mat(height, width, CV_8U); cv::Mat image_filtered = cv::Mat(height, width, CV_8U); int d = 3; double sigmaColor = 2.0; double sigmaSpace = 2.0; if (doBilateralFilter) cv::bilateralFilter(imageKF, imageKF_filtered, d, sigmaColor, sigmaSpace); // printf("bilateral filtered for kf... \n"); if (count == 0) { image = cv::imread(files[KFInfo[count + 1].id], CV_LOAD_IMAGE_GRAYSCALE); if (doBilateralFilter) cv::bilateralFilter(image, image_filtered, d, sigmaColor, sigmaSpace); // printf("bilateral filtered for stereo kf... \n"); FrameInfo Ftmp = KFInfo[count + 1]; Eigen::Matrix3f W2F_rotMat = 1.0f / Ftmp.scale * Ftmp.rotMatUnscaled.transpose(); Eigen::Vector3f W2F_transVec = -W2F_rotMat * Ftmp.transVec; Eigen::Matrix3f KF2F_rotMat = W2F_rotMat * KF2W_rotMat; Eigen::Vector3f KF2F_transVec = W2F_rotMat * KF2W_transVec + W2F_transVec; if (doBilateralFilter) fillHolesCostMin(iDepthKF, iDepthKFFilled, iDepthVarKF, idepthMask, imageKF_filtered.data, image_filtered.data, KF2F_rotMat, KF2F_transVec); else fillHolesCostMin(iDepthKF, iDepthKFFilled, iDepthVarKF, idepthMask, imageKF.data, image.data, KF2F_rotMat, KF2F_transVec); } else { image = cv::imread(files[KFInfo[count - 1].id], CV_LOAD_IMAGE_GRAYSCALE); if (doBilateralFilter) cv::bilateralFilter(image, image_filtered, d, sigmaColor, sigmaSpace); FrameInfo Ftmp = KFInfo[count - 1]; Eigen::Matrix3f W2F_rotMat = 1.0f / Ftmp.scale * Ftmp.rotMatUnscaled.transpose(); Eigen::Vector3f W2F_transVec = -W2F_rotMat * Ftmp.transVec; Eigen::Matrix3f KF2F_rotMat = W2F_rotMat * KF2W_rotMat; Eigen::Vector3f KF2F_transVec = W2F_rotMat * KF2W_transVec + W2F_transVec; if (doBilateralFilter) fillHolesCostMin(iDepthKF, iDepthKFFilled, iDepthVarKF, idepthMask, imageKF_filtered.data, image_filtered.data, KF2F_rotMat, KF2F_transVec); else fillHolesCostMin(iDepthKF, iDepthKFFilled, iDepthVarKF, idepthMask, imageKF.data, image.data, 
KF2F_rotMat, KF2F_transVec); } printf("holes filled for kf id %d... \n", Kftmp.id); count++; // write image for debug cv::Mat imageToWrite = cv::Mat(height, width, CV_8U); std::stringstream ss; ss << count; setDepthImage(imageToWrite.data, iDepthKF); cv::imwrite("/home/evocloud/workspace_huang/EvoRecData/DepthOrg" + ss.str() + ".jpg", imageToWrite); setDepthImage(imageToWrite.data, iDepthKFFilled); cv::imwrite("/home/evocloud/workspace_huang/EvoRecData/DepthFilled" + ss.str() + ".jpg", imageToWrite); getDepthWeight(iDepthVarKF, idepthWeight); getPixelGradientWeight(imageKF.data, pixelGradientWeight); rofFilter.setMask(idepthMask); rofFilter.setWeight(idepthWeight); rofFilter.setGradientWeight(pixelGradientWeight); rofFilter.denoise(iDepthDenoised, iDepthKFFilled); setDepthImage(imageToWrite.data, iDepthDenoised); cv::imwrite("/home/evocloud/workspace_huang/EvoRecData/DepthDenoised" + ss.str() + ".jpg", imageToWrite); // fill idepthVarKF fillHolesVar(iDepthVarKF, 1.0f); outFileKF.write((char*)iDepthDenoised, sizeof(float)* width * height); outFileKF.write((char*)iDepthVarKF, sizeof(float)* width * height); outFileKF.write((char*)imageRGBKF, sizeof(float)* width * height * 3); } delete iDepthKF; delete iDepthVarKF; delete imageRGBKF; delete iDepthKFFilled; delete idepthMask; delete iDepthDenoised; delete idepthWeight; delete pixelGradientWeight; outFileKF.close(); }