Example #1
void
extractLibmvReconstructionData(InputOutputArray K,
                               OutputArray Rs,
                               OutputArray Ts,
                               OutputArray points3d)
{
  getCameras(Rs, Ts);                  // per-view rotations and translations
  getPoints(points3d);                 // reconstructed 3D points
  getIntrinsics().copyTo(K.getMat());  // refined camera intrinsics
}
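For context, here is a minimal sketch of how a caller might consume this extractor. It assumes the method is exposed by a libmv-style reconstruction object; the reconstruction variable and the intrinsics values are illustrative, not part of the original code.

// Hypothetical caller; 'reconstruction' stands in for whatever object
// exposes extractLibmvReconstructionData().
cv::Mat K = (cv::Mat_<double>(3, 3) << 1000,    0, 640,   // initial intrinsics guess
                                          0, 1000, 360,
                                          0,    0,   1);
std::vector<cv::Mat> Rs, Ts;   // filled with per-view rotations and translations
cv::Mat points3d;              // filled with the reconstructed 3D points

reconstruction.extractLibmvReconstructionData(K, Rs, Ts, points3d);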
Example #2
void Scene::render(Renderer* renderer)
{
    auto director = Director::getInstance();
    Camera* defaultCamera = nullptr;
    const auto& transform = getNodeToParentTransform();

    for (const auto& camera : getCameras())
    {
        if (!camera->isVisible())
            continue;
        
        Camera::_visitingCamera = camera;
        if (Camera::_visitingCamera->getCameraFlag() == CameraFlag::DEFAULT)
        {
            defaultCamera = Camera::_visitingCamera;
        }
        
        director->pushMatrix(MATRIX_STACK_TYPE::MATRIX_STACK_PROJECTION);
        director->loadMatrix(MATRIX_STACK_TYPE::MATRIX_STACK_PROJECTION, Camera::_visitingCamera->getViewProjectionMatrix());
        camera->apply();
        //clear background with max depth
        camera->clearBackground();
        //visit the scene
        visit(renderer, transform, 0);
#if CC_USE_NAVMESH
        if (_navMesh && _navMeshDebugCamera == camera)
        {
            _navMesh->debugDraw(renderer);
        }
#endif
        
        renderer->render();
        
        director->popMatrix(MATRIX_STACK_TYPE::MATRIX_STACK_PROJECTION);
    }
    
#if CC_USE_3D_PHYSICS && CC_ENABLE_BULLET_INTEGRATION
    if (_physics3DWorld && _physics3DWorld->isDebugDrawEnabled())
    {
        director->pushMatrix(MATRIX_STACK_TYPE::MATRIX_STACK_PROJECTION);
        director->loadMatrix(MATRIX_STACK_TYPE::MATRIX_STACK_PROJECTION, _physics3dDebugCamera != nullptr ? _physics3dDebugCamera->getViewProjectionMatrix() : defaultCamera->getViewProjectionMatrix());
        _physics3DWorld->debugDraw(renderer);
        renderer->render();
        director->popMatrix(MATRIX_STACK_TYPE::MATRIX_STACK_PROJECTION);
    }
#endif

    Camera::_visitingCamera = nullptr;
    experimental::FrameBuffer::applyDefaultFBO();
}
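As a companion sketch (cocos2d-x 3.x API), this is roughly how a user camera ends up in the list returned by getCameras() that the loop above iterates over. The perspective parameters, the sprite file name and the node names are illustrative only.

// Create a scene and attach an extra camera with a user flag.
auto scene = Scene::create();
auto camera = Camera::createPerspective(60.0f, 1024.0f / 768.0f, 1.0f, 1000.0f);
camera->setCameraFlag(CameraFlag::USER1);
camera->setPosition3D(Vec3(0.0f, 0.0f, 100.0f));
camera->lookAt(Vec3(0.0f, 0.0f, 0.0f));
scene->addChild(camera);

// Only nodes whose camera mask matches the flag are drawn by this camera.
auto someNode = Sprite::create("illustrative.png");
someNode->setCameraMask((unsigned short)CameraFlag::USER1);
scene->addChild(someNode);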
Example #3
AScene3D *ASceneDecoder::createNewSceneObject()
{
  DEBUG_OUT<<"ASceneDecoder::createNewSceneObject() starting...\n";
  AScene3D *s=new AScene3D();
  DEBUG_OUT<<"Meshes...\n";
  s->setMeshes((AMesh3D **)allocArrayBigEnoughForList(getMeshes()));
  s->setNumMeshes(buildArrayFromList((void **)s->getMeshes(),getMeshes()));
  DEBUG_OUT<<"Lights...\n";
  s->setLights((ALight3D **)allocArrayBigEnoughForList(getLights()));
  s->setNumLights(buildArrayFromList((void **)s->getLights(),getLights()));
  DEBUG_OUT<<"Cameras...\n";
  s->setCameras((ACamera3D **)allocArrayBigEnoughForList(getCameras()));
  s->setNumCameras(buildArrayFromList((void **)s->getCameras(),getCameras()));
  //
  //DEBUG_OUT<<"Sky...\n";
  //theSky.surf=readFile->getTheSky()->surf;
  //theSky.fRed=readFile->getTheSky()->fRed;
  //theSky.fBlue=readFile->getTheSky()->fBlue;
  //theSky.fGreen=readFile->getTheSky()->fGreen;
  //DEBUG_OUT<<"Ground...\n";
  //createGroundMesh(readFile->getTheGround());
  DEBUG_OUT<<"Done.\n";
  return s;
}
Example #4
void MainWindow::on_pb_savecloud_clicked()
{
    std::vector<cv::Point3d> save_point = getPointCloud();
    std::vector<cv::Vec3b> save_rgb = getPointCloudRGB();
    std::vector<cv::Matx34d> save_camera = getCameras();
    cv::FileStorage fs("/home/sy/set/data/savingcoud.yml", cv::FileStorage::WRITE);
    for(int i = 0; i < filelist.size(); i++)
    {
        write(fs, "frame" + QString::number(i).toStdString(), Mat(save_camera.at(i)));
    }
    cout << "saving camera finished" << endl;
    cv::write(fs, "points", save_point);
    cout << "saving cloud finished" << endl;
    cv::write(fs, "rgb", save_rgb);
    cout << "saving rgb finished" << endl;

    fs.release();

}
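For completeness, a minimal read-back sketch for the file written above; the path and node names mirror the writer, everything else is illustrative.

cv::FileStorage in("/home/sy/set/data/savingcoud.yml", cv::FileStorage::READ);
cv::Mat frame0;
in["frame0"] >> frame0;            // one 3x4 camera matrix
std::vector<cv::Point3d> pts;
in["points"] >> pts;               // reconstructed 3D points
std::vector<cv::Vec3b> rgb;
in["rgb"] >> rgb;                  // per-point colors
in.release();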
Example #5
void EventDispatcher::dispatchTouchEventToListeners(EventListenerVector* listeners, const std::function<bool(EventListener*)>& onEvent)
{
    bool shouldStopPropagation = false;
    auto fixedPriorityListeners = listeners->getFixedPriorityListeners();
    auto sceneGraphPriorityListeners = listeners->getSceneGraphPriorityListeners();
    
    ssize_t i = 0;
    // priority < 0
    if (fixedPriorityListeners)
    {
        CCASSERT(listeners->getGt0Index() <= static_cast<ssize_t>(fixedPriorityListeners->size()), "Out of range exception!");
        
        if (!fixedPriorityListeners->empty())
        {
            for (; i < listeners->getGt0Index(); ++i)
            {
                auto l = fixedPriorityListeners->at(i);
                if (l->isEnabled() && !l->isPaused() && l->isRegistered() && onEvent(l))
                {
                    shouldStopPropagation = true;
                    break;
                }
            }
        }
    }
    
    auto scene = Director::getInstance()->getRunningScene();
    if (scene && sceneGraphPriorityListeners)
    {
        if (!shouldStopPropagation)
        {
            // priority == 0, scene graph priority
            
            // first, collect all enabled, unpaused and registered listeners
            std::vector<EventListener*> sceneListeners;
            for (auto& l : *sceneGraphPriorityListeners)
            {
                if (l->isEnabled() && !l->isPaused() && l->isRegistered())
                {
                    sceneListeners.push_back(l);
                }
            }
            // second, for each camera, call every listener
            // take a copy of the cameras so the list cannot be modified from inside a listener callback
            // cameras with a greater depth are processed earlier
            auto cameras = scene->getCameras();
            Camera* camera;
            for (int j = int(cameras.size()) - 1; j >= 0; --j)
            {
                camera = cameras[j];
                if (camera->isVisible() == false)
                {
                    continue;
                }
                
                Camera::_visitingCamera = camera;
                auto cameraFlag = (unsigned short)camera->getCameraFlag();
                for (auto& l : sceneListeners)
                {
                    if (nullptr == l->getAssociatedNode() || 0 == (l->getAssociatedNode()->getCameraMask() & cameraFlag))
                    {
                        continue;
                    }
                    if (onEvent(l))
                    {
                        shouldStopPropagation = true;
                        break;
                    }
                }
                if (shouldStopPropagation)
                {
                    break;
                }
            }
            Camera::_visitingCamera = nullptr;
        }
    }
    
    if (fixedPriorityListeners)
    {
        if (!shouldStopPropagation)
        {
            // priority > 0
            ssize_t size = fixedPriorityListeners->size();
            for (; i < size; ++i)
            {
                auto l = fixedPriorityListeners->at(i);
                
                if (l->isEnabled() && !l->isPaused() && l->isRegistered() && onEvent(l))
                {
                    shouldStopPropagation = true;
                    break;
                }
            }
        }
    }
}
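For context, a minimal sketch (cocos2d-x 3.x API) of registering the kind of scene-graph-priority touch listener that this loop dispatches to; spriteNode stands in for any existing Node and is illustrative.

auto listener = EventListenerTouchOneByOne::create();
listener->setSwallowTouches(true);                    // claimed touches are not propagated further
listener->onTouchBegan = [](Touch* touch, Event* event) {
    return true;                                      // returning true claims the touch
};
Director::getInstance()->getEventDispatcher()
        ->addEventListenerWithSceneGraphPriority(listener, spriteNode);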
/* ------------------------------------------------------------------------- */
void MainWindow::on_method2_clicked()
{

    int index_prev;
    int index_now;
    QString ymlFile;

    vector<KeyPoint> imgpts1_tmp;
    vector<KeyPoint> imgpts2_tmp;

    imgpts.resize(filelist.size());

    on_PB_Sift_clicked();

    cout << endl << endl << endl << "Using Method 2:" <<endl;

    vector<DMatch> matches;
    cv::Matx34d P_0;
    cv::Matx34d P_1;

    P_0 = cv::Matx34d(1,0,0,0,
                      0,1,0,0,
                      0,0,1,0);

    cv::Mat_<double> t_prev     = cv::Mat_<double>::zeros(3,1);
    cv::Mat_<double> R_prev     = cv::Mat_<double>::zeros(3,3);
    cv::Mat_<double> R_prev_inv = cv::Mat_<double>::zeros(3,3);
    cv::Mat_<double> t_now      = cv::Mat_<double>::zeros(3,1);
    cv::Mat_<double> R_now      = cv::Mat_<double>::zeros(3,3);
    cv::Mat_<double> t_new      = cv::Mat_<double>::zeros(3,1);
    cv::Mat_<double> R_new      = cv::Mat_<double>::zeros(3,3);

    reconstruct_first_two_view();
    std::cout << "Pmat[0]  = " << endl << Pmats[0]<<endl;
    std::cout << "Pmat[1]  = " << endl << Pmats[1]<<endl;
    for(index_now = 2; index_now<filelist.size(); index_now++)
    {
        cout << endl << endl << endl <<endl;
        cout << "dealing with " << filelist.at(index_now).fileName().toStdString() << endl;
        cout << endl;

        index_prev = index_now - 1;
        descriptors1.release();
        descriptors1 = descriptors2;
        descriptors2.release();

        ymlFile = ymlFileDir;
        ymlFile.append(filelist.at(index_now).fileName()).append(".yml");
        restore_descriptors_from_file(ymlFile.toStdString(),imgpts[index_now],descriptors2);

        matches.clear();
        //matching
        matching_fb_matcher(descriptors1,descriptors2,matches);
        matching_good_matching_filter(matches);

        imggoodpts1.clear();
        imggoodpts2.clear();
        P_0 = cv::Matx34d(1,0,0,0,
                          0,1,0,0,
                          0,0,1,0);
        P_1 = cv::Matx34d(1,0,0,0,
                          0,1,0,0,
                          0,0,1,0);
        outCloud.clear();

        if(FindCameraMatrices(K,Kinv,distcoeff,
                              imgpts[index_prev],imgpts[index_now],
                              imggoodpts1,imggoodpts2,
                              P_0,P_1,
                              matches,
                              outCloud))

        { // if the camera matrices could be found
            R_prev(0,0) = Pmats[index_prev](0,0);
            R_prev(0,1) = Pmats[index_prev](0,1);
            R_prev(0,2) = Pmats[index_prev](0,2);
            R_prev(1,0) = Pmats[index_prev](1,0);
            R_prev(1,1) = Pmats[index_prev](1,1);
            R_prev(1,2) = Pmats[index_prev](1,2);
            R_prev(2,0) = Pmats[index_prev](2,0);
            R_prev(2,1) = Pmats[index_prev](2,1);
            R_prev(2,2) = Pmats[index_prev](2,2);
            t_prev(0) = Pmats[index_prev](0,3);
            t_prev(1) = Pmats[index_prev](1,3);
            t_prev(2) = Pmats[index_prev](2,3);


            R_now(0,0) = P_1(0,0);
            R_now(0,1) = P_1(0,1);
            R_now(0,2) = P_1(0,2);
            R_now(1,0) = P_1(1,0);
            R_now(1,1) = P_1(1,1);
            R_now(1,2) = P_1(1,2);
            R_now(2,0) = P_1(2,0);
            R_now(2,1) = P_1(2,1);
            R_now(2,2) = P_1(2,2);
            t_now(0) = P_1(0,3);
            t_now(1) = P_1(1,3);
            t_now(2) = P_1(2,3);

            invert(R_prev, R_prev_inv);

            t_new = t_prev + R_prev * t_now ;
            R_new = R_now * R_prev;

            // store the estimated pose
            Pmats[index_now] = cv::Matx34d(R_new(0,0), R_new(0,1), R_new(0,2), t_new(0),
                                           R_new(1,0), R_new(1,1), R_new(1,2), t_new(1),
                                           R_new(2,0), R_new(2,1), R_new(2,2), t_new(2));
            cout << "Pmats[index_now]:" << endl << Pmats[index_now]  << endl;

        }
        else
        {
            break;
        }
    }

    cout << endl;
    cout << endl;
    cout << endl;

    //visualization

    imgpts.clear();
    imgpts.resize(filelist.size());
    for( index_now = 1; index_now<filelist.size(); index_now++)
    {
        index_prev = index_now - 1;
        descriptors1.release();
        descriptors2.release();

        ymlFile = ymlFileDir;
        ymlFile.append(filelist.at(index_prev).fileName()).append(".yml");
        cout << ymlFile.toStdString()<< endl;
        restore_descriptors_from_file(ymlFile.toStdString(),imgpts[index_prev],descriptors1);

        ymlFile = ymlFileDir;
        ymlFile.append(filelist.at(index_now).fileName()).append(".yml");
        cout << ymlFile.toStdString()<< endl;
        restore_descriptors_from_file(ymlFile.toStdString(),imgpts[index_now],descriptors2);

        matches.clear();
        //matching
        matching_fb_matcher(descriptors1,descriptors2,matches);
        matching_good_matching_filter(matches);

        imgpts1_tmp.clear();
        imgpts2_tmp.clear();

        GetAlignedPointsFromMatch(imgpts[index_prev], imgpts[index_now], matches, imgpts1_tmp, imgpts2_tmp);

        cout << imgpts1_tmp.size() << endl;
        cout << imgpts2_tmp.size() << endl;

        outCloud.clear();
        std::vector<cv::KeyPoint> correspImg1Pt;
        double mean_proj_err = TriangulatePoints(imgpts1_tmp, imgpts2_tmp, K, Kinv,distcoeff, Pmats[index_prev], Pmats[index_now], outCloud, correspImg1Pt);

        std::vector<CloudPoint> outCloud_tmp;
        outCloud_tmp.clear();
        for (unsigned int i=0; i<outCloud.size(); i++)
        {
            if(outCloud[i].reprojection_error <= 3){
                //cout << "surviving" << endl;
                outCloud[i].imgpt_for_img.resize(filelist.size());
                for(int j = 0; j<filelist.size();j++)
                {
                    outCloud[i].imgpt_for_img[j] = -1;
                }
                outCloud[i].imgpt_for_img[index_now] = matches[i].trainIdx;
                outCloud_tmp.push_back(outCloud[i]);
            }
        }
        outCloud.clear();
        outCloud= outCloud_tmp;

        cout << outCloud_tmp.size() << endl;

        for(unsigned int i=0;i<outCloud.size();i++)
        {
            outCloud_all.push_back(outCloud[i]);
        }

        outCloud_new = outCloud;

    }
    for( int i = 0; i<filelist.size(); i++)
        Pmats[i](1,3) =0;

    GetRGBForPointCloud(outCloud_all,pointCloudRGB);

    ui->widget->update(getPointCloud(),
                       getPointCloudRGB(),
                       getCameras());

    cout << "finished" <<endl <<endl;
}
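The element-by-element block inside the if-branch above chains the previous absolute pose with the newly recovered relative pose. A compact equivalent, written as a standalone helper for clarity (the helper name is illustrative and not part of the project):

// Compose the previous global pose [R_prev|t_prev] with the relative pose
// [R_now|t_now] exactly as the loop above does.
static cv::Matx34d chainPose(const cv::Matx34d& prev, const cv::Matx34d& rel)
{
    cv::Matx33d R_prev(prev(0,0), prev(0,1), prev(0,2),
                       prev(1,0), prev(1,1), prev(1,2),
                       prev(2,0), prev(2,1), prev(2,2));
    cv::Vec3d   t_prev(prev(0,3), prev(1,3), prev(2,3));
    cv::Matx33d R_rel (rel(0,0),  rel(0,1),  rel(0,2),
                       rel(1,0),  rel(1,1),  rel(1,2),
                       rel(2,0),  rel(2,1),  rel(2,2));
    cv::Vec3d   t_rel (rel(0,3),  rel(1,3),  rel(2,3));

    cv::Matx33d R_new = R_rel * R_prev;           // R_new = R_now * R_prev
    cv::Vec3d   t_new = t_prev + R_prev * t_rel;  // t_new = t_prev + R_prev * t_now

    return cv::Matx34d(R_new(0,0), R_new(0,1), R_new(0,2), t_new(0),
                       R_new(1,0), R_new(1,1), R_new(1,2), t_new(1),
                       R_new(2,0), R_new(2,1), R_new(2,2), t_new(2));
}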
/* ------------------------------------------------------------------------- */
void MainWindow::on_PB_Reconstruction_clicked()
{
    on_PB_Sift_clicked();

    cv::Mat_<double> rvec(1,3);
    vector<KeyPoint> imgpts1_tmp;
    vector<KeyPoint> imgpts2_tmp;

    imgpts.resize(filelist.size());

    reconstruct_first_two_view();

    cv::Mat_<double> t = (cv::Mat_<double>(1,3) << Pmats[1](0,3), Pmats[1](1,3), Pmats[1](2,3));
    cv::Mat_<double> R = (cv::Mat_<double>(3,3) << Pmats[1](0,0), Pmats[1](0,1), Pmats[1](0,2),
            Pmats[1](1,0), Pmats[1](1,1), Pmats[1](1,2),
            Pmats[1](2,0), Pmats[1](2,1), Pmats[1](2,2));
    cv::Matx34d P_0;
    cv::Matx34d P_1;

    P_0 = cv::Matx34d(1,0,0,0,
                      0,1,0,0,
                      0,0,1,0);
    int img_prev;
    for( int img_now = 2; img_now<filelist.size(); img_now++)
    {
        cout << endl << endl << endl <<endl;
        cout << "dealing with " << filelist.at(img_now).fileName().toStdString() << endl;
        cout << endl;

        img_prev = img_now - 1;

        descriptors1.release();
        descriptors1 = descriptors2;

        QString ymlFile;
        ymlFile = ymlFileDir;
        ymlFile.append(filelist.at(img_now).fileName()).append(".yml");
        restore_descriptors_from_file(ymlFile.toStdString(),imgpts[img_now],descriptors2);

        matches_prev.clear();
        matches_prev = matches_new;
        matches_new.clear();
        //matching
        matching_fb_matcher(descriptors1,descriptors2,matches_new);
        matching_good_matching_filter(matches_new);

        outCloud_prev = outCloud_new;
        outCloud_new.clear();

        vector<cv::Point3f> tmp3d; vector<cv::Point2f> tmp2d;

        for (unsigned int i=0; i < matches_new.size(); i++) {
            int idx_in_prev_img = matches_new[i].queryIdx;
            for (unsigned int pcldp=0; pcldp<outCloud_prev.size(); pcldp++)
            {
                if(idx_in_prev_img == outCloud_prev[pcldp].imgpt_for_img[img_prev])
                {
                    tmp3d.push_back(outCloud_prev[pcldp].pt);
                    tmp2d.push_back(imgpts[img_now][matches_new[i].trainIdx].pt);
                    break;
                }
            }
        }

        bool pose_estimated = FindPoseEstimation(rvec,t,R,tmp3d,tmp2d);
        if(!pose_estimated)
        {
            cout << "pose estimation failed" << endl;
        }
        // store the estimated pose
        Pmats[img_now] = cv::Matx34d(R(0,0), R(0,1), R(0,2), t(0),
                                     R(1,0), R(1,1), R(1,2), t(1),
                                     R(2,0), R(2,1), R(2,2), t(2));
        cout << "Pmats:" << endl << Pmats[img_now] << endl;
        imgpts1_tmp.clear();
        imgpts2_tmp.clear();

        GetAlignedPointsFromMatch(imgpts[img_prev], imgpts[img_now], matches_new, imgpts1_tmp, imgpts2_tmp);

        std::vector<cv::KeyPoint> correspImg1Pt;
        double mean_proj_err = TriangulatePoints(imgpts1_tmp, imgpts2_tmp, K, Kinv,distcoeff, Pmats[img_prev], Pmats[img_now], outCloud, correspImg1Pt);
        std::vector<CloudPoint> outCloud_tmp;
        outCloud_tmp.clear();
        for (unsigned int i=0; i<outCloud.size(); i++)
        {
            if(outCloud[i].reprojection_error <= 5){
                //cout << "surviving" << endl;
                outCloud[i].imgpt_for_img.resize(filelist.size());
                for(int j = 0; j<filelist.size();j++)
                {
                    outCloud[i].imgpt_for_img[j] = -1;
                }
                outCloud[i].imgpt_for_img[img_now] = matches_new[i].trainIdx;
                outCloud_tmp.push_back(outCloud[i]);
            }
        }
        outCloud.clear();
        outCloud= outCloud_tmp;

        for(unsigned int i=0;i<outCloud.size();i++)
        {
            outCloud_all.push_back(outCloud[i]);
        }

        outCloud_new = outCloud;
    }

    GetRGBForPointCloud(outCloud_all,pointCloudRGB);

    ui->widget->update(getPointCloud(),
                       getPointCloudRGB(),
                       getCameras());

}
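FindPoseEstimation() is project-specific and not shown here. A common way to implement this 2D-3D pose estimation step with OpenCV 3.x is solvePnPRansac followed by Rodrigues; the following is a minimal sketch under that assumption, reusing the variable names from the loop above, and is not the project's actual implementation.

cv::Mat rvec, tvec, inliers;
bool ok = cv::solvePnPRansac(tmp3d, tmp2d, K, distcoeff, rvec, tvec,
                             false,   // useExtrinsicGuess
                             100,     // RANSAC iterations
                             8.0,     // reprojection error threshold (pixels)
                             0.99,    // confidence
                             inliers);
if (!ok)
    std::cout << "solvePnPRansac failed" << std::endl;

cv::Mat R;
cv::Rodrigues(rvec, R);              // rotation vector -> 3x3 rotation matrix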