Example #1
0
Images FrameData::startReadback( const Frame& frame,
                                 util::ObjectManager& glObjects,
                                 const DrawableConfig& config,
                                 const PixelViewports& regions )
{
    if( _impl->data.buffers == Frame::BUFFER_NONE )
        return Images();

    const Zoom& zoom = frame.getZoom();
    if( !zoom.isValid( ))
    {
        LBWARN << "Invalid zoom factor, skipping frame" << std::endl;
        return Images();
    }

    const eq::PixelViewport& framePVP = getPixelViewport();
    const PixelViewport      absPVP   = framePVP + frame.getOffset();
    if( !absPVP.isValid( ))
        return Images();

    Images images;

    // readback the whole screen when using textures
    if( getType() == eq::Frame::TYPE_TEXTURE )
    {
        Image* image = newImage( getType(), config );
        if( image->startReadback( getBuffers(), absPVP, zoom, glObjects ))
            images.push_back( image );
        image->setOffset( 0, 0 );
        return images;
    }
    // else read back only the required regions

#if 0
    // TODO: issue #85: move automatic ROI detection to eq::Channel
    PixelViewports regions;
    if( _impl->data.buffers & Frame::BUFFER_DEPTH && zoom == Zoom::NONE )
        regions = _impl->roiFinder->findRegions( _impl->data.buffers, absPVP,
                                                 zoom, frame.getAssemblyStage(),
                                                 frame.getFrameID(), glObjects);
    else
        regions.push_back( absPVP );
#endif

    LBASSERT( getType() == eq::Frame::TYPE_MEMORY );
    const eq::Pixel& pixel = getPixel();

    for( uint32_t i = 0; i < regions.size(); ++i )
    {
        PixelViewport pvp = regions[ i ] + frame.getOffset();
        pvp.intersect( absPVP );
        if( !pvp.hasArea( ))
            continue;

        Image* image = newImage( getType(), config );
        if( image->startReadback( getBuffers(), pvp, zoom, glObjects ))
            images.push_back( image );

        pvp -= frame.getOffset();
        pvp.apply( zoom );
        image->setOffset( (pvp.x - framePVP.x) * pixel.w,
                          (pvp.y - framePVP.y) * pixel.h );
    }
    return images;
}
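
A minimal caller sketch for the function above, assuming the Equalizer 1.x API, where each returned Image must later be completed with a finish-readback call once the GL work has drained. Both the finishReadback() signature (newer releases drop the Zoom argument) and glewGetContext() on the object manager are assumptions here; treat this as a sketch of the call sequence, not the definitive API.

// Hypothetical usage (version-dependent signatures, see note above):
const Images images = frameData.startReadback( frame, glObjects, config,
                                               regions );
for( size_t i = 0; i < images.size(); ++i )
    images[i]->finishReadback( frame.getZoom(), glObjects.glewGetContext( ));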
Example #2
0
void W_BUTTON::setImages(Image image, Image imageDown, Image imageHighlight, Image imageDisabled)
{
	dirty = true;
	setImages(Images(image, imageDown, imageHighlight, imageDisabled));
}
Example #3
0
pair< bool , Word > FreeMetabelianGroupAlgorithms::conjugate( int N , Word w1 , Word w2 )
{
    // A. Compute the tails for w1 and w2
    vector< int > T1 = getTail( N , w1 );
    vector< int > T2 = getTail( N , w2 );

    // B. If tails are different then the answer is no
    if( T1!=T2 )
        return pair< bool , Word >( false , Word() );

    //----------------------------------------------------------
    // C. If the tails are trivial
    if( T1==vector< int >( N, 0 ) ) {
        // 1. Compute the edge-maps and check if they are of the same size
        map< vector< int > , int > EM1 = dropTrivialEdgesInMap( getEdgeMap( N , w1 ) );
        map< vector< int > , int > EM2 = dropTrivialEdgesInMap( getEdgeMap( N , w2 ) );
        if( EM1.size( )!=EM2.size( ) )
            return pair< bool , Word >( false , Word() );

        // 2. Check if the elements are trivial
        if( EM1.size( )==0 )
            return pair< bool , Word >( true , Word() );

        // 3. Determine the required shift
        pair< vector< int > , int > C1 = *EM1.begin( );
        pair< vector< int > , int > C2 = *EM2.begin( );
        vector< int > S( N , 0 );
        for( int i=0 ; i<N ; ++i )
            S[i] = C2.first[i]-C1.first[i];
        EM1 = shiftEdgeMap( N , S , EM1 );
        if( EM1==EM2)
            return pair< bool , Word >( true , -getTailWord( N , S ) );
        else
            return pair< bool , Word >( false , Word() );
    }

    //----------------------------------------------------------
    // D. Move the space so that x1-component is positive and others are non-negative
    vector< Word > Images(N);
    for( int j=0 ; j<N ; ++j )
        Images[j] = Word( j+1 );
    if( T1[0]==0 ) {

        // 1. Find the first non-trivial component xi and swap(x1,xi)
        for( int i=1 ; i<N ; ++i ) {
            if( T1[i]!=0 ) {
                Images[0] = Word( i+1 );
                Images[i] = Word( 1 );
                swap( T1[0] , T1[i] );
                break;
            }
        }
    }
    Map M1( N , N , Images );
    w1 = M1.imageOf( w1 );
    w2 = M1.imageOf( w2 );

    //----------------------------------------------------------
    // E. Make negative components positive
    vector< Word > Images2( N );
    for( int i=0 ; i<N ; ++i ) {
        if( T1[i]>=0 )
            Images2[i] = Word(+i+1);
        else
            Images2[i] = Word(-i-1);
        T1[i] = abs(T1[i]);
    }
    Map M2( N , N , Images2 );
    w1 = M2.imageOf( w1 );
    w2 = M2.imageOf( w2 );

    //----------------------------------------------------------
    // F. Compute the strip-forms for elements
    map< vector< int > , int > EM1 = dropTrivialEdgesInMap( modEdgeMap( N , T1 , getEdgeMap( N , w1 ) ) );
    map< vector< int > , int > EM2 = dropTrivialEdgesInMap( modEdgeMap( N , T1 , getEdgeMap( N , w2 ) ) );

    //----------------------------------------------------------
    // G. For all possible x1-transformations:
    int u1 = T1[0];
    for( int i=0 ; i<u1 ; ++i ) {

        // 1. Apply an x1-shift
        EM1 = x1shiftEdgeMap( N , T1 , EM1 );

        // 2. Suggest a tail-conjugator
        pair< bool , vector< int > > r = suggestTailConjugator( N , EM1 , EM2 );
        r.second[0] += i+1;

        // 3. Test a tail conjugator
        if( r.first ) {
            if( testVector( N , T1 , w1 , w2 , r.second ) ) {
                // Find an actual conjugator
                Word C = getTailWord( N , r.second );
                Word D = getLoopConjugator( N , T1 , w1 , -C*w2*C );
                return pair< bool , Word >( true , M1.imageOf( M2.imageOf( D*-C ) ) );
            }
        }
    }

    return pair< bool , Word >( false , Word() );
}
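
A small, hypothetical driver for conjugate(), showing how the returned pair< bool , Word > would be consumed. The Word operations below (construction from a signed generator index, * for concatenation, unary - for inversion) mirror how the function body itself uses Word, but the exact conjugation convention of the returned conjugator is not asserted here.

int N = 2;
Word a( 1 ), b( 2 );                     // generators x1 and x2
Word w1 = a*b*-a*-b;                     // commutator [x1, x2]
Word w2 = -b*w1*b;                       // a conjugate of w1
pair< bool , Word > res = FreeMetabelianGroupAlgorithms::conjugate( N , w1 , w2 );
if( res.first )
    cout << "Conjugator: " << res.second << endl;   // assumes Word is printable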
Example #4
0
int main(int argc, char **argv)
{
	string name;
	vector<Mat> Images(100), TestImages(50);
	vector<Mat> Descriptor(100), TestDescriptor(50), TestPcafeature(50);
	vector<vector<KeyPoint>> Keypoints(100), TestKeypoint(50);
	Mat histogram = Mat::zeros(100, Cluster, CV_32F);
	Mat Testhistogram = Mat::zeros(50, Cluster, CV_32F);
	Mat Keyword = Mat::zeros(Cluster, 20, CV_32F);
	Mat full_Descriptor, Pcafeature, Pcaduplicate, clusteridx, trainlabels(100, 1, CV_32F);
	vector<vector<DMatch>> matches(50);
	Mat predicted(Testhistogram.rows, 1, CV_32F);

	// Read Training Images.
	read_train(Images, name);

	//Calculate SIFT features for the Training Images.
	calculate_SIFT(Images,Keypoints,Descriptor);
	merge_descriptor(full_Descriptor,Descriptor);

	//Compute PCA for all the features across all Images.
	PCA pca;
	perform_PCA(full_Descriptor, Pcafeature, pca);
	
	//Perform K-Means on all the PCA reduced features.
	Pcafeature.convertTo(Pcaduplicate, CV_32F);
	calculate_Kmeans(Pcaduplicate, clusteridx);

	//Calculate the Keywords in the Feature Space.
	make_dictionary(clusteridx, Pcaduplicate, Keyword);

	//Get the Histogram for each Training Image.
	hist(Descriptor, clusteridx, histogram);

	//Read Test Images.
	read_test(TestImages, name);

	//Calculate the SIFT feature for all the test Images.
	calculate_SIFT(TestImages, TestKeypoint, TestDescriptor);

	//Project the SIFT feature of each feature on the lower dimensional PCA plane calculated above. 
	pca_testProject(TestDescriptor, TestPcafeature, pca);

	//Find the Label by searching for keywords closest to current feature.
	get_matches(TestPcafeature,Keyword,matches);

	//Calculate Histogram for each test Image.
	hist_test(TestDescriptor, matches, Testhistogram);
	
	//Perform classification through Knn Classifier. 
	train_labels(trainlabels);
	KNearest knn;
	train_classifier(histogram, trainlabels, knn);
	test_classify(Testhistogram,predicted,knn);

	//Calculate Accuracy for each class.
	calculate_accuracy(predicted);
	
	getchar();
	return 0;
}
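
For reference, a minimal sketch of the bag-of-words counting step that a hist()-style function performs; this is an illustration, not the author's implementation. It assumes clusteridx holds one k-means label (CV_32S) per descriptor row, in the same order the per-image descriptors were merged, and relies on the same OpenCV context as the code above.

// Hypothetical sketch: count, per image, how many of its descriptors were
// assigned to each cluster, giving one histogram row per image.
void hist_sketch(const vector<Mat>& descriptors, const Mat& clusteridx,
                 Mat& histogram)
{
	int row = 0;
	for (size_t i = 0; i < descriptors.size(); ++i)
		for (int d = 0; d < descriptors[i].rows; ++d, ++row)
			histogram.at<float>((int)i, clusteridx.at<int>(row)) += 1.0f;
}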
Example #5
0
void Pipeline::run(const bool save_clouds, const bool show_clouds)
{
	Logger _log("Pipeline");

	/**
	 * Stage 0: Load images from file
	 */
	Images images;
	load_images(folder_path, images);


	/**
	 * Stage 1: Detect features in loaded images
	 */
	CamFrames cam_Frames;
	DescriptorsVec descriptors_vec;
	extract_features(images, cam_Frames, descriptors_vec);

	// Free Image.gray
	for (int i = 0; i < images.size(); i++)
		const_cast<cv::Mat&>(images[i].gray).release();


	/**
	 * Stage 2: Calculate descriptors and find image pairs through matching
	 */
	ImagePairs image_pairs;
	find_matching_pairs(images, cam_Frames, descriptors_vec, image_pairs);


	// Free some memory
	DescriptorsVec().swap(descriptors_vec);
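	// Note: Container().swap(x) forces the freed capacity to be released;
	// since C++11, x.clear() followed by x.shrink_to_fit() requests the
	// same effect.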



	/**
	 * Stage 3: Compute pairwise R and t
	 */
	register_camera(image_pairs, cam_Frames);


	/**
	 * Stage 4: Construct associativity matrix and spanning tree
	 */
	Associativity assocMat(cam_Frames.size());
	for (int p = 0; p < image_pairs.size(); p++)
	{
		ImagePair* pair = &image_pairs[p];
		int i = pair->pair_index.first,
		    j = pair->pair_index.second;

		if (pair->R.empty()) continue;

		assocMat(i, j) = pair;
		assocMat(j, i) = pair;

		assert(assocMat(i, j) == assocMat(j, i));
		assert(assocMat(i, j)->pair_index.first  == i &&
		       assocMat(i, j)->pair_index.second == j);
	}

	Associativity tree;
	const int camera_num = build_spanning_tree(image_pairs, assocMat, tree);


	/**
	 * Stage 5: Compute global Rs and ts
	 */
	CameraPoses gCameraPoses;
	glo_cam_poses(images, gCameraPoses, image_pairs, tree);


	/**
	 * Stage 6: Find and cluster depth points from local camera frame to global camera frame
	 */
	PointClusters pointClusters;
	PointMap pointMap;
	find_clusters(assocMat, gCameraPoses, cam_Frames, pointClusters, pointMap);

	// Free some memory
	ImagePairs().swap(image_pairs);


	/**
	 * Stage 7: Get center of mass from clusters
	 */
	PointCloud pointCloud(pointClusters.size());
	find_CoM(pointClusters, pointCloud);

	// Free pointClusters
	for (int i = 0; i < pointClusters.size(); i++)
		PointCluster().swap(pointClusters[i]);
	PointClusters().swap(pointClusters);


	// Save cloud before BA
	Viewer viewer("Before BA");
	auto cloud  = viewer.createPointCloud(images, gCameraPoses, cameraMatrix);
	int  n      = cloud->points.size();
	auto time   = ptime::second_clock::local_time();
	auto tstamp = ptime::to_iso_string(time);
	auto folder = fs::path(folder_path).filename().string();
	auto fname  = (fmt("%s_%s_%d_noBA.pcd") % folder % tstamp % n).str();

	if (save_clouds)
		viewer.saveCloud(cloud, fname);
	if (show_clouds)
		viewer.showCloudPoints(cloud, false);


	/**
	 * Stage 8: Bundle Adjustment
	 */
	bundle_adjustment(pointMap, cam_Frames, false, gCameraPoses, pointCloud);
	_log.tok();

	/**
	 * Show calculated point cloud
	 */
	Viewer viewer_ba("After BA no Depth");
	cloud = viewer_ba.createPointCloud(images, gCameraPoses, cameraMatrix);
	n     = cloud->points.size();
	fname = (fmt("%s_%s_%d_BA_noD.pcd") % folder % tstamp % n).str();


	if (save_clouds)
		viewer_ba.saveCloud(cloud, fname);
	if (show_clouds)
		viewer_ba.showCloudPoints(cloud, false);

	bundle_adjustment(pointMap, cam_Frames, true, gCameraPoses, pointCloud);

	// Free some memory
	PointMap().swap(pointMap);
	CamFrames().swap(cam_Frames);
	PointCloud().swap(pointCloud);

	/**
	 * Show calculated point cloud
	 */
	Viewer viewer_baD("After BA with Depth");
	cloud = viewer_baD.createPointCloud(images, gCameraPoses, cameraMatrix);
	n     = cloud->points.size();
	fname = (fmt("%s_%s_%d_BA_D.pcd") % folder % tstamp % n).str();

	// Free some memory
	Images().swap(images);
	CameraPoses().swap(gCameraPoses);

	if (save_clouds)
		viewer_baD.saveCloud(cloud, fname);
	if (show_clouds)
		viewer_baD.showCloudPoints(cloud);



}
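
A minimal, hypothetical driver for the pipeline above. How folder_path and cameraMatrix get configured is not shown in this excerpt, so the default construction here is an assumption.

int main()
{
	Pipeline pipeline;   // assumed to be configured (folder_path etc.) elsewhere
	pipeline.run(/*save_clouds=*/true, /*show_clouds=*/false);
	return 0;
}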