void SceneManager::drawDepth(Depth & depth, int x, int y)
{
	// Draw the raw depth texture at 60% of its native size through the
	// depth shader, with all clipping disabled.
	const float scale = 0.6f;
	const float drawW = depth.getWidth() * scale;
	const float drawH = depth.getHeight() * scale;

	depthShader.begin();
	depthShader.setUniform1i("isClipping", 0);
	depth.draw(x, y, drawW, drawH);
	depthShader.end();
}
    double SecondOrderOptimizeFusionMove::evaluateEnergy(const Depth &disp) const {
        // Total MRF energy of a disparity labeling: per-pixel data cost plus a
        // second-order (Laplacian) smoothness cost over horizontal and vertical
        // pixel triples.
        CHECK_EQ(disp.getWidth(), width);
        CHECK_EQ(disp.getHeight(), height);

        // Data term: model cost of each pixel's label, normalised by MRFRatio.
        double energy = 0.0;
        const int nPix = width * height;
        for (int pix = 0; pix < nPix; ++pix) {
            const int label = (int) disp[pix];
            energy += (model->operator()(pix, label) / model->MRFRatio);
        }

        // Smoothness term for one triple of pixel ids, weighted by the cue `w`
        // and the global smoothness weight.
        auto tripleEnergy = [&](int idA, int idB, int idC, double w) {
            const double lam = w * model->weight_smooth;
            return lapE(disp[idA], disp[idB], disp[idC]) * lam;
        };

        // Interior pixels only (triples need both neighbours). Loop order kept
        // x-outer / y-inner to preserve floating-point summation order.
        for (int x = 1; x < width - 1; ++x) {
            for (int y = 1; y < height - 1; ++y) {
                const int center = y * width + x;
                energy += tripleEnergy(center - 1, center, center + 1, model->hCue[center]);
                energy += tripleEnergy(center - width, center, center + width, model->vCue[center]);
            }
        }
        return energy;
    }
Пример #3
0
void vm::scanner::cuda::computeNormalsAndMaskDepth(const Intr& intr, Depth& depth, Normals& normals)
{
  // Size the normal map to match the depth map.
  normals.create(depth.rows(), depth.cols());

  // Reprojector maps pixel coordinates to rays using the camera intrinsics.
  device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy);

  // View the host container through its device-side type and run the kernel.
  device::computeNormalsAndMaskDepth(reproj, depth, (device::Normals&)normals);
}
Пример #4
0
void kf::cuda::computeNormalsAndMaskDepth(const Intr& intr, Depth& depth, Normals& normals)
{
    // Allocate the output normal map with the same dimensions as the depth map.
    normals.create(depth.rows(), depth.cols());

    // Reprojector maps pixel coordinates to rays using the camera intrinsics.
    impl::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy);

    // View the host container through its device-side type and run the kernel.
    impl::computeNormalsAndMaskDepth(reproj, depth, (impl::Normals&)normals);
}
Пример #5
0
void kf::cuda::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
    // Outputs are half the input resolution (next pyramid level down).
    depth_out.create (depth.rows()/2, depth.cols()/2);
    normals_out.create (normals.rows()/2, normals.cols()/2);

    // Run the kernel on device-side views of the normal maps.
    impl::resizeDepthNormals(depth,
                             (impl::Normals&)normals,
                             depth_out,
                             (impl::Normals&)normals_out);
}
Пример #6
0
void vm::scanner::cuda::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
  // Downsample by a factor of two: allocate half-size outputs.
  depth_out.create (depth.rows()/2, depth.cols()/2);
  normals_out.create (normals.rows()/2, normals.cols()/2);

  // Run the kernel on device-side views of the normal maps.
  device::resizeDepthNormals(depth,
                             (device::Normals&)normals,
                             depth_out,
                             (device::Normals&)normals_out);
}
Пример #7
0
void kf::cuda::computePointNormals(const Intr& intr, const Depth& depth, Points& points, Normals& normals)
{
    // Both outputs share the depth map's dimensions.
    points.create(depth.rows(), depth.cols());
    normals.create(depth.rows(), depth.cols());

    // Reprojector maps pixel coordinates to rays using the camera intrinsics.
    impl::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy);

    // Launch the kernel on device-side views of the point and normal maps.
    impl::computePointNormals(reproj, depth,
                              (impl::Points&)points,
                              (impl::Normals&)normals);
}
Пример #8
0
void vm::scanner::cuda::computePointNormals(const Intr& intr, const Depth& depth, Cloud& points, Normals& normals)
{
  // Both outputs share the depth map's dimensions.
  points.create(depth.rows(), depth.cols());
  normals.create(depth.rows(), depth.cols());

  // Reprojector maps pixel coordinates to rays using the camera intrinsics.
  device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy);

  // Launch the kernel on device-side views of the point and normal maps.
  device::computePointNormals(reproj, depth,
                              (device::Points&)points,
                              (device::Normals&)normals);
}
Пример #9
0
void vm::scanner::cuda::renderImage(const Depth& depth, const Normals& normals, const Intr& intr, const Vec3f& light_pose, Image& image)
{
  // Shade the depth/normal maps into an RGB image with a single point light.
  image.create(depth.rows(), depth.cols());

  const device::Depth& d = (const device::Depth&)depth;
  const device::Normals& n = (const device::Normals&)normals;
  // BUG FIX: the fourth Reprojector argument must be the principal point's
  // y-coordinate (intr.cy). The original passed intr.fy here, unlike every
  // other wrapper in this file, which would shift the rendered image.
  device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy);
  device::Vec3f light = device_cast<device::Vec3f>(light_pose);

  device::Image& i = (device::Image&)image;
  device::renderImage(d, n, reproj, light, i);
  // Block until the render kernel has finished on the default stream.
  waitAllDefaultStream();
}
Пример #10
0
bool Depth::operator==(const Depth &other) const {
    // Two Depth objects are equal when they have the same number of levels
    // and every per-level value matches.
    const unsigned int levels = getLevelCount();
    if (levels != other.getLevelCount()) {
        return false;
    }

    // Counts are equal here, so a single bound suffices.
    for (unsigned int level = 0; level < levels; level++) {
        if (mvDepths[level] != other.mvDepths[level]) {
            return false;
        }
    }

    return true;
}
Пример #11
0
bool Depth::operator<(const Depth &other) const {
    // Compare per-level depth values element-wise; the first differing level
    // decides the ordering.
    const unsigned int lhsLevels = getLevelCount();
    const unsigned int rhsLevels = other.getLevelCount();
    const unsigned int common = (lhsLevels < rhsLevels) ? lhsLevels : rhsLevels;

    for (unsigned int level = 0; level < common; level++) {
        if (mvDepths[level] < other.mvDepths[level]) {
            return true;
        }
        if (mvDepths[level] > other.mvDepths[level]) {
            return false;
        }
    }

    // NOTE: when one object is a prefix of the other, the object with MORE
    // levels compares as smaller — this mirrors the original implementation
    // exactly (the opposite of conventional lexicographic ordering).
    return lhsLevels > rhsLevels;
}
Пример #12
0
void
pcl::gpu::RayCaster::generateDepthImage(Depth& depth) const
{
  // Project the raycast vertex map back into the raycaster's camera to
  // produce a depth image.
  //
  // FIX: removed the unused local `device::Intr intr(fx_, fy_, cx_, cy_);`
  // — it was constructed but never passed to anything.
  depth.create(rows, cols);

  // Inverse rotation and translation of the camera pose, needed to transform
  // world-space vertices into the camera frame.
  Matrix<float, 3, 3, RowMajor> R_inv = camera_pose_.linear().inverse();
  Vector3f t = camera_pose_.translation();

  device::generateDepth(device_cast<Mat33>(R_inv), device_cast<const float3>(t), vertex_map_, depth);
}
Пример #13
0
void
pcl::gpu::people::PeopleDetector::process(const Depth& depth, const Image& rgba)
{ 
  // Prepare the per-frame inputs (hue map and point cloud) on the GPU,
  // download them to host buffers, then run the internal process() overload
  // that performs the actual person detection.

  // Out-parameter filled by the download() calls below (column count of the
  // downloaded buffer); its value is not used afterwards.
  int cols;
  allocate_buffers(depth.rows(), depth.cols());

  // Keep a device-side copy of the input depth for the hue computation.
  depth_device1_ = depth;

  // Compute a hue image from the RGBA input, with NaNs where depth is invalid.
  const device::Image& i = (const device::Image&)rgba;
  device::computeHueWithNans(i, depth_device1_, hue_device_);
  //TODO Hope this is temporary and after porting to GPU the download will be deleted  
  hue_device_.download(hue_host_.points, cols);
      
  // Camera intrinsics; fall back to a default principal point if the stored
  // one does not match the image size.
  device::Intr intr(fx_, fy_, cx_, cy_);
  intr.setDefaultPPIfIncorrect(depth.cols(), depth.rows());

  // Back-project the depth image into a 3D point cloud on the GPU, then
  // download it for the (not yet ported) CPU stages.
  device::Cloud& c = (device::Cloud&)cloud_device_;
  device::computeCloud(depth, intr, c);  
  cloud_device_.download(cloud_host_.points, cols);    
    
  // uses cloud device, cloud host, depth device, hue device and other buffers
  process();
}
void SceneManager::update(Depth & depth, bool isPaused)
{
	// Per-frame update: advance and blur the dancer video (when playing),
	// clip + blur the incoming depth image, and feed the resulting pixel
	// buffers to the dancer and user silhouettes.
	if (player.isLoaded() && isPlayingSequence && !isPaused)
	{
		player.update();

		dancerSilhouette.updateVideoProgress(player.getCurrentFrame(), player.getTotalNumFrames());

		// check to see if the video has completed
		if (player.getCurrentFrame() >= player.getTotalNumFrames() - 2)
		{
			cout << "stopping video" << endl;
			player.stop();
			isPlayingSequence = false;
			isUserVisible = true;
			isDancerVisible = false;

			userSilhouette.startIntroFade();
			
			instructions.isVideoRunning = false;
			
			// FIX: the event argument was previously left uninitialized,
			// which is undefined behaviour when listeners read it.
			float ddd = 0.0f;
			ofNotifyEvent(videoCompleteEvent, ddd);	

		}
		
		// blur the video horizontally (ping-pong between the two dancer FBOs)
		currentVidFbo = 1 - currentVidFbo;
		dancerFbo[currentVidFbo].begin();
		blurShaderH.begin();
		blurShaderH.setUniform1f("blurAmount", dancerBlurAmount);
		blurShaderH.setUniform1i("numSamples", dancerBlurSamples);
		blurShaderH.setUniform2f("resolution", dancerFbo[currentVidFbo].getWidth(), dancerFbo[currentVidFbo].getHeight());
		player.draw(0,0);
		blurShaderH.end();
		dancerFbo[currentVidFbo].end();
		
		// blur the video vertically
		currentVidFbo = 1 - currentVidFbo;
		dancerFbo[currentVidFbo].begin();
		blurShaderV.begin();
		blurShaderV.setUniform1f("blurAmount", dancerBlurAmount);
		blurShaderV.setUniform1i("numSamples", dancerBlurSamples);
		blurShaderV.setUniform2f("resolution", dancerFbo[currentVidFbo].getWidth(), dancerFbo[currentVidFbo].getHeight());
		dancerFbo[1 - currentVidFbo].draw(0,0);
		blurShaderV.end();
		dancerFbo[currentVidFbo].end();
		
		// pass the blurred image to the dancer silhouette and update
		dancerFbo[currentVidFbo].readToPixels(dancerPix);
	}


	// Clip the incoming depth image with either the live or the recorded
	// clipping planes. (The two code paths previously duplicated this whole
	// block and differed only in which set of clip values was uploaded.)
	depthFbo.begin();
	depthShader.begin();
	depthShader.setUniform1i("isClipping", (isClipping) ? 1 : 0);
	depthShader.setUniform2f("resolution", srcW, srcH);
	depthShader.setUniform1f("nearClip", isLiveClipping ? liveNearClip : recNearClip);
	depthShader.setUniform1f("farClip", isLiveClipping ? liveFarClip : recFarClip);
	depthShader.setUniform1f("nearFloorClip", isLiveClipping ? liveNearFloorClip : recNearFloorClip);
	depthShader.setUniform1f("farFloorClip", isLiveClipping ? liveFarFloorClip : recFarFloorClip);
	depthShader.setUniform1f("nearCeilingClip", isLiveClipping ? liveNearCeilingClip : recNearCeilingClip);
	depthShader.setUniform1f("farCeilingClip", isLiveClipping ? liveFarCeilingClip : recFarCeilingClip);
	depthShader.setUniform1f("nearLeftClip", isLiveClipping ? liveNearLeftClip : recNearLeftClip);
	depthShader.setUniform1f("farLeftClip", isLiveClipping ? liveFarLeftClip : recFarLeftClip);
	depthShader.setUniform1f("nearRightClip", isLiveClipping ? liveNearRightClip : recNearRightClip);
	depthShader.setUniform1f("farRightClip", isLiveClipping ? liveFarRightClip : recFarRightClip);
	depth.draw(0, 0, srcW, srcH);
	depthShader.end();
	depthFbo.end();


	// blur the user horizontally (ping-pong between the two user FBOs)
	currentUserFbo = 1 - currentUserFbo;
	userFbo[currentUserFbo].begin();
	blurShaderH.begin();
	blurShaderH.setUniform1f("blurAmount", userBlurAmount);
	blurShaderH.setUniform1i("numSamples", userBlurSamples);
	// FIX: resolution was previously read from userFbo[currentVidFbo] — the
	// *video* ping-pong index — instead of the user FBO being rendered to.
	blurShaderH.setUniform2f("resolution", userFbo[currentUserFbo].getWidth(), userFbo[currentUserFbo].getHeight());
	depthFbo.draw(0,0);
	blurShaderH.end();
	userFbo[currentUserFbo].end();
	
	// blur the user vertically
	currentUserFbo = 1 - currentUserFbo;
	userFbo[currentUserFbo].begin();
	// FIX: this pass previously used blurShaderH, so the user image was
	// blurred horizontally twice and never vertically (the dancer path above
	// uses blurShaderV for its second pass).
	blurShaderV.begin();
	blurShaderV.setUniform1f("blurAmount", userBlurAmount);
	blurShaderV.setUniform1i("numSamples", userBlurSamples);
	blurShaderV.setUniform2f("resolution", userFbo[currentUserFbo].getWidth(), userFbo[currentUserFbo].getHeight());
	userFbo[1 - currentUserFbo].draw(0,0);
	blurShaderV.end();
	userFbo[currentUserFbo].end();

	// pass blurred user image to user silhouette and update
	// FIX: read back from the user FBO just rendered (was userFbo[currentVidFbo]).
	userFbo[currentUserFbo].readToPixels(userPix);
	
	if (dancerPix.getWidth() > 0)
	{
		// While the video is starting, seed the dancer silhouette with the
		// user's own silhouette so the transition is seamless.
		if (isStartingVideo)
			dancerSilhouette.update(userPix);
		else
			dancerSilhouette.update(dancerPix);
	}
	if (userPix.getWidth() > 0)
	{
		if (isStartingVideo)
		{
			isStartingVideo = false;
			dancerSilhouette.startAnimation(userPix);
		}
		//if (player.isPlaying)
		userSilhouette.update(userPix);
	}
	else
	{
		// just in case the user becomes unrecogniseable 1 frame after the pose
		// is recognised. Unlikely but possible
		if (isStartingVideo)
		{
			isStartingVideo = false;
			player.play();
			player.setFrame(player.getTotalNumFrames() - 3);
			dancerSilhouette.isIntro = false;
		}
	}
}
Пример #15
0
	void DynamicConfidence::run(const int anchor, Depth &confidence) {
		// Computes a per-pixel geometric confidence map for frame `anchor`:
		// each (downsampled) pixel is tracked into neighbouring frames with
		// optical flow, and its confidence is the k-th percentile of the
		// distances between the tracked point and the epipolar segment spanned
		// by the min/max-depth reprojections. The result is upsampled into
		// `confidence` at full resolution.
		const int framenum = file_io.getTotalNum();
		CHECK_LT(anchor, framenum);
		// Clamp the temporal window [startid, endid] to the valid frame range.
		const int startid = anchor - max_tWindow / 2 >= 0 ? anchor - max_tWindow / 2 : 0;
		const int endid = anchor + max_tWindow / 2 < framenum ? anchor + max_tWindow / 2 : framenum - 1;

		char buffer[1024] = {};

		vector<FlowFrame> flow_forward((size_t) endid - startid + 1);
		vector<FlowFrame> flow_backward((size_t) endid - startid + 1);
		printf("=====================\nFrame: %d\n", anchor);
		cout << "Reading optical flow..." << endl;
		// Forward flow is undefined for the last frame, backward for the first.
		for (auto i = startid; i <= endid; ++i) {
			cout << '.' << flush;
			if (i < file_io.getTotalNum() - 1)
				flow_forward[i - startid].readFlowFile(file_io.getOpticalFlow_forward(i));
			if (i > 0)
				flow_backward[i - startid].readFlowFile(file_io.getOpticalFlow_backward(i));
		}
		cout << endl;
		// Confidence is computed at a downsampled resolution and upsampled at the end.
		const int width = (int)(flow_forward[0].width() / downsample);
		const int height = (int)(flow_forward[0].height() / downsample);
		const int widthFull = flow_forward[0].width();
		const int heightFull = flow_forward[0].height();

		Depth confidence_down(width, height, -1);
		confidence.initialize(widthFull, heightFull, 0);

		const int min_interval = 0;      // minimum temporal distance from the anchor
		const double kth_ratio = 0.8;    // confidence = 80th-percentile epipolar error
		const size_t min_length = 5;     // minimum #frames a pixel must be tracked in
		double min_depth, max_depth;
		cout << "Computing min-max depth" << endl;
		computeMinMaxDepth(anchor, min_depth, max_depth);

		// Index of the anchor frame inside the window, and its camera.
		const int id = anchor - startid;
		const theia::Camera& refCam = reconstruction.View(orderedId[anchor].second)->Camera();

		cout << "Computing confidence..." << endl;
		const int unit = width * height / 10;
		// NOTE(review): hard-coded debug pixel. When both values are >= 0 the
		// loops below collapse to this single pixel, so the full confidence
		// map is NOT computed — looks like leftover debug code, confirm.
		const int testx = 1596 / downsample;
		const int testy = 472 / downsample;

		int startx = 0, endx = width-1, starty = 0, endy = height-1;
		if(testx >=0 && testy>=0){
			printf("Debug mode: %d, %d\n", testx, testy);
			startx = testx;
			endx = testx;
			starty = testy;
			endy = testy;
		}

		// NOTE(review): only referenced by commented-out code below.
		double max_line_length = 100;
		for (auto y = starty; y<= endy; ++y) {
			for (auto x = startx; x <= endx; ++x) {
				// Coarse progress indicator (one dot per ~10% of pixels).
				if((y*width+x) % unit == 0)
					cout << '.' << flush;
				vector<double> epiErr;
				Vector2d locL((double) x, (double) y);
				// Ray through this pixel in the reference camera; the two end
				// points bracket the plausible depth range along the ray.
				Vector3d ray = refCam.PixelToUnitDepthRay(locL * downsample);
				Vector3d minpt = refCam.GetPosition() + ray * min_depth;
				Vector3d maxpt = refCam.GetPosition() + ray * max_depth;
				if(testx >= 0 && testy >= 0){
					printf("min depth: %.3f, max depth: %.3f\n", min_depth, max_depth);
				}

				for (auto i = 0; i < flow_forward.size(); ++i) {
					const theia::Camera& cam2 = reconstruction.View(orderedId[i+startid].second)->Camera();
					if(i == id){
						// Reference frame itself: only dump a debug image.
						Mat img = imread(file_io.getImage(i+startid));
						cv::circle(img, cv::Point(locL[0], locL[1]), 2, cv::Scalar(0,0,255), 2);
						sprintf(buffer, "%s/temp/conf_ref%05d_%05d.jpg", file_io.getDirectory().c_str(), anchor, i+startid);
						imwrite(buffer, img);
						continue;
					}
					if (std::abs(id - i) < min_interval)
						continue;

					// Track the pixel into frame i using forward flow for later
					// frames, backward flow for earlier ones; skip on failure.
					Vector2d locR;
					if (id < i) {
						if (!flow_util::trackPoint(locL * downsample, flow_forward, id, i, locR))
							continue;
					} else {
						if (!flow_util::trackPoint(locL * downsample, flow_backward, id, i, locR))
							continue;
					}

					// Project the depth-range end points into frame i; a correctly
					// tracked static point must lie on this epipolar segment.
					Vector2d spt, ept;
					cam2.ProjectPoint(minpt.homogeneous(), &spt);
					cam2.ProjectPoint(maxpt.homogeneous(), &ept);
//					Vector2d dir = spt - ept;
//					dir.normalize();
//					spt = ept + dir * max_line_length;

					if(x == testx && y == testy){
						// Debug visualisation of the tracked point and segment.
						Mat img = imread(file_io.getImage(i+startid));
						cv::circle(img, cv::Point(locR[0], locR[1]), 2, cv::Scalar(0,0,255), 2);
						printf("---------------------\nFrame %d, spt:(%.2f,%.2f), ept:(%.2f,%.2f)\n", i+startid, spt[0], spt[1], ept[0], ept[1]);
						cv::line(img, cv::Point(spt[0], spt[1]), cv::Point(ept[0], ept[1]), cv::Scalar(255,0,0), 2);
						sprintf(buffer, "%s/temp/conf_ref%05d_%05d.jpg", file_io.getDirectory().c_str(), anchor, i+startid);
						imwrite(buffer, img);

						theia::Matrix3x4d pMatrix;
						cam2.GetProjectionMatrix(&pMatrix);
						cout << "Projection matrix:" << endl << pMatrix << endl;
					}

					epiErr.push_back(geometry_util::distanceToLineSegment<2>(locR, spt, ept));
				}
				if (epiErr.size() < min_length) {
					// NOTE(review): this writes the full-resolution map with
					// DOWNSAMPLED coordinates, while the success path below writes
					// confidence_down — suspected bug; confirm whether this should
					// be confidence_down.setDepthAtInt(x, y, 0.0).
					confidence(x, y) =  0.0;
					continue;
				}
				// k-th percentile of the epipolar errors via partial selection.
				const size_t kth = (size_t) (epiErr.size() * kth_ratio);
				nth_element(epiErr.begin(), epiErr.begin() + kth, epiErr.end());
				confidence_down.setDepthAtInt(x, y, epiErr[kth]);
			}
		}
		cout << endl;

		//upsample to original resolution (interpolated via getDepthAt)
		for(auto x=0; x<widthFull-downsample; ++x){
			for(auto y=0; y<heightFull-downsample; ++y)
				confidence(x,y) = confidence_down.getDepthAt(Vector2d((double)x/downsample, (double)y/downsample));
		}

		confidence.updateStatics();
	}
Пример #16
0
void vm::scanner::cuda::computeDists(const Depth& depth, Dists& dists, const Intr& intr)
{
  dists.create(depth.rows(), depth.cols());
  device::compute_dists(depth, dists, make_float2(intr.fx, intr.fy), make_float2(intr.cx, intr.cy));
}
Пример #17
0
//--------------------------------------------------------------------------------------------------
/// Draw the legend using shader programs
//--------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------
/// Draw the legend using shader programs
///
/// Renders the color bar (one colored quad per vertical pixel), the frame
/// around it, and the tickmark lines, all through the unlit-color shader.
/// Depth testing is disabled for the duration of the draw and reset at the end.
//--------------------------------------------------------------------------------------------------
void OverlayColorLegend::renderLegend(OpenGLContext* oglContext, OverlayColorLegendLayoutInfo* layout, const MatrixState& matrixState)
{
    CVF_CALLSITE_OPENGL(oglContext);

    CVF_TIGHT_ASSERT(layout);
    CVF_TIGHT_ASSERT(layout->size.x() > 0);
    CVF_TIGHT_ASSERT(layout->size.y() > 0);

    // Disable depth testing while the overlay is drawn.
    Depth depth(false);
    depth.applyOpenGL(oglContext);

    // All vertices. Initialized here to set Z to zero once and for all.
    static float vertexArray[] = 
    {
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f
    };

    // Per vector convenience pointers
    float* v0 = &vertexArray[0]; 
    float* v1 = &vertexArray[3]; 
    float* v2 = &vertexArray[6]; 
    float* v3 = &vertexArray[9]; 
    float* v4 = &vertexArray[12];

    // Constant coordinates
    v0[0] = v3[0] = layout->x0;
    v1[0] = v4[0] = layout->x1;

    // Connects
    static const ushort trianglesConnects[] = { 0, 1, 4, 0, 4, 3 };

    ref<ShaderProgram> shaderProgram = oglContext->resourceManager()->getLinkedUnlitColorShaderProgram(oglContext);
    CVF_TIGHT_ASSERT(shaderProgram.notNull());

    if (shaderProgram->useProgram(oglContext))
    {
        shaderProgram->clearUniformApplyTracking();
        shaderProgram->applyFixedUniforms(oglContext, matrixState);
    }

    // Use client-side vertex data (no VBO bound).
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glEnableVertexAttribArray(ShaderProgram::VERTEX);
    glVertexAttribPointer(ShaderProgram::VERTEX, 3, GL_FLOAT, GL_FALSE, 0, vertexArray);

    // Render color bar as one colored quad per pixel

    int legendHeightPixelCount = static_cast<int>(layout->tickPixelPos->get(m_tickValues.size()-1) - layout->tickPixelPos->get(0) + 0.01);
    if (m_scalarMapper.notNull())
    {
        int iPx;
        for (iPx = 0; iPx < legendHeightPixelCount; iPx++)
        {
            // Sample the scalar mapper at the pixel's mid-height.
            const Color3ub& clr = m_scalarMapper->mapToColor(m_scalarMapper->domainValue((iPx+0.5)/legendHeightPixelCount));
            float y0 = static_cast<float>(layout->legendRect.min().y() + iPx);
            float y1 = static_cast<float>(layout->legendRect.min().y() + iPx + 1);

            // Dynamic coordinates for rectangle
            v0[1] = v1[1] = y0;
            v3[1] = v4[1] = y1;

            // Draw filled rectangle elements
            {
                UniformFloat uniformColor("u_color", Color4f(Color3f(clr)));
                shaderProgram->applyUniform(oglContext, uniformColor);

#ifdef CVF_OPENGL_ES
                glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, trianglesConnects);
#else
                glDrawRangeElements(GL_TRIANGLES, 0, 4, 6, GL_UNSIGNED_SHORT, trianglesConnects);
#endif
            }
        }
    }

    // Render frame

    // Dynamic coordinates for  tickmarks-lines
    bool isRenderingFrame = true;
    if (isRenderingFrame)
    {
        // Half-pixel offsets align the 1px frame lines with pixel centers.
        v0[0] = v2[0] = layout->legendRect.min().x()-0.5f;
        v1[0] = v3[0] = layout->legendRect.max().x()-0.5f;
        v0[1] = v1[1] = layout->legendRect.min().y()-0.5f;
        v2[1] = v3[1] = layout->legendRect.max().y()-0.5f;
        static const ushort frameConnects[] = { 0, 1, 1, 3, 3, 2, 2, 0};

        UniformFloat uniformColor("u_color", Color4f(m_color));
        shaderProgram->applyUniform(oglContext, uniformColor);

#ifdef CVF_OPENGL_ES
        glDrawElements(GL_LINES, 8, GL_UNSIGNED_SHORT, frameConnects);
#else
        glDrawRangeElements(GL_LINES, 0, 3, 8, GL_UNSIGNED_SHORT, frameConnects);
#endif
    }

    // Render tickmarks
    bool isRenderingTicks = true;

    if (isRenderingTicks)
    {
        // Constant coordinates
        v0[0] = layout->x0;
        v1[0] = layout->x1 - 0.5f*(layout->tickX - layout->x1) - 0.5f;
        v2[0] = layout->x1;
        v3[0] = layout->tickX - 0.5f*(layout->tickX - layout->x1) - 0.5f;
        v4[0] = layout->tickX;

        // Long ticks (with a label) span v0..v4; short ticks span v2..v3.
        static const ushort tickLinesWithLabel[] = { 0, 4 };
        static const ushort tickLinesWoLabel[] = { 2, 3 };

        size_t ic;
        for (ic = 0; ic < m_tickValues.size(); ic++)
        {
                float y0 = static_cast<float>(layout->legendRect.min().y() + layout->tickPixelPos->get(ic) - 0.5f);

                // Dynamic coordinates for  tickmarks-lines
                v0[1] = v1[1] = v2[1] = v3[1] = v4[1] = y0;

                UniformFloat uniformColor("u_color", Color4f(m_color));
                shaderProgram->applyUniform(oglContext, uniformColor);
                const ushort * linesConnects;

                if ( m_visibleTickLabels[ic])
                {
                    linesConnects = tickLinesWithLabel;
                }
                else
                {
                    linesConnects = tickLinesWoLabel;
                }

#ifdef CVF_OPENGL_ES
                glDrawElements(GL_LINES, 2, GL_UNSIGNED_SHORT, linesConnects);
#else
                glDrawRangeElements(GL_LINES, 0, 4, 2, GL_UNSIGNED_SHORT, linesConnects);
#endif
        }
    }

    glDisableVertexAttribArray(ShaderProgram::VERTEX);

    CVF_TIGHT_ASSERT(shaderProgram.notNull());
    shaderProgram->useNoProgram(oglContext);

    // Reset render states
    Depth resetDepth;
    resetDepth.applyOpenGL(oglContext);

    CVF_CHECK_OGL(oglContext);
}
Пример #18
0
//--------------------------------------------------------------------------------------------------
/// Draw the legend using immediate mode OpenGL
//--------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------
/// Draw the legend using immediate mode OpenGL
///
/// Fixed-function twin of renderLegend(): renders the color bar, frame and
/// tickmarks with glBegin/glEnd. Depth testing and lighting are disabled for
/// the duration of the draw and reset at the end. Not available on OpenGL ES.
//--------------------------------------------------------------------------------------------------
void OverlayColorLegend::renderLegendImmediateMode(OpenGLContext* oglContext, OverlayColorLegendLayoutInfo* layout)
{
#ifdef CVF_OPENGL_ES
    CVF_UNUSED(layout);
    CVF_FAIL_MSG("Not supported on OpenGL ES");
#else
    CVF_TIGHT_ASSERT(layout);
    CVF_TIGHT_ASSERT(layout->size.x() > 0);
    CVF_TIGHT_ASSERT(layout->size.y() > 0);

    // Disable depth testing and fixed-function lighting for the overlay.
    Depth depth(false);
    depth.applyOpenGL(oglContext);

    Lighting_FF lighting(false);
    lighting.applyOpenGL(oglContext);

    // All vertices. Initialized here to set Z to zero once and for all.
    static float vertexArray[] = 
    {
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
    };

    // Per vector convenience pointers
    float* v0 = &vertexArray[0];    
    float* v1 = &vertexArray[3];    
    float* v2 = &vertexArray[6];    
    float* v3 = &vertexArray[9];    
    float* v4 = &vertexArray[12];   

    // Constant coordinates
    v0[0] = v3[0] = layout->x0;
    v1[0] = v4[0] = layout->x1;

    // Render color bar as one colored quad per pixel

    int legendHeightPixelCount = static_cast<int>(layout->tickPixelPos->get(m_tickValues.size() - 1) - layout->tickPixelPos->get(0) + 0.01);
    if (m_scalarMapper.notNull())
    {
        int iPx;
        for (iPx = 0; iPx < legendHeightPixelCount; iPx++)
        {
            // Sample the scalar mapper at the pixel's mid-height.
            const Color3ub& clr = m_scalarMapper->mapToColor(m_scalarMapper->domainValue((iPx+0.5)/legendHeightPixelCount));
            float y0 = static_cast<float>(layout->legendRect.min().y() + iPx);
            float y1 = static_cast<float>(layout->legendRect.min().y() + iPx + 1);

            // Dynamic coordinates for rectangle
            v0[1] = v1[1] = y0;
            v3[1] = v4[1] = y1;

            // Draw filled rectangle elements
            glColor3ubv(clr.ptr());
            glBegin(GL_TRIANGLE_FAN);
            glVertex3fv(v0);
            glVertex3fv(v1);
            glVertex3fv(v4);
            glVertex3fv(v3);
            glEnd();
        }
    }

    // Render frame

    // Dynamic coordinates for  tickmarks-lines
    bool isRenderingFrame = true;
    if (isRenderingFrame)
    {
        // Half-pixel offsets align the 1px frame lines with pixel centers.
        v0[0] = v2[0] = layout->legendRect.min().x()-0.5f;
        v1[0] = v3[0] = layout->legendRect.max().x()-0.5f;
        v0[1] = v1[1] = layout->legendRect.min().y()-0.5f;
        v2[1] = v3[1] = layout->legendRect.max().y()-0.5f;

        glColor3fv(m_color.ptr());
        glBegin(GL_LINES);
        glVertex3fv(v0);
        glVertex3fv(v1);
        glVertex3fv(v1);
        glVertex3fv(v3);
        glVertex3fv(v3);
        glVertex3fv(v2);
        glVertex3fv(v2);
        glVertex3fv(v0);
        glEnd();

    }

    // Render tickmarks
    bool isRenderingTicks = true;

    if (isRenderingTicks)
    {
        // Constant coordinates
        v0[0] = layout->x0;
        v1[0] = layout->x1 - 0.5f*(layout->tickX - layout->x1) - 0.5f;
        v2[0] = layout->x1;
        v3[0] = layout->tickX - 0.5f*(layout->tickX - layout->x1) - 0.5f;
        v4[0] = layout->tickX;

        size_t ic;
        for (ic = 0; ic < m_tickValues.size(); ic++)
        {
            float y0 = static_cast<float>(layout->legendRect.min().y() + layout->tickPixelPos->get(ic) - 0.5f);

            // Dynamic coordinates for  tickmarks-lines
            v0[1] = v1[1] = v2[1] = v3[1] = v4[1] = y0;

            glColor3fv(m_color.ptr());
            glBegin(GL_LINES);
            // Labeled ticks get the long line (v0..v4), unlabeled the short one (v2..v3).
            if ( m_visibleTickLabels[ic])
            {
                glVertex3fv(v0);
                glVertex3fv(v4); 
            }
            else
            {
                glVertex3fv(v2);
                glVertex3fv(v3);
            }
            glEnd();
        }
    }

    // Reset render states
    Lighting_FF resetLighting;
    resetLighting.applyOpenGL(oglContext);
    Depth resetDepth;
    resetDepth.applyOpenGL(oglContext);

    CVF_CHECK_OGL(oglContext);
#endif // CVF_OPENGL_ES
}
Пример #19
0
void vm::scanner::cuda::depthBilateralFilter(const Depth& in, Depth& out, int kernel_size, float sigma_spatial, float sigma_depth)
{
  // Edge-preserving smoothing of the depth map: allocate the output to the
  // input's size, then run the device-side bilateral filter.
  out.create(in.rows(), in.cols());

  device::bilateralFilter(in, out, kernel_size, sigma_spatial, sigma_depth);
}
Пример #20
0
void vm::scanner::cuda::depthBuildPyramid(const Depth& depth, Depth& pyramid, float sigma_depth)
{
  // Produce the next (half-resolution) pyramid level from the given depth map.
  const int halfRows = depth.rows() / 2;
  const int halfCols = depth.cols() / 2;

  pyramid.create(halfRows, halfCols);
  device::depthPyr(depth, pyramid, sigma_depth);
}
Пример #21
0
void kf::cuda::computeDists(const Depth& depth, Dists& dists, const Intr& intr)
{
    dists.create(depth.rows(), depth.cols());
    impl::compute_dists(depth, dists, make_float2(intr.fx, intr.fy), make_float2(intr.cx, intr.cy));
}
Пример #22
0
//#include <videoInput.h>
int Job1()
{
	//CvCapture* captureR = cvCreateCameraCapture( 1);
	//CvCapture* captureL = cvCreateCameraCapture( 2 );
	//CvCapture* captureR = cvCreateCameraCapture(CV_CAP_DSHOW);
    //CvCapture* captureL = cvCreateCameraCapture(CV_CAP_DSHOW + 2);
	//videoInput VI;  
	//int numDevices = VI.listDevices();   

    CvCapture* captureL =cvCaptureFromCAM(1); 
	cvWaitKey(100000);
	CvCapture* captureR =cvCaptureFromCAM(0);
	cvWaitKey(100000);

	//Camera Setting
	int w = 320, h = 240;
	cvSetCaptureProperty ( captureL, CV_CAP_PROP_FRAME_WIDTH,  w );  
	cvSetCaptureProperty ( captureL, CV_CAP_PROP_FRAME_HEIGHT, h );
	cvSetCaptureProperty ( captureR, CV_CAP_PROP_FRAME_WIDTH,  w );  
	cvSetCaptureProperty ( captureR, CV_CAP_PROP_FRAME_HEIGHT, h );

	cvNamedWindow( "Camera_L", CV_WINDOW_AUTOSIZE );
	cvNamedWindow( "Camera_R", CV_WINDOW_AUTOSIZE );
	cvNamedWindow( "Segmentation", CV_WINDOW_AUTOSIZE );
	cvNamedWindow( "Disparity", CV_WINDOW_AUTOSIZE );

	//Image buffer
	IplImage *imgL_O, *imgL;
	IplImage *imgR_O, *imgR;

	//segmentation setting
	FastSegmentation Z;
	Z.setting(0,h ,w, 1, 13, 13, 1,6);
	//Depth Setting
	Depth Y;
	Y.setting(h, w, 1, 13, 13, 1, 1, 0);
	Y.ROI(80,150,50,300);
	Y.SMask=Z.FilterMap;

	//debug
	IplImage *Zmap = cvCreateImage(cvSize(Z.width_R,Z.height_R),8,3);
	IplImage *Dmap = cvCreateImage(cvSize(Y.width_R,Y.height_R),8,1);
	
	while(true)
	{
		if( !(imgL= cvQueryFrame(captureL)) ) 
		{
			//printf("\n #Error(Panel_Display):READ_IMAGE_FAIL L"); 
			//getchar();
			//exit(-1);
			continue;
		}

		if( !(imgR= cvQueryFrame(captureR)) ) 
		{
			//printf("\n #Error(Panel_Display):READ_IMAGE_FAIL R"); 
			//getchar();
			//exit(-1);
			continue;
		}
		unsigned char* srcL=(unsigned char* )imgL->imageData;
		unsigned char* srcR=(unsigned char* )imgR->imageData;

		//Z.setting(srcL,h ,w, 1, 13, 13, 1,6);
		Z.Img=srcL;
		Z.filtering();
		Z.clustering2();

		Y.SMask=Z.FilterMap;
		Y.clusters=Z.clusters;
	    Y.ClusterNum=Z.ClusterNum;
        Y.update(srcL,srcR);

		cvShowImage( "Camera_L", imgL );
		cvShowImage( "Camera_R", imgR );
		Zmap->imageData=(char*)Z.FilterMap;
		cvShowImage( "Segmentation", Zmap );
		Dmap->imageData=(char*)Y.DisparityMap;
		cvShowImage( "Disparity", Dmap );

		int key = cvWaitKey(30);
		if( key == 27 ){
			cvSaveImage("L.bmp",imgL, 0);
			cvSaveImage("R.bmp",imgR, 0);
			break;
		}
	}

	cvReleaseCapture( &captureL );
	cvReleaseCapture( &captureR );
	cvDestroyWindow( "Camera_L" );
	cvDestroyWindow( "Camera_R" );

	return 0;
}
    void SecondOrderOptimizeFusionMove::fusionMove(Depth &p1, const Depth &p2) const {
        // Fuse proposal p2 into the current solution p1 via QPBO: one binary
        // variable per pixel, label 0 = keep p1[i], label 1 = take p2[i].
        // p1 is updated in place with the fused labeling.
        int nPix = width * height;
        // Capacity hints only; extra room is needed because each triple clique
        // may add one auxiliary node and three extra pairwise edges.
        kolmogorov::qpbo::QPBO<EnergyTypeT> qpbo(nPix * 10, nPix * 20);
        const double& MRFRatio = model->MRFRatio;

        // Reduce one second-order smoothness clique over pixels (p, q, r) to
        // pairwise terms, plus one auxiliary node when a residual remains.
        // A..H enumerate the clique energy over the 8 binary label combinations
        // (p,q,r) = (0,0,0) .. (1,1,1); w is the per-clique cue weight.
        auto addTripleToGraph = [&](int p, int q, int r, double w) {
            double vp1 = p1[p], vp2 = p2[p], vq1 = p1[q], vq2 = p2[q], vr1 = p1[r], vr2 = p2[r];
            double lam = w * model->weight_smooth;
//            if (refSeg[p] == refSeg[q] && refSeg[p] == refSeg[r])
//                lam = lamh;
//            else
//                lam = laml;
            EnergyTypeT A = (EnergyTypeT)(lapE(vp1, vq1, vr1) * lam * MRFRatio);
            EnergyTypeT B = (EnergyTypeT)(lapE(vp1, vq1, vr2) * lam * MRFRatio);
            EnergyTypeT C = (EnergyTypeT)(lapE(vp1, vq2, vr1) * lam * MRFRatio);
            EnergyTypeT D = (EnergyTypeT)(lapE(vp1, vq2, vr2) * lam * MRFRatio);
            EnergyTypeT E = (EnergyTypeT)(lapE(vp2, vq1, vr1) * lam * MRFRatio);
            EnergyTypeT F = (EnergyTypeT)(lapE(vp2, vq1, vr2) * lam * MRFRatio);
            EnergyTypeT G = (EnergyTypeT)(lapE(vp2, vq2, vr1) * lam * MRFRatio);
            EnergyTypeT H = (EnergyTypeT)(lapE(vp2, vq2, vr2) * lam * MRFRatio);

            // Residual of the pairwise decomposition; its sign selects which of
            // the two equivalent decompositions below is applicable.
            double pi = (A + D + F + G) - (B + C + E + H);
            if (pi >= 0) {
                qpbo.AddPairwiseTerm(p, q, 0, C - A, 0, G - E);
                qpbo.AddPairwiseTerm(p, r, 0, 0, E - A, F - B);
                qpbo.AddPairwiseTerm(q, r, 0, B - A, 0, D - C);
                if (pi > 0) {
                    // Auxiliary node absorbs the positive residual pi.
                    // (Renamed from 'w' — the original shadowed the weight parameter.)
                    int aux = qpbo.AddNode();
                    qpbo.AddUnaryTerm(aux, A, A - (EnergyTypeT) pi);
                    qpbo.AddPairwiseTerm(p, aux, 0, (EnergyTypeT) pi, 0, 0);
                    qpbo.AddPairwiseTerm(q, aux, 0, (EnergyTypeT) pi, 0, 0);
                    qpbo.AddPairwiseTerm(r, aux, 0, (EnergyTypeT) pi, 0, 0);
                }
            } else {
                qpbo.AddPairwiseTerm(p, q, B - D, 0, F - H, 0);
                qpbo.AddPairwiseTerm(p, r, C - G, D - H, 0, 0);
                qpbo.AddPairwiseTerm(q, r, E - F, 0, G - H, 0);
                // Auxiliary node absorbs the negative residual (-pi > 0 here).
                int aux = qpbo.AddNode();
                qpbo.AddUnaryTerm(aux, H + (EnergyTypeT) pi, H);
                qpbo.AddPairwiseTerm(p, aux, 0, 0, -1 * (EnergyTypeT) pi, 0);
                qpbo.AddPairwiseTerm(q, aux, 0, 0, -1 * (EnergyTypeT) pi, 0);
                qpbo.AddPairwiseTerm(r, aux, 0, 0, -1 * (EnergyTypeT) pi, 0);
            }
        };

        // Unary data terms: cost of keeping p1's label vs. switching to p2's.
        qpbo.AddNode(nPix);
        for (auto i = 0; i < nPix; ++i) {
            qpbo.AddUnaryTerm(i, (EnergyTypeT) model->operator()(i, (int) p1[i]),
                              (EnergyTypeT) model->operator()(i, (int) p2[i]));
        }

        // Horizontal and vertical second-order cliques over interior pixels.
        for (auto y = 1; y < height - 1; ++y) {
            for (auto x = 1; x < width - 1; ++x) {
                addTripleToGraph(y * width + x - 1, y * width + x, y * width + x + 1, model->hCue[y * width + x]);
                addTripleToGraph((y - 1) * width + x, y * width + x, (y + 1) * width + x, model->vCue[y * width + x]);
            }
        }

        // Solve. (An unused timing variable from getTickCount() was removed —
        // it was never reported.)
        qpbo.MergeParallelEdges();
        qpbo.Solve();
        qpbo.ComputeWeakPersistencies();

        //qpbo.Improve();

        // Write the fused labeling back into p1. orip1 snapshots p1 so the
        // reads below are unaffected by the writes. Pixels left unlabeled by
        // weak persistency (label < 0) keep their original value.
        float unlabeled = 0.0;
        float changed = 0.0;
        Depth orip1;
        orip1.initialize(width, height, -1);
        for (auto i = 0; i < width * height; ++i)
            orip1.setDepthAtInd(i, p1[i]);
        for (auto i = 0; i < width * height; ++i) {
            int l = qpbo.GetLabel(i);
            double disp1 = orip1.getDepthAtInd(i);
            double disp2 = p2.getDepthAtInd(i);
            if (l == 0)
                p1.setDepthAtInd(i, disp1);
            else if (l < 0) {
                p1.setDepthAtInd(i, disp1);
                unlabeled += 1.0;
            }
            else {
                p1.setDepthAtInd(i, disp2);
                changed += 1.0;
            }
        }

        printf("Unlabeled pixels: %.2f, ratio: %.2f; label changed: %.2f, ratio: %.2f\n", unlabeled,
               unlabeled / (float)nPix,
               changed, changed / (float)nPix);
    }
// Example #24
// Builds the next (half-resolution) level of a depth pyramid from `depth`
// into `pyramid`, smoothing with `sigma_depth` via the device-side kernel.
void kf::cuda::depthBuildPyramid(const Depth& depth, Depth& pyramid, float sigma_depth)
{
    // Each pyramid level halves both dimensions of its parent.
    const int half_rows = depth.rows () / 2;
    const int half_cols = depth.cols () / 2;
    pyramid.create (half_rows, half_cols);
    impl::depthPyr(depth, pyramid, sigma_depth);
}
    void SecondOrderOptimizeFusionMove::optimize(Depth &result, const int max_iter) const {
        // Minimize the MRF energy by repeated fusion moves against generated
        // proposals. Stops after max_iter iterations, or once the energy
        // decrease averaged over the last `average_over` iterations falls
        // below `termination` (after at least two passes over the proposals).
        // `result` is overwritten with the optimized labeling.
        vector<Depth> proposals;
        genProposal(proposals);
        //proposals.push_back(noisyDisp);

        // Guard: `iter % proposals.size()` below would be a division by zero
        // if no proposals were generated.
        if (proposals.empty()) {
            printf("No proposals generated, nothing to optimize\n");
            return;
        }

        char buffer[1024] = {};

        // Initialize every pixel to label 0. (Unused random-init machinery
        // left over from a commented-out alternative was removed.)
        result.initialize(width, height, -1);
        const int nLabel = model->nLabel;
        for (auto i = 0; i < width * height; ++i) {
            result[i] = 0;
        }
        // snprintf instead of sprintf: directory name comes from outside,
        // so bound the write to the buffer size.
        snprintf(buffer, sizeof(buffer), "%s/temp/init_result.jpg", file_io.getDirectory().c_str());
        result.saveImage(buffer, 256.0 / (double) nLabel);

        list<double> diffE;                 // sliding window of recent energy decreases
        double lastEnergy = evaluateEnergy(result);
        double initialEnergy = lastEnergy;
        int iter = 0;

        const double termination = 0.1;     // convergence threshold on the mean decrease
        float timming = (float) getTickCount();
        while (true) {
            if (iter == max_iter)
                break;
            cout << "======================" << endl;

            // Cycle through the proposals round-robin.
            Depth newProposal;
            newProposal = proposals[iter % (proposals.size())];
            printf("Fusing with proposal %d\n", (int) (iter % proposals.size()));
            fusionMove(result, newProposal);
            double e = evaluateEnergy(result);

            double energyDiff = lastEnergy - e;

            // Keep the decrease window bounded and track its mean.
            if (diffE.size() >= average_over)
                diffE.pop_front();
            diffE.push_back(energyDiff);
            double average_diffe = std::accumulate(diffE.begin(), diffE.end(), 0.0) / (double) diffE.size();

            printf("Done. Final energy: %.5f, energy decrease: %.5f average decrease: %.5f\n", e, energyDiff,
                   average_diffe);
            lastEnergy = e;

            snprintf(buffer, sizeof(buffer), "%s/temp/fusionmove_iter%05d.jpg", file_io.getDirectory().c_str(), iter);
            result.saveImage(buffer, 256.0 / (double) nLabel);

            // Require at least two full passes over the proposal set before
            // declaring convergence. The cast avoids a signed/unsigned
            // comparison between int and size_t.
            if (iter > static_cast<int>(proposals.size()) * 2 && average_diffe < termination) {
                cout << "Converge!" << endl;
                break;
            }

            iter++;
        }
        timming = ((float) getTickCount() - timming) / (float) getTickFrequency();
        printf("All done. Initial energy: %.5f, final energy: %.5f, time usage: %.2fs\n", initialEnergy, lastEnergy,
               timming);
    }