Code Example #1
std::vector<TestIntegrator> GetIntegrators() {
    std::vector<TestIntegrator> integrators;

    Point2i resolution(10, 10);
    AnimatedTransform identity(new Transform, 0, new Transform, 1);

    for (auto scene : GetScenes()) {
        // Path tracing integrators
        for (auto sampler : GetSamplers(Bounds2i(Point2i(0, 0), resolution))) {
            std::unique_ptr<Filter> filter(new BoxFilter(Vector2f(0.5, 0.5)));
            Film *film =
                new Film(resolution, Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                         std::move(filter), 1., "test.exr", 1.);
            std::shared_ptr<Camera> camera =
                std::make_shared<PerspectiveCamera>(
                    identity, Bounds2f(Point2f(-1, -1), Point2f(1, 1)), 0., 1.,
                    0., 10., 45, film, nullptr);

            Integrator *integrator =
                new PathIntegrator(8, camera, sampler.first);
            integrators.push_back({integrator, film,
                                   "Path, depth 8, Perspective, " +
                                       sampler.second + ", " +
                                       scene.description,
                                   scene});
        }

        for (auto sampler : GetSamplers(Bounds2i(Point2i(0, 0), resolution))) {
            std::unique_ptr<Filter> filter(new BoxFilter(Vector2f(0.5, 0.5)));
            Film *film =
                new Film(resolution, Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                         std::move(filter), 1., "test.exr", 1.);
            std::shared_ptr<Camera> camera =
                std::make_shared<OrthographicCamera>(
                    identity, Bounds2f(Point2f(-.1, -.1), Point2f(.1, .1)), 0.,
                    1., 0., 10., film, nullptr);

            Integrator *integrator =
                new PathIntegrator(8, camera, sampler.first);
            integrators.push_back({integrator, film,
                                   "Path, depth 8, Ortho, " + sampler.second +
                                       ", " + scene.description,
                                   scene});
        }

        // Volume path tracing integrators
        for (auto sampler : GetSamplers(Bounds2i(Point2i(0, 0), resolution))) {
            std::unique_ptr<Filter> filter(new BoxFilter(Vector2f(0.5, 0.5)));
            Film *film =
                new Film(resolution, Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                         std::move(filter), 1., "test.exr", 1.);
            std::shared_ptr<Camera> camera =
                std::make_shared<PerspectiveCamera>(
                    identity, Bounds2f(Point2f(-1, -1), Point2f(1, 1)), 0., 1.,
                    0., 10., 45, film, nullptr);

            Integrator *integrator =
                new VolPathIntegrator(8, camera, sampler.first);
            integrators.push_back({integrator, film,
                                   "VolPath, depth 8, Perspective, " +
                                       sampler.second + ", " +
                                       scene.description,
                                   scene});
        }
        for (auto sampler : GetSamplers(Bounds2i(Point2i(0, 0), resolution))) {
            std::unique_ptr<Filter> filter(new BoxFilter(Vector2f(0.5, 0.5)));
            Film *film =
                new Film(resolution, Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                         std::move(filter), 1., "test.exr", 1.);
            std::shared_ptr<Camera> camera =
                std::make_shared<OrthographicCamera>(
                    identity, Bounds2f(Point2f(-.1, -.1), Point2f(.1, .1)), 0.,
                    1., 0., 10., film, nullptr);

            Integrator *integrator =
                new VolPathIntegrator(8, camera, sampler.first);
            integrators.push_back({integrator, film,
                                   "VolPath, depth 8, Ortho, " +
                                       sampler.second + ", " +
                                       scene.description,
                                   scene});
        }

        // BDPT
        for (auto sampler : GetSamplers(Bounds2i(Point2i(0, 0), resolution))) {
            std::unique_ptr<Filter> filter(new BoxFilter(Vector2f(0.5, 0.5)));
            Film *film =
                new Film(resolution, Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                         std::move(filter), 1., "test.exr", 1.);
            std::shared_ptr<Camera> camera =
                std::make_shared<PerspectiveCamera>(
                    identity, Bounds2f(Point2f(-1, -1), Point2f(1, 1)), 0., 1.,
                    0., 10., 45, film, nullptr);

            Integrator *integrator =
                new BDPTIntegrator(sampler.first, camera, 6, false, false);
            integrators.push_back({integrator, film,
                                   "BDPT, depth 8, Perspective, " +
                                       sampler.second + ", " +
                                       scene.description,
                                   scene});
        }
#if 0
    // Ortho camera not currently supported with BDPT.
    for (auto sampler : GetSamplers(Bounds2i(Point2i(0,0), resolution))) {
      std::unique_ptr<Filter> filter(new BoxFilter(Vector2f(0.5, 0.5)));
      Film *film = new Film(resolution, Bounds2f(Point2f(0,0), Point2f(1,1)),
                            std::move(filter), 1., "test.exr", 1.);
      std::shared_ptr<Camera> camera = std::make_shared<OrthographicCamera>(
          identity, Bounds2f(Point2f(-.1,-.1), Point2f(.1,.1)), 0., 1.,
          0., 10., film, nullptr);

      Integrator *integrator = new BDPTIntegrator(sampler.first, camera, 8,
                                            false, false);
      integrators.push_back({integrator, film,
              "BDPT, depth 8, Ortho, " + sampler.second + ", " +
              scene.description, scene});
    }
#endif

        // MLT
        {
            std::unique_ptr<Filter> filter(new BoxFilter(Vector2f(0.5, 0.5)));
            Film *film =
                new Film(resolution, Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                         std::move(filter), 1., "test.exr", 1.);
            std::shared_ptr<Camera> camera =
                std::make_shared<PerspectiveCamera>(
                    identity, Bounds2f(Point2f(-1, -1), Point2f(1, 1)), 0., 1.,
                    0., 10., 45, film, nullptr);

            Integrator *integrator = new MLTIntegrator(
                camera, 8 /* depth */, 100000 /* n bootstrap */,
                1000 /* nchains */, 1024 /* mutations per pixel */,
                0.01 /* sigma */, 0.3 /* large step prob */);
            integrators.push_back(
                {integrator, film,
                 "MLT, depth 8, Perspective, " + scene.description, scene});
        }
    }

    return integrators;
}
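
For orientation, a minimal sketch of how a test harness might drive this list; the TestIntegrator/TestScene member names are assumptions read off the brace initializers above, not a confirmed fixture API:

    // Hypothetical driver; the members (integrator, film, description,
    // scene) are inferred from the aggregate initialization above.
    for (const TestIntegrator &test : GetIntegrators()) {
        test.integrator->Render(*test.scene.scene);  // assumes TestScene::scene
        // ...compare the film contents against the scene's expected radiance
        // and report any failure under test.description...
    }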
Code Example #2
File: sphere.cpp  Project: KojiNakamaru/pbrt-v3
bool Sphere::Intersect(const Ray &r, Float *tHit,
                       SurfaceInteraction *isect) const {
    Float phi;
    Point3f pHit;
    // Transform _Ray_ to object space
    Vector3f oErr, dErr;
    Ray ray = (*WorldToObject)(r, &oErr, &dErr);

    // Compute quadratic sphere coefficients

    // Initialize _EFloat_ ray coordinate values
    EFloat ox(ray.o.x, oErr.x), oy(ray.o.y, oErr.y), oz(ray.o.z, oErr.z);
    EFloat dx(ray.d.x, dErr.x), dy(ray.d.y, dErr.y), dz(ray.d.z, dErr.z);
    EFloat a = dx * dx + dy * dy + dz * dz;
    EFloat b = 2 * (dx * ox + dy * oy + dz * oz);
    EFloat c = ox * ox + oy * oy + oz * oz - EFloat(radius) * EFloat(radius);

    // Solve quadratic equation for _t_ values
    EFloat t0, t1;
    if (!Quadratic(a, b, c, &t0, &t1)) return false;

    // Check quadric shape _t0_ and _t1_ for nearest intersection
    if (t0.UpperBound() > ray.tMax || t1.LowerBound() <= 0) return false;
    EFloat tShapeHit = t0;
    if (t0.LowerBound() <= 0) {
        tShapeHit = t1;
        if (tShapeHit.UpperBound() > ray.tMax) return false;
    }

    // Compute sphere hit position and $\phi$
    pHit = ray((Float)tShapeHit);

    // Refine sphere intersection point
    pHit *= radius / Distance(pHit, Point3f(0, 0, 0));
    if (pHit.x == 0 && pHit.y == 0) pHit.x = 1e-5f * radius;
    phi = std::atan2(pHit.y, pHit.x);
    if (phi < 0) phi += 2 * Pi;

    // Test sphere intersection against clipping parameters
    if ((zMin > -radius && pHit.z < zMin) || (zMax < radius && pHit.z > zMax) ||
        phi > phiMax) {
        if (tShapeHit == t1) return false;
        if (t1.UpperBound() > ray.tMax) return false;
        tShapeHit = t1;
        // Compute sphere hit position and $\phi$
        pHit = ray((Float)tShapeHit);

        // Refine sphere intersection point
        pHit *= radius / Distance(pHit, Point3f(0, 0, 0));
        if (pHit.x == 0 && pHit.y == 0) pHit.x = 1e-5f * radius;
        phi = std::atan2(pHit.y, pHit.x);
        if (phi < 0) phi += 2 * Pi;
        if ((zMin > -radius && pHit.z < zMin) ||
            (zMax < radius && pHit.z > zMax) || phi > phiMax)
            return false;
    }

    // Find parametric representation of sphere hit
    Float u = phi / phiMax;
    Float theta = std::acos(Clamp(pHit.z / radius, -1, 1));
    Float v = (theta - thetaMin) / (thetaMax - thetaMin);

    // Compute sphere $\dpdu$ and $\dpdv$
    Float zRadius = std::sqrt(pHit.x * pHit.x + pHit.y * pHit.y);
    Float invZRadius = 1 / zRadius;
    Float cosPhi = pHit.x * invZRadius;
    Float sinPhi = pHit.y * invZRadius;
    Vector3f dpdu(-phiMax * pHit.y, phiMax * pHit.x, 0);
    Vector3f dpdv =
        (thetaMax - thetaMin) *
        Vector3f(pHit.z * cosPhi, pHit.z * sinPhi, -radius * std::sin(theta));

    // Compute sphere $\dndu$ and $\dndv$
    Vector3f d2Pduu = -phiMax * phiMax * Vector3f(pHit.x, pHit.y, 0);
    Vector3f d2Pduv =
        (thetaMax - thetaMin) * pHit.z * phiMax * Vector3f(-sinPhi, cosPhi, 0.);
    Vector3f d2Pdvv = -(thetaMax - thetaMin) * (thetaMax - thetaMin) *
                      Vector3f(pHit.x, pHit.y, pHit.z);

    // Compute coefficients for fundamental forms
    Float E = Dot(dpdu, dpdu);
    Float F = Dot(dpdu, dpdv);
    Float G = Dot(dpdv, dpdv);
    Vector3f N = Normalize(Cross(dpdu, dpdv));
    Float e = Dot(N, d2Pduu);
    Float f = Dot(N, d2Pduv);
    Float g = Dot(N, d2Pdvv);

    // Compute $\dndu$ and $\dndv$ from fundamental form coefficients
    Float invEGF2 = 1 / (E * G - F * F);
    Normal3f dndu = Normal3f((f * F - e * G) * invEGF2 * dpdu +
                             (e * F - f * E) * invEGF2 * dpdv);
    Normal3f dndv = Normal3f((g * F - f * G) * invEGF2 * dpdu +
                             (f * F - g * E) * invEGF2 * dpdv);

    // Compute error bounds for sphere intersection
    Vector3f pError = gamma(5) * Abs((Vector3f)pHit);

    // Initialize _SurfaceInteraction_ from parametric information
    *isect = (*ObjectToWorld)(SurfaceInteraction(pHit, pError, Point2f(u, v),
                                                 -ray.d, dpdu, dpdv, dndu, dndv,
                                                 ray.time, this));

    // Update _tHit_ for quadric intersection
    *tHit = (Float)tShapeHit;
    return true;
}
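
For reference, the quadratic solved above falls out of substituting the parametric ray $o + t\,d$ into the implicit sphere equation $x^2 + y^2 + z^2 - r^2 = 0$; the EFloat operands simply carry interval error bounds through the same algebra:

$$\underbrace{(d_x^2 + d_y^2 + d_z^2)}_{a}\,t^2 + \underbrace{2(d_x o_x + d_y o_y + d_z o_z)}_{b}\,t + \underbrace{(o_x^2 + o_y^2 + o_z^2 - r^2)}_{c} = 0.$$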
Code Example #3
File: triangle.cpp  Project: Graphics3D/pbrt-v3
std::vector<std::shared_ptr<Shape>> CreateTriangleMeshShape(
    const Transform *o2w, const Transform *w2o, bool reverseOrientation,
    const ParamSet &params,
    std::map<std::string, std::shared_ptr<Texture<Float>>> *floatTextures) {
    int nvi, npi, nuvi, nsi, nni;
    const int *vi = params.FindInt("indices", &nvi);
    const Point3f *P = params.FindPoint3f("P", &npi);
    const Point2f *uvs = params.FindPoint2f("uv", &nuvi);
    if (!uvs) uvs = params.FindPoint2f("st", &nuvi);
    std::vector<Point2f> tempUVs;
    if (!uvs) {
        const Float *fuv = params.FindFloat("uv", &nuvi);
        if (!fuv) fuv = params.FindFloat("st", &nuvi);
        if (fuv) {
            nuvi /= 2;
            tempUVs.reserve(nuvi);
            for (int i = 0; i < nuvi; ++i)
                tempUVs.push_back(Point2f(fuv[2 * i], fuv[2 * i + 1]));
            uvs = &tempUVs[0];
        }
    }
    bool discardDegenerateUVs =
        params.FindOneBool("discarddegenerateUVs", false);
    if (uvs) {
        if (nuvi < npi) {
            Error(
                "Not enough of \"uv\"s for triangle mesh.  Expencted %d, "
                "found %d.  Discarding.",
                npi, nuvi);
            uvs = nullptr;
        } else if (nuvi > npi)
            Warning(
                "More \"uv\"s provided than will be used for triangle "
                "mesh.  (%d expcted, %d found)",
                npi, nuvi);
    }
    if (!vi) {
        Error(
            "Vertex indices \"indices\" not provided with triangle mesh shape");
        return std::vector<std::shared_ptr<Shape>>();
    }
    if (!P) {
        Error("Vertex positions \"P\" not provided with triangle mesh shape");
        return std::vector<std::shared_ptr<Shape>>();
    }
    const Vector3f *S = params.FindVector3f("S", &nsi);
    if (S && nsi != npi) {
        Error("Number of \"S\"s for triangle mesh must match \"P\"s");
        S = nullptr;
    }
    const Normal3f *N = params.FindNormal3f("N", &nni);
    if (N && nni != npi) {
        Error("Number of \"N\"s for triangle mesh must match \"P\"s");
        N = nullptr;
    }
    if (discardDegenerateUVs && uvs && N) {
        // if there are normals, check for bad uv's that
        // give degenerate mappings; discard them if so
        const int *vp = vi;
        for (int i = 0; i < nvi; i += 3, vp += 3) {
            Float area =
                .5f * Cross(P[vp[0]] - P[vp[1]], P[vp[2]] - P[vp[1]]).Length();
            if (area < 1e-7) continue;  // ignore degenerate tris.
            if ((uvs[vp[0]].x == uvs[vp[1]].x &&
                 uvs[vp[0]].y == uvs[vp[1]].y) ||
                (uvs[vp[1]].x == uvs[vp[2]].x &&
                 uvs[vp[1]].y == uvs[vp[2]].y) ||
                (uvs[vp[2]].x == uvs[vp[0]].x &&
                 uvs[vp[2]].y == uvs[vp[0]].y)) {
                Warning(
                    "Degenerate uv coordinates in triangle mesh.  Discarding "
                    "all uvs.");
                uvs = nullptr;
                break;
            }
        }
    }
    for (int i = 0; i < nvi; ++i)
        if (vi[i] >= npi) {
            Error(
                "trianglemesh has out of-bounds vertex index %d (%d \"P\" "
                "values were given",
                vi[i], npi);
            return std::vector<std::shared_ptr<Shape>>();
        }

    std::shared_ptr<Texture<Float>> alphaTex;
    std::string alphaTexName = params.FindTexture("alpha");
    if (alphaTexName != "") {
        if (floatTextures->find(alphaTexName) != floatTextures->end())
            alphaTex = (*floatTextures)[alphaTexName];
        else
            Error("Couldn't find float texture \"%s\" for \"alpha\" parameter",
                  alphaTexName.c_str());
    } else if (params.FindOneFloat("alpha", 1.f) == 0.f)
        alphaTex.reset(new ConstantTexture<Float>(0.f));

    std::shared_ptr<Texture<Float>> shadowAlphaTex;
    std::string shadowAlphaTexName = params.FindTexture("shadowalpha");
    if (shadowAlphaTexName != "") {
        if (floatTextures->find(shadowAlphaTexName) != floatTextures->end())
            shadowAlphaTex = (*floatTextures)[shadowAlphaTexName];
        else
            Error(
                "Couldn't find float texture \"%s\" for \"shadowalpha\" "
                "parameter",
                shadowAlphaTexName.c_str());
    } else if (params.FindOneFloat("shadowalpha", 1.f) == 0.f)
        shadowAlphaTex.reset(new ConstantTexture<Float>(0.f));

    return CreateTriangleMesh(o2w, w2o, reverseOrientation, nvi / 3, vi, npi, P,
                              S, N, uvs, alphaTex, shadowAlphaTex);
}
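
For context, a pbrt scene-file fragment exercising the parameters this factory looks up might read as follows (values are illustrative; note that per the fallback above, "uv" may be given either as 2D points or as a flat float array):

    Shape "trianglemesh"
        "integer indices" [ 0 1 2  2 3 0 ]
        "point P" [ -1 -1 0   1 -1 0   1 1 0   -1 1 0 ]
        "float uv" [ 0 0  1 0  1 1  0 1 ]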
Code Example #4
void CTracker::Update(vector<Point2f>& detections)
{
	// If there are no tracks yet, every detection starts its own track.
	if (tracks.size() == 0)
	{
		for (int i = 0; i < detections.size(); ++i)
		{
			CTrack* tr = new CTrack(detections[i], dt, Accel_noise_mag);
			tracks.push_back(tr);
		}	
	}

	int N = tracks.size();
	int M = detections.size();
	vector<vector<float>> Cost(N, vector<float>(M));
	vector<int> assignment;

	float dist;
	for (int i = 0; i < tracks.size(); ++i)
	{	
		// Point2f prediction=tracks[i]->prediction;
		// console_log(to_string(prediction.x) + to_string(prediction.y));
		for (int j = 0; j < detections.size(); ++j)
		{
			Point2f diff = (tracks[i]->prediction - detections[j]);
			dist = sqrtf(diff.x * diff.x + diff.y * diff.y);
			Cost[i][j] = dist;
		}
	}

	// Solving assignment problem (tracks and predictions of Kalman filter)
	AssignmentProblemSolver APS;
	APS.Solve(Cost, assignment, AssignmentProblemSolver::optimal);

	// clean assignment from pairs with large distance
	// Not assigned tracks
	vector<int> not_assigned_tracks;

	for (int i = 0; i < assignment.size(); ++i)
	{
		if (assignment[i] != -1)
		{
			if (Cost[i][assignment[i]] > dist_thres)
			{
				assignment[i] = -1;
				// Mark this track as unassigned; tracks whose skipped-frames
				// counter exceeds the threshold are deleted below.
				not_assigned_tracks.push_back(i);
			}
		}
		else
		{			
			// If the track has no assigned detection, increment its skipped-frames counter.
			tracks[i]->skipped_frames++;
		}

	}

	// If a track has gone without detections for too long, remove it.
	for (int i = 0; i < tracks.size(); ++i)
	{
		if (tracks[i]->skipped_frames > maximum_allowed_skipped_frames)
		{
			delete tracks[i];
			tracks.erase(tracks.begin() + i);
			assignment.erase(assignment.begin() + i);
			--i;
		}
	}

	// Search for unassigned detects
	vector<int> not_assigned_detections;
	vector<int>::iterator it;
	for (int i = 0; i < detections.size(); ++i)
	{
		it = find(assignment.begin(), assignment.end(), i);
		if (it == assignment.end())
		{
			not_assigned_detections.push_back(i);
		}
	}

	// and start new tracks for them.
	if (not_assigned_detections.size() != 0)
	{
		for (int i = 0; i < not_assigned_detections.size(); ++i)
		{
			CTrack* tr = new CTrack(detections[not_assigned_detections[i]], dt,Accel_noise_mag);
			tracks.push_back(tr);
		}	
	}

	// Update Kalman Filters state
	for (int i = 0; i < assignment.size(); ++i)
	{
		// If the track has been updated less than once, the filter state is not yet correct.
		tracks[i]->KF->GetPrediction();

		if (assignment[i] != -1) // If a detection is assigned, update using its coordinates,
		{
			tracks[i]->skipped_frames = 0;
			tracks[i]->prediction = tracks[i]->KF->Update(detections[assignment[i]], 1);
			tracks[i]->raw = detections[assignment[i]];
		}
		else				  // otherwise, continue using the prediction
		{
			tracks[i]->prediction = tracks[i]->KF->Update(Point2f(0, 0), 0);
			tracks[i]->raw = Point2f(0, 0);
		}
		
		if(tracks[i]->trace.size() > max_trace_length)
		{
			tracks[i]->trace.erase(tracks[i]->trace.begin(), tracks[i]->trace.end() - max_trace_length);
		}

		tracks[i]->trace.push_back(tracks[i]->prediction);
		tracks[i]->KF->LastResult = tracks[i]->prediction;
	}

}
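
The heart of the data association above is the track-to-detection cost matrix; here is a self-contained sketch of that one computation (with a stand-in Point2f, since the cv:: type is not pulled in):

	#include <cmath>
	#include <vector>

	struct Point2f { float x, y; };  // stand-in for cv::Point2f

	// Cost[i][j] = Euclidean distance between track i's predicted position
	// and detection j, exactly as built at the top of Update().
	std::vector<std::vector<float>> MakeCostMatrix(
		const std::vector<Point2f>& predictions,
		const std::vector<Point2f>& detections)
	{
		std::vector<std::vector<float>> cost(
			predictions.size(), std::vector<float>(detections.size()));
		for (size_t i = 0; i < predictions.size(); ++i)
			for (size_t j = 0; j < detections.size(); ++j)
				cost[i][j] = std::hypot(predictions[i].x - detections[j].x,
				                        predictions[i].y - detections[j].y);
		return cost;
	}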
Code Example #5
  void PlaneEstimator::estimate(RGBDImage& image, Mat1b& plane_points)
  {

      // Passing from 3D to the optimizer

      const cv::Mat3f& normal_image = image.normal();
      const cv::Mat1f& distance_image = image.depth();
      cv::Mat1b& mask_image = image.depthMaskRef();
      cv::Mat1b objfilter;
      mask_image.copyTo(objfilter);
      plane_points = image.normal().clone();
      plane_points = 0;

    if (!image.normal().data)
    {
      ntk_dbg(0) << "WARNING: you must active the normal filter to get plane estimation!";
      return;
    }

    double min[dim];
    double max[dim];
    int i;

    for (i=0;i<dim;i++)
    {
      max[i] = 1000.0;
      min[i] = -1000.0;
    }
    m_solver.Setup(min,max,DifferentialEvolutionSolver::stBest1Exp,0.8,0.75);


   // Early estimation of plane points projecting the normal values

    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
      if (objfilter.data && objfilter(r,c))
      {
        cv::Vec3f normal = normal_image(r,c);
        double prod = normal.dot(m_ref_plane);
        if (prod > 0.95)
          plane_points(r,c) = 255;
        else
          plane_points(r,c) = 0;
      }
    }

    // Clean up the very first surface estimate
    dilate(plane_points,plane_points,cv::Mat());
    erode(plane_points,plane_points,cv::Mat());
    //imwrite("plane-initial.png",plane_points);

    std::vector<Point3f>& g = m_solver.planePointsRef();

    g.clear();
    
    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
      if (plane_points(r,c))
      {
        // possible table member!
        Point3f p3d = image.calibration()->depth_pose->unprojectFromImage(Point2f(c,r), distance_image(r,c));
        g.push_back(p3d);
      }
    }
    
    // Calculating...
    m_solver.Solve(max_generations);

    double *solution = m_solver.Solution();

    // Plane normalizer
    float suma = solution[0] + solution[1] + solution[2] + solution[3] ;
    for (int i = 0; i < 4; i++)
      solution[i] = solution[i]/ suma;

    ntk::Plane plano (solution[0], solution[1], solution[2], solution[3]);
    //Update RGBD object
    m_plane.set(solution[0], solution[1], solution[2], solution[3]);


    // Final estimation of plane points projecting the normal values

     cv::Vec3f diffevolnormal(solution[0], solution[1], solution[2]);

     for (int r = 1; r < plane_points.rows-1; ++r)
     for (int c = 1; c < plane_points.cols-1; ++c)
     {
       if (objfilter.data && objfilter(r,c))
       {
         cv::Vec3f normal = normal_image(r,c);
         double prod = normal.dot(diffevolnormal);

         if (prod > 0.5)
           plane_points(r,c) = 255;
         else
           plane_points(r,c) = 0;
       }
     }

     // Cleaning of the DE plane-pixels solution
     dilate(plane_points,plane_points,cv::Mat());
     erode(plane_points,plane_points,cv::Mat());
    // imwrite("plane-DE.png",plane_points);


  }
Code Example #6
File: shapedescr.cpp  Project: 007Indian/opencv
cv::RotatedRect cv::fitEllipse( InputArray _points )
{
    Mat points = _points.getMat();
    int i, n = points.checkVector(2);
    int depth = points.depth();
    CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));

    RotatedRect box;

    if( n < 5 )
        CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );

    // New fitellipse algorithm, contributed by Dr. Daniel Weiss
    Point2f c(0,0);
    double gfp[5], rp[5], t;
    const double min_eps = 1e-8;
    bool is_float = depth == CV_32F;
    const Point* ptsi = points.ptr<Point>();
    const Point2f* ptsf = points.ptr<Point2f>();

    AutoBuffer<double> _Ad(n*5), _bd(n);
    double *Ad = _Ad, *bd = _bd;

    // first fit for parameters A - E
    Mat A( n, 5, CV_64F, Ad );
    Mat b( n, 1, CV_64F, bd );
    Mat x( 5, 1, CV_64F, gfp );

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        c += p;
    }
    c.x /= n;
    c.y /= n;

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;

        bd[i] = 10000.0; // 1.0?
        Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
        Ad[i*5 + 1] = -(double)p.y * p.y;
        Ad[i*5 + 2] = -(double)p.x * p.y;
        Ad[i*5 + 3] = p.x;
        Ad[i*5 + 4] = p.y;
    }

    solve(A, b, x, DECOMP_SVD);

    // now use general-form parameters A - E to find the ellipse center:
    // differentiate general form wrt x/y to get two equations for cx and cy
    A = Mat( 2, 2, CV_64F, Ad );
    b = Mat( 2, 1, CV_64F, bd );
    x = Mat( 2, 1, CV_64F, rp );
    Ad[0] = 2 * gfp[0];
    Ad[1] = Ad[2] = gfp[2];
    Ad[3] = 2 * gfp[1];
    bd[0] = gfp[3];
    bd[1] = gfp[4];
    solve( A, b, x, DECOMP_SVD );

    // re-fit for parameters A - C with those center coordinates
    A = Mat( n, 3, CV_64F, Ad );
    b = Mat( n, 1, CV_64F, bd );
    x = Mat( 3, 1, CV_64F, gfp );
    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;
        bd[i] = 1.0;
        Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
        Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
        Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
    }
    solve(A, b, x, DECOMP_SVD);

    // store angle and radii
    rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
    if( fabs(gfp[2]) > min_eps )
        t = gfp[2]/sin(-2.0 * rp[4]);
    else // ellipse is rotated by an integer multiple of pi/2
        t = gfp[1] - gfp[0];
    rp[2] = fabs(gfp[0] + gfp[1] - t);
    if( rp[2] > min_eps )
        rp[2] = std::sqrt(2.0 / rp[2]);
    rp[3] = fabs(gfp[0] + gfp[1] + t);
    if( rp[3] > min_eps )
        rp[3] = std::sqrt(2.0 / rp[3]);

    box.center.x = (float)rp[0] + c.x;
    box.center.y = (float)rp[1] + c.y;
    box.size.width = (float)(rp[2]*2);
    box.size.height = (float)(rp[3]*2);
    if( box.size.width > box.size.height )
    {
        float tmp;
        CV_SWAP( box.size.width, box.size.height, tmp );
        box.angle = (float)(90 + rp[4]*180/CV_PI);
    }
    if( box.angle < -180 )
        box.angle += 360;
    if( box.angle > 360 )
        box.angle -= 360;

    return box;
}
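
A typical call site for this function, e.g. on a contour extracted elsewhere (at least five points are required, per the size check above):

    #include <opencv2/imgproc.hpp>
    #include <vector>

    cv::RotatedRect FitContourEllipse(const std::vector<cv::Point2f>& pts)
    {
        cv::RotatedRect box = cv::fitEllipse(pts);  // CV_Error if pts.size() < 5
        // box.center, box.size (full axis lengths), box.angle (degrees)
        return box;
    }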
Code Example #7
File: bdpt.cpp  Project: Vertexwahn/pbrt-v3
void BDPTIntegrator::Render(const Scene &scene) {
    ProfilePhase p(Prof::IntegratorRender);
    // Compute _lightDistr_ for sampling lights proportional to power
    std::unique_ptr<Distribution1D> lightDistr =
        ComputeLightPowerDistribution(scene);

    // Partition the image into tiles
    Film *film = camera->film;
    const Bounds2i sampleBounds = film->GetSampleBounds();
    const Vector2i sampleExtent = sampleBounds.Diagonal();
    const int tileSize = 16;
    const int nXTiles = (sampleExtent.x + tileSize - 1) / tileSize;
    const int nYTiles = (sampleExtent.y + tileSize - 1) / tileSize;
    ProgressReporter reporter(nXTiles * nYTiles, "Rendering");

    // Allocate buffers for debug visualization
    const int bufferCount = (1 + maxDepth) * (6 + maxDepth) / 2;
    std::vector<std::unique_ptr<Film>> weightFilms(bufferCount);
    if (visualizeStrategies || visualizeWeights) {
        for (int depth = 0; depth <= maxDepth; ++depth) {
            for (int s = 0; s <= depth + 2; ++s) {
                int t = depth + 2 - s;
                if (t == 0 || (s == 1 && t == 1)) continue;

                char filename[32];
                snprintf(filename, sizeof(filename),
                         "bdpt_d%02i_s%02i_t%02i.exr", depth, s, t);

                weightFilms[BufferIndex(s, t)] = std::unique_ptr<Film>(new Film(
                    film->fullResolution,
                    Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                    std::unique_ptr<Filter>(CreateBoxFilter(ParamSet())),
                    film->diagonal * 1000, filename, 1.f));
            }
        }
    }

    // Render and write the output image to disk
    if (scene.lights.size() > 0) {
        StatTimer timer(&renderingTime);
        ParallelFor([&](const Point2i tile) {
            // Render a single tile using BDPT
            MemoryArena arena;
            int seed = tile.y * nXTiles + tile.x;
            std::unique_ptr<Sampler> tileSampler = sampler->Clone(seed);
            int x0 = sampleBounds.pMin.x + tile.x * tileSize;
            int x1 = std::min(x0 + tileSize, sampleBounds.pMax.x);
            int y0 = sampleBounds.pMin.y + tile.y * tileSize;
            int y1 = std::min(y0 + tileSize, sampleBounds.pMax.y);
            Bounds2i tileBounds(Point2i(x0, y0), Point2i(x1, y1));
            std::unique_ptr<FilmTile> filmTile =
                camera->film->GetFilmTile(tileBounds);
            for (Point2i pPixel : tileBounds) {
                tileSampler->StartPixel(pPixel);
                do {
                    // Generate a single sample using BDPT
                    Point2f pFilm = (Point2f)pPixel + tileSampler->Get2D();

                    // Trace the camera and light subpaths
                    Vertex *cameraVertices = arena.Alloc<Vertex>(maxDepth + 2);
                    Vertex *lightVertices = arena.Alloc<Vertex>(maxDepth + 1);
                    int nCamera = GenerateCameraSubpath(
                        scene, *tileSampler, arena, maxDepth + 2, *camera,
                        pFilm, cameraVertices);
                    int nLight = GenerateLightSubpath(
                        scene, *tileSampler, arena, maxDepth + 1,
                        cameraVertices[0].time(), *lightDistr, lightVertices);

                    // Execute all BDPT connection strategies
                    Spectrum L(0.f);
                    for (int t = 1; t <= nCamera; ++t) {
                        for (int s = 0; s <= nLight; ++s) {
                            int depth = t + s - 2;
                            if ((s == 1 && t == 1) || depth < 0 ||
                                depth > maxDepth)
                                continue;
                            // Execute the $(s, t)$ connection strategy and
                            // update _L_
                            Point2f pFilmNew = pFilm;
                            Float misWeight = 0.f;
                            Spectrum Lpath = ConnectBDPT(
                                scene, lightVertices, cameraVertices, s, t,
                                *lightDistr, *camera, *tileSampler, &pFilmNew,
                                &misWeight);
                            if (visualizeStrategies || visualizeWeights) {
                                Spectrum value;
                                if (visualizeStrategies)
                                    value =
                                        misWeight == 0 ? 0 : Lpath / misWeight;
                                if (visualizeWeights) value = Lpath;
                                weightFilms[BufferIndex(s, t)]->AddSplat(
                                    pFilmNew, value);
                            }
                            if (t != 1)
                                L += Lpath;
                            else
                                film->AddSplat(pFilmNew, Lpath);
                        }
                    }
                    filmTile->AddSample(pFilm, L);
                    arena.Reset();
                } while (tileSampler->StartNextSample());
            }
            film->MergeFilmTile(std::move(filmTile));
            reporter.Update();
        }, Point2i(nXTiles, nYTiles));
        reporter.Done();
    }
    film->WriteImage(1.0f / sampler->samplesPerPixel);

    // Write buffers for debug visualization
    if (visualizeStrategies || visualizeWeights) {
        const Float invSampleCount = 1.0f / sampler->samplesPerPixel;
        for (size_t i = 0; i < weightFilms.size(); ++i)
            if (weightFilms[i]) weightFilms[i]->WriteImage(invSampleCount);
    }
}
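
For reference, bufferCount sizes the debug-film array for the triangular (s, t) strategy space. Writing $a = s + t - 2$ for the path depth, pbrt's BufferIndex helper linearizes a strategy as

$$\mathrm{BufferIndex}(s, t) = s + \frac{a(a + 5)}{2},$$

so the largest index (at $a = \texttt{maxDepth}$, $s = \texttt{maxDepth} + 2$) plus one gives

$$(\texttt{maxDepth} + 2) + \frac{\texttt{maxDepth}(\texttt{maxDepth} + 5)}{2} + 1 = \frac{(1 + \texttt{maxDepth})(6 + \texttt{maxDepth})}{2},$$

matching the bufferCount expression above.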
Code Example #8
File: shape.cpp  Project: TuringKi/legit
void ModalityConvex::update(Image& image, PatchSet* patchSet, Rect bounds)
{

  Ptr<PatchSet> patches = reliablePatchesFilter.empty() ? patchSet : patchSet->filter(*reliablePatchesFilter);

  if (patches->size() < 3)
    {
      //flush();
      return;
    }

  Mat temp = image.get_float_mask();

  temp.setTo(0);

  if (history.empty())
    {
      history.create(image.height(), image.width(), CV_32F);
      history.setTo(0);
    }

  Point2f * points = new Point2f[patches->size()];
  Point2f * hull = NULL;

  Point2f offset = Point2f(image.width()/2, image.height() /2) - patchSet->mean_position();

  Point2f mean(0, 0);

  for (int i = 0; i < patches->size(); i++)
    {
      points[i] = patches->get_position(i) + offset;
    }

  int size = convex_hull(points, patches->size(), &hull);

  for (int i = 0; i < size; i++)
    {
      mean.x += hull[i].x;
      mean.y += hull[i].y;
    }

  mean.x /= size;
  mean.y /= size;

  Point * hulli = new Point[size];
  Point * hullei = new Point[size];

  for (int i = 0; i < size; i++)
    {
      hulli[i].x = (int) hull[i].x;
      hulli[i].y = (int) hull[i].y;
    }

  expand(hull, size, mean, margin);

  for (int i = 0; i < size; i++)
    {
      hullei[i].x = (int) hull[i].x;
      hullei[i].y = (int) hull[i].y;
    }

  fillConvexPoly(temp, hullei, size, Scalar(1 - margin_diminish));
  fillConvexPoly(temp, hulli, size, Scalar(1));

  delete [] points;
  free(hull);
  delete [] hulli;
  delete [] hullei;

  history = history * persistence + temp * (1.0f - persistence);

  debugCanvas->draw(history);
  debugCanvas->push();

}
Code Example #9
File: Point2f.cpp  Project: jonathanhook/PhysicsSynth
	// Delegating constructor (C++11): a bare `Point2f(0.0f, 0.0f);` statement
	// in the constructor body would only create a temporary and leave this
	// object's members uninitialized.
	Point2f::Point2f(void)
		: Point2f(0.0f, 0.0f)
	{
	}
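
If C++11 delegating constructors are unavailable, the equivalent fix is to assign the members directly (the x/y member names are assumed from the usual Point2f layout, not confirmed from this project):

	Point2f::Point2f(void)
	{
		x = 0.0f;  // assumed member names
		y = 0.0f;
	}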
Code Example #10
File: curve.cpp  Project: AndreaLoforte/pbrt-v3
bool Curve::recursiveIntersect(const Ray &ray, Float *tHit,
                               SurfaceInteraction *isect, const Point3f cp[4],
                               const Transform &rayToObject, Float u0, Float u1,
                               int depth) const {
    // Try to cull curve segment versus ray

    // Compute bounding box of curve segment, _curveBounds_
    Bounds3f curveBounds =
        Union(Bounds3f(cp[0], cp[1]), Bounds3f(cp[2], cp[3]));
    Float maxWidth = std::max(Lerp(u0, common->width[0], common->width[1]),
                              Lerp(u1, common->width[0], common->width[1]));
    curveBounds = Expand(curveBounds, 0.5 * maxWidth);

    // Compute bounding box of ray, _rayBounds_
    Float rayLength = ray.d.Length();
    Float zMax = rayLength * ray.tMax;
    Bounds3f rayBounds(Point3f(0, 0, 0), Point3f(0, 0, zMax));
    if (Overlaps(curveBounds, rayBounds) == false) return false;
    if (depth > 0) {
        // Split curve segment into sub-segments and test for intersection
        Float uMid = 0.5f * (u0 + u1);
        Point3f cpSplit[7];
        SubdivideBezier(cp, cpSplit);
        return (recursiveIntersect(ray, tHit, isect, &cpSplit[0], rayToObject,
                                   u0, uMid, depth - 1) ||
                recursiveIntersect(ray, tHit, isect, &cpSplit[3], rayToObject,
                                   uMid, u1, depth - 1));
    } else {
        // Intersect ray with curve segment

        // Test ray against segment endpoint boundaries

        // Test sample point against tangent perpendicular at curve start
        Float edge =
            (cp[1].y - cp[0].y) * -cp[0].y + cp[0].x * (cp[0].x - cp[1].x);
        if (edge < 0) return false;

        // Test sample point against tangent perpendicular at curve end
        edge = (cp[2].y - cp[3].y) * -cp[3].y + cp[3].x * (cp[3].x - cp[2].x);
        if (edge < 0) return false;

        // Compute line $w$ that gives minimum distance to sample point
        Vector2f segmentDirection = Point2f(cp[3]) - Point2f(cp[0]);
        Float denom = segmentDirection.LengthSquared();
        if (denom == 0) return false;
        Float w = Dot(-Vector2f(cp[0]), segmentDirection) / denom;

        // Compute $u$ coordinate of curve intersection point and _hitWidth_
        Float u = Clamp(Lerp(w, u0, u1), u0, u1);
        Float hitWidth = Lerp(u, common->width[0], common->width[1]);
        Normal3f nHit;
        if (common->type == CurveType::Ribbon) {
            // Scale _hitWidth_ based on ribbon orientation
            Float sin0 = std::sin((1 - u) * common->normalAngle) *
                         common->invSinNormalAngle;
            Float sin1 =
                std::sin(u * common->normalAngle) * common->invSinNormalAngle;
            nHit = sin0 * common->n[0] + sin1 * common->n[1];
            hitWidth *= AbsDot(nHit, ray.d) / rayLength;
        }

        // Test intersection point against curve width
        Vector3f dpcdw;
        Point3f pc = EvalBezier(cp, Clamp(w, 0, 1), &dpcdw);
        Float ptCurveDist2 = pc.x * pc.x + pc.y * pc.y;
        if (ptCurveDist2 > hitWidth * hitWidth * .25) return false;
        if (pc.z < 0 || pc.z > zMax) return false;

        // Compute $v$ coordinate of curve intersection point
        Float ptCurveDist = std::sqrt(ptCurveDist2);
        Float edgeFunc = dpcdw.x * -pc.y + pc.x * dpcdw.y;
        Float v = (edgeFunc > 0) ? 0.5f + ptCurveDist / hitWidth
                                 : 0.5f - ptCurveDist / hitWidth;

        // Compute hit _t_ and partial derivatives for curve intersection
        if (tHit != nullptr) {
            // FIXME: this tHit isn't quite right for ribbons...
            *tHit = pc.z / rayLength;
            // Compute error bounds for curve intersection
            Vector3f pError(2 * hitWidth, 2 * hitWidth, 2 * hitWidth);

            // Compute $\dpdu$ and $\dpdv$ for curve intersection
            Vector3f dpdu, dpdv;
            EvalBezier(common->cpObj, u, &dpdu);
            if (common->type == CurveType::Ribbon)
                dpdv = Normalize(Cross(nHit, dpdu)) * hitWidth;
            else {
                // Compute curve $\dpdv$ for flat and cylinder curves
                Vector3f dpduPlane = (Inverse(rayToObject))(dpdu);
                Vector3f dpdvPlane =
                    Normalize(Vector3f(-dpduPlane.y, dpduPlane.x, 0)) *
                    hitWidth;
                if (common->type == CurveType::Cylinder) {
                    // Rotate _dpdvPlane_ to give cylindrical appearance
                    Float theta = Lerp(v, -90., 90.);
                    Transform rot = Rotate(-theta, dpduPlane);
                    dpdvPlane = rot(dpdvPlane);
                }
                dpdv = rayToObject(dpdvPlane);
            }
            *isect = (*ObjectToWorld)(SurfaceInteraction(
                ray(pc.z), pError, Point2f(u, v), -ray.d, dpdu, dpdv,
                Normal3f(0, 0, 0), Normal3f(0, 0, 0), ray.time, this));
        }
        ++nHits;
        return true;
    }
}
Code Example #11
File: myRadar.cpp  Project: yingjh/IARC_13
// display function should be good enough
void OpenRadar::DrawRadarData()
{
	int usualColor[15] = {16777215,255,128,65280,32768,
	                      16711680,16711935,8421376,65535,32896}; /* usual colors */
	CvPoint pt1, pt2;

	cvZero(RadarImage);
	cvCircle(RadarImage, cvPoint(DisplayDx,DisplayDy),3, CV_RGB(0,255,255), -1, 8,0);
	int x,y;
	unsigned char * pPixel = 0;
	int colorIndex = 0, colorRGB;
	int R = 255, G = 0, B = 0;
    
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] < 0)
		{
			
			//change color
			colorRGB = usualColor[colorIndex];
			R = colorRGB/65536;
			G = (colorRGB%65536)/256;
			B = colorRGB%256;
			colorIndex = (colorIndex + 1)%10;
			
		}
		else 
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
	
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				pPixel = (unsigned char*)RadarImage->imageData + y*RadarImage->widthStep + 3*x;
				pPixel[0] = B;
				pPixel[1] = G;
				pPixel[2] = R;
			}
		}     
	}
	
	pt1.x = DisplayDx; pt1.y = DisplayDy;
	pt2.x = DisplayDx+line_length*v_scale*sin(v_angle + 0.5*M_PI); 
	pt2.y = DisplayDy+line_length*v_scale*cos(v_angle + 0.5*M_PI);
	cvLine(RadarImage, pt1, pt2, CV_RGB(255,255,255),2,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 	
	pt2.y = DisplayDy+line_length*sin(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	pt2.y = DisplayDy+line_length*sin(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	//pt2.x = DisplayDx+line_length*cos(0.25*M_PI); 
	//pt2.y = DisplayDy+line_length*sin(0.25*M_PI);
	//cout<< line_length <<endl; 
	//cout<< pt1.x <<" , " << pt1.y <<endl;
	//cout<< pt2.x <<" , " << pt2.y <<endl;
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	float angle;
	int line_length2;
	for (int i=0; i<polarH_length;i++)
	{
		angle = (-30+i*polarH_resolution)*M_PI/180;
		line_length2 = H[i]/10;
		pt2.x = DisplayDx+line_length2*sin(angle); 
		pt2.y = DisplayDy+line_length2*cos(angle);
		cvCircle(RadarImage, pt2, 2, CV_RGB(255,255,255),1,8,0);
	}

	////////////////////////////////////////////////////////////////////////////////////
	// mine
	////////////////////////////////////////////////////////////////////////////////////
	Mat binImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC1);
	vector< Point> centerRaw;
	centerRaw.clear();
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] > 200)
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
			//centerRaw.push_back(Point(x,y));
			//cout<<"P:" <<centerRaw[i].x<<","<<centerRaw[i].y<<endl;
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				 circle( binImg,Point(x,y),1,Scalar(255),-1);
			}
		}     
	}
	imshow("binImg",binImg);
	Mat element = getStructuringElement(MORPH_RECT, Size(1,2));
	Mat element2 = getStructuringElement(MORPH_RECT, Size(10,10));
	erode(binImg, binImg, element);
	morphologyEx(binImg, binImg, MORPH_OPEN, element);
	dilate(binImg, binImg, element2);
	morphologyEx(binImg, binImg, MORPH_CLOSE, element2);
	imshow("dilate",binImg);

	vector< vector<Point> > contours;	
	vector< vector<Point> > filterContours;	
	vector< Vec4i > hierarchy;	
	vector< Point2f> center;
	vector< float > radius;
	vector<Point2f> realPoint;
	

	findContours(binImg, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	center.resize(contours.size());
	radius.resize(contours.size());
	//realPoint.resize(contours.size());
	for(int i = 0; i< contours.size(); i++)
	{
		minEnclosingCircle(Mat(contours[i]),center[i],radius[i]); // fit the minimum enclosing circle to the contour
		circle(binImg,center[i],650/DisplayRatio,Scalar(255),1); 
		//cout<<"No."<<i<<" | P: "<< center[i].x<<","<<center[i].y<<endl;
		float realX = (center[i].x - DisplayDx) * DisplayRatio;
		float realY = (center[i].y - DisplayDy) * DisplayRatio;

		realPoint.push_back(Point2f(realX,realY));
		//cout<<"No."<<i<<" | P: "<< realPoint[i].x<<","<<realPoint[i].y<<endl;
	}
	imshow("findContours",binImg);
	// color map
	Mat mapImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC3);
	circle(mapImg, Point(DisplayDx,DisplayDy),3, CV_RGB(255,255,255),-1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx+40,DisplayDy), Scalar(0,0,255),1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx,DisplayDy+40), Scalar(0,255,0),1);
	for(int i = 0; i< center.size(); i++)
	{
		circle(mapImg,center[i],650/DisplayRatio,Scalar(255,255,0),1,CV_AA); 
		circle(mapImg,center[i],100/DisplayRatio,Scalar(0,255,255),-1); 
	}
	imshow("Map",mapImg);
	////////////////////////////////////
	ukftest::laserPoint msg;
	vector <float> xvec;
	vector <float> yvec;
	for(int i = 0 ; i < realPoint.size(); i++)
	{
		// cm
		xvec.push_back(realPoint[i].x/10.0f);
		yvec.push_back(realPoint[i].y/10.0f);
	}

	// msg
	msg.header.stamp = ros::Time::now();
	msg.header.frame_id = "hokuyo_laser";
	msg.x =xvec;
	msg.y =yvec;
	if(realPoint.size() >0) msg.isBlocking = 1;
	else msg.isBlocking = 0;
	pub_xy. publish(msg);
	
}
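
Both plotting loops above use the same polar-to-pixel conversion; the sign flip on y accounts for the image's downward y axis:

$$x = \frac{\rho \cos\theta}{\texttt{DisplayRatio}} + \texttt{DisplayDx}, \qquad y = \frac{-\rho \sin\theta}{\texttt{DisplayRatio}} + \texttt{DisplayDy}.$$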
Code Example #12
void Instructions_State::render(){
	Widget_Gamestate::render();

	get_Video().set_2d(make_pair(Point2f(0.0f, 0.0f), Point2f(1920.0f, 1200.0f)), true);
	static const char * const screens[] = {"Control_1", "Control_2", "Objective",
	                                       "World", "HUD", "Instructions",
	                                       "StillConfused"};
	if(state >= 0 && state <= 6){
		render_image(screens[state], Point2f(0.0f,0.0f), Point2f(2048.0f,2048.0f));
	}
  
	render_controls(0);
}
Code Example #13
File: texture.cpp  Project: Drooids/pbrt-v3
Point2f SphericalMapping2D::sphere(const Point3f &p) const {
    Vector3f vec = Normalize(WorldToTexture(p) - Point3f(0, 0, 0));
    Float theta = SphericalTheta(vec), phi = SphericalPhi(vec);
    return Point2f(theta * InvPi, phi * Inv2Pi);
}
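
In symbols, the mapping normalizes the two spherical angles of the texture-space direction to $[0, 1]$:

$$(u, v) = \left(\frac{\theta}{\pi}, \frac{\phi}{2\pi}\right), \qquad \theta = \arccos z \in [0, \pi], \quad \phi = \operatorname{atan2}(y, x) \in [0, 2\pi).$$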
Code Example #14
void *affine_loop(void *number)
{

/*
	if(set_single_core_affinity()!=EXIT_SUCCESS)
	{
		perror("Core Affinity");
	}
*/

	int block_size;
	int max_files;
	int i=0;
	int section=0;
	float pos[8]={0,0,1,0,0,1,1,1};

	Point2f srcTri[4];
	Point2f dstTri[4];

	max_files=3601;
	block_size=max_files/4;

	Mat rot_mat( 2, 3, CV_32FC1 );
	Mat warp_mat( 2, 3, CV_32FC1 );


	// Output variables
	Mat src, warp_dst, warp_rotate_dst;

	struct thread_limits *ptr_obj = (struct thread_limits *) number;
	int start_loop = ptr_obj->start_no;
	int stop_loop  = ptr_obj->stop_no;

	/*------------------------- Starting the loop --------------------------*/

	for (i=start_loop; i<=stop_loop; i++)
	{

		/*------------------------- Loading the Image --------------------------*/

		if(option==1)
		{
			// Select the right frame
			sprintf(frame_name2,"Sobel_frame_no_%05u.ppm",i);
			// Load the Image
			src = imread( frame_name2, 1 );
		}

		else
		{
			sprintf(frame_name,"Frame_no_%05u.ppm",i);
			src = imread( frame_name, 1 );
		}

		/*---------------------- Affine Transform : Warp -----------------------*/

		// Setting up the output image parameters

		warp_dst = Mat::zeros( src.rows, src.cols, src.type() );


		/*---------------------- Change the parameter values ----------------------*/

	
	
		switch(section)
		{

			case 0:
			{

				pos[1]=pos[1]+0.001;
				pos[2]=pos[2]-0.001;
				pos[4]=pos[4]+0.001;
				pos[7]=pos[7]-0.001;
		
			
				// Setting parameters for matrix computation

				srcTri[0] = Point2f( 0,0 );
				srcTri[1] = Point2f( src.cols - 1, 0 );
				srcTri[2] = Point2f( 0, src.rows - 1 );
				srcTri[3] = Point2f( src.cols - 1, src.rows - 1 );

				dstTri[0] = Point2f( src.cols*pos[0], src.rows*pos[1] );
				dstTri[1] = Point2f( src.cols*pos[2], src.rows*pos[3] );
				dstTri[2] = Point2f( src.cols*pos[4], src.rows*pos[5] );
				dstTri[3] = Point2f( src.cols*pos[6], src.rows*pos[7] );
			
				section=i/block_size;

				//printf("Case 0: %u\t %f %f %f %f %f %f %f %f\n",i,pos[0],pos[1],pos[2],pos[3],pos[4],pos[5],pos[6],pos[7]);

				break;
			}

			case 1:
			{

				pos[0]=pos[0]+0.001;
				pos[3]=pos[3]+0.001;
				pos[5]=pos[5]-0.001;
				pos[6]=pos[6]-0.001;
		
			
				// Setting parameters for matrix computation

				srcTri[0] = Point2f( 0,0 );
				srcTri[1] = Point2f( src.cols - 1, 0 );
				srcTri[2] = Point2f( 0, src.rows - 1 );
				srcTri[3] = Point2f( src.cols - 1, src.rows - 1 );

				dstTri[0] = Point2f( src.cols*pos[0], src.rows*pos[1] );
				dstTri[1] = Point2f( src.cols*pos[2], src.rows*pos[3] );
				dstTri[2] = Point2f( src.cols*pos[4], src.rows*pos[5] );
				dstTri[3] = Point2f( src.cols*pos[6], src.rows*pos[7] );
			
				section=i/block_size;

				//printf("Case 1: %u\t %f %f %f %f %f %f %f %f\n",i,pos[0],pos[1],pos[2],pos[3],pos[4],pos[5],pos[6],pos[7]);

				break;
			}
		
			case 2:
			{
			
				pos[1]=pos[1]-0.001;
				pos[2]=pos[2]+0.001;
				pos[4]=pos[4]-0.001;
				pos[7]=pos[7]+0.001;
		
			
				// Setting parameters for matrix computation

				srcTri[0] = Point2f( 0,0 );
				srcTri[1] = Point2f( src.cols - 1, 0 );
				srcTri[2] = Point2f( 0, src.rows - 1 );
				srcTri[3] = Point2f( src.cols - 1, src.rows - 1 );

				dstTri[0] = Point2f( src.cols*pos[0], src.rows*pos[1] );
				dstTri[1] = Point2f( src.cols*pos[2], src.rows*pos[3] );
				dstTri[2] = Point2f( src.cols*pos[4], src.rows*pos[5] );
				dstTri[3] = Point2f( src.cols*pos[6], src.rows*pos[7] );
			
				section=i/block_size;

				//printf("Case 2: %u\t %f %f %f %f %f %f %f %f\n",i,pos[0],pos[1],pos[2],pos[3],pos[4],pos[5],pos[6],pos[7]);

				break;
			}
		

			case 3:
			{

				pos[0]=pos[0]-0.001;
				pos[3]=pos[3]-0.001;
				pos[5]=pos[5]+0.001;
				pos[6]=pos[6]+0.001;
		
			
				// Setting parameters for matrix computation

				srcTri[0] = Point2f( 0,0 );
				srcTri[1] = Point2f( src.cols - 1, 0 );
				srcTri[2] = Point2f( 0, src.rows - 1 );
				srcTri[3] = Point2f( src.cols - 1, src.rows - 1 );

				dstTri[0] = Point2f( src.cols*pos[0], src.rows*pos[1] );
				dstTri[1] = Point2f( src.cols*pos[2], src.rows*pos[3] );
				dstTri[2] = Point2f( src.cols*pos[4], src.rows*pos[5] );
				dstTri[3] = Point2f( src.cols*pos[6], src.rows*pos[7] );

			
				section=i/block_size;

				//printf("Case 3: %u\t %f %f %f %f %f %f %f %f\n",i,pos[0],pos[1],pos[2],pos[3],pos[4],pos[5],pos[6],pos[7]);

				break;
			}

			default:
			{
				//printf("Value: %d\n",section);
				//perror("Default switch() case");
				break;
			}
		}
		



		// Calculate the Affine Transform matrix

		warp_mat = getAffineTransform( srcTri, dstTri );


		// Applying the Affine Transform to the src image

		warpAffine( src, warp_dst, warp_mat, warp_dst.size() );



		/*-------------------- Affine Transform : Rotate -----------------------*/

		// Compute the Rotation Matrix Parameters

		Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
		double angle = ROTATION_ANGLE;
		double scale = ISOTROPIC_SCALE_FACTOR;

		// Generate the Rotation Matrix

		rot_mat = getRotationMatrix2D( center, angle, scale );

		// Rotate the Image

		warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );


		/*------------------------- Storing the Image ---------------------------*/


		sprintf(frame_name3,"Affine_frame_no_%05u.ppm",i);

		// Storing the rotated image
		imwrite(frame_name3, warp_rotate_dst);

	}
	// End of 'for' loop

	return NULL;
}
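
A hedged sketch of how the four workers might be launched, assuming thread_limits is the plain struct dereferenced above (its start_no/stop_no members are visible in the code; everything else here is illustrative):

	#include <pthread.h>

	void launch_affine_workers(void)
	{
		struct thread_limits limits[4];
		pthread_t workers[4];

		// Split the 3601 frames into the same 900-frame blocks that
		// affine_loop's section logic assumes.
		for (int t = 0; t < 4; ++t)
		{
			limits[t].start_no = t * 900;
			limits[t].stop_no  = (t == 3) ? 3600 : (t + 1) * 900 - 1;
			pthread_create(&workers[t], NULL, affine_loop, &limits[t]);
		}
		for (int t = 0; t < 4; ++t)
			pthread_join(workers[t], NULL);
	}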
Code Example #15
File: paraboloid.cpp  Project: acrlakshman/pbrt-v3
bool Paraboloid::Intersect(const Ray &r, Float *tHit,
                           SurfaceInteraction *isect) const {
    Float phi;
    Point3f pHit;
    // Transform _Ray_ to object space
    Vector3f oErr, dErr;
    Ray ray = (*WorldToObject)(r, &oErr, &dErr);

    // Compute quadratic paraboloid coefficients

    // Initialize _EFloat_ ray coordinate values
    EFloat ox(ray.o.x, oErr.x), oy(ray.o.y, oErr.y), oz(ray.o.z, oErr.z);
    EFloat dx(ray.d.x, dErr.x), dy(ray.d.y, dErr.y), dz(ray.d.z, dErr.z);
    EFloat k = EFloat(zMax) / (EFloat(radius) * EFloat(radius));
    EFloat a = k * (dx * dx + dy * dy);
    EFloat b = 2.f * k * (dx * ox + dy * oy) - dz;
    EFloat c = k * (ox * ox + oy * oy) - oz;

    // Solve quadratic equation for _t_ values
    EFloat t0, t1;
    if (!Quadratic(a, b, c, &t0, &t1)) return false;

    // Check quadric shape _t0_ and _t1_ for nearest intersection
    if (t0.UpperBound() > ray.tMax || t1.LowerBound() <= 0) return false;
    EFloat tShapeHit = t0;
    if (t0.LowerBound() <= 0) {
        tShapeHit = t1;
        if (tShapeHit.UpperBound() > ray.tMax) return false;
    }

    // Compute paraboloid inverse mapping
    pHit = ray((Float)tShapeHit);
    phi = std::atan2(pHit.y, pHit.x);
    if (phi < 0.) phi += 2 * Pi;

    // Test paraboloid intersection against clipping parameters
    if (pHit.z < zMin || pHit.z > zMax || phi > phiMax) {
        if (tShapeHit == t1) return false;
        tShapeHit = t1;
        if (t1.UpperBound() > ray.tMax) return false;
        // Compute paraboloid inverse mapping
        pHit = ray((Float)tShapeHit);
        phi = std::atan2(pHit.y, pHit.x);
        if (phi < 0.) phi += 2 * Pi;
        if (pHit.z < zMin || pHit.z > zMax || phi > phiMax) return false;
    }

    // Find parametric representation of paraboloid hit
    Float u = phi / phiMax;
    Float v = (pHit.z - zMin) / (zMax - zMin);

    // Compute paraboloid $\dpdu$ and $\dpdv$
    Vector3f dpdu(-phiMax * pHit.y, phiMax * pHit.x, 0.);
    Vector3f dpdv = (zMax - zMin) *
                    Vector3f(pHit.x / (2 * pHit.z), pHit.y / (2 * pHit.z), 1.);

    // Compute paraboloid $\dndu$ and $\dndv$
    Vector3f d2Pduu = -phiMax * phiMax * Vector3f(pHit.x, pHit.y, 0);
    Vector3f d2Pduv =
        (zMax - zMin) * phiMax *
        Vector3f(-pHit.y / (2 * pHit.z), pHit.x / (2 * pHit.z), 0);
    Vector3f d2Pdvv = -(zMax - zMin) * (zMax - zMin) *
                      Vector3f(pHit.x / (4 * pHit.z * pHit.z),
                               pHit.y / (4 * pHit.z * pHit.z), 0.);

    // Compute coefficients for fundamental forms
    Float E = Dot(dpdu, dpdu);
    Float F = Dot(dpdu, dpdv);
    Float G = Dot(dpdv, dpdv);
    Vector3f N = Normalize(Cross(dpdu, dpdv));
    Float e = Dot(N, d2Pduu);
    Float f = Dot(N, d2Pduv);
    Float g = Dot(N, d2Pdvv);

    // Compute $\dndu$ and $\dndv$ from fundamental form coefficients
    Float invEGF2 = 1 / (E * G - F * F);
    Normal3f dndu = Normal3f((f * F - e * G) * invEGF2 * dpdu +
                             (e * F - f * E) * invEGF2 * dpdv);
    Normal3f dndv = Normal3f((g * F - f * G) * invEGF2 * dpdu +
                             (f * F - g * E) * invEGF2 * dpdv);

    // Compute error bounds for paraboloid intersection

    // Compute error bounds for intersection computed with ray equation
    EFloat px = ox + tShapeHit * dx;
    EFloat py = oy + tShapeHit * dy;
    EFloat pz = oz + tShapeHit * dz;
    Vector3f pError = Vector3f(px.GetAbsoluteError(), py.GetAbsoluteError(),
                               pz.GetAbsoluteError());

    // Initialize _SurfaceInteraction_ from parametric information
    *isect = (*ObjectToWorld)(SurfaceInteraction(pHit, pError, Point2f(u, v),
                              -ray.d, dpdu, dpdv, dndu, dndv,
                              ray.time, this));
    *tHit = (Float)tShapeHit;
    return true;
}
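
The coefficients above follow from writing the paraboloid implicitly as $k(x^2 + y^2) - z = 0$ with $k = z_{\max}/r^2$ and substituting the ray $o + t\,d$:

$$a = k(d_x^2 + d_y^2), \qquad b = 2k(d_x o_x + d_y o_y) - d_z, \qquad c = k(o_x^2 + o_y^2) - o_z.$$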
Code Example #16
File: board.cpp  Project: Cyphe/Robert-Lotus
//Create the locations of each position on the board (the top left coordinate)
void Board::CreateLocationsTable()
{
	this->locations[0] = Point2f(118, 198);
	this->locations[1] = Point2f(118, 278);
	this->locations[2] = Point2f(164, 343);
	this->locations[3] = Point2f(360, 200);
	this->locations[4] = Point2f(360, 278);
	this->locations[5] = Point2f(314, 343);
	this->locations[6] = Point2f(239, 368);
	this->locations[7] = Point2f(239, 448);
	this->locations[8] = Point2f(117, 408);
	this->locations[9] = Point2f(41, 304);
	this->locations[10] = Point2f(41, 176);
	this->locations[11] = Point2f(117, 71);
	this->locations[12] = Point2f(239, 32);
	this->locations[13] = Point2f(359, 71);
	this->locations[14] = Point2f(437, 176);
	this->locations[15] = Point2f(437, 303);
	this->locations[16] = Point2f(361, 408);

	this->locations[17] = Point2f(300, 420); //finish zone
}
Code Example #17
File: shapedescr.cpp  Project: 007Indian/opencv
// see Welzl, Emo. Smallest enclosing disks (balls and ellipsoids). Springer Berlin Heidelberg, 1991.
void cv::minEnclosingCircle( InputArray _points, Point2f& _center, float& _radius )
{
    Mat points = _points.getMat();
    int count = points.checkVector(2);
    int depth = points.depth();
    Point2f center;
    float radius = 0.f;
    CV_Assert(count >= 0 && (depth == CV_32F || depth == CV_32S));

    _center.x = _center.y = 0.f;
    _radius = 0.f;

    if( count == 0 )
        return;

    bool is_float = depth == CV_32F;
    const Point* ptsi = points.ptr<Point>();
    const Point2f* ptsf = points.ptr<Point2f>();

    // point count <= 3
    if (count <= 3)
    {
        Point2f ptsf3[3];
        for (int i = 0; i < count; ++i)
        {
            ptsf3[i] = (is_float) ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        }
        findEnclosingCircle3pts_orLess_32f(ptsf3, count, center, radius);
        _center = center;
        _radius = radius;
        return;
    }

    if (is_float)
    {
        findMinEnclosingCircle<Point2f>(ptsf, count, center, radius);
#if 0
        for (size_t m = 0; m < count; ++m)
        {
            float d = (float)norm(ptsf[m] - center);
            if (d > radius)
            {
                printf("error!\n");
            }
        }
#endif
    }
    else
    {
        findMinEnclosingCircle<Point>(ptsi, count, center, radius);
#if 0
        for (size_t m = 0; m < count; ++m)
        {
            double dx = ptsi[m].x - center.x;
            double dy = ptsi[m].y - center.y;
            double d = std::sqrt(dx * dx + dy * dy);
            if (d > radius)
            {
                printf("error!\n");
            }
        }
#endif
    }
    _center = center;
    _radius = radius;
}
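
A typical call; the output parameters receive the smallest covering circle (and stay zero for an empty input, per the early return above):

    #include <opencv2/imgproc.hpp>
    #include <vector>

    void EnclosingCircleExample(const std::vector<cv::Point2f>& pts)
    {
        cv::Point2f center;
        float radius = 0.f;
        cv::minEnclosingCircle(pts, center, radius);
        // center and radius now describe the minimum enclosing circle.
    }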
Code Example #18
File: board.cpp  Project: Cyphe/Robert-Lotus
void Board::CreateStartLocationsTable()
{
	if (this->numplayers < 2 || this->numplayers  > 4)
		return;

	if (this->numplayers == 2)
	{
		this->slocations[0] = Point2f(215, 167);
		this->slocations[1] = Point2f(167, 215);
		this->slocations[2] = Point2f(167, 263);
		this->slocations[3] = Point2f(215, 311);

		this->slocations[4] = Point2f(263, 167);
		this->slocations[5] = Point2f(311, 215);
		this->slocations[6] = Point2f(311, 263);
		this->slocations[7] = Point2f(263, 311);

		this->slocations[8] = Point2f(0, 0);
		this->slocations[9] = Point2f(0, 0);
		this->slocations[10] = Point2f(0, 0);
		this->slocations[11] = Point2f(0, 0);
	}
	else if (this->numplayers == 3)
	{
		this->slocations[0] = Point2f(191, 191);
		this->slocations[1] = Point2f(191, 239);
		this->slocations[2] = Point2f(191, 287);

		this->slocations[3] = Point2f(239, 191);
		this->slocations[4] = Point2f(239, 239);
		this->slocations[5] = Point2f(239, 287);

		this->slocations[6] = Point2f(287, 191);
		this->slocations[7] = Point2f(287, 239);
		this->slocations[8] = Point2f(287, 287);

		this->slocations[9] = Point2f(0, 0);
		this->slocations[10] = Point2f(0, 0);
		this->slocations[11] = Point2f(0, 0);
	}
	else if (this->numplayers == 4)
	{
		this->slocations[0] = Point2f(215, 167);
		this->slocations[1] = Point2f(167, 215);
		this->slocations[2] = Point2f(167, 263);

		this->slocations[3] = Point2f(263, 167);
		this->slocations[4] = Point2f(311, 215);
		this->slocations[5] = Point2f(311, 263);

		this->slocations[6] = Point2f(215, 215);
		this->slocations[7] = Point2f(215, 263);
		this->slocations[8] = Point2f(215, 311);

		this->slocations[9] = Point2f(263, 215);
		this->slocations[10] = Point2f(263, 263);
		this->slocations[11] = Point2f(263, 311);
	}
}
Code Example #19
  std::vector<cv::Point2f> EdgeFinder::findEdgeCorners() {

    TextLineCollection tlc(pipeline_data->textLines);

    vector<Point> corners;

    // If the character segment is especially small, just expand the existing box
    // If it's a nice, long segment, then guess the correct box based on character height/position
    if (tlc.longerSegment.length > tlc.charHeight * 3)
    {

      float charHeightToPlateWidthRatio = pipeline_data->config->plateWidthMM / pipeline_data->config->avgCharHeightMM;
      float idealPixelWidth = tlc.charHeight *  (charHeightToPlateWidthRatio * 1.03);	// Add 3% so we don't clip any characters

      float charHeightToPlateHeightRatio = pipeline_data->config->plateHeightMM / pipeline_data->config->avgCharHeightMM;
      float idealPixelHeight = tlc.charHeight *  charHeightToPlateHeightRatio;


      float verticalOffset = (idealPixelHeight * 1.5 / 2);
      float horizontalOffset = (idealPixelWidth * 1.25 / 2);
      LineSegment topLine = tlc.centerHorizontalLine.getParallelLine(verticalOffset);
      LineSegment bottomLine = tlc.centerHorizontalLine.getParallelLine(-1 * verticalOffset);

      LineSegment leftLine = tlc.centerVerticalLine.getParallelLine(-1 * horizontalOffset);
      LineSegment rightLine = tlc.centerVerticalLine.getParallelLine(horizontalOffset);

      Point topLeft = topLine.intersection(leftLine);
      Point topRight = topLine.intersection(rightLine);
      Point botRight = bottomLine.intersection(rightLine);
      Point botLeft = bottomLine.intersection(leftLine);

      corners.push_back(topLeft);
      corners.push_back(topRight);
      corners.push_back(botRight);
      corners.push_back(botLeft);
    }
    else
    {

      int expandX = (int) (((float) pipeline_data->crop_gray.cols) * 0.15f);
      int expandY = (int) (((float) pipeline_data->crop_gray.rows) * 0.15f);
      int w = pipeline_data->crop_gray.cols;
      int h = pipeline_data->crop_gray.rows;

      corners.push_back(Point(-1 * expandX, -1 * expandY));
      corners.push_back(Point(expandX + w, -1 * expandY));
      corners.push_back(Point(expandX + w, expandY + h));
      corners.push_back(Point(-1 * expandX, expandY + h));

  //    for (int i = 0; i < 4; i++)
  //    {
  //      std::cout << "CORNER: " << corners[i].x << " - " << corners[i].y << std::endl;
  //    }
    }

    // Re-crop an image (from the original image) using the new coordinates
    Transformation imgTransform(pipeline_data->grayImg, pipeline_data->crop_gray, pipeline_data->regionOfInterest);
    vector<Point2f> remappedCorners = imgTransform.transformSmallPointsToBigImage(corners);

    Size cropSize = imgTransform.getCropSize(remappedCorners, 
            Size(pipeline_data->config->templateWidthPx, pipeline_data->config->templateHeightPx));

    Mat transmtx = imgTransform.getTransformationMatrix(remappedCorners, cropSize);
    Mat newCrop = imgTransform.crop(cropSize, transmtx);

    // Re-map the textline coordinates to the new crop  
    vector<TextLine> newLines;
    for (unsigned int i = 0; i < pipeline_data->textLines.size(); i++)
    {        
      vector<Point2f> textArea = imgTransform.transformSmallPointsToBigImage(pipeline_data->textLines[i].textArea);
      vector<Point2f> linePolygon = imgTransform.transformSmallPointsToBigImage(pipeline_data->textLines[i].linePolygon);

      vector<Point2f> textAreaRemapped;
      vector<Point2f> linePolygonRemapped;

      textAreaRemapped = imgTransform.remapSmallPointstoCrop(textArea, transmtx);
      linePolygonRemapped = imgTransform.remapSmallPointstoCrop(linePolygon, transmtx);

      newLines.push_back(TextLine(textAreaRemapped, linePolygonRemapped, newCrop.size()));
    }

    // Find the PlateLines for this crop
    PlateLines plateLines(pipeline_data);
    plateLines.processImage(newCrop, newLines, 1.05);

    // Get the best corners
    PlateCorners cornerFinder(newCrop, &plateLines, pipeline_data, newLines);
    vector<Point> smallPlateCorners = cornerFinder.findPlateCorners();


    // Transform the best corner points back to the original image
    std::vector<Point2f> imgArea;
    imgArea.push_back(Point2f(0, 0));
    imgArea.push_back(Point2f(newCrop.cols, 0));
    imgArea.push_back(Point2f(newCrop.cols, newCrop.rows));
    imgArea.push_back(Point2f(0, newCrop.rows));
    Mat newCropTransmtx = imgTransform.getTransformationMatrix(imgArea, remappedCorners);

    vector<Point2f> cornersInOriginalImg = imgTransform.remapSmallPointstoCrop(smallPlateCorners, newCropTransmtx);

    return cornersInOriginalImg;

  }
Code Example #20
File: common.cpp Project: yca/VideoAI
#if 0
p->pyr->setDict("/home/amenmd/myfs/tasks/hilal_tez/work/ox_complete/oxbuild_images_512.dict");
QString path = "/home/amenmd/myfs/tasks/hilal_tez/dataset/oxford/oxbuild_images/";
QStringList lines = Common::importText("/home/amenmd/myfs/tasks/hilal_tez/dataset/oxford/gt_files_170407/bodleian_1_good.txt");
foreach (QString line, lines) {
	if (line.trimmed().isEmpty())
		continue;
	ffDebug() << "processing" << line;
	Mat spm = p->pyr->makeSpm(path + line + ".jpg", 0);
	Mat im = p->pyr->makeHistImage(spm);
	OpenCV::exportMatrixTxt(QString("/home/amenmd/myfs/temp/oxdata/absent/%1_desc.txt").arg(line), spm);
	OpenCV::saveImage(QString("/home/amenmd/myfs/temp/oxdata/absent/%1_desc.jpg").arg(line), im);
	QFile::copy(path + line + ".jpg", "/home/amenmd/myfs/temp/oxdata/absent/" + line + ".jpg");
}
#elif 0
QString line = "all_souls_000013";
Mat spm = p->pyr->makeSpm(path + line + ".jpg", 0);
Mat im = p->pyr->makeHistImage(spm);
OpenCV::exportMatrixTxt(QString("/home/amenmd/myfs/temp/oxdata/query/%1_desc.txt").arg(line), spm);
OpenCV::saveImage(QString("/home/amenmd/myfs/temp/oxdata/query/%1_desc.jpg").arg(line), im);
QFile::copy(path + line + ".jpg", "/home/amenmd/myfs/temp/oxdata/query/" + line + ".jpg");
QStringList vals = QString("X 136.5 34.1 648.5 955.7").split(" ");
cv::Rect r(Point2f(vals[1].toFloat(), vals[2].toFloat()), Point2f(vals[3].toFloat(), vals[4].toFloat()));
im = OpenCV::loadImage(path + line + ".jpg");
OpenCV::saveImage(QString("/home/amenmd/myfs/temp/oxdata/query/%1_roi.jpg").arg(line), Mat(im, r));
spm = p->pyr->makeSpm(Mat(im, r), 0);
im = p->pyr->makeHistImage(spm);
OpenCV::exportMatrixTxt(QString("/home/amenmd/myfs/temp/oxdata/query/%1_desc_roi.txt").arg(line), spm);
OpenCV::saveImage(QString("/home/amenmd/myfs/temp/oxdata/query/%1_desc_roi.jpg").arg(line), im);
#endif
Code Example #21
/// Sets the position at \p index to (\p x, \p y).
void DiagramCoordinates::setPosition(size_t index, float x, float y)
{
    setPosition(index, Point2f(x, y));
}
Code Example #22
Point2f Reprojector::compute_plane_size(float depth)
{
    float width = FOV_WIDTH * (depth / FOV_DEPTH);
    float height = width / 4 * 3;
    return Point2f(width, height);
}
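The scaling above is simple similar-triangles reasoning: if the sensor sees a plane of width FOV_WIDTH at the reference distance FOV_DEPTH, then a plane at distance depth has width = FOV_WIDTH * depth / FOV_DEPTH; the height then assumes a 4:3 aspect ratio (height = width * 3/4).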
Code Example #23
void sktinfoextractor::hacerUpdate(){
    if(cam->numContextosIniciados>0){
        if(numFrames==0){
#if defined(_WIN32) || defined(_WIN64)
            GetSystemTime(&inicio);
#else
            gettimeofday(&inicio,NULL);
#endif
        }
        bool* update=new bool[cam->numContextosIniciados];

        //if(cam->updateCam(true,0,true,false)){
        int tid;

        Moments moms;
        int b;
        Point2f pt;
        if(cam->numContextosIniciados>1){
            omp_set_num_threads(2);
#pragma omp parallel private(tid,moms,b,pt) shared(update)
            {
                /// TODO : FIX FOR LESS AVAILABLE THREADS THAN CONTEXTS
                tid = omp_get_thread_num();
                //if(tid!=0){
                //  tid--;
                cameras* cc=cam;
                if(tid<cc->numContextosIniciados){
                    if(cc->updateCam(false,tid,true,false)){
                        cc->retriveDepthAndMask(tid,depths[tid],masks[tid]);
                        cc->retrivePointCloudFast(tid,&(depths[tid]),&(pCs[tid]));
                        if(cConfig[tid]->method=="Background"){
                            depths[tid].convertTo(depthsS[tid],CV_32FC1,1.0/6000);
                            sktP[tid]->processImage(depthsS[tid],toBlobs[tid],masks[tid]);
                        }
                        else
                        {
                            sktP[tid]->processImage(pCs[tid],toBlobs[tid],masks[tid]);
                        }
                        findContours(toBlobs[tid], blobs[tid],CV_RETR_LIST,CV_CHAIN_APPROX_NONE);
                        finBlobs[tid].clear();
                        blobSizes[tid].clear();
                        for(b=0; b<blobs[tid].size(); b++)
                        {
                            moms=moments(blobs[tid][b],true);
                            if(moms.m00>cConfig[tid]->minBlobSize)
                            {
                                //Point2f pt((int)(moms.m10 / moms.m00),(int)(moms.m01 / moms.m00));
                                //circle(toBlobs[k],pt,sqrt((double)moms.m00/3.14),cvScalar(255),-1);
                                //Save blob centers
                                pt.x=(int)(moms.m10 / moms.m00);
                                pt.y=(int)(moms.m01 / moms.m00);
                                finBlobs[tid].push_back(pt);
                                blobSizes[tid].push_back((int)moms.m00);
                            }
                        }
                        update[tid]=true;
                    }
                    else{update[tid]=false;}
                }
                //}
            }
        }
        else{
            if(cam->updateCam(false,0,true,false)){
                cam->retriveDepthAndMask(0,depths[0],masks[0]);
                cam->retrivePointCloudFast(0,&(depths[0]),&(pCs[0]));
                if(cConfig[0]->method=="Background"){
                    depths[0].convertTo(depthsS[0],CV_32FC1,1.0/6000);
                    sktP[0]->processImage(depthsS[0],toBlobs[0],masks[0]);
                }
                else
                {
                    sktP[0]->processImage(pCs[0],toBlobs[0],masks[0]);
                }
                findContours(toBlobs[0], blobs[0],CV_RETR_LIST,CV_CHAIN_APPROX_NONE);
                finBlobs[0].clear();
                blobSizes[0].clear();
                for(b=0; b<blobs[0].size(); b++)
                {
                    moms=moments(blobs[0][b],true);
                    if(moms.m00>cConfig[0]->minBlobSize)
                    {
                        //Point2f pt((int)(moms.m10 / moms.m00),(int)(moms.m01 / moms.m00));
                        //circle(toBlobs[k],pt,sqrt((double)moms.m00/3.14),cvScalar(255),-1);
                        //Save blob centers
                        pt.x=(int)(moms.m10 / moms.m00);
                        pt.y=(int)(moms.m01 / moms.m00);
                        finBlobs[0].push_back(pt);
                        blobSizes[0].push_back((int)moms.m00);
                    }
                }
                update[0]=true;
            }
            else{update[0]=false;}
        }
        bool doUpdate=true;
        for(int i=0;i<cam->numContextosIniciados;i++){
            doUpdate=(doUpdate && update[i]);
        }
        if(doUpdate){
            vector<Point2f> ou=finBlobs[0];
            if(cam->numContextosIniciados>1 && update[1]){
                if(ui->mixActive->isChecked()){
                    //Run through blob centers and get 3d vector
                    //Convert 3d vectors to the first cam
                    int count=0;
                    XnPoint3D* rP=real;
                    Vec3f v;
                    //cout <<"SIZE : " << finBlobs[1].size() << endl;
                    for(int i=0;i<finBlobs[1].size();i++){
                        if(count<999 && calMat.cols>2){
                            /// POSIBLE SPEEDUP : USE POINTERS
                            v=pCs[1].at<Vec3f>(finBlobs[1][i].y,finBlobs[1][i].x);
                            ///
                            (*rP).X=v[0]*matC[0][0]+v[1]*matC[0][1]+v[2]*matC[0][2] +matC[0][3];
                            (*rP).Y=v[0]*matC[1][0]+v[1]*matC[1][1]+v[2]*matC[1][2] +matC[1][3];
                            (*rP).Z=v[0]*matC[2][0]+v[1]*matC[2][1]+v[2]*matC[2][2] +matC[2][3];
                            count++;rP++;}
                    }
                    //Convert results to projected in first cam
                    if(count >0)
                        cam->depthG[0].ConvertRealWorldToProjective(count,real,proj);

                    rP=proj;

                    for(int i=0;i<count;i++){
                        //circle(final,Point(proj[i].X,proj[i].Y),sqrt((double)blobSizes[1][i]/3.14),cvScalar(0,0,255),-1);
                        if(!hasCloseBlob2(rP,finBlobs[0],mD)){
                            //cout << "1 : "<< (int)(rP->X) << "," << (int)(rP->Y) << endl;
                            ou.push_back(Point2f(rP->X,rP->Y));
                        }
                        rP++;
                    }
                }
                else{
                    ou.insert(ou.end(), finBlobs[1].begin(), finBlobs[1].end() );
                }
            }
            if(sendTuio && isServerCreated){
                if(calSKT->isCalibrated){
                    calSKT->convertPointVector(&ou);
                }
                tds->sendCursors(ou,minDataUpdate);
            }
            if(useDC){
                if(calSKT->isCalibrated){
                    calSKT->convertPointVector(&ou);
                }
                dc->updateData(ou,minDataUpdate);
            }
            numFrames++;
        }
        if(numFrames==10){
            numFrames=0;

#if defined(_WIN32) || defined(_WIN64)
            SYSTEMTIME fin;
            GetSystemTime(&fin);
            double timeSecs=(double)(fin.wSecond+(double)fin.wMilliseconds/1000.0-((double)inicio.wMilliseconds/1000.0+inicio.wSecond))/10;
#else
            struct timeval fin;
            gettimeofday(&fin,NULL);
            double timeSecs=(double)(fin.tv_sec+(double)fin.tv_usec/1000000.0-((double)inicio.tv_usec/1000000.0+inicio.tv_sec))/10;
#endif
            //cout << timeSecs << endl;
            double dif=1.0/timeSecs;
            //cout << "FPS : " << dif <<endl;
            ui->framesPerSec->setText(QString::number(dif));
        }
        delete[] update;
    }
}
Code Example #24
// Create a grayscale face image that has a standard size and contrast & brightness.
// "srcImg" should be a copy of the whole color camera frame, so that it can draw the eye positions onto.
// If 'doLeftAndRightSeparately' is true, it will process left & right sides separately,
// so that if there is a strong light on one side but not the other, it will still look OK.
// Performs Face Preprocessing as a combination of:
//  - geometrical scaling, rotation and translation using Eye Detection,
//  - smoothing away image noise using a Bilateral Filter,
//  - standardizing the brightness on both left and right sides of the face independently using separated Histogram Equalization,
//  - removal of background and hair using an Elliptical Mask.
// Returns either a preprocessed face square image or an empty Mat (ie: couldn't detect the face and 2 eyes).
// If a face is found, it can store the rect coordinates into 'storeFaceRect' and 'storeLeftEye' & 'storeRightEye' if given,
// and eye search regions into 'searchedLeftEye' & 'searchedRightEye' if given.
Mat getPreprocessedFace(Mat &srcImg, int desiredFaceWidth, CascadeClassifier &faceCascade, CascadeClassifier &eyeCascade1, CascadeClassifier &eyeCascade2, bool doLeftAndRightSeparately, Rect *storeFaceRect, Point *storeLeftEye, Point *storeRightEye, Rect *searchedLeftEye, Rect *searchedRightEye)
{
    // Use square faces.
    int desiredFaceHeight = desiredFaceWidth;

    // Mark the detected face region and eye search regions as invalid, in case they aren't detected.
    if (storeFaceRect)
        storeFaceRect->width = -1;
    if (storeLeftEye)
        storeLeftEye->x = -1;
    if (storeRightEye)
        storeRightEye->x= -1;
    if (searchedLeftEye)
        searchedLeftEye->width = -1;
    if (searchedRightEye)
        searchedRightEye->width = -1;

    // Find the largest face.
    Rect faceRect;
    detectLargestObject(srcImg, faceCascade, faceRect,SCALEDWIDTH);

    // Check if a face was detected.
    if (faceRect.width > 0) {

        // Give the face rect to the caller if desired.
        if (storeFaceRect)
            *storeFaceRect = faceRect;

        Mat faceImg = srcImg(faceRect);    // Get the detected face image.

        // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
        Mat gray;
        if (faceImg.channels() == 3) {
            cvtColor(faceImg, gray, CV_BGR2GRAY);
        }
        else if (faceImg.channels() == 4) {
            cvtColor(faceImg, gray, CV_BGRA2GRAY);
        }
        else {
            // Access the input image directly, since it is already grayscale.
            gray = faceImg;
        }

        // Search for the 2 eyes at the full resolution, since eye detection needs max resolution possible!
        Point leftEye, rightEye;
        detectBothEyes(gray, eyeCascade1, eyeCascade2, leftEye, rightEye, searchedLeftEye, searchedRightEye);

        // Give the eye results to the caller if desired.
        if (storeLeftEye)
            *storeLeftEye = leftEye;
        if (storeRightEye)
            *storeRightEye = rightEye;

        // Check if both eyes were detected.
        if (leftEye.x >= 0 && rightEye.x >= 0) {

            // Make the face image the same size as the training images.

            // Since we found both eyes, let's rotate & scale & translate the face so that the 2 eyes
            // line up perfectly with ideal eye positions. This makes sure that eyes will be horizontal,
            // and not too far left or right of the face, etc.

            // Get the center between the 2 eyes.
            Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
            // Get the angle between the 2 eyes.
            double dy = (rightEye.y - leftEye.y);
            double dx = (rightEye.x - leftEye.x);
            double len = sqrt(dx*dx + dy*dy);
            double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

            // Hand measurements showed that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
            const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
            // Get the amount we need to scale the image to be the desired fixed size we want.
            double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
            double scale = desiredLen / len;
            // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
            Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
            // Shift the center of the eyes to be the desired center between the eyes.
            rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
            rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

            // Rotate and scale and translate the image to the desired angle & size & position!
            // Note that we use 'w' for the height instead of 'h', because the input face has 1:1 aspect ratio.
            Mat warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128)); // Clear the output image to a default grey.
            warpAffine(gray, warped, rot_mat, warped.size());
            //imshow("warped", warped);

            // Give the image a standard brightness and contrast, in case it was too dark or had low contrast.
            if (!doLeftAndRightSeparately) {
                // Do it on the whole face.
                equalizeHist(warped, warped);
            }
            else {
                // Do it seperately for the left and right sides of the face.
                equalizeLeftAndRightHalves(warped);
            }
            //imshow("equalized", warped);

            // Use the "Bilateral Filter" to reduce pixel noise by smoothing the image, but keeping the sharp edges in the face.
            Mat filtered = Mat(warped.size(), CV_8U);
            bilateralFilter(warped, filtered, 0, 20.0, 2.0);
            //imshow("filtered", filtered);

            // Filter out the corners of the face, since we mainly just care about the middle parts.
            // Draw a filled ellipse in the middle of the face-sized image.
            Mat mask = Mat(warped.size(), CV_8U, Scalar(0)); // Start with an empty mask.
            Point faceCenter = Point( desiredFaceWidth/2, cvRound(desiredFaceHeight * FACE_ELLIPSE_CY) );
            Size size = Size( cvRound(desiredFaceWidth * FACE_ELLIPSE_W), cvRound(desiredFaceHeight * FACE_ELLIPSE_H) );
            ellipse(mask, faceCenter, size, 0, 0, 360, Scalar(255), CV_FILLED);
            //imshow("mask", mask);

            // Use the mask, to remove outside pixels.
            Mat dstImg = Mat(warped.size(), CV_8U, Scalar(128)); // Clear the output image to a default gray.
            /*
            namedWindow("filtered");
            imshow("filtered", filtered);
            namedWindow("dstImg");
            imshow("dstImg", dstImg);
            namedWindow("mask");
            imshow("mask", mask);
            */
            // Apply the elliptical mask on the face.
            filtered.copyTo(dstImg, mask);  // Copies non-masked pixels from filtered to dstImg.
            //imshow("dstImg", dstImg);

            return dstImg;
        }
        /*
        else {
            // Since no eyes were found, just do a generic image resize.
            resize(gray, tmpImg, Size(w,h));
        }
        */
    }
    return Mat();
}
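A hypothetical call site for the routine above; the cascade file names, the face size of 70, and the input frame are illustrative assumptions, not taken from the original project:

    cv::CascadeClassifier faceCascade, eyeCascade1, eyeCascade2;
    faceCascade.load("haarcascade_frontalface_alt.xml");        // assumed detector models
    eyeCascade1.load("haarcascade_eye.xml");
    eyeCascade2.load("haarcascade_eye_tree_eyeglasses.xml");

    cv::Mat frame = cv::imread("frame.jpg");                    // stand-in for a camera frame
    cv::Rect faceRect;
    cv::Point leftEye, rightEye;
    cv::Mat face = getPreprocessedFace(frame, 70, faceCascade, eyeCascade1, eyeCascade2,
                                       true /*doLeftAndRightSeparately*/,
                                       &faceRect, &leftEye, &rightEye, NULL, NULL);
    if (!face.empty()) {
        // 'face' is now a 70x70 equalized, masked grayscale image ready for a recognizer.
    }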
Code Example #25
File: mean.cpp Project: mdqyy/surfmst
// Mean Shift Algorithm
void meanShift(Mat& img1, Mat& img2, Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilterType, vector<KeyPoint>& tpkeypoints,
               Mat& tpdescriptors, vector<KeyPoint>& keypoints2, Mat& descriptors2, Mat& clusters1, Point2f &cp, int& flag,
               vector<Point2f>& MP1, Mat& img2ROI, vector<KeyPoint>& bkeypoints, Mat& bdescriptors, Mat& temp,
               int& FG_mp, int&FG, int& BG_mp, int& BG, int& FG_BG, int& msI)
{
    size_t i,j;
    // Mat temp=img2.clone();
    int converged = 0, semiConverged = 0;
    Mat img1ROI, labels1_;
    Point2f lastCenter(box.x+box.width/2,box.y+box.height/2);
    float scale = 1;

    img1ROI = img1(boxOrg);
    vector<Point2f> points1, bmp;
    KeyPoint::convert(tpkeypoints, points1);
    searchBin(tpdescriptors, clusters1, labels1_); // clustering based on Kmeans centers obtained earlier

//vector<Point2f> np;
//    Mat newimg = img1ROI.clone();
//     KeyPoint::convert(tpkeypoints, np);
//    for(size_t i=0;i<np.size();i++)
//        circle(newimg, np[i], 2, Scalar(255,0,255),2);

//     imshow( "msimg", newimg );
//    np.clear();

//    waitKey(0);


    vector<float> prevPDF(NOC); // pdf of source
    weightedPDF( points1, boxOrg, labels1_, NOC, prevPDF); // Making histogram/pdf by normalizing the data and applying weights based on positions

    // Iterations for finding the object

    Point2f zBCmax(0,0); // center corresponding to the iteration with max BC
    float BCmax=0, BCprev=0, BCnow=0; // Bhattacharyya coefficient: max value for an image, previous and current values for an iteration
    int stopCount=0; // MS iterations must converge for stopCount < ManualSetThreshold
    vector<Point2f> matchedPoints1, matchedPoints2;
    while ( !converged )
    {
       // ofstream tempF;
        //tempF.open("tempF.txt", ios::out);
        matchedPoints1.clear(); matchedPoints2.clear();

        Mat matchedDesc1, matchedDesc2;
        vector<int> queryIdxs, trainIdxs;

        cv::Rect expandedBox;

#ifdef DEBUG
        cout << "iteration in while = \t" << ++iter << endl;
#endif

        if (EXPANDED_BOX)
        {
            expandedBox = Rect(box.x-25, box.y-25, box.width+50, box.height+50);
            boundaryCheckRect(expandedBox);
            img2ROI = img2(expandedBox);
        }
        else
        {
          //  cout << box.br() << "\t" << box.tl() << "\t" << img2.cols << endl;
            img2ROI = img2(box);
        }

        vector<KeyPoint> tempkey;
       // Mat pointsTransed21;
        MP1.clear();

        doIteration(img1ROI, img2ROI, queryIdxs, trainIdxs,descriptorMatcher, matcherFilterType, tpkeypoints,tpdescriptors,
                    keypoints2,descriptors2, matchedDesc1, matchedDesc2,  matchedPoints1, matchedPoints2, MP1,tempkey);

        if(matchedPoints2.size() < 1)
        {
            FG=0; BG=0;FG_mp=0;BG_mp=0;FG_BG=0; msI=0;
            break;

        }
        //  mdescriptors = matchedDesc2;

        //   KeyPoint::convert(keypoints2, points2);


        if (EXPANDED_BOX)
            shiftPoints(matchedPoints2, expandedBox);
        else
            shiftPoints(matchedPoints2, box);


        // shiftPoints(matchedPoints1,boxOrg);
        vector<float> predPDF(NOC,0);

        Mat labels2, labels2_; // depending on PDF_OF_WHOLE
        Point2f z(0,0);


        //==================== Edited at 8th april =======================//
        bmp.clear();
        Mat tmatchedDesc2, tmatchedDesc1;
        vector<Point2f> tmatchedPoints2;
        msI = stopCount;
        FG_mp = matchedPoints2.size();

        vector<KeyPoint> tempbk;
        Mat tempBd;
        vector<DMatch> filteredMatches;
        crossCheckMatching( descriptorMatcher, bdescriptors, descriptors2, filteredMatches, 1 );
        trainIdxs.clear();    queryIdxs.clear();

        for( i = 0; i < filteredMatches.size(); i++ )
        {
            queryIdxs.push_back(filteredMatches[i].queryIdx);
            trainIdxs.push_back(filteredMatches[i].trainIdx);
        }


        vector<Point2f> points1; KeyPoint::convert(bkeypoints, points1, queryIdxs);
        vector<Point2f> points2;   KeyPoint::convert(keypoints2, points2, trainIdxs);
        vector<char> matchesMask( filteredMatches.size(), 0 );
        /////
        Mat H12;
        Mat drawImg;
        if (points2.size() < 4 )
        {
            cout << "backpoints less than 4, hence prev ROI is retained" << endl;
            return; // too few correspondences to estimate a homography; keep the previous ROI
        }
        else
        {
            H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, RANSAC_THREHOLD );

            if( !H12.empty() )
            {
                Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);

                for( size_t i1 = 0; i1 < points1.size(); i1++ )
                {
                    double diff = norm(points2[i1] - points1t.at<Point2f>((int)i1,0));
                    if(diff  <= 20)
                    {
                        matchesMask[i1]=1;
                        bmp.push_back( points2[i1]);
                        tempBd.push_back(bdescriptors.row(queryIdxs[i1]));
                        tempbk.push_back(keypoints2[trainIdxs[i1]]);
                        tempBd.push_back(descriptors2.row(trainIdxs[i1]));
                        tempbk.push_back(keypoints2[trainIdxs[i1]]);

                    }
                }

                drawMatches( img1ROI, bkeypoints, img2ROI, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
             #if DRAW_RICH_KEYPOINTS_MODE
                             , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
             #endif
                             );

            }
        }
        imshow("bm",drawImg);

        //============edit part ====
        shiftPoints(bmp, box);
        vector<int> bflag(bmp.size(),0);

        vector<int> ft(matchedPoints2.size(),0);

        for(i=0;i<matchedPoints2.size();i++)
        {
            ft[i]=0;
            for(j=0; j< bmp.size(); j++)
            {
                double diff = norm (matchedPoints2[i] - bmp[j]);
                // cout << diff << endl;
                if(diff < 0.5)
                {
                    bflag[j]=1;
                    ft[i]=1;
                    break;
                }
            }
            if(ft[i]==0)
            {
                tmatchedPoints2.push_back(matchedPoints2[i]);
                tmatchedDesc1.push_back(matchedDesc1.row(i));
                tmatchedDesc2.push_back(matchedDesc2.row(i));
            }

        }




        //=================================================================//


        // allot descriptors to the clusters to make histogram
        searchBin(tmatchedDesc1, clusters1, labels1_);
        searchBin( tmatchedDesc2, clusters1, labels2);
        if (PDF_OF_WHOLE)
            searchBin( descriptors2, clusters1, labels2_);

        // find the PDF for the above histogram as per weights
        if (PDF_OF_WHOLE)
            weightedPDF( points2, box, labels2_, NOC, predPDF);
        else
            weightedPDF( tmatchedPoints2, box, labels2, NOC, predPDF);

        // find weights for each IPoint as per the values of weighted PDFs
        vector<float> weights(labels2.rows,0);
        Mat imgTemp = img2.clone();
        findWeights( prevPDF, predPDF, labels1_, labels2, weights, queryIdxs, tmatchedDesc1, tmatchedDesc2);

        // find new ROI center as per above weights
        findNewCenter(tmatchedPoints2, weights, box, z);

        lastCenter = Point2f (box.x+box.width/2, box.y+box.height/2);

        // if current BC is less than previous BC, then take mean of the prev and current centers
        BCnow = findBC(predPDF,prevPDF);
        if (BCnow < BCprev)
            z = 0.5*(z+lastCenter);
        BCprev = BCnow;

        // check if ROI centers converge to same pixel
        if ( (norm(z - lastCenter) < 3))
        {
            semiConverged = 1;
            if (!SHOW_FINAL_ROI)
                rectangle(temp, box, Scalar(0,0,255),2);
        }
        else
        {
            // keep iterating
            stopCount++;

            if (stopCount >= MAX_MS_ITER)
            {
                semiConverged = 1;
                flag = 0;
                if (!SHOW_FINAL_ROI)
                    rectangle(temp, box, Scalar(0,0,255),2);
                z = zBCmax;
            }

            box.x = z.x - box.width/2;
            box.y = z.y - box.height/2;
            boundaryCheckRect(box);

            if (stopCount < MAX_MS_ITER)
                if (!SHOW_FINAL_ROI)
                    ;// rectangle(temp, box, Scalar(0,255,0), 2);
        }

        // store values of max BC and corresponding center z
        if ( BCnow > BCmax)
        {
            BCmax = BCnow;
            zBCmax = z;
        }

        if (semiConverged)
        {
            converged = 1;

            //   FG_mp, FG, BG_mp, BG, FG_BG, msI ;
            //==========edited on 5april ========
            bdescriptors.release();
            bkeypoints.clear();
            for(i=0;i<tempBd.rows;i++)
            {
                bdescriptors.push_back(tempBd.row(i));
                bkeypoints.push_back(tempbk[i]);

            }


            tpdescriptors.release();
            tpkeypoints.clear();
            //============================================//

            for(i=0;i<matchedPoints2.size();i++)
            {
                if(ft[i]==0)
                {
                    tpdescriptors.push_back(matchedDesc1.row(i));
                    tpkeypoints.push_back(tempkey[i]);

                    tpdescriptors.push_back(matchedDesc2.row(i));
                    tpkeypoints.push_back(tempkey[i]);

                }
            }


//=================================
            box.x = z.x - box.width/2;
            box.y = z.y - box.height/2;

           // imgTemp.release();

            trajectory << z.x << "\t" << z.y << "\t" << box.width << "\t" << box.height << endl;

            cp =z;


            cv::circle(temp, z, 3, Scalar(0,255,255), 3);
            cv::rectangle(temp, box, Scalar(255,0,0),2);

            cout << "MP1 \t" << MP1.size() <<"\t" << "bmp \t"  <<bmp.size() << endl;

            for(size_t i=0;i<MP1.size();i++)
            {//circle(temp, MP1[i], 3, Scalar(255,255,255),3);
              circle(temp, matchedPoints2[i], 3, Scalar(255,0,255),3);
            }

            // shiftPoints(bmp,box);
           for(size_t i=0;i<bmp.size();i++)
             { circle(temp, bmp[i], 2, Scalar(0,0,0),2);
              // cout << bmp[i] << endl;
           }

        }

        char c = (char)waitKey(10);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting from while iterator..." << endl;
            break;
        }
    }




    cv::imshow("Iter", temp);
  //  waitKey(0);
    eachIter.close();

}
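The convergence logic above hinges on the Bhattacharyya coefficient between the source and candidate PDFs. The original findBC is not shown in this excerpt; a minimal sketch of what it plausibly computes, assuming both PDFs are normalized histograms of equal length:

    // BC(p, q) = sum_i sqrt(p_i * q_i); equals 1 for identical distributions, 0 for disjoint ones.
    float findBC(const vector<float> &p, const vector<float> &q) {
        float bc = 0.f;
        for (size_t i = 0; i < p.size() && i < q.size(); ++i)
            bc += std::sqrt(p[i] * q[i]);
        return bc;
    }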
Code Example #26
File: training.cpp Project: asolis/detection3D
void Train::warpMatrix(Size sz,
                       double yaw,
                       double pitch,
                       double roll,
                       double scale,
                       double fovy,
                       Mat &M,
                       vector<Point2f>* corners)
{
	double st=sin(deg2Rad(roll));
	double ct=cos(deg2Rad(roll));
	double sp=sin(deg2Rad(pitch));
	double cp=cos(deg2Rad(pitch));
	double sg=sin(deg2Rad(yaw));
	double cg=cos(deg2Rad(yaw));
    
	double halfFovy=fovy*0.5;
	double d=hypot(sz.width,sz.height);
	double sideLength=scale*d/cos(deg2Rad(halfFovy));
	double h=d/(2.0*sin(deg2Rad(halfFovy)));
	double n=h-(d/2.0);
	double f=h+(d/2.0);
    
    
	Mat F=Mat(4,4,CV_64FC1);
	Mat Rroll=Mat::eye(4,4,CV_64FC1);
	Mat Rpitch=Mat::eye(4,4,CV_64FC1);
	Mat Ryaw=Mat::eye(4,4,CV_64FC1);
    
	Mat T=Mat::eye(4,4,CV_64FC1);
	Mat P=Mat::zeros(4,4,CV_64FC1);
    
    
	Rroll.at<double>(0,0)=Rroll.at<double>(1,1)=ct;
	Rroll.at<double>(0,1)=-st;Rroll.at<double>(1,0)=st;
    
	Rpitch.at<double>(1,1)=Rpitch.at<double>(2,2)=cp;
	Rpitch.at<double>(1,2)=-sp;Rpitch.at<double>(2,1)=sp;
    
	Ryaw.at<double>(0,0)=Ryaw.at<double>(2,2)=cg;
	Ryaw.at<double>(0,2)=sg;Ryaw.at<double>(2,0)=-sg; // opposite signs make this a proper rotation about the y axis
    
    
	T.at<double>(2,3)=-h;
    
	P.at<double>(0,0)=P.at<double>(1,1)=1.0/tan(deg2Rad(halfFovy));
	P.at<double>(2,2)=-(f+n)/(f-n);
	P.at<double>(2,3)=-(2.0*f*n)/(f-n);
	P.at<double>(3,2)=-1.0;
    
	F=P*T*Rpitch*Rroll*Ryaw;
    
	double ptsIn [4*3];
	double ptsOut[4*3];
	double halfW=sz.width/2.0, halfH=sz.height/2.0;
	ptsIn[0]=-halfW;ptsIn[ 1]= halfH;
	ptsIn[3]= halfW;ptsIn[ 4]= halfH;
	ptsIn[6]= halfW;ptsIn[ 7]=-halfH;
	ptsIn[9]=-halfW;ptsIn[10]=-halfH;
	ptsIn[2]=ptsIn[5]=ptsIn[8]=ptsIn[11]=0;
	Mat ptsInMat(1,4,CV_64FC3,ptsIn);Mat ptsOutMat(1,4,CV_64FC3,ptsOut);
	perspectiveTransform(ptsInMat,ptsOutMat,F);
    
	Point2f ptsInPt2f[4];Point2f ptsOutPt2f[4];
	for(int i=0;i<4;i++)
	{
		Point2f ptIn(ptsIn[i*3+0],ptsIn[i*3+1]);
		Point2f ptOut(ptsOut[i*3+0],ptsOut[i*3+1]);
		ptsInPt2f[i]=ptIn+Point2f(halfW,halfH);
		ptsOutPt2f[i]=(ptOut+Point2f(1,1))*(sideLength*0.5);
	}
	M=getPerspectiveTransform(ptsInPt2f,ptsOutPt2f);
    
	if(corners!=NULL)
	{
		corners->clear();
		corners->push_back(ptsOutPt2f[0]);
		corners->push_back(ptsOutPt2f[1]);
		corners->push_back(ptsOutPt2f[2]);
		corners->push_back(ptsOutPt2f[3]);
	}
}
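A hypothetical usage sketch of warpMatrix; 'train' and 'src' are assumptions, and the output size is derived from the returned corners, which the function maps into [0, sideLength]:

	cv::Mat M, warped;
	std::vector<cv::Point2f> corners;
	train.warpMatrix(src.size(), /*yaw*/30, /*pitch*/10, /*roll*/0,
	                 /*scale*/1.0, /*fovy*/50, M, &corners);
	float maxX = 0.f, maxY = 0.f;
	for (const cv::Point2f &c : corners) {      // bound the warped quad
		maxX = std::max(maxX, c.x);
		maxY = std::max(maxY, c.y);
	}
	cv::warpPerspective(src, warped, M, cv::Size((int)std::ceil(maxX), (int)std::ceil(maxY)));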
Code Example #27
    SoccerPitchData(const Size &size) {
        pitchPoints.push_back(Point2f(0, 0));
        pitchPoints.push_back(Point2f(13.85, 0));
        pitchPoints.push_back(Point2f(24.85, 0));
        pitchPoints.push_back(Point2f(43.15, 0));
        pitchPoints.push_back(Point2f(54.15, 0));
        pitchPoints.push_back(Point2f(68, 0));

        pitchPoints.push_back(Point2f(24.85, 5.5));
        pitchPoints.push_back(Point2f(43.15, 5.5));

        pitchPoints.push_back(Point2f(13.85, 16.5));
        pitchPoints.push_back(Point2f(26.69, 16.5));
        pitchPoints.push_back(Point2f(41.31, 16.5));
        pitchPoints.push_back(Point2f(54.15, 16.5));

        pitchPoints.push_back(Point2f(0, 52.5));
        pitchPoints.push_back(Point2f(24.85, 52.5));
        pitchPoints.push_back(Point2f(43.15, 52.5));
        pitchPoints.push_back(Point2f(68, 52.5));

        pitchPoints.push_back(Point2f(13.85, 88.5));
        pitchPoints.push_back(Point2f(26.69, 88.5));
        pitchPoints.push_back(Point2f(41.31, 88.5));
        pitchPoints.push_back(Point2f(54.15, 88.5));

        pitchPoints.push_back(Point2f(24.85, 99.5));
        pitchPoints.push_back(Point2f(43.15, 99.5));

        pitchPoints.push_back(Point2f(0, 105));
        pitchPoints.push_back(Point2f(13.85, 105));
        pitchPoints.push_back(Point2f(24.85, 105));
        pitchPoints.push_back(Point2f(43.15, 105));
        pitchPoints.push_back(Point2f(54.15, 105));
        pitchPoints.push_back(Point2f(68, 105));

        pitchOuterPoints.push_back(Point2f(0, 0));
        pitchOuterPoints.push_back(Point2f(0, 105));
        pitchOuterPoints.push_back(Point2f(68, 105));
        pitchOuterPoints.push_back(Point2f(68, 0));

        worldToScreen(size, pitchPoints);
        worldToScreen(size, pitchOuterPoints);
    };
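Because the pitchPoints above are specified in metres on a standard 68 x 105 m pitch before worldToScreen converts them, one natural use is estimating an image-to-pitch homography from detected landmarks. A sketch under assumptions (detectLandmarks, frame, pitchPointsMetres, and playerFeetInImage are hypothetical, not from the original source):

    // imagePts[i] must be the detected image position of the i-th pitch landmark.
    std::vector<cv::Point2f> imagePts = detectLandmarks(frame);             // hypothetical detector
    // pitchPointsMetres: a copy of the metre-space list above, before worldToScreen.
    cv::Mat H = cv::findHomography(imagePts, pitchPointsMetres, cv::RANSAC, 3.0);
    std::vector<cv::Point2f> playersOnPitch;
    cv::perspectiveTransform(playerFeetInImage, playersOnPitch, H);         // image -> pitch metres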
Code Example #28
// Create a grayscale face image that has a standard size and contrast & brightness.
// "srcImg" should be a copy of the whole color camera frame, so that it can draw the eye positions onto.
// If 'doLeftAndRightSeparately' is true, it will process left & right sides separately,
// so that if there is a strong light on one side but not the other, it will still look OK.
// Performs Face Preprocessing as a combination of:
//  - geometrical scaling, rotation and translation using Eye Detection,
//  - smoothing away image noise using a Bilateral Filter,
//  - standardizing the brightness on both left and right sides of the face independently using separated Histogram Equalization,
//  - removal of background and hair using an Elliptical Mask.
// Returns either a preprocessed face square image or an empty Mat (ie: couldn't detect the face and 2 eyes).
// If a face is found, it can store the rect coordinates into 'storeFaceRect' and 'storeLeftEye' & 'storeRightEye' if given,
// and eye search regions into 'searchedLeftEye' & 'searchedRightEye' if given.
Mat getPreprocessedFace(Mat &srcImg, int desiredFaceWidth, CascadeClassifier &faceCascade, CascadeClassifier &eyeCascade1, CascadeClassifier &eyeCascade2, bool doLeftAndRightSeparately, Rect *storeFaceRect, Point *storeLeftEye, Point *storeRightEye, Rect *searchedLeftEye, Rect *searchedRightEye)
{
    // Use square faces.
    int desiredFaceHeight = desiredFaceWidth;

    // Mark the detected face region and eye search regions as invalid, in case they aren't detected.
    if (storeFaceRect)
        storeFaceRect->width = -1;
    if (storeLeftEye)
        storeLeftEye->x = -1;
    if (storeRightEye)
        storeRightEye->x= -1;
    if (searchedLeftEye)
        searchedLeftEye->width = -1;
    if (searchedRightEye)
        searchedRightEye->width = -1;

    // Find the largest face.
    Rect faceRect;
    detectLargestObject(srcImg, faceCascade, faceRect);    // detect the face
	//if (faceRect.x>0)
		//cout <<"face detected"<<endl;
    // Check if a face was detected.
    if (faceRect.width > 0) {

        // Give the face rect to the caller if desired.
        if (storeFaceRect)
            *storeFaceRect = faceRect;

        Mat faceImg = srcImg(faceRect);    // Get the detected face image.

        // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
        Mat gray;
        if (faceImg.channels() == 3) {
            cvtColor(faceImg, gray, CV_BGR2GRAY);
        }
        else if (faceImg.channels() == 4) {
            cvtColor(faceImg, gray, CV_BGRA2GRAY);
        }
        else {
            // Access the input image directly, since it is already grayscale.
            gray = faceImg;
        }

        // Search for the 2 eyes at the full resolution, since eye detection needs max resolution possible!
        Point leftEye, rightEye;
        detectBothEyes(gray, eyeCascade1, eyeCascade2, leftEye, rightEye, searchedLeftEye, searchedRightEye);

        // Give the eye results to the caller if desired.
        if (storeLeftEye)
            *storeLeftEye = leftEye;
        if (storeRightEye)
            *storeRightEye = rightEye;

        // Check if both eyes were detected.
        if (leftEye.x >= 0 && rightEye.x >= 0) 
		{

            // Make the face image the same size as the training images.

            // Since we found both eyes, let's rotate & scale & translate the face so that the 2 eyes
            // line up perfectly with ideal eye positions. This makes sure that eyes will be horizontal,
            // and not too far left or right of the face, etc.

            // Get the center between the 2 eyes.
            Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
            // Get the angle between the 2 eyes.
            double dy = (rightEye.y - leftEye.y);
            double dx = (rightEye.x - leftEye.x);
            double len = sqrt(dx*dx + dy*dy);
            double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

            // Hand measurements showed that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
            const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
            // Get the amount we need to scale the image to be the desired fixed size we want.
            double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
            double scale = desiredLen / len;
            // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
            Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
            // Shift the center of the eyes to be the desired center between the eyes.
            rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
            rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

            // Rotate and scale and translate the image to the desired angle & size & position!
            // Note that we use 'w' for the height instead of 'h', because the input face has 1:1 aspect ratio.
            Mat warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128)); // Clear the output image to a default grey.
            warpAffine(gray, warped, rot_mat, warped.size());
          
            return warped;
        }
        /*
        else {
            // Since no eyes were found, just do a generic image resize.
            resize(gray, tmpImg, Size(w,h));
        }
        */
    }
    return Mat();
}
Code Example #29
void OpticalFlowTransitionModel::predict(vector<shared_ptr<Sample>>& samples, const Mat& image, const shared_ptr<Sample> target) {
	points.clear();
	forwardPoints.clear();
	backwardPoints.clear();
	forwardStatus.clear();
	error.clear();
	squaredDistances.clear();
	correctFlowCount = 0;

	// build pyramid of the current image
	cv::buildOpticalFlowPyramid(makeGrayscale(image), currentPyramid, windowSize, maxLevel, true, BORDER_REPLICATE, BORDER_REPLICATE);
	if (previousPyramid.empty() || !target) { // optical flow cannot be computed if there is no previous pyramid or no current target
		swap(previousPyramid, currentPyramid);
		return fallback->predict(samples, image, target);
	}

	// compute grid of points at target location
	for (auto point = templatePoints.begin(); point != templatePoints.end(); ++point)
		points.push_back(Point2f(target->getWidth() * point->x + target->getX(), target->getHeight() * point->y + target->getY()));
	// compute forward and backward optical flow
	cv::calcOpticalFlowPyrLK(previousPyramid, currentPyramid, points, forwardPoints, forwardStatus, error, windowSize, maxLevel);
	cv::calcOpticalFlowPyrLK(currentPyramid, previousPyramid, forwardPoints, backwardPoints, backwardStatus, error, windowSize, maxLevel);
	swap(previousPyramid, currentPyramid);

	// compute flow and forward-backward-error
	vector<Point2f> flows;
	flows.reserve(points.size());
	squaredDistances.reserve(points.size());
	for (unsigned int i = 0; i < points.size(); ++i) {
		flows.push_back(forwardPoints[i] - points[i]);
		if (forwardStatus[i] && backwardStatus[i]) {
			Point2f difference = backwardPoints[i] - points[i];
			squaredDistances.push_back(make_pair(difference.dot(difference), i));
		}
	}
	if (squaredDistances.size() < points.size() / 2) // the flow for more than half of the points could not be computed
		return fallback->predict(samples, image, target);

	// find the flows with the least forward-backward-error (not more than 1 pixel)
	float maxError = 1 * 0.5f;
	vector<float> xs, ys;
	sort(squaredDistances.begin(), squaredDistances.end(), [](pair<float, int> lhs, pair<float, int> rhs) { return lhs.first < rhs.first; });
	xs.reserve(squaredDistances.size());
	ys.reserve(squaredDistances.size());
	for (correctFlowCount = 0; correctFlowCount < squaredDistances.size(); ++correctFlowCount) {
		if (squaredDistances[correctFlowCount].first > maxError)
			break;
		int index = squaredDistances[correctFlowCount].second;
		xs.push_back(flows[index].x);
		ys.push_back(flows[index].y);
	}
	if (correctFlowCount < points.size() / 2) // too few correct correspondences
		return fallback->predict(samples, image, target);

	// compute median flow (only change in position for now)
	sort(xs.begin(), xs.end());
	sort(ys.begin(), ys.end());
	float medianX = xs[xs.size() / 2];
	float medianY = ys[ys.size() / 2];
	Point2f medianFlow(medianX, medianY);

	// compute ratios of point distances in previous and current image
	vector<float> squaredRatios;
	squaredRatios.reserve(correctFlowCount * (correctFlowCount - 1) / 2);
	for (unsigned int i = 0; i < correctFlowCount; ++i) {
		for (unsigned int j = i + 1; j < correctFlowCount; ++j) {
			Point2f point1 = points[squaredDistances[i].second];
			Point2f forwardPoint1 = forwardPoints[squaredDistances[i].second];
			Point2f point2 = points[squaredDistances[j].second];
			Point2f forwardPoint2 = forwardPoints[squaredDistances[j].second];
			Point2f differenceBefore = point1 - point2;
			Point2f differenceAfter = forwardPoint1 - forwardPoint2;
			float squaredDistanceBefore = differenceBefore.dot(differenceBefore);
			float squaredDistanceAfter = differenceAfter.dot(differenceAfter);
			squaredRatios.push_back(squaredDistanceAfter / squaredDistanceBefore);
		}
	}

	// compute median ratio to complete the median flow
	sort(squaredRatios.begin(), squaredRatios.end());
	float medianRatio = sqrt(squaredRatios[squaredRatios.size() / 2]);

	// predict samples according to median flow and random noise
	for (shared_ptr<Sample> sample : samples) {
		// add noise to velocity
		double vx = medianX;
		double vy = medianY;
		double vs = medianRatio;
		// diffuse
		vx += positionDeviation * generator();
		vy += positionDeviation * generator();
		vs *= pow(2, sizeDeviation * generator());
		// round to integer
		sample->setVx(static_cast<int>(std::round(vx)));
		sample->setVy(static_cast<int>(std::round(vy)));
		sample->setVSize(vs);
		// change position according to velocity
		sample->setX(sample->getX() + sample->getVx());
		sample->setY(sample->getY() + sample->getVy());
		sample->setSize(static_cast<int>(std::round(sample->getSize() * sample->getVSize())));
	}
}
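A side note on the median computations above: squaredDistances genuinely needs the full sort (it is consumed in ascending order), but xs, ys, and squaredRatios only need their middle element, so std::nth_element would do in expected O(n) rather than O(n log n). A drop-in alternative for the x flow, assuming <algorithm> is included:

	std::nth_element(xs.begin(), xs.begin() + xs.size() / 2, xs.end());
	float medianX = xs[xs.size() / 2];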
Code Example #30
File: mlt.cpp Project: LunaTheFinal/pbrt-v3
Point2f MLTSampler::Get2D() {
    return Point2f(Get1D(), Get1D());
}