Code Example #1
//This is the main trace function. It takes a ray as argument (defined by its origin and direction).
//We test if this ray intersects any of the geometry in the scene.
//If the ray intersects an object, we compute the intersection point, the normal at the intersection point,
//and shade this point using this information. Shading depends on the surface property
//(is it transparent, reflective, diffuse). The function returns a color for the ray.
//If the ray intersects an object, it returns the color of the object at the intersection point,
//otherwise it returns the background color.
Vec3f trace( const Vec3f &rayorig, const Vec3f &raydir,
             const std::vector<Sphere> &spheres, const int &depth)
{
    //if (raydir.length() != 1) std::cerr << "Error " << raydir << std::endl;
    float tnear = INFINITY;
    const Sphere* sphere = NULL;
    // find the closest intersection of this ray with the spheres in the scene
    for (unsigned i = 0; i < spheres.size(); ++i) {
        float t0 = INFINITY, t1 = INFINITY;
        if (spheres[i].intersect(rayorig, raydir, t0, t1)) {
            if (t0 < 0) t0 = t1;
            if (t0 < tnear) {
                tnear = t0;
                sphere = &spheres[i];
            }
        }
    }
    // if there's no intersection return black or background color
    if (!sphere) return Vec3f(2);
    Vec3f surfaceColor = 0; // color of the ray/surface of the object intersected by the ray
    Vec3f phit = rayorig + raydir * tnear; // point of intersection
    Vec3f nhit = phit - sphere->center; // normal at the intersection point
    nhit.normalize(); // normalize normal direction
    // If the normal and the view direction are not opposite to each other,
    // reverse the normal direction. That also means we are inside the sphere, so set
    // the inside bool to true. Flipping the normal also keeps the facing ratio
    // (-raydir.dot(nhit)) positive, which is what we want below.
    float bias = 1e-4; // add some bias to the point from which we will be tracing
    bool inside = false;
    if (raydir.dot(nhit) > 0) nhit = -nhit, inside = true;
    if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
        float facingratio = -raydir.dot(nhit);
        // change the mix value to tweak the effect
        float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1);
        // compute reflection direction (no need to normalize because all vectors
        // are already normalized)
        Vec3f refldir = raydir - nhit * 2 * raydir.dot(nhit);
        refldir.normalize();
        Vec3f reflection = trace(phit + nhit * bias, refldir, spheres, depth + 1);
        //Vec3f reflection = trace(phit, refldir, spheres, depth + 1); little change in the final effect
        Vec3f refraction = 0;
        // if the sphere is also transparent compute refraction ray (transmission)
        if (sphere->transparency) {
            float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface?
            float cosi = -nhit.dot(raydir);
            float k = 1 - eta * eta * (1 - cosi * cosi);
            Vec3f refrdir = raydir * eta + nhit * (eta *  cosi - sqrt(k));
            refrdir.normalize();
            refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
        }
        // the result is a mix of reflection and refraction (if the sphere is transparent)
        surfaceColor = (
                           reflection * fresneleffect +
                           refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
    }
    else {
        // it's a diffuse object, no need to raytrace any further
        for (unsigned i = 0; i < spheres.size(); ++i) {
            if (spheres[i].emissionColor.x > 0) {
                // this is a light
                Vec3f transmission = 1;
                Vec3f lightDirection = spheres[i].center - phit;
                lightDirection.normalize();
                for (unsigned j = 0; j < spheres.size(); ++j) {
                    if (i != j) {
                        float t0, t1;
                        if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) {
                            transmission = 0;
                            break;
                        }
                    }
                }
                surfaceColor += sphere->surfaceColor * transmission *
                                std::max(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor;
            }
        }
    }

    return surfaceColor + sphere->emissionColor;
}
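The Fresnel term above is blended with a mix() helper that is not part of the listing. A minimal sketch of such a helper, assuming it is a plain linear interpolation between its first two arguments (the original project may define it slightly differently):

// Assumed helper (not shown above): linear interpolation, mix(a, b, t) = b*t + a*(1 - t).
// This matches how it is called for the Fresnel blend, but it is an assumption.
float mix(const float &a, const float &b, const float &t)
{
    return b * t + a * (1 - t);
}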
Code Example #2
static void process8uC3( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
{
    int x, y, k, k1, rows = image.rows, cols = image.cols;
    float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
    int K = obj.nmixtures;
    
    const float w0 = (float)CV_BGFG_MOG_WEIGHT_INIT;
    const float sk0 = (float)(w0/CV_BGFG_MOG_SIGMA_INIT*sqrt(3.));
    const float var0 = (float)(CV_BGFG_MOG_SIGMA_INIT*CV_BGFG_MOG_SIGMA_INIT);
    const float minVar = (float)(obj.noiseSigma*obj.noiseSigma);
    MixData<Vec3f>* mptr = (MixData<Vec3f>*)obj.bgmodel.data;
    
    for( y = 0; y < rows; y++ )
    {
        const uchar* src = image.ptr<uchar>(y);
        uchar* dst = fgmask.ptr<uchar>(y);
        
        if( alpha > 0 )
        {
            for( x = 0; x < cols; x++, mptr += K )
            {
                float wsum = 0;
                Vec3f pix(src[x*3], src[x*3+1], src[x*3+2]);
                int kHit = -1, kForeground = -1;
                
                for( k = 0; k < K; k++ )
                {
                    float w = mptr[k].weight;
                    wsum += w;
                    if( w < FLT_EPSILON )
                        break;
                    Vec3f mu = mptr[k].mean;
                    Vec3f var = mptr[k].var;
                    Vec3f diff = pix - mu;
                    float d2 = diff.dot(diff);
                    if( d2 < vT*(var[0] + var[1] + var[2]) )
                    {
                        wsum -= w;
                        float dw = alpha*(1.f - w);
                        mptr[k].weight = w + dw;
                        mptr[k].mean = mu + alpha*diff;
                        var = Vec3f(max(var[0] + alpha*(diff[0]*diff[0] - var[0]), minVar),
                                    max(var[1] + alpha*(diff[1]*diff[1] - var[1]), minVar),
                                    max(var[2] + alpha*(diff[2]*diff[2] - var[2]), minVar));
                        mptr[k].var = var;
                        mptr[k].sortKey = w/sqrt(var[0] + var[1] + var[2]);
                        
                        for( k1 = k-1; k1 >= 0; k1-- )
                        {
                            if( mptr[k1].sortKey >= mptr[k1+1].sortKey )
                                break;
                            std::swap( mptr[k1], mptr[k1+1] );
                        }
                        
                        kHit = k1+1;
                        break;
                    }
                }
                
                if( kHit < 0 ) // no appropriate gaussian mixture found at all, remove the weakest mixture and create a new one
                {
                    kHit = k = min(k, K-1);
                    wsum += w0 - mptr[k].weight;
                    mptr[k].weight = w0;
                    mptr[k].mean = pix;
                    mptr[k].var = Vec3f(var0, var0, var0);
                    mptr[k].sortKey = sk0;
                }
                else
                    for( ; k < K; k++ )
                        wsum += mptr[k].weight;
            
                float wscale = 1.f/wsum;
                wsum = 0;
                for( k = 0; k < K; k++ )
                {
                    wsum += mptr[k].weight *= wscale;
                    mptr[k].sortKey *= wscale;
                    if( wsum > T && kForeground < 0 )
                        kForeground = k+1;
                }
                
                dst[x] = (uchar)(-(kHit >= kForeground));
            }
        }
        else
        {
            for( x = 0; x < cols; x++, mptr += K )
            {
                Vec3f pix(src[x*3], src[x*3+1], src[x*3+2]);
                int kHit = -1, kForeground = -1;
                
                for( k = 0; k < K; k++ )
                {
                    if( mptr[k].weight < FLT_EPSILON )
                        break;
                    Vec3f mu = mptr[k].mean;
                    Vec3f var = mptr[k].var;
                    Vec3f diff = pix - mu;
                    float d2 = diff.dot(diff);
                    if( d2 < vT*(var[0] + var[1] + var[2]) )
                    {
                        kHit = k;
                        break;
                    }
                }
 
                if( kHit >= 0 )
                {
                    float wsum = 0;
                    for( k = 0; k < K; k++ )
                    {
                        wsum += mptr[k].weight;
                        if( wsum > T )
                        {
                            kForeground = k+1;
                            break;
                        }
                    }
                }
                
                dst[x] = (uchar)(kHit < 0 || kHit >= kForeground ? 255 : 0);
            }
        }
    }
}
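The per-pixel loop above indexes obj.bgmodel.data as an array of K MixData<Vec3f> records per pixel. A minimal sketch of the record layout that the reads and writes above assume (the actual internal OpenCV definition may order the fields differently):

// Assumed per-Gaussian record for the MOG background model; only the fields
// touched by process8uC3 are listed, and the real field order may differ.
template<typename VT> struct MixData
{
    float sortKey;   // w / sqrt(var sum), keeps components sorted by relevance
    float weight;    // mixture weight, renormalized to sum to 1 per pixel
    VT    mean;      // per-channel mean (Vec3f for 8UC3 input)
    VT    var;       // per-channel variance
};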
Code Example #3
File: intersect.cpp Project: dalibor-matura/mcl
void TriangleDistance::segPoints(const Vec3f& P, const Vec3f& A, const Vec3f& Q, const Vec3f& B,
                                 Vec3f& VEC, Vec3f& X, Vec3f& Y)
{
  Vec3f T;
  FCL_REAL A_dot_A, B_dot_B, A_dot_B, A_dot_T, B_dot_T;
  Vec3f TMP;

  T = Q - P;
  A_dot_A = A.dot(A);
  B_dot_B = B.dot(B);
  A_dot_B = A.dot(B);
  A_dot_T = A.dot(T);
  B_dot_T = B.dot(T);

  // t parameterizes ray P,A
  // u parameterizes ray Q,B

  FCL_REAL t, u;

  // compute t for the closest point on ray P,A to
  // ray Q,B

  FCL_REAL denom = A_dot_A*B_dot_B - A_dot_B*A_dot_B;

  t = (A_dot_T*B_dot_B - B_dot_T*A_dot_B) / denom;

  // clamp result so t is on the segment P,A

  if((t < 0) || boost::math::isnan(t)) t = 0; else if(t > 1) t = 1;

  // find u for point on ray Q,B closest to point at t

  u = (t*A_dot_B - B_dot_T) / B_dot_B;

  // if u is on segment Q,B, t and u correspond to
  // closest points, otherwise, clamp u, recompute and
  // clamp t

  if((u <= 0) || boost::math::isnan(u))
  {
    Y = Q;

    t = A_dot_T / A_dot_A;

    if((t <= 0) || boost::math::isnan(t))
    {
      X = P;
      VEC = Q - P;
    }
    else if(t >= 1)
    {
      X = P + A;
      VEC = Q - X;
    }
    else
    {
      X = P + A * t;
      TMP = T.cross(A);
      VEC = A.cross(TMP);
    }
  }
  else if (u >= 1)
  {
    Y = Q + B;

    t = (A_dot_B + A_dot_T) / A_dot_A;

    if((t <= 0) || boost::math::isnan(t))
    {
      X = P;
      VEC = Y - P;
    }
    else if(t >= 1)
    {
      X = P + A;
      VEC = Y - X;
    }
    else
    {
      X = P + A * t;
      T = Y - P;
      TMP = T.cross(A);
      VEC = A.cross(TMP);
    }
  }
  else
  {
    Y = Q + B * u;

    if((t <= 0) || boost::math::isnan(t))
    {
      X = P;
      TMP = T.cross(B);
      VEC = B.cross(TMP);
    }
    else if(t >= 1)
    {
      X = P + A;
      T = Q - X;
      TMP = T.cross(B);
      VEC = B.cross(TMP);
    }
    else
    {
      X = P + A * t;
      VEC = A.cross(B);
      if(VEC.dot(T) < 0)
      {
        VEC = VEC * (-1);
      }
    }
  }
}
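segPoints returns the closest point X on segment [P, P+A], the closest point Y on segment [Q, Q+B], and a separating vector VEC. A small usage sketch, assuming segPoints is exposed as a static member and that this Vec3f provides length():

// Hypothetical caller: distance between two segments given as origin + edge vector.
Vec3f VEC, X, Y;
TriangleDistance::segPoints(P, A, Q, B, VEC, X, Y);
FCL_REAL segDist = (Y - X).length();   // closest distance between the segments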
Code Example #4
File: mesher.cpp Project: Highstaker/antimony
void Mesher::check_feature()
{
    auto contour = get_contour();
    const auto normals = get_normals(contour);

    // Find the largest cone and the normals that enclose
    // the largest angle as n0, n1.
    float theta = 1;
    Vec3f n0, n1;
    for (auto ni : normals)
    {
        for (auto nj : normals)
        {
            float dot = ni.dot(nj);
            if (dot < theta)
            {
                theta = dot;
                n0 = ni;
                n1 = nj;
            }
        }
    }

    // If there isn't a feature in this fan, then return immediately.
    if (theta > 0.9)
        return;

    // Decide whether this is a corner or edge feature.
    const Vec3f nstar = n0.cross(n1);
    float phi = 0;
    for (auto n : normals)
        phi = fmax(phi, fabs(nstar.dot(n)));
    bool edge = phi < 0.7;

    // Find the center of the contour.
    Vec3f center(0, 0, 0);
    for (auto c : contour)
        center += c;
    center /= contour.size();

    // Construct the matrices for use in our least-square fit.
    Eigen::MatrixX3d A(normals.size(), 3);
    {
        int i=0;
        for (auto n : normals)
            A.row(i++) << n.transpose();
    }

    // When building the second matrix, shift position values to be centered
    // about the origin (because that's what the least-squares fit will
    // minimize).
    Eigen::VectorXd B(normals.size(), 1);
    {
        auto n = normals.begin();
        auto c = contour.begin();
        int i=0;
        while (n != normals.end())
            B.row(i++) << (n++)->dot(*(c++) - center);
    }

    // Use singular value decomposition to solve the least-squares fit.
    Eigen::JacobiSVD<Eigen::MatrixX3d> svd(A, Eigen::ComputeFullU |
                                              Eigen::ComputeFullV);

    // Set the smallest singular value to zero to make fitting happier.
    if (edge)
    {
        auto singular = svd.singularValues();
        svd.setThreshold(singular.minCoeff() / singular.maxCoeff() * 1.01);
    }

    // Solve for the new point's position.
    const Vec3f new_pt = svd.solve(B) + center;

    // Erase this triangle fan, as we'll be inserting a vertex in the center.
    triangles.erase(fan_start, voxel_start);

    // Construct a new triangle fan.
    contour.push_back(contour.front());
    {
        auto p0 = contour.begin();
        auto p1 = contour.begin();
        p1++;
        while (p1 != contour.end())
            push_swappable_triangle(Triangle(*(p0++), *(p1++), new_pt));
    }
}
Code Example #5
File: Plane.cpp Project: NGCyang/stellarium
float Plane::calcDistance(const Vec3f p) const
{
	return p.dot(normal) - distance;
}
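With the plane stored in Hessian normal form (points on the plane satisfy p.dot(normal) == distance), the return value is a signed distance: positive on the side the normal points to. A minimal usage sketch built only on calcDistance:

// Illustrative helper: classify a point against a plane using the method above.
bool isInFront(const Plane &pl, const Vec3f &p)
{
	return pl.calcDistance(p) > 0.0f;   // positive: p lies on the normal side
}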
Code Example #6
File: main.cpp Project: sharmrit/PoolGame_Animation
Vec3f proj (Vec3f v1, Vec3f v2){
		//projection v1 to v2:
	// a1 = (a dot b)/(b dot b)   * b 
	return (((v1.dot(v2))/(v2.dot(v2)))* v2);
}
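proj returns the vector projection of v1 onto v2; since it divides by v2.dot(v2), v2 must be non-zero. A small usage sketch (the three-component Vec3f constructor and operator- are assumed to exist in this project):

// Illustrative: split a velocity into components along and across a direction.
Vec3f velocity(1.0f, 2.0f, 0.0f);
Vec3f surfaceDir(1.0f, 0.0f, 0.0f);
Vec3f along = proj(velocity, surfaceDir);   // (1, 0, 0)
Vec3f across = velocity - along;            // (0, 2, 0)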
Code Example #7
int PointTracker::POSIT(float f)
{
	// POSIT algorithm for coplanar points as presented in
	// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
	// we use the same notation as in the paper here

	// The expected rotation used for resolving the ambiguity in POSIT:
	// In every iteration step the rotation closer to R_expected is taken 
	Matx33f R_expected;	
	if (init_phase)
		R_expected = Matx33f::eye(); // in the init phase, we want to be close to the default pose = no rotation
	else 
		R_expected = X_CM.R; // later we want to be close to the last (predicted) rotation
	
	// initial pose = last (predicted) pose
	Vec3f k;
	get_row(X_CM.R, 2, k);
	float Z0 = X_CM.t[2];

	float old_epsilon_1 = 0;
	float old_epsilon_2 = 0;
	float epsilon_1 = 1;
	float epsilon_2 = 1;

	Vec3f I0, J0;
	Vec2f I0_coeff, J0_coeff;

	Vec3f I_1, J_1, I_2, J_2;
	Matx33f R_1, R_2;
	Matx33f* R_current;

	const int MAX_ITER = 100;
	const float EPS_THRESHOLD = 1e-4;

	int i=1;
	for (; i<MAX_ITER; ++i)
	{
		epsilon_1 = k.dot(point_model->M01)/Z0;
		epsilon_2 = k.dot(point_model->M02)/Z0;

		// vector of scalar products <I0, M0i> and <J0, M0i>
		Vec2f I0_M0i(p[1][0]*(1.0 + epsilon_1) - p[0][0], 
			         p[2][0]*(1.0 + epsilon_2) - p[0][0]);
		Vec2f J0_M0i(p[1][1]*(1.0 + epsilon_1) - p[0][1],
			         p[2][1]*(1.0 + epsilon_2) - p[0][1]);

		// construct projection of I, J onto M0i plane: I0 and J0
		I0_coeff = point_model->P * I0_M0i;
		J0_coeff = point_model->P * J0_M0i;
		I0 = I0_coeff[0]*point_model->M01 + I0_coeff[1]*point_model->M02;
		J0 = J0_coeff[0]*point_model->M01 + J0_coeff[1]*point_model->M02;

		// calculate u component of I, J		
		float II0 = I0.dot(I0);
		float IJ0 = I0.dot(J0);
		float JJ0 = J0.dot(J0);
		float rho, theta;
		if (JJ0 == II0) {
			rho = sqrt(abs(2*IJ0));
			theta = -PI/4;
			if (IJ0<0) theta *= -1;
		}
		else {
			rho = sqrt(sqrt( (JJ0-II0)*(JJ0-II0) + 4*IJ0*IJ0 ));
			theta = atan( -2*IJ0 / (JJ0-II0) );
			if (JJ0 - II0 < 0) theta += PI;
			theta /= 2;
		}

		// construct the two solutions
		I_1 = I0 + rho*cos(theta)*point_model->u;	
		I_2 = I0 - rho*cos(theta)*point_model->u;

		J_1 = J0 + rho*sin(theta)*point_model->u;
		J_2 = J0 - rho*sin(theta)*point_model->u;

		float norm_const = 1.0/norm(I_1); // all have the same norm
		
		// create rotation matrices
		I_1 *= norm_const; J_1 *= norm_const;
		I_2 *= norm_const; J_2 *= norm_const;

		set_row(R_1, 0, I_1);
		set_row(R_1, 1, J_1);
		set_row(R_1, 2, I_1.cross(J_1));
		
		set_row(R_2, 0, I_2);
		set_row(R_2, 1, J_2);
		set_row(R_2, 2, I_2.cross(J_2));

		// the single translation solution
		Z0 = norm_const * f;

		// pick the rotation solution closer to the expected one
		// in simple metric d(A,B) = || I - A * B^T ||
		float R_1_deviation = norm(Matx33f::eye() - R_expected * R_1.t());
		float R_2_deviation = norm(Matx33f::eye() - R_expected * R_2.t());

		if (R_1_deviation < R_2_deviation)
			R_current = &R_1;
		else
			R_current = &R_2;

		get_row(*R_current, 2, k);

		// check for convergence condition
		if (abs(epsilon_1 - old_epsilon_1) +  abs(epsilon_2 - old_epsilon_2) < EPS_THRESHOLD)
			break;
		old_epsilon_1 = epsilon_1;
		old_epsilon_2 = epsilon_2;
	}	

	// apply results
	X_CM.R = *R_current;
	X_CM.t[0] = p[0][0] * Z0/f;
	X_CM.t[1] = p[0][1] * Z0/f;
	X_CM.t[2] = Z0;

	return i;

	//Rodrigues(X_CM.R, r);
	//qDebug()<<"iter: "<<i;
	//qDebug()<<"t: "<<X_CM.t[0]<<' '<<X_CM.t[1]<<' '<<X_CM.t[2];
	//Vec3f r;
	//
	//qDebug()<<"r: "<<r[0]<<' '<<r[1]<<' '<<r[2]<<'\n';
}
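The routine relies on get_row/set_row helpers to move rows between Matx33f and Vec3f; they are not part of the listing. A sketch of what they presumably do, given how they are called above (OpenCV's Matx33f element access is used; the project's own helpers may be written differently):

// Presumed helpers: copy one row of a 3x3 matrix out of / into a Vec3f.
static void get_row(const Matx33f &m, int row, Vec3f &v)
{
	v = Vec3f(m(row, 0), m(row, 1), m(row, 2));
}

static void set_row(Matx33f &m, int row, const Vec3f &v)
{
	m(row, 0) = v[0];
	m(row, 1) = v[1];
	m(row, 2) = v[2];
}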
Code Example #8
File: PhotonTracer.cpp Project: yanko/tungsten
Vec3f PhotonTracer::traceSample(Vec2u pixel, const KdTree<Photon> &surfaceTree,
        const KdTree<VolumePhoton> *mediumTree, PathSampleGenerator &sampler,
        float gatherRadius)
{
    PositionSample point;
    if (!_scene->cam().samplePosition(sampler, point))
        return Vec3f(0.0f);
    DirectionSample direction;
    if (!_scene->cam().sampleDirection(sampler, point, pixel, direction))
        return Vec3f(0.0f);
    sampler.advancePath();

    Vec3f throughput = point.weight*direction.weight;
    Ray ray(point.p, direction.d);
    ray.setPrimaryRay(true);

    IntersectionTemporary data;
    IntersectionInfo info;
    const Medium *medium = _scene->cam().medium().get();

    Vec3f result(0.0f);
    int bounce = 0;
    bool didHit = _scene->intersect(ray, data, info);
    while ((medium || didHit) && bounce < _settings.maxBounces - 1) {
        if (medium) {
            if (mediumTree) {
                Vec3f beamEstimate(0.0f);
                mediumTree->beamQuery(ray.pos(), ray.dir(), ray.farT(), [&](const VolumePhoton &p, float t, float distSq) {
                    Ray mediumQuery(ray);
                    mediumQuery.setFarT(t);
                    beamEstimate += (3.0f*INV_PI*sqr(1.0f - distSq/p.radiusSq))/p.radiusSq
                            *medium->phaseFunction(p.pos)->eval(ray.dir(), -p.dir)
                            *medium->transmittance(mediumQuery)*p.power;
                });
                result += throughput*beamEstimate;
            }
            throughput *= medium->transmittance(ray);
        }
        if (!didHit)
            break;

        const Bsdf &bsdf = *info.bsdf;

        SurfaceScatterEvent event = makeLocalScatterEvent(data, info, ray, &sampler);

        Vec3f transparency = bsdf.eval(event.makeForwardEvent(), false);
        float transparencyScalar = transparency.avg();

        Vec3f wo;
        if (sampler.nextBoolean(DiscreteTransparencySample, transparencyScalar)) {
            wo = ray.dir();
            throughput *= transparency/transparencyScalar;
        } else {
            event.requestedLobe = BsdfLobes::SpecularLobe;
            if (!bsdf.sample(event, false))
                break;

            wo = event.frame.toGlobal(event.wo);

            throughput *= event.weight;
        }

        bool geometricBackside = (wo.dot(info.Ng) < 0.0f);
        medium = info.primitive->selectMedium(medium, geometricBackside);

        ray = ray.scatter(ray.hitpoint(), wo, info.epsilon);

        if (std::isnan(ray.dir().sum() + ray.pos().sum()))
            break;
        if (std::isnan(throughput.sum()))
            break;

        sampler.advancePath();
        bounce++;
        if (bounce < _settings.maxBounces)
            didHit = _scene->intersect(ray, data, info);
    }

    if (!didHit) {
        if (!medium && _scene->intersectInfinites(ray, data, info))
            result += throughput*info.primitive->evalDirect(data, info);
        return result;
    }
    if (info.primitive->isEmissive())
        result += throughput*info.primitive->evalDirect(data, info);

    int count = surfaceTree.nearestNeighbours(ray.hitpoint(), _photonQuery.get(), _distanceQuery.get(),
            _settings.gatherCount, gatherRadius);
    if (count == 0)
        return result;

    const Bsdf &bsdf = *info.bsdf;
    SurfaceScatterEvent event = makeLocalScatterEvent(data, info, ray, &sampler);

    Vec3f surfaceEstimate(0.0f);
    for (int i = 0; i < count; ++i) {
        event.wo = event.frame.toLocal(-_photonQuery[i]->dir);
        // Asymmetry due to shading normals already compensated for when storing the photon,
        // so we don't use the adjoint BSDF here
        surfaceEstimate += _photonQuery[i]->power*bsdf.eval(event, false)/std::abs(event.wo.z());
    }
    float radiusSq = count == int(_settings.gatherCount) ? _distanceQuery[0] : gatherRadius*gatherRadius;
    result += throughput*surfaceEstimate*(INV_PI/radiusSq);

    return result;
}
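traceSample also leans on two small math utilities, sqr and INV_PI, that are not shown. Illustrative definitions consistent with how they are used in the beam estimate (Tungsten ships its own versions):

// Assumed math helpers; shown only so the beam-estimate expression reads clearly.
static inline float sqr(float x) { return x * x; }
static const float INV_PI = 0.31830988618379067154f;   // 1 / pi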
Code Example #9
void DVRClipGeometry::linkContour(      DVRTriangle *startTriangle,
                                        Real32       dist2RefPlane,
                                  const Vec3f       &viewDir, 
                                        bool         positiveWinding)
{
    FDEBUG(("DVRClipGeometry - linkcontour dist = %f\n", dist2RefPlane));
  
    bool closed = false;

    // first, we have to check for the correct winding direction.

    Pnt3f vertex[2];
    bool firstEdge;
    int  first = 0, second = 0;

    if(startTriangle->edgeCut[0] && startTriangle->edgeCut[1])
    {
        vertex[0] = interpolate(startTriangle, 1, 0, dist2RefPlane); 
        vertex[1] = interpolate(startTriangle, 1, 2, dist2RefPlane);

        first = 0; second = 1;
    }
    else if (startTriangle->edgeCut[1] && startTriangle->edgeCut[2])
    {
        vertex[0] = interpolate(startTriangle, 2, 1, dist2RefPlane);
        vertex[1] = interpolate(startTriangle, 2, 0, dist2RefPlane);

        first = 1; second = 2;
    }
    else if (startTriangle->edgeCut[0] && startTriangle->edgeCut[2])
    {
        vertex[0] = interpolate(startTriangle, 0, 1, dist2RefPlane);
        vertex[1] = interpolate(startTriangle, 0, 2, dist2RefPlane);

        first = 0; second = 2;
    }

    // Now we should have both cut points on our edges.

    // If the cross product of the normal of this triangle with the 
    // vector between the two cut points (cutPoint[1] - cutPoint[0]) 
    // has a positive dot product with the viewing direction, then 
    // the edge with cutPoint[0] on it is the right direction, otherwise
    // we would have to choose the other direction.

    Vec3f tmp = vertex[1] - vertex[0];

    tmp = tmp.cross(startTriangle->transformedNormal);

    if(tmp.dot(viewDir) <= 0.0)
    {
        firstEdge = false;
    }else
    {
        firstEdge = true;
    }

    if(!positiveWinding)
        firstEdge = !firstEdge;

    DVRTriangle *current = startTriangle;

    current->inContour = true;

    if(firstEdge)
    {
        current->cutPnt      = vertex[0];
        current->cutPoint[0] = vertex[0][0];
        current->cutPoint[1] = vertex[0][1];
        current->cutPoint[2] = vertex[0][2];

        current->contourNeighbour = &_mfTriangles[current->neighbours[first]];

        //      // debugging -> remove
        //      if(!current->contourNeighbour){
        //        std::cerr<<"contour neighbour is NULL\n";
        //        exit(0);
        //      }

        current = current->contourNeighbour;
    }
    else
    {
        current->cutPnt      = vertex[1];
        current->cutPoint[0] = vertex[1][0];
        current->cutPoint[1] = vertex[1][1];
        current->cutPoint[2] = vertex[1][2];    

        current->contourNeighbour = &_mfTriangles[current->neighbours[second]];
        //      // debugging -> remove
        //      if(!current->contourNeighbour){
        //        std::cerr<<"contour neighbour is NULL\n";
        //        exit(0);
        //      }

        current = current->contourNeighbour;    
    }

    //check neighbours
    while(!closed)
    {
        closed             = true;
        current->inContour = true;

        for(UInt32 i = 0; i < 3; i++)
        {
            // if a neighbour triangle is in the active triangle list and 
            // not yet in a contour it is our new contour neighbour.
            if( current->edgeCut[i] && 
               !_mfTriangles[current->neighbours[i]].inContour)
            {
                // calculate cut point 	
                current->cutPnt = interpolate(current, 
                                              i, 
                                              (i + 1) % 3, 
                                              dist2RefPlane);

                current->cutPoint[0] = current->cutPnt[0];
                current->cutPoint[1] = current->cutPnt[1];
                current->cutPoint[2] = current->cutPnt[2];

                current->contourNeighbour = 
                    &_mfTriangles[current->neighbours[i]];

                //  	// debugging -> remove
                //  	if(!current->contourNeighbour){
                //  	  std::cerr<<"contour neighbour is NULL\n";
                //  	  exit(0);
                //  	}

                current = current->contourNeighbour;
                closed  = false;

                break;
            }// !inContour
        } // end for neighbours
    } // end while !closed

    for(UInt32 i = 0; i < 3; i++)
    {
        if(&_mfTriangles[current->neighbours[i]] == startTriangle)
        {
            current->cutPnt = interpolate(current, 
                                          i, 
                                          (i + 1) % 3, 
                                          dist2RefPlane);

            current->cutPoint[0] = current->cutPnt[0];
            current->cutPoint[1] = current->cutPnt[1];
            current->cutPoint[2] = current->cutPnt[2];

            // now the ring is closed.

            current->contourNeighbour = startTriangle;
            //        // debugging -> remove
            //        if(!current->contourNeighbour){
            //  	std::cerr<<"contour neighbour is NULL\n";
            //  	exit(0);
            //        }
            break;
        }
    } // end for neighbours

    //    // debugging -> remove
    //    if(!current->contourNeighbour){
    //      std::cerr <<"contour could not closed\n";
    //      std::cerr <<current->edgeCut[0]<<current->edgeCut[1]
    //                <<current->edgeCut[2]<<std::endl;
    //      exit(0);
    //    }
}
Code Example #10
  void doInteraction(float dt){
    Glove *l = &left;
    Glove *r = &right;

    Vec3f headPos = tracker->markerPositions[17]; // right(0-7) left(8-15) A B C (16 17 18)
    if(headPos.mag() == 0) return;

    Rayd rayS(headPos, r->centroid - headPos); // ray pointing in direction of head to right hand
    
    float t = rayS.intersectAllosphere(); // get t on surface of allosphere screen
    Vec3f pos = nav().quat().rotate( rayS(t) ); // rotate point on allosphere to match current nav orientation (check this)
    Rayd ray( nav().pos(), pos); // ray from sphere center (camera location) to intersected location

    // Use ray to intersect with plasma shell on pinch
    if( r->pinchOn[eIndex]){
      cout<<"right index finger pinched, create lightning!"<<endl; 
      //audio
      //cout << currentPlayer << endl;
      //cout << *currentPlayer << endl;
      samplePlayer[*currentPlayer].reset(); // reset the phase == start playing the sound

      *currentPlayer = *currentPlayer + 1;
      if (*currentPlayer == N_SAMPLE_PLAYER){
        *currentPlayer = 0;
        // currentPlayer = 0; // this was a horrible BUG! XXX
      }
      //visual
      Vec3f center = Vec3f(0, 0.6, -1);
      float t = ray.intersectSphere(Vec3f(0,0,0), R);
      Vec3f src = ray(t);
      Vec3f dest = 0.1f * (src - center).normalize() + center;
      Bolt* newPSBolt = new Bolt();
      newPSBolt->makeTexture();
      newPSBolt->start = src;
      newPSBolt->ending = dest;
      newPSBolt->makeBolt(src, dest, 2, 0.03);
      newPSBolt->createBulge(center);
      boltQ->push_back(newPSBolt); 
    }

    // state->cursor.set(nav().pos() + pos);

    // Navigation joystick mode
    // Translation done with the left index finger pinch gesture
    if( l->pinchOn[eIndex]){
    } else if( l->pinched[eIndex]){
      Vec3f translate = sensitivity * l->getPinchTranslate(eIndex);
      for(int i=0; i<3; i++){
        nav().pos()[i] = nav().pos()[i] * 0.9 + 
          (nav().pos()[i] + translate.dot(Vec3d(nav().ur()[i], nav().uu()[i], -nav().uf()[i]))) * 0.1;
      }
      // nav().pos().lerp( nav().pos() + translate, 0.01f);
    } else if( l->pinchOff[eIndex] ){
    } else {
    }
    //ePinky
    //eRing

    // change navigation sensitivity
    if( l->pinched[eMiddle]){
      Vec3f v = l->getPinchTranslate(eMiddle);
      sensitivity = abs(v.y*10);
    }

  }
Code Example #11
File: vec_3f.cpp Project: Gitu1997/Gazebo
BVH_REAL quadraticForm(const Vec3f M[3], const Vec3f& v)
{
  return v.dot(Vec3f(M[0].dot(v), M[1].dot(v), M[2].dot(v)));
}
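quadraticForm evaluates v^T M v with the matrix M passed as three row vectors. A short sanity-check sketch with a diagonal matrix:

// Illustrative check: for M = diag(1, 2, 3) and v = (1, 1, 1),
// v^T M v = 1 + 2 + 3 = 6.
Vec3f M[3] = { Vec3f(1, 0, 0), Vec3f(0, 2, 0), Vec3f(0, 0, 3) };
Vec3f v(1, 1, 1);
BVH_REAL q = quadraticForm(M, v);   // expected: 6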
Code Example #12
File: Cylinder.cpp Project: Aerochip7/trunk
bool Cylinder::InitAverage(const MiscLib::Vector< Vec3f > &samples)
{
	if(samples.size() < 4)
		return false;
	// estimate axis from covariance of normal vectors
	MiscLib::Vector< GfxTL::Vector3Df > normals;
	size_t c = samples.size() / 2;
	for(size_t i = c; i < samples.size(); ++i)
	{
		normals.push_back(GfxTL::Vector3Df(samples[i]));
		normals.push_back(GfxTL::Vector3Df(-samples[i]));
	}
	GfxTL::MatrixXX< 3, 3, float > cov, eigenVectors;
	GfxTL::Vector3Df eigenValues;
	GfxTL::CovarianceMatrix(GfxTL::Vector3Df(0, 0, 0),
		normals.begin(), normals.end(), &cov);
	GfxTL::Jacobi(cov, &eigenValues, &eigenVectors);
	// find the minimal eigenvalue and corresponding vector
	float minEigVal = eigenValues[0];
	unsigned int minEigIdx = 0;
	for(unsigned int i = 1; i < 3; ++i)
		if(eigenValues[i] < minEigVal)
		{
			minEigVal = eigenValues[i];
			minEigIdx = i;
		}
	m_axisDir = Vec3f(eigenVectors[minEigIdx]);
	// get a point on the axis from all pairs
	m_axisPos = Vec3f(0, 0, 0);
	m_radius = 0;
	size_t pointCount = 0;
	size_t pairCount = 0;
	for(size_t i = 0; i < c - 1; ++i)
		for(size_t j = i + 1; j < c; ++j)
		{
			// project first normal into plane
			float l = m_axisDir.dot(samples[i + c]);
			Vec3f xdir = samples[i + c] - l * m_axisDir;
			xdir.normalize();
			Vec3f ydir = m_axisDir.cross(xdir);
			ydir.normalize();
			// xdir is the x axis in the plane (y = 0) samples[i] is the origin
			float lineBnx = ydir.dot(samples[j + c]);
			if(abs(lineBnx) < .05f)
				continue;
			float lineBny = -xdir.dot(samples[j + c]);
			// origin of lineB
			Vec3f originB = samples[j] - samples[i];
			float lineBOx = xdir.dot(originB);
			float lineBOy = ydir.dot(originB);
			float lineBd = lineBnx * lineBOx + lineBny * lineBOy;
			// lineB in the plane complete
			// point of intersection is y = 0 and x = lineBd / lineBnx
			float radius = lineBd / lineBnx;
			m_axisPos += samples[i] + radius * xdir;
			m_radius += abs(radius);
			m_radius += std::sqrt((radius - lineBOx) * (radius - lineBOx) + lineBOy * lineBOy);
			++pointCount;
		}
	if(!pointCount)
		return false;
	m_axisPos /= pointCount;
	m_radius /= pointCount * 2;
	if(m_radius > 1e6)
		return false;

	// find point on axis closest to origin
	float lambda = m_axisDir.dot(-m_axisPos);
	m_axisPos = m_axisPos + lambda * m_axisDir;

	m_hcs.FromNormal(m_axisDir);
	m_angularRotatedRadians = 0;
	return true;
}
Code Example #13
 /** The computed distance is perfect in that case
  * @param p_j the point to compute its distance to
  * @return
  */
 float distance(const Vec3f& p_j) const
 {
   return std::abs(float(p_j.dot(n_) + d_));
 }
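The distance is exact because the plane is assumed to be stored as a unit normal n_ and offset d_, i.e. points x on the plane satisfy n_.dot(x) + d_ == 0. A minimal sketch of the surrounding state, which the listing does not show (member names are taken from the code; the rest is illustrative):

 // Minimal sketch of the assumed enclosing class; only distance() appears above.
 struct PlaneModel
 {
   Vec3f n_;   // unit-length plane normal
   float d_;   // plane offset in n_.dot(x) + d_ = 0
   float distance(const Vec3f& p_j) const
   {
     return std::abs(float(p_j.dot(n_) + d_));
   }
 };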
Code Example #14
File: noeuclid.cpp Project: Danthekilla/noeuclid
void DoneProbes(bool bReRun) {
    //#define CFM  .4
    //#define CFMXY .5
    double CFM = .2;
#define VCFM 1.0
#define COLLIDESIZE 0.5
    //Ratio of XY to Z
#define COLLIDESIZERATIO 3
#define MINSIDE .0
#define ITERATIONS 10


    //Here's what we do: gpTest->TargetLocation.r, .g, .b contains the new position we should be at, bar whatever forces we're receiving from the other rays.

    int i;

    Vec3f newp = gh.MapOffset;

    int iterations = 0;

    Quaternion orotmat[3];

    CollisionProbe * tp = gpTest;


    if (tp->Direction.r * tp->Direction.r + tp->Direction.g * tp->Direction.g + tp->Direction.b * tp->Direction.b > 0.0 && !bReRun) {
        Vec3f df = tp->TargetLocation.vec() - newp;

        double dtdiffx = df.len();

        newp = tp->TargetLocation.vec();

        double dirdiffx = sqrt(tp->Direction.r * tp->Direction.r + tp->Direction.g * tp->Direction.g + tp->Direction.b * tp->Direction.b);

        //Check to see if it's a jump, if so, consider re-running probes.
        if (dtdiffx > dirdiffx * 1.5 + 1.0) {
            printf("Jump %f %f %f -> %f %f %f\n", gh.MapOffset.x, gh.MapOffset.y, gh.MapOffset.z, tp->TargetLocation.r, tp->TargetLocation.g, tp->TargetLocation.b);
            newp = gh.MapOffset = tp->TargetLocation.vec(); // - tp->Direction.r; //??? WHY not these but Z?
            //XXX WHY WHY WHY??? WHY?? (Read why below)
            //XXX TODO TODO TODO!! There is a glitch.  We have to rotate the tp->Direction before adding it, otherwise really weird things will happen.
            //I haven't gotten around to this yet.

            //Attempt to correct direction of speed.
            gh.v = gpTestVelocity->NewDirection.vec();

            gh.ForceProbeReRun();
            goto clend;
        }
    }


    for (iterations = 0; iterations < ITERATIONS; iterations++)
        for (i = probes.size() - 1; i >= 0; i--) {
            CollisionProbe * tp = probes[i];
            //		if( tp->Normal.a > 1.0 ) continue;

            //		if( tp->TargetLocation.a > COLLIDESIZE ) continue;



            //We have a collision, need to "push" back.

            Vec3f sfn = tp->Normal.vec();

            //Actual hit xyz
            Vec3f t = tp->TargetLocation.vec();
            Vec3f nd = t - newp; //tp->Direction.r;		//Direction of ray.

            double nddiff = nd.len();
            if (nddiff < 0.00001 || std::isnan(nddiff)) {
                //fprintf( stderr, "Error: fault with tp direction.\n" );
                continue; //Don't know why this could happen.
            }

            //Make sure this was a resolved surface.  Unresolved surfaces point to
            //some oopsies that happened inside the ray tracer.
            if (tp->Normal.r > 1) {
                continue;
            }



            nd /= nddiff;
            Vec3f iaw = tp->InAreaWarp.vec(); //Space compression


            double newcalcd = 0.0;
            //newcalcd = tp->TargetLocation.a;
            Vec3f newcollision = t - newp;
            newcollision.x /= iaw.x;
            newcollision.y /= iaw.y;
            newcollision.z /= iaw.z;

            //Tricky: newcollision can actually go inverted if the thing would be fast enough to get embedded.  Flip it back around.
            newcollision.x = fabs(newcollision.x) * ((nd.x > 0) ? 1 : -1);
            newcollision.y = fabs(newcollision.y) * ((nd.y > 0) ? 1 : -1);
            newcollision.z = fabs(newcollision.z) * ((nd.z > 0) ? 1 : -1);

            newcalcd = sqrt(newcollision.x * newcollision.x * COLLIDESIZERATIO + newcollision.y * newcollision.y * COLLIDESIZERATIO + newcollision.z * newcollision.z / COLLIDESIZERATIO);
            //		printf( "%f-%f %f-%f %f-%f ---\n", newcollisionx,newcollisiony,newcollisionz );

            if (newcalcd > COLLIDESIZE) continue;
            float press = (COLLIDESIZE - newcalcd) / COLLIDESIZE;

            tp->id = i;
            gamemap.collision(tp);

            /*
                            printf( " +%f (%f %f %f)  ->\n", press, newcollisionx, newcollisiony, newcollisionz );
                            printf( "  %f %f %f %f\n", tp->Position.r, tp->Position.g, tp->Position.b, tp->Position.a );   //x,y,z,unused
                            printf( "  %f %f %f %f\n", tp->Direction.r, tp->Direction.g, tp->Direction.b, tp->Direction.a );   //x,y,z,unused
                            printf( "  %f %f %f %f\n", tp->AuxRotation.r, tp->AuxRotation.g, tp->AuxRotation.b, tp->AuxRotation.a );   //x,y,z,unused
                            printf( "  %f %f %f %f\n", tp->NewDirection.r, tp->NewDirection.g, tp->NewDirection.b, tp->NewDirection.a );   //x,y,z,unused
                            printf( "  %f %f %f %f\n", tp->Normal.r, tp->Normal.g, tp->Normal.b, tp->Normal.a );   //x,y,z,unused
                            printf( "  %f %f %f %f\n", tp->InAreaWarp.r, tp->InAreaWarp.g, tp->InAreaWarp.b, tp->InAreaWarp.a );   //x,y,z,unused
                            printf( "  %f %f %f %f\n", tp->TargetLocation.r, tp->TargetLocation.g, tp->TargetLocation.b, tp->TargetLocation.a );   //x,y,z,unused
             */

            //If it is a bottom probe, we are on the ground.
            if (i > int(probes.size()) - 3) {
                gTimeSinceOnGround = 0;
                //gh.vZ = 0;
            }


            //First of all, nerf any motion toward the collision.
            {
                Vec3f ns = gh.v;
                ns /= ns.len();


                Vec3f dot {nd.x*ns.x,nd.y*ns.y,nd.z*ns.z};
                if (dot.x > 0) {
                    gh.v.x *= (1. - dot.x * press) * VCFM;
                }
                if (dot.y > 0) {
                    gh.v.y *= (1. - dot.y * press) * VCFM;
                }
                if (dot.z > 0) {
                    gh.v.z *= (1. - dot.z * press * VCFM);
                }
            }
            //Next, push the MapOffset back
            //(Change newx, newy, newz)

            //(ndx,ndy,ndz) represents ray.
            //(tx, ty, tz)  represents target ray hit.
            //press = distance of compression.

            Vec3f nid = newcollision * press;
            nid = nid * CFM;
            nid = {nid.x*iaw.x,nid.y*iaw.y,nid.z*iaw.z};

            if (sqrt(nid.x * nid.x + nid.y * nid.y) < MINSIDE) {
                nid.x = nid.y = 0;
            }

            newp.x -= nid.x * fabs(sfn.x);
            newp.y -= nid.y * fabs(sfn.y);
            newp.z -= nid.z * fabs(sfn.z);
        }

    if (!gGodMode) {
        gh.MapOffset = newp;
    }


    //Extract Yaw, Pitch, Roll.

    //	printf( "%f %f %f\n", gpRotFwd->NewDirection.r, gpRotFwd->NewDirection.g, gpRotFwd->NewDirection.b );
    //	printf( "%f %f %f\n", gpRotUp->NewDirection.r, gpRotUp->NewDirection.g, gpRotUp->NewDirection.b );

    /*
            float PlusZ[3];
            float Up[3];
            float Fwd[3];
     */

    //Re-rotate the camera based on the jump.
    

    orotmat[0] = gpRotFwd->NewDirection; //X
    orotmat[1] = gpRotUp->NewDirection; //Y
    orotmat[2] = orotmat[0].cross3d(orotmat[1]);
    //TODO: If we are in a situation where we're stuck on our side, don't execute this line of code.
    LookQuaternion = Quaternion::fromMatrix((float*)orotmat).normalize();

    //Attempt to re-right the player

#define AUTO_RIGHT_COMP .8
    {
        Vec3f upout = LookQuaternion * Vec3f{0, 1, 0};
        Vec3f fwdtestout = LookQuaternion * Vec3f{0, 0, 1};
        Vec3f lefttestout = LookQuaternion * Vec3f{1, 0, 0};
        upout.z *= -1;
        fwdtestout.x *= -1;
        lefttestout.x *= -1;
        fwdtestout.y *= -1;
        lefttestout.y *= -1;

        lefttestout.z = 0; //Force flat test.

        float irtcos = upout.dot(lefttestout) * AUTO_RIGHT_COMP; //how much effort to try to right?
        float cosofs = (3.14159 / 2.0);

        //Tricky: If we're upside-down we need to re-right ourselves.
        if (upout.z < 0) irtcos *= -1.0;

        Quaternion uprotator = Quaternion::fromAxisAngle({0, 0, 1}, acos(irtcos) - cosofs);
        LookQuaternion = LookQuaternion * uprotator;

    }
clend:
    probes.clear();
    gPosition = newp;
    gDirection = gpForward->Direction.vec();
    gTargetNormal = gpForward->Normal.vec();
    gTargetCompression = gpForward->InAreaWarp.vec();
    gTargetHit = gpForward->TargetLocation.vec();
    gTargetActualDistance = gpForward->Normal.a;
    gTargetProjDistance = gpForward->InAreaWarp.a;
    gTargetPerceivedDistance = gpForward->TargetLocation.a;


    gamemap.update();

    gOverallUpdateNo++;
    //printf( "%f %f %f\n", gPositionX, gPositionY, gPositionZ );

    //	printf( "%7.1f %7.1f %7.1f  /  %7.1f %7.1f %7.1f (%f %f %f)\n", NewYaw, NewPitch, NewRoll, Yaw, Pitch, Roll, gh.MapOffsetX, gh.MapOffsetY, gh.MapOffsetZ );
}
Code Example #15
File: Scene.cpp Project: CZdravko/Horde
//return a convex hull connectivity from
//points .... array of points
//count .... size of array
//l .... line vector(T1-T2)
//ln .... line normal
//parallel lines on a Z=const plane are parallel under perspective projections -> similar
//Projecting points to a single Z plane
//If we always project to a fixed plane then the coefficients can be fixed
void Scene::flatten(Vec3f *points, int count, Vec3f l, Vec3f ln, ConvexHull &ch) {
	//TODO : For now we are using a simplified method of returning a bounding rectangle
	//Maybe good enough, but it should return a convex hull
	//NOT GOOD

	//FIND max line length and max distance between lines
	// distance between lines by finding min and max signed distance between line point and first point projected to "ln"

	Vec3f T1, T2, T3, T4;
	//ConvexH c; - bullet version

	int idmin = 0, idmax = 0;
	float lmax = 0, len;
	float d[12];
	points[0].x = points[0].x * m_fZNear / points[0].z;
	points[0].y = points[0].y * m_fZNear / points[0].z;
	points[0].z = m_fZNear;
	for (int i = 0; i < count / 2; i++) {
		//Projecting points to m_fZNear
		points[2 * i].x = points[2 * i].x * m_fZNear / points[2 * i].z;
		points[2 * i].y = points[2 * i].y * m_fZNear / points[2 * i].z;
		points[2 * i].z = m_fZNear;
		points[2 * i + 1].x = points[2 * i + 1].x * m_fZNear / points[2 * i + 1].z;
		points[2 * i + 1].y = points[2 * i + 1].y * m_fZNear / points[2 * i + 1].z;
		points[2 * i + 1].z = m_fZNear;

		//gathering span
		d[i] = ln.dot(points[2 * i] - points[0]);

		len = Vec3f(points[2 * i] - points[2 * i + 1]).length();
		idmin = d[i] < d[idmin] ? i : idmin;
		idmax = d[i] > d[idmax] ? i : idmax;
		lmax = len > lmax ? len : lmax;
	}

	//we need to sort points starting from dmin going to dmax
	float yzk[12];
	float* indArr[12];
	for (int i = 0; i < count / 2; i++) {
		yzk[i] = d[i]; // / ch.points[i].z; //They are already projected
		indArr[i] = &yzk[i];
	}

	//Sorting by span
	qsort(indArr, count / 2, sizeof(float*), compre_floats);

	//Generating points and connectivity information for convex polygon
	int ch_pc = 0;
	int point_idxs[6];
	point_idxs[0] = 2 * (indArr[0] - yzk);

	for (int i = 0; i < count / 2 - 1;) {
		int temp = i + 1;
		int ith = indArr[i] - yzk;
		int ihtplus1 = indArr[i + 1] - yzk;
		Vec3f pi = points[2 * ith];
		Vec3f pj = points[2 * ihtplus1];
		float dy = (pj.y - pi.y);
		float dx = (pj.x - pi.x);
		float k = dx != 0 ? dy / dx : 100000.0;
		for (int j = i + 2; j < count / 2; ++j) {
			int jth = indArr[j] - yzk;
			pj = points[2 * jth];
			dy = (pj.y - pi.y);
			dx = (pj.x - pi.x);
			float kj = dx != 0 ? dy / dx : 100000.0;
			if (((kj < k) && kj < 0) || ((kj > k) && kj > 0)) {
				k = kj;
				temp = j;
			}
		}
		i = temp;
		ch_pc++;
		point_idxs[ch_pc] = 2 * (indArr[i] - yzk);
	}

	ch_pc++;
	point_idxs[ch_pc] = 2 * (indArr[count / 2] - yzk);

	ch.points[0] = points[point_idxs[0]];
	ch.points[1] = points[point_idxs[0] + 1];
	ch.connectivity[0] = 1;
	ch.connectivity[1] = 2;
	ch.connectivity[2] = 0;
	ch.connectivity[3] = 3;

	int var;
	for (var = 1; var < ch_pc - 1; var++) {
		ch.points[2 * var] = points[point_idxs[var]];
		ch.connectivity[2 * (2 * var)] = 2 * var - 2;
		ch.connectivity[2 * (2 * var) + 1] = 2 * var + 2;

		ch.points[2 * var + 1] = points[point_idxs[var] + 1];
		ch.connectivity[2 * (2 * var + 1)] = 2 * var + 1 - 2;
		ch.connectivity[2 * (2 * var + 1) + 1] = 2 * var + 1 + 2;
	}

	ch.points[2 * var] = points[point_idxs[var]];
	ch.points[2 * var + 1] = points[point_idxs[var] + 1];
	ch.connectivity[2 * (2 * var)] = 2 * var - 2;
	ch.connectivity[2 * (2 * var) + 1] = 2 * var + 1;
	ch.connectivity[2 * (2 * var + 1)] = 2 * var + 1 - 2;
	ch.connectivity[2 * (2 * var + 1) + 1] = 2 * var;

	ch.count = 2 * ch_pc;
}
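The sort over indArr uses a compre_floats comparator that is not included in the listing. A sketch of a comparator consistent with how the sorted order is consumed (ascending by the pointed-to span value):

// Presumed qsort comparator over an array of float*; orders ascending by value.
static int compre_floats(const void *a, const void *b)
{
	float fa = **(float* const*)a;
	float fb = **(float* const*)b;
	return (fa > fb) - (fa < fb);
}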
Code Example #16
File: Cone.cpp Project: jakexie/trunk
bool Cone::Init(const Vec3f &p1, const Vec3f &p2, const Vec3f &p3,
	const Vec3f &n1, const Vec3f &n2, const Vec3f &n3)
{
	//float ncheck = std::max(n2.dot(n3), std::max(n1.dot(n2), n1.dot(n3)));
	//if(ncheck > 0.999)
	//	return false;
	// compute center by intersecting the three planes given by (p1, n1)
	// (p2, n2) and (p3, n3)
	// set up linear system
	double a[4 * 3];
	double d1 = p1.dot(n1);
	double d2 = p2.dot(n2);
	double d3 = p3.dot(n3);
	// column major
	a[0 + 0 * 3] = n1[0];
	a[1 + 0 * 3] = n2[0];
	a[2 + 0 * 3] = n3[0];
	a[0 + 1 * 3] = n1[1];
	a[1 + 1 * 3] = n2[1];
	a[2 + 1 * 3] = n3[1];
	a[0 + 2 * 3] = n1[2];
	a[1 + 2 * 3] = n2[2];
	a[2 + 2 * 3] = n3[2];
	a[0 + 3 * 3] = d1;
	a[1 + 3 * 3] = d2;
	a[2 + 3 * 3] = d3;
	if(dmat_solve(3, 1, a))
		return false;
	m_center[0] = a[0 + 3 * 3];
	m_center[1] = a[1 + 3 * 3];
	m_center[2] = a[2 + 3 * 3];

	// compute axisDir
	Vec3f s1 = p1 - m_center;
	Vec3f s2 = p2 - m_center;
	Vec3f s3 = p3 - m_center;
	s1.normalize();
	s2.normalize();
	s3.normalize();
	Plane pl(s1 + m_center, s2 + m_center, s3 + m_center);
	m_axisDir = pl.getNormal();
	// make sure axis points in direction of s1
	// this defines the side of the cone!!!
	if(m_axisDir.dot(s1) < 0)
		m_axisDir *= -1;
	m_angle = 0;
	float angle = m_axisDir.dot(n1);
	if(angle < -1) // clamp angle to [-1, 1]
		angle = -1;
	else if(angle > 1)
		angle = 1;
	if(angle < 0)
		// m_angle = omega + 90
		angle = std::acos(angle) - float(M_PI) / 2;
	else
		// m_angle = 90 - omega
		angle = float(M_PI) / 2 - std::acos(angle);
	m_angle += angle;
	angle = m_axisDir.dot(n2);
	if(angle < -1) // clamp angle to [-1, 1]
		angle = -1;
	else if(angle > 1)
		angle = 1;
	if(angle < 0)
		// m_angle = omega + 90
		angle = std::acos(angle) - float(M_PI) / 2;
	else
		// m_angle = 90 - omega
		angle = float(M_PI) / 2 - std::acos(angle);
	m_angle += angle;
	angle = m_axisDir.dot(n3);
	if(angle < -1) // clamp angle to [-1, 1]
		angle = -1;
	else if(angle > 1)
		angle = 1;
	if(angle < 0)
		// m_angle = omega + 90
		angle = std::acos(angle) - float(M_PI) / 2;
	else
		// m_angle = 90 - omega
		angle = float(M_PI) / 2 - std::acos(angle);
	m_angle += angle;
	m_angle /= 3;
	if(m_angle < 1.0e-6 || m_angle > float(M_PI) / 2 - 1.0e-6)
		return false;
	//if(m_angle > 1.3962634015954636615389526147909) // 80 degrees
	if(m_angle > 1.4835298641951801403851371532153f) // 85 degrees
		return false;
	m_normal = Vec3f(std::cos(-m_angle), std::sin(-m_angle), 0);
	m_normalY = m_normal[1] * m_axisDir;
	m_n2d[0] = std::cos(m_angle);
	m_n2d[1] = -std::sin(m_angle);
	m_hcs.FromNormal(m_axisDir);
	m_angularRotatedRadians = 0;
	return true;
}
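The same clamp-and-convert step is applied to n1, n2 and n3 before averaging into m_angle. An illustrative helper expressing that repeated block (a refactoring sketch, not part of the original source):

// Angle between the axis and one sample normal, folded so that it measures
// the cone half-angle (omega + 90 or 90 - omega, as in the comments above).
static float axisNormalAngle(const Vec3f &axisDir, const Vec3f &n)
{
	float c = axisDir.dot(n);
	if(c < -1) c = -1;            // clamp to the acos domain [-1, 1]
	else if(c > 1) c = 1;
	return (c < 0) ? std::acos(c) - float(M_PI) / 2
	               : float(M_PI) / 2 - std::acos(c);
}
// m_angle would then be the average of axisNormalAngle(m_axisDir, ni) over n1..n3.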
Code Example #17
File: AMCRecording.cpp Project: msavva/pigraphs
ASFSkeleton::JointQuats ASFSkeleton::computeSkelStateQuaternions() const {
  JointQuats qs;
  using J = Skeleton::JointType;
  const auto p = [&] (const J& iJoint) {
    const string& boneName = kKinectBones2ASFBones.at(iJoint);
    const int bIdx = name2idx(boneName.c_str());
    const Bone& b = *getBone(getRoot(), bIdx);
    const Vec3f bpBody = m_pRootBone->parent2local * b.tgt_pos;
    return bpBody;
  };
  const Vec3f
    // canonical ASFSkeleton frame is x-left, y-up, z-forward
    up = Vec3f::UnitY(),
    right = -Vec3f::UnitX(),
    /// TORSO ///
    // spine base canonical frame
    yb = up,                                    // pelvic up
    xb = crossNorm(yb, right),                  // pelvic front
    zb = crossNorm(xb, yb),                     // pelvic right

    // pelvis frame - origin = ow, child = ove
    ove = p(J::JointType_SpineMid),             // mid-spine vertebrae
    yp = (ove - p(J::JointType_SpineBase)).normalized(),    // pelvic up
    xp = crossNorm(yp, right),                  // pelvic front
    zp = crossNorm(xp, yp),                     // pelvic right

    // torso frame - origin = ove, child = osc
    osc = p(J::JointType_SpineShoulder),        // sternoclavicular joint
    yto = (osc - ove).normalized(),             // torso up
    xto = crossNorm(yto, right),                // torso front
    zto = crossNorm(xto, yto),                  // torso right

    // shoulder-mid frame - origin = osc, child = ocv
    ocv = p(J::JointType_Neck),                 // cervical vertebrae (neck)
    ysh = (ocv - osc).normalized(),             // shoulder-mid up
    xsh = crossNorm(ysh, right),                // shoulder-mid front
    zsh = crossNorm(xsh, ysh),                  // shoulder-mid right

    // neck frame - origin = ocv, child = ohe
    zne = zp,                                   // neck right
   _xne = crossNorm(ysh, zne),                  // neck front temporary
    yne = crossNorm(zne, _xne),                 // neck up
    xne = crossNorm(yne, zne),                  // neck front

    // L clavicle frame - origin osc, child = ghl
    ghl = p(J::JointType_ShoulderLeft),         // glenohumeral rotation center L
    zcl = -(ghl - osc).normalized(),            // clavicle right L
    xcl = crossNorm(ysh, zcl),                  // clavicle front L
    ycl = crossNorm(zcl, xcl),                  // clavicle up L

    // R clavicle frame - origin osc, child = ghr
    ghr = p(J::JointType_ShoulderRight),        // glenohumeral rotation center R
    zcr = (ghr - osc).normalized(),             // clavicle right R
    xcr = crossNorm(ysh, zcr),                  // clavicle front R
    ycr = crossNorm(zcr, xcr),                  // clavicle up R

    /// ARMS AND HANDS ///

    // L arm frame common
    ecl = p(J::JointType_ElbowLeft),            // epicondyle L
    yhl = (ghl - ecl).normalized(),             // humerus up L
    scl = p(J::JointType_WristLeft),            // radial+ulnar styloid L
    yfl = (ecl - scl).normalized(),             // forearm up L

    // L humerus frame - origin = ghl, child = ecl
    // yhl computed in common arm frame
    _zhl = crossNorm(yfl, yhl),
    zhl = (_zhl.dot(right) > 0) ? _zhl : -_zhl, // humerus right L
    xhl = crossNorm(yhl, zhl),                  // humerus front L
    // TODO(MS): degenerate case: yhl-yfl parallel

    // L forearm frame - origin = scl, child = ecl
    // yfl computed in common arm frame
    zfl = zhl,                                  // forearm right L
    xfl = crossNorm(yfl, zfl),                  // forearm front L

    // L palm frame - origin = scl, child = pal
    xrl = xfl,                                  // trapezium front L
    yrl = yfl,                                  // trapezium up L
    zrl = zfl,                                  // trapezium right L

    // L thumb frame - origin = pal, child = thl
    xxl = xfl,                                  // thumb front L
    yxl = yfl,                                  // thumb up L
    zxl = zfl,                                  // thumb right L

    // L fingertip frame - origin = pal, child = phl
    xpl = xfl,                                  // distal phalanges front L
    zpl = zfl,                                  // distal phalanges right L
    ypl = yfl,                                  // distal phalanges up L

    // R arm frame common
    ecr = p(J::JointType_ElbowRight),           // epicondyle R
    yhr = (ghr - ecr).normalized(),             // humerus up R
    scr = p(J::JointType_WristRight),           // radial+ulnar styloid R
    yfr = (ecr - scr).normalized(),             // forearm up R

    // R humerus frame - origin = ghr, child = ecr
    // yhl computed in common arm frame
    _zhr = crossNorm(yfr, yhr),
    zhr = (_zhr.dot(right) > 0) ? _zhr : -_zhr, // humerus right R
    xhr = crossNorm(yhr, zhr),                  // humerus front R
    // TODO(MS): degenerate case: yhr-yfr parallel

    // R forearm frame - origin = scr, child = ecr
    // yfl computed in common arm frame
    zfr = zhr,                                  // forearm right R
    xfr = crossNorm(yfr, zfr),                  // forearm front R

    // R palm frame - origin = scr, child = par
    xrr = xfr,                                  // trapezium front R
    yrr = yfr,                                  // trapezium up R
    zrr = zfr,                                  // trapezium right R

    // R thumb frame - origin = par, child = thr
    xxr = xfr,                                  // thumb front R
    yxr = yfr,                                  // thumb up R
    zxr = zfr,                                  // thumb right R

    // R fingertip frame - origin = par, child = thr
    xpr = xfr,                                  // distal phalanges front R
    zpr = zfr,                                  // distal phalanges right R
    ypr = yfr,                                  // distal phalanges up R

    /// LEGS AND FEET ///

    // L ilium/hip - origin = ove, child = fhl
    fhl = p(J::JointType_HipLeft),              // hip femur head L
    zil = -fhl.normalized(),                    // ilium/hip right L
    xil = crossNorm(yp, zil),                   // ilium/hip front L
    yil = crossNorm(zil, xil),                  // ilium/hip up L

    // L thigh - origin = fhl, child = otl
    otl = p(J::JointType_KneeLeft),             // knee L
    ytl = (fhl - otl).normalized(),             // thigh up L
    xtl = crossNorm(ytl, right),                // thigh front L
    ztl = crossNorm(xtl, ytl),                  // thigh right L

    // L shank - origin = otl, child osl
    osl = p(J::JointType_AnkleLeft),            // ankle L
    ysl = (otl - osl).normalized(),             // shank up L
    xsl = crossNorm(ysl, right),                // shank front L
    zsl = crossNorm(xsl, ysl),                  // shank right L

    // L foot
    yml = -xsl,                                 // foot toe up L
    zml = zsl,                                  // foot toe right L
    xml = crossNorm(yml, zml),                  // foot toe front L

    // R ilium/hip - origin = ove, child = fhr
    fhr = p(J::JointType_HipRight),             // hip femur head R
    zir = fhr.normalized(),                     // ilium/hip right R
    xir = crossNorm(yp, zir),                   // ilium/hip front R
    yir = crossNorm(zir, xir),                  // ilium/hip up R

    // R thigh - origin = fhr, child = otr
    otr = p(J::JointType_KneeRight),            // knee R
    ytr = (fhr - otr).normalized(),             // thigh up R
    xtr = crossNorm(ytr, right),                // thigh front R
    ztr = crossNorm(xtr, ytr),                  // thigh right R
 
    // R shank - origin = otr, child osr
    osr = p(J::JointType_AnkleRight),           // ankle R
    ysr = (otr - osr).normalized(),             // shank up R
    xsr = crossNorm(ysr, right),                // shank front R
    zsr = crossNorm(xsr, ysr),                  // shank right R

    // R foot
    ymr = -xsr,                                 // foot toe up R
    zmr = zsr,                                  // foot toe right R
    xmr = crossNorm(ymr, zmr);                  // foot toe front R

  qs[J::JointType_SpineBase]      = Rquat(xb,  yb,  zb),   // base
  qs[J::JointType_SpineMid]       = Rquat(xp,  yp,  zp),   // pelvis
  qs[J::JointType_SpineShoulder]  = Rquat(xto, yto, zto),  // torso
  qs[J::JointType_Neck]           = Rquat(xsh, ysh, zsh),  // neck
  qs[J::JointType_Head]           = Rquat(xne, yne, zne),  // head
  qs[J::JointType_ShoulderLeft]   = Rquat(xcl, ycl, zcl),  // clavicle L
  qs[J::JointType_ShoulderRight]  = Rquat(xcr, ycr, zcr),  // clavicle R
  qs[J::JointType_ElbowLeft]      = Rquat(xhl, yhl, zhl),  // humerus L
  qs[J::JointType_WristLeft]      = Rquat(xfl, yfl, zfl),  // forearm L
  qs[J::JointType_HandLeft]       = Rquat(xrl, yrl, zrl),  // trapezium L
  qs[J::JointType_ThumbLeft]      = Rquat(xxl, yxl, zxl),  // thumb L
  qs[J::JointType_HandTipLeft]    = Rquat(xpl, ypl, zpl),  // hand tips L
  qs[J::JointType_ElbowRight]     = Rquat(xhr, yhr, zhr),  // humerus R
  qs[J::JointType_WristRight]     = Rquat(xfr, yfr, zfr),  // forearm R
  qs[J::JointType_HandRight]      = Rquat(xrr, yrr, zrr),  // trapezium R
  qs[J::JointType_ThumbRight]     = Rquat(xxr, yxr, zxr),  // thumb R
  qs[J::JointType_HandTipRight]   = Rquat(xpr, ypr, zpr),  // hand tips R
  qs[J::JointType_HipLeft]        = Rquat(xil, yil, zil),  // hip L
  qs[J::JointType_KneeLeft]       = Rquat(xtl, ytl, ztl),  // thigh L
  qs[J::JointType_AnkleLeft]      = Rquat(xsl, ysl, zsl),  // shank L
  qs[J::JointType_FootLeft]       = Rquat(xml, yml, zml),  // foot L
  qs[J::JointType_HipRight]       = Rquat(xir, yir, zir),  // hip R
  qs[J::JointType_KneeRight]      = Rquat(xtr, ytr, ztr),  // thigh R
  qs[J::JointType_AnkleRight]     = Rquat(xsr, ysr, zsr),  // shank R
  qs[J::JointType_FootRight]      = Rquat(xmr, ymr, zmr);  // foot R

  for (int i = 0; i < Skeleton::kNumJoints; ++i) {
    const J ji = static_cast<J>(i);
    const int bIdx = name2idx(kKinectBones2ASFBones.at(ji).c_str());
    const Bone& b = *getBone(getRoot(), bIdx);
    qs[i] = b.local2world.inverse() * qs[i];
  }

  return qs;
}
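Every joint orientation above is built the same way: take two anatomical directions, turn them into an orthonormal frame with normalized cross products, and pack the frame into a quaternion. The helpers crossNorm and Rquat are defined elsewhere in that project; the following is only a minimal sketch of what they are assumed to do, written against Eigen types (an assumption — the snippet itself only shows .normalized() and cross products).

#include <Eigen/Dense>

using Vec3f = Eigen::Vector3f;
using Quatf = Eigen::Quaternionf;

// Unit vector orthogonal to a and b (right-handed): the missing axis of a frame.
static Vec3f crossNorm(const Vec3f& a, const Vec3f& b) {
    return a.cross(b).normalized();
}

// Pack three orthonormal axes (used as matrix columns) into a rotation quaternion.
static Quatf Rquat(const Vec3f& x, const Vec3f& y, const Vec3f& z) {
    Eigen::Matrix3f R;
    R.col(0) = x;
    R.col(1) = y;
    R.col(2) = z;
    return Quatf(R);
}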
Code Example #18
File: Cone.cpp  Project: jakexie/trunk
void Cone::Parameters(const Vec3f &p, std::pair< float, float > *param) const
{
	// parametrize
	Vec3f s = p - m_center;
	float height = m_axisDir.dot(s);
	float planex = s.dot(m_hcs[0].Data());
	float planey = s.dot(m_hcs[1].Data());
	float l = planex * planex + planey * planey;
	if(l > 0)
	{
		planex /= l;
		planey /= l;
	}
	float angle = std::atan2(planey, planex);
	if(angle < 0)
		angle += float(2 * M_PI);
	/*Vec3f axisDiff = s - height * m_axisDir;
	axisDiff.normalize();
	float angle = m_angular.dot(axisDiff);
	if(angle < -1) // clamp angle to [-1, 1]
		angle = -1;
	else if(angle > 1)
		angle = 1;
	if(m_angular.cross(axisDiff).dot(m_axisDir) < 0)
		angle = std::acos(-angle) + M_PI;
	else
		angle = std::acos(angle);
	// angle ok*/
	// get length from height
	//float length = height / std::cos(m_angle);
	//param->first = length;
	// this should be more precise than a division by std::cos:
	// this is for a two-sided cone!
	// distance to axis
	float sqrS = s.sqrLength();
	float f = sqrS - (height * height);
	if(f <= 0)
		f = 0;
	else
		f = std::sqrt(f);
	float sdist = fabs(m_n2d[0] * f + ((height < 0)? -1 : 1) * m_n2d[1] * height);
	float length = std::sqrt(sqrS + sdist * sdist);
	param->first = /*(height < 0)? -length :*/ length;
	param->second = angle;
	/*// get normal for p
	Vec3f pln = s.cross(m_axisDir);
	Vec3f plx = m_axisDir.cross(pln);
	Vec3f n;
	if(plx.normalize() < 1.0e-6)
	{
		*param = std::make_pair(0.0f, angle);
		return height;
	}
	if(height < 0)
		n = m_normal[0] * plx - m_normalY;
	else
	n = m_normal[0] * plx + m_normalY;
	Vec3f l = n.cross(pln);
	l.normalize();
	// make sure l points in direction of axis
	if(m_axisDir.dot(l) < 0)
		l *= -1;
	// project p on line m_center + lambda * l
	// get lambda
	float lambda = s.dot(l);
	// make sure l points in direction of axis
	if(m_axisDir.dot(l) < 0)
	{
		if(lambda > 0)
		{
			*param = std::make_pair(s.length(), angle);
			return height;
		}
	}
	else if(lambda < 0)
	{
		*param = std::make_pair(s.length(), angle);
		return height;
	}
	*param = std::make_pair(*fabs(lambda), angle);*/
}
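A hypothetical usage sketch for the parametrization above: map a surface point onto the cone's (length, angle) parameter space. The Cone instance and the query point are assumed to come from the surrounding shape-fitting code.

std::pair<float, float> param;
cone.Parameters(queryPoint, &param);
float along  = param.first;   // distance coordinate along the cone surface
float around = param.second;  // azimuth around the axis, wrapped into [0, 2*pi)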
Code Example #19
Fubi::RecognitionResult::Result LinearMovementRecognizer::recognizeOn(FubiUser* user)
{
	Fubi::RecognitionResult::Result result = Fubi::RecognitionResult::NOT_RECOGNIZED;
	
	if (user != 0x0)
	{
		// Get joint positions
		SkeletonJointPosition* joint = &(user->m_currentTrackingData.jointPositions[m_joint]);
		SkeletonJointPosition* lastJoint = &(user->m_lastTrackingData.jointPositions[m_joint]);
		if (m_useLocalPos)
		{
			joint = &(user->m_currentTrackingData.localJointPositions[m_joint]);
			lastJoint = &(user->m_lastTrackingData.localJointPositions[m_joint]);
		}

		// Check confidence
		if (joint->m_confidence >= m_minConfidence && lastJoint->m_confidence >= m_minConfidence)
		{
			bool relJointsValid = false;

			// Calculate relative vector of current and last frame
			Vec3f vector(Fubi::Math::NO_INIT);
			Vec3f lastVector(Fubi::Math::NO_INIT);
			if (m_useRelJoint)
			{
				// Using the other joint
				SkeletonJointPosition* relJoint = &(user->m_currentTrackingData.jointPositions[m_relJoint]);
				SkeletonJointPosition* lastRelJoint = &(user->m_lastTrackingData.jointPositions[m_relJoint]);
				if (m_useLocalPos)
				{
					relJoint = &(user->m_currentTrackingData.localJointPositions[m_relJoint]);
					lastRelJoint = &(user->m_lastTrackingData.localJointPositions[m_relJoint]);
				}
				relJointsValid = relJoint->m_confidence >= m_minConfidence && lastRelJoint->m_confidence >= m_minConfidence;
				if(relJointsValid)
				{
					vector = joint->m_position - relJoint->m_position;
					lastVector = lastJoint->m_position - lastRelJoint->m_position;
				}
			}
			else
			{
				// Absolute values (relative to Kinect position)
				relJointsValid = true;
				vector = joint->m_position;
				lastVector = lastJoint->m_position;
			}
	
		
			if (relJointsValid)
			{
				// Get the difference between both vectors and the time
				Vec3f diffVector = vector - lastVector;
				float diffTime = clamp(float(user->m_currentTrackingData.timeStamp - user->m_lastTrackingData.timeStamp), Math::Epsilon, Math::MaxFloat);
	
				float vel = 0;
				float angleDiff = 0;
				if (m_directionValid)
				{
					if (m_useOnlyCorrectDirectionComponent)
					{
						// Weight the vector components according to the given direction
						// Apply the direction stretched to the same length on the vector
						// Components in the correct direction will result in a positive value
						// Components in the wrong direction have a negative value
						Vec3f dirVector = diffVector * (m_direction * diffVector.length());
			
						// Build the sum of the weighted and signed components
						float sum = dirVector.x + dirVector.y + dirVector.z;

						// Calculate the velocity (if there are too many negative components it may be less than zero)
						vel = (sum <= 0) ? (-sqrt(-sum) / diffTime) : (sqrt(sum) / diffTime);
					}
					else
						// calculate the velocity directly from the current vector
						vel = diffVector.length() / diffTime;

					// Additionally check the angle difference
					angleDiff = radToDeg(acosf(diffVector.dot(m_direction) / (diffVector.length() * m_direction.length())));
				}
				else
				{
					// No direction given so check for movement speed in any direction
					vel = diffVector.length() / diffTime;
				}

				// Check if velocity is in between the boundaries
				if (vel >= m_minVel && vel <= m_maxVel && angleDiff <= m_maxAngleDiff)
					result = RecognitionResult::RECOGNIZED;

				//if (/*!recognized && */abs(vel) > 200)
				//{
				//	if (m_maxVel > 10000.0f)
				//	{
				//		Fubi_logInfo("Lin Gesture rec: vel=%4.0f <= %4.0f <= INF recognized=%s\n", 
				//		  m_minVel, vel, (result == RecognitionResult::RECOGNIZED) ? "true" : "false");
				//	}
				//	else
				//		Fubi_logInfo("Lin Gesture rec: vel=%4.0f <= %4.0f <= %4.0f recognized=%s\n", 
				//		m_minVel, vel, m_maxVel, (result == RecognitionResult::RECOGNIZED) ? "true" : "false");
				//	/*diffVector.normalize();

				//	Fubi_logInfo("Lin Gesture rec: Hand.z=%.3f, targetDir=%.3f/%.3f/%.3f \n\t\tactualDir=%.3f/%.3f/%.3f vel=%.0f/%.0f recognized=%s\n", 
				//		joint.m_position.z,
				//		m_direction.x, m_direction.y, m_direction.z, 
				//		diffVector.x, diffVector.y, diffVector.z,
				//		vel, m_minVel, recognized ? "true" : "false");*/
				//}
			}
			else
				result = Fubi::RecognitionResult::TRACKING_ERROR;
		}
		else
			result = Fubi::RecognitionResult::TRACKING_ERROR;
	}

	return result;
}
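The recognition boils down to one test: the displacement between two frames must have the right speed and, if a direction is configured, must not deviate from it by more than a maximum angle. A condensed, hypothetical sketch of that core test (the names and the simplified velocity handling are assumptions, not Fubi code):

#include <cmath>

struct LinearCheck { float minVel, maxVel, maxAngleDiffDeg; };

static bool movedLinearly(const Vec3f& pos, const Vec3f& lastPos, float dt,
                          const Vec3f& direction, const LinearCheck& c)
{
	Vec3f diff = pos - lastPos;
	float len = diff.length();
	if (len <= 1e-6f || dt <= 1e-6f)
		return false;                                  // no measurable movement
	float vel = len / dt;                              // e.g. mm per second
	float cosA = diff.dot(direction) / (len * direction.length());
	if (cosA >  1.0f) cosA =  1.0f;                    // clamp away rounding error
	if (cosA < -1.0f) cosA = -1.0f;
	float angleDeg = std::acos(cosA) * 180.0f / 3.14159265f;
	return vel >= c.minVel && vel <= c.maxVel && angleDeg <= c.maxAngleDiffDeg;
}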
Code Example #20
void  ProjectionCamera::makeLookAtViewMatrix(Vec3f camPos ,Vec3f  center,Vec3f up,Matrix44f &_lookatMatrix)
{
  Vec3f zaxis = camPos-center;
    zaxis.normalize();
    Vec3f xaxis = up.cross( zaxis);
    xaxis.normalize();
    Vec3f  yaxis  =zaxis.cross(    xaxis);
          
	_lookatMatrix.set(xaxis.x,yaxis.x,zaxis.x,0,xaxis.y,yaxis.y,zaxis.y,0,xaxis.z,yaxis.z,zaxis.z,0,-xaxis.dot( camPos),-yaxis.dot( camPos),-zaxis.dot( camPos),1);
	
}
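A hypothetical usage sketch: build a right-handed view matrix for a camera two units up and five back, looking at the origin with +Y as up. Vec3f and Matrix44f are the types used above; the camera instance name is an assumption.

Matrix44f view;
camera.makeLookAtViewMatrix(Vec3f(0.0f, 2.0f, 5.0f),   // camera position
                            Vec3f(0.0f, 0.0f, 0.0f),   // point looked at
                            Vec3f(0.0f, 1.0f, 0.0f),   // up direction
                            view);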
Code Example #21
// Compute the color at the queried position
Pixel rayTrace(const Vec3f ray_start, const Vec3f ray_vec,
               const int recursive_depth,
               const int recursive_depth_max,
               const bool back_face,
               const Model& model,
               const Bvh::BvhNode& bvh_node,
               const Hdri& bg,
               Qmc& random) {

  // Ray vs. mesh intersection test via the BVH
  Bvh::TestInfo test_info;
  bool has_hit = Bvh::intersect(test_info, ray_start, ray_vec, bvh_node, back_face);

  // No hit
  if (!has_hit) {
    // Sample the environment map instead
    Real theta = std::acos(ray_vec.y());
    Real l = std::sqrt(ray_vec.x() * ray_vec.x() + ray_vec.z() * ray_vec.z());
    Real xz = (l > 0.0) ? ray_vec.x() / l : 0.0;
    Real phi = std::acos(xz);
    if (ray_vec.z() < 0.0) {
      phi = 2.0 * M_PI - phi;
    }

    Real u = phi / (2.0 * M_PI) + 0.25;
    Real v = theta / M_PI;
    
    return bg.pixel(u, v);
  }

  const auto& material = *test_info.material;

  // Recursion limit exceeded
  // FIXME: emissive can only be set in the 0.0 to 1.0 range in the authoring tool, so boost it to a large value here
  if (recursive_depth > recursive_depth_max) {
    return material.emissive() * 100;
  }

  // Compute the specular reflection recursively
  Pixel reflection_pixel(Pixel::Zero());
  if (!material.reflective().isZero()) {
    Vec3f reflection_vec = reflectVec(ray_vec, test_info.hit_normal);
    // TIP: nudge the start point forward so the ray does not hit the same spot again
    Vec3f reflection_start = (test_info.hit_pos + reflection_vec * 0.001);

    reflection_pixel = rayTrace(reflection_start, reflection_vec,
                                recursive_depth + 1,
                                recursive_depth_max,
                                false,
                                model,
                                bvh_node,
                                bg,
                                random);
  }

  // Compute the refraction recursively
  Pixel refraction_pixel(Pixel::Zero());
  if (!material.transparent().isZero()) {
    Real  refractive_index     = material.ior();
    Vec3f hit_normal           = test_info.hit_normal;
    bool  refraction_back_face = true;
    Real F0;

    // dot(ray, normal) >= 0 -> the ray is exiting the transparent object
    if (ray_vec.dot(hit_normal) >= 0.0) {
      // Reflectance at normal incidence
      F0 = std::pow(refractive_index - 1.0, 2.0) / std::pow(refractive_index + 1.0, 2.0);

      hit_normal = -hit_normal;
    }
    else {
      // Reflectance at normal incidence
      F0 = std::pow(1.0 - refractive_index, 2.0) / std::pow(1.0 + refractive_index, 2.0);

      // Cheetah3D stores the material's own refractive index, so when entering, convert it to the ratio against vacuum (1.0)
      refractive_index = 1 / refractive_index;
    }

    // Total internal reflection?
    Real ddn = ray_vec.dot(hit_normal);
    Real cos2t = 1.0 - refractive_index * refractive_index * (1.0 - ddn * ddn);
    if (cos2t < 0.0) {
      Vec3f reflection_vec = reflectVec(ray_vec, test_info.hit_normal);

      // TIP: nudge the start point forward so the ray does not hit the same spot again
      Vec3f reflection_start = (test_info.hit_pos + reflection_vec * 0.001);
      
      refraction_pixel = rayTrace(reflection_start, reflection_vec,
                                  recursive_depth + 1,
                                  recursive_depth_max,
                                  false,
                                  model,
                                  bvh_node,
                                  bg,
                                  random);
    }
    else {
      Vec3f refraction_vec = refractVec(ray_vec, hit_normal, refractive_index);
      // TIP: nudge the refraction start point forward so the ray does not hit the same spot again
      Vec3f refraction_start = (test_info.hit_pos + refraction_vec * 0.001);

      // Amount of light carried by the refracted ray
      Real Re = F0 + (1.0 - F0) * std::pow(1.0 + ddn, 5.0);
      Real Tr = 1.0 - Re;
      
      refraction_pixel = rayTrace(refraction_start, refraction_vec,
                                  recursive_depth + 1,
                                  recursive_depth_max,
                                  refraction_back_face,
                                  model,
                                  bvh_node,
                                  bg,
                                  random) * Tr;
    }
  }

  // Diffuse reflection
  Pixel light_diffuse = Pixel::Zero();
  if (!material.diffuse().isZero()) {
    // TIP: lift the start point slightly off the surface so the ray does not hit the same spot again
    Vec3f pathtrace_start(test_info.hit_pos + test_info.hit_normal * 0.001);
    Vec3f pathtrace_vec = radiationVector_qmc(test_info.hit_normal, random);

    light_diffuse = rayTrace(pathtrace_start, pathtrace_vec,
                             recursive_depth + 1,
                             recursive_depth_max,
                             false,
                             model,
                             bvh_node,
                             bg,
                             random);
  }
  
  Real reflect_value = 1.0 - material.reflective().maxCoeff();
  Real refract_value = 1.0 - material.transparent().maxCoeff();

  Pixel diffuse_color = material.diffuse();
#if 0
  if (material.hasTexture()) {
    Real u = test_info.hit_uv.x();
    Real v = test_info.hit_uv.y();
    
    diffuse_color = material.texture().pixel(u, v);
  }
#endif

  return diffuse_color * light_diffuse * reflect_value * refract_value
       + material.reflective() * reflection_pixel
       + material.transparent() * refraction_pixel
       + material.emissive() * 100;
}
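reflectVec and refractVec are not shown in this example. The following are plausible sketches of what they compute (an assumption, not the project's actual code), using the same Vec3f/Real types and unit-length inputs; eta is the ratio of refractive indices already prepared by the caller, which has also ruled out total internal reflection before calling refractVec.

#include <cmath>

Vec3f reflectVec(const Vec3f& ray, const Vec3f& normal) {
  // Mirror the incoming direction about the surface normal.
  return ray - normal * (2.0 * ray.dot(normal));
}

Vec3f refractVec(const Vec3f& ray, const Vec3f& normal, Real eta) {
  // Snell's law in vector form; cos2_t >= 0 is guaranteed by the caller.
  Real cos_i  = -ray.dot(normal);
  Real cos2_t = 1.0 - eta * eta * (1.0 - cos_i * cos_i);
  return (ray * eta + normal * (eta * cos_i - std::sqrt(cos2_t))).normalized();
}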
Code Example #22
File: vec3f.cpp  Project: NaraVan/OpenGL_Game
float Vec3f::getAngle(Vec3f v1, Vec3f v2) const //!< Returns angle between two vectors. Added by N. Van Rossum
{
	return acos( v1.dot(v2) / (v1.magnitude() * v2.magnitude()));
}
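Because of floating-point rounding, the ratio passed to acos can land slightly outside [-1, 1], in which case the result is NaN. A slightly safer variant with clamping (a hypothetical free function, not part of the project):

float getAngleSafe(Vec3f v1, Vec3f v2)
{
	float c = v1.dot(v2) / (v1.magnitude() * v2.magnitude());
	if (c >  1.0f) c =  1.0f;   // clamp away rounding error
	if (c < -1.0f) c = -1.0f;
	return acos(c);
}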
Code Example #23
void collisionRecurse(MeshCollisionTraversalNodeOBB* node, int b1, int b2, const Matrix3f& R, const Vec3f& T, BVHFrontList* front_list)
{
  bool l1 = node->isFirstNodeLeaf(b1);
  bool l2 = node->isSecondNodeLeaf(b2);

  if(l1 && l2)
  {
    updateFrontList(front_list, b1, b2);

    if(node->BVTesting(b1, b2, R, T)) return;

    node->leafTesting(b1, b2, R, T);
    return;
  }

  if(node->BVTesting(b1, b2, R, T))
  {
    updateFrontList(front_list, b1, b2);
    return;
  }

  Vec3f temp;

  if(node->firstOverSecond(b1, b2))
  {
    int c1 = node->getFirstLeftChild(b1);
    int c2 = node->getFirstRightChild(b1);

    const OBB& bv1 = node->model1->getBV(c1).bv;

    Matrix3f Rc(R.transposeTimes(bv1.axis[0]), R.transposeTimes(bv1.axis[1]), R.transposeTimes(bv1.axis[2]));
    temp = T - bv1.To;
    Vec3f Tc(temp.dot(bv1.axis[0]), temp.dot(bv1.axis[1]), temp.dot(bv1.axis[2]));

    collisionRecurse(node, c1, b2, Rc, Tc, front_list);

    // early stop is disabled if front_list is used
    if(node->canStop() && !front_list) return;

    const OBB& bv2 = node->model1->getBV(c2).bv;

    Rc = Matrix3f(R.transposeTimes(bv2.axis[0]), R.transposeTimes(bv2.axis[1]), R.transposeTimes(bv2.axis[2]));
    temp = T - bv2.To;
    Tc.setValue(temp.dot(bv2.axis[0]), temp.dot(bv2.axis[1]), temp.dot(bv2.axis[2]));

    collisionRecurse(node, c2, b2, Rc, Tc, front_list);
  }
  else
  {
    int c1 = node->getSecondLeftChild(b2);
    int c2 = node->getSecondRightChild(b2);

    const OBB& bv1 = node->model2->getBV(c1).bv;
    Matrix3f Rc;
    temp = R * bv1.axis[0];
    Rc(0, 0) = temp[0]; Rc(1, 0) = temp[1]; Rc(2, 0) = temp[2];
    temp = R * bv1.axis[1];
    Rc(0, 1) = temp[0]; Rc(1, 1) = temp[1]; Rc(2, 1) = temp[2];
    temp = R * bv1.axis[2];
    Rc(0, 2) = temp[0]; Rc(1, 2) = temp[1]; Rc(2, 2) = temp[2];
    Vec3f Tc = R * bv1.To + T;

    collisionRecurse(node, b1, c1, Rc, Tc, front_list);

    // early stop is disabled if front_list is used
    if(node->canStop() && !front_list) return;

    const OBB& bv2 = node->model2->getBV(c2).bv;
    temp = R * bv2.axis[0];
    Rc(0, 0) = temp[0]; Rc(1, 0) = temp[1]; Rc(2, 0) = temp[2];
    temp = R * bv2.axis[1];
    Rc(0, 1) = temp[0]; Rc(1, 1) = temp[1]; Rc(2, 1) = temp[2];
    temp = R * bv2.axis[2];
    Rc(0, 2) = temp[0]; Rc(1, 2) = temp[1]; Rc(2, 2) = temp[2];
    Tc = R * bv2.To + T;

    collisionRecurse(node, b1, c2, Rc, Tc, front_list);
  }
}
Code Example #24
File: math_func.cpp  Project: hksonngan/ds_cinder
bool intersect2D( const ci::Vec3f &start0, const ci::Vec3f &end0, const ci::Vec3f &start1, const ci::Vec3f &end1 )
{
  Vec3f u = end0 - start0;
  Vec3f v = end1 - start1;

  Vec3f w = start0 - start1;

  float bottom = ((u.dot(u))*(v.dot(v))-(u.dot(v))*(u.dot(v)));

  float t0 = 0.0f;
  float t1 = 0.0f;

  float top1 = ((u.dot(v))*(v.dot(w))-(v.dot(v))*(u.dot(w)));
  float top2 = ((u.dot(u))*(v.dot(w))-(u.dot(v))*(u.dot(w)));

  if (!isEqual(bottom, 0.0f)) {
    t0 = top1/bottom;

    t1 = top2/bottom;
  } else {
    t0 = 0.0f;

    t1 = (v.dot(w))/(v.dot(v));
  }

  t0 = clamp(t0, 0.0f, 1.0f);

  t1 = clamp(t1, 0.0f, 1.0f);

  Vec3f p1 = start0 + u * t0;

  Vec3f q1 = start1 + v * t1;

  if (isEqual(top1, 0.0f) && isEqual(top2, 0.0f)) {
    return false;
  }

  if (u.normalized() == v.normalized()) {
    return false;
  }

  if (p1 == q1) {
    return true;
  }

  return false;
}
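A hypothetical usage sketch: two segments lying in the z = 0 plane that cross at (1, 1) should report an intersection. ci::Vec3f is Cinder's vector type used above.

ci::Vec3f a0(0.0f, 0.0f, 0.0f), a1(2.0f, 2.0f, 0.0f);
ci::Vec3f b0(0.0f, 2.0f, 0.0f), b1(2.0f, 0.0f, 0.0f);
bool crossing = intersect2D(a0, a1, b0, b1);   // expected: true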
Code Example #25
File: Line.cpp  Project: npapier/vgsdk
bool Line::intersect( const Vec3f& v0, const Vec3f& v1, const Vec3f& v2,
		Vec3f& intersection,
		Vec3f& barycentric, bool& front ) const
{
	//////////////////////////////////////////////////////////////////
	//
	// The method used here is described by Didier Badouel in Graphics
	// Gems (I), pages 390 - 393.
	//
	//////////////////////////////////////////////////////////////////

#define EPSILON 1e-10

	//
	// (1) Compute the plane containing the triangle
	//
	Vec3f	v01 = v1 - v0;
	Vec3f	v12 = v2 - v1;
	Vec3f	norm = v12.cross(v01);	// Un-normalized normal
	// Normalize normal to unit length, and make sure the length is
	// not 0 (indicating a zero-area triangle)
	if (norm.normalize() < EPSILON)		return false;

	//
	// (2) Compute the distance t to the plane along the line
	//
	float d = getDirection().dot(norm);
	if (d < EPSILON && d > -EPSILON)
		return false;			// Line is parallel to plane
	float t = norm.dot(v0 - getPosition()) / d;

	// Note: we DO NOT ignore intersections behind the eye (t < 0.0)

	//
	// (3) Find the largest component of the plane normal. The other
	//     two dimensions are the axes of the aligned plane we will
	//     use to project the triangle.
	//
	float	xAbs = norm[0] < 0.0 ? -norm[0] : norm[0];
	float	yAbs = norm[1] < 0.0 ? -norm[1] : norm[1];
	float	zAbs = norm[2] < 0.0 ? -norm[2] : norm[2];
	int		axis0, axis1;

	if (xAbs > yAbs && xAbs > zAbs) 
	{
		axis0 = 1;
		axis1 = 2;
	}
	else if (yAbs > zAbs) 
	{
		axis0 = 2;
		axis1 = 0;
	}
	else 
	{
		axis0 = 0;
		axis1 = 1;
	}

	//
	// (4) Determine if the projected intersection (of the line and
	//     the triangle's plane) lies within the projected triangle.
	//     Since we deal with only 2 components, we can avoid the
	//     third computation.
	//
	float intersection0 = getPosition()[axis0] + t * getDirection()[axis0];
	float intersection1 = getPosition()[axis1] + t * getDirection()[axis1];

	Vec2f	diff0, diff1, diff2;
	bool	isInter = false;
	float	alpha = 0.f, beta;

	diff0[0] = intersection0 - v0[axis0];
	diff0[1] = intersection1 - v0[axis1];
	diff1[0] = v1[axis0]     - v0[axis0];
	diff1[1] = v1[axis1]     - v0[axis1];
	diff2[0] = v2[axis0]     - v0[axis0];
	diff2[1] = v2[axis1]     - v0[axis1];

	// Note: This code was rearranged somewhat from the code in
	// Graphics Gems to provide a little more numeric
	// stability. However, it can still miss some valid intersections
	// on very tiny triangles.
	isInter = false;
	beta = ((diff0[1] * diff1[0] - diff0[0] * diff1[1]) /
		(diff2[1] * diff1[0] - diff2[0] * diff1[1]));
	
	if (beta >= 0.0 && beta <= 1.0) 
	{
		alpha = -1.0;
		if (diff1[1] < -EPSILON || diff1[1] > EPSILON) 
		{
			alpha = (diff0[1] - beta * diff2[1]) / diff1[1];
		}
		else
		{
			alpha = (diff0[0] - beta * diff2[0]) / diff1[0];
		}
		isInter = (alpha >= 0.0 && alpha + beta <= 1.0);
	}

	//
	// (5) If there is an intersection, set up the barycentric
	//     coordinates and figure out if the front was hit.
	//
	if (isInter) 
	{
		barycentric.setValue(1.0f - (alpha + beta), alpha, beta);
		front = (getDirection().dot(norm) < 0.0);
		intersection = getPosition() + getDirection() * t;
	}

	return isInter;

#undef EPSILON
}
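A hypothetical usage sketch: intersect a picking ray (a Line named ray here, an assumption) with one triangle and use the barycentric weights, which are ordered to match v0, v1, v2, to interpolate per-vertex attributes such as the colors c0..c2 (also assumptions for the example).

Vec3f hit, bary;
bool front = false;
if (ray.intersect(v0, v1, v2, hit, bary, front))
{
	// weights: bary[0] -> v0, bary[1] -> v1, bary[2] -> v2
	Vec3f color = c0 * bary[0] + c1 * bary[1] + c2 * bary[2];
}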
Code Example #26
File: VRTransform.cpp  Project: TobiasHue/polyvr
/** enable constraints on the object, 0 leaves the DOF free, 1 restricts it **/
void VRTransform::apply_constraints() {
    if (!doTConstraint and !doRConstraint) return;

    Matrix t = getWorldMatrix();//current position

    //rotation
    if (doRConstraint) {
        int qs = rConstraint[0]+rConstraint[1]+rConstraint[2];
        Matrix t0 = constraints_reference;

        if (qs == 3) for (int i=0;i<3;i++) t[i] = t0[i];

        if (qs == 2) {
            int u,v,w;
            if (rConstraint[0] == 0) { u = 0; v = 1; w = 2; }
            if (rConstraint[1] == 0) { u = 1; v = 0; w = 2; }
            if (rConstraint[2] == 0) { u = 2; v = 0; w = 1; }

            for (int i=0;i<3;i++) t[i][u] = t0[i][u]; //copy old transformation

            // normalize so that the b component stays constant
            for (int i=0;i<3;i++) {
                float a = 1-t[i][u]*t[i][u];
                if (a < 1e-6) {
                    t[i][v] = t0[i][v];
                    t[i][w] = t0[i][w];
                } else {
                    a /= (t0[i][v]*t0[i][v] + t0[i][w]*t0[i][w]);
                    a = sqrt(a);
                    t[i][v] *= a;
                    t[i][w] *= a;
                }
            }
        }

        if (qs == 1) {
            /*int u,v,w;
            if (rConstraint[0] == 1) { u = 0; v = 1; w = 2; }
            if (rConstraint[1] == 1) { u = 1; v = 0; w = 2; }
            if (rConstraint[2] == 1) { u = 2; v = 0; w = 1; }*/

            // TODO
        }
    }

    //translation
    if (doTConstraint) {
        //cout << "\nA";
        if (tConPlane) {
            float d = Vec3f(t[3] - constraints_reference[3]).dot(tConstraint);
            for (int i=0; i<3; i++) t[3][i] -= d*tConstraint[i];
        }
        else {
            Vec3f d = Vec3f(t[3] - constraints_reference[3]);
            d = d.dot(tConstraint)*tConstraint;
            for (int i=0; i<3; i++) t[3][i] = constraints_reference[3][i] + d[i];
        }
    }

    setWorldMatrix(t);
}
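The translation branch above is plain vector projection: with a plane constraint the displacement since the reference loses its component along the constraint normal, while with an axis constraint only the component along the axis survives. A condensed sketch of just that math with hypothetical names (d is the displacement, n the unit constraint vector):

Vec3f constrainToPlane(const Vec3f& d, const Vec3f& n) { return d - d.dot(n)*n; }
Vec3f constrainToAxis (const Vec3f& d, const Vec3f& n) { return d.dot(n)*n; }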
Code Example #27
File: intersect.cpp  Project: dalibor-matura/mcl
FCL_REAL Intersect::distanceToPlane(const Vec3f& n, FCL_REAL t, const Vec3f& v)
{
  return n.dot(v) - t;
}
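Here the plane is stored as a normal n and an offset t, i.e. points x on the plane satisfy n.dot(x) == t, so the return value is the signed distance of v: positive on the side the normal points to, and a true distance only when n is unit length. A hypothetical usage sketch, assuming the function is callable as a static member:

Vec3f n(0.0, 1.0, 0.0);                                              // unit plane normal
FCL_REAL t = 2.0;                                                    // plane: y == 2
FCL_REAL d = Intersect::distanceToPlane(n, t, Vec3f(1.0, 5.0, 0.0)); // d == 3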
Code Example #28
File: intr_tribox.cpp  Project: sutuglon/Motor
OZCOLLIDE_API bool testIntersectionTriBox(const Vec3f *_triPts[3],
										 const Vec3f &_triNormal,
										 const Box &_box,
										 const Vec3f &_boxVel,
										 float &_distTravel,
										 Vec3f &_reaction)
{
	int i, j;
	float d0, d1;
	float t;

	Plane tri_plane;
	tri_plane.fromPointAndNormal(*_triPts[0], _triNormal);

	Vec3f n_boxvel = _boxVel;
	n_boxvel.normalize();

	if (_triNormal.dot(n_boxvel) > 0)
		return false;

	float minDist = FLT_MAX;
	int col = -1;

	// box vertices VS triangle
	for (i = 0; i < 8; i++) {
		Vec3f box_vertex = _box.getPoint(i);

		Vec3f box_vertex_normal = _box.getVertexNormal(i);
		if (box_vertex_normal.dot(n_boxvel) < -OZ_COS45)
			continue;

		float t;
		if (!tri_plane.intersectWithLine(box_vertex, box_vertex + n_boxvel, t) )
			continue;

		if (t >= minDist)
			continue;
		if (t < 0)
			continue;

		Vec3f inter;
		inter = box_vertex + n_boxvel * t;

		if (!isPointInsideTriangle(*_triPts[0], *_triPts[1], *_triPts[2], inter))
			continue;

		minDist = t;
		_reaction = _triNormal;
		col = 0;
	}

	// triangle vertices VS box faces
	for (i = 0; i < 3; i++) {
		float tnear, tfar;
		PLANE plane = intersectRayBox(*_triPts[i], -n_boxvel, _box, tnear, tfar);
		if (plane == -1)
			continue;

		if (tnear < 0)
			continue;

		if (tnear < minDist) {
			minDist = tnear;
			_reaction = -_box.getFaceNormal(plane);
			col = 1;
		}
	}

	// box edges VS triangle edges
	for (i = 0; i < 12; i++) {
		BoxEdge boxEdge = _box.getEdge(i);
		Plane boxEdgePlane;
		boxEdgePlane.fromPoints(boxEdge.p1, boxEdge.p0, boxEdge.p0 + _boxVel);

		Vec3f boxEdgeNormal = Vec3f(boxEdgePlane.a, boxEdgePlane.b, boxEdgePlane.c);

		Vec3f v0 = boxEdge.p1 - boxEdge.p0;

		for (j = 0; j < 3; j++) {
			const Vec3f *tpt0;
			const Vec3f *tpt1;
			tpt0 = _triPts[j];
			if (j < 2) tpt1 = _triPts[j + 1];
			else tpt1 = _triPts[0];

			d0 = boxEdgePlane.dist(*tpt0);
			d1 = boxEdgePlane.dist(*tpt1);
			if (d0 * d1 > 0)
				continue;

			if (!boxEdgePlane.intersectWithLine(*tpt0, *tpt1, t))
				continue;

			Vec3f v1 = *tpt1 - *tpt0;
			Vec3f ipt = *tpt0 + v1 * t;

			int a0 = 0, a1 = 1;
			float pl_x = fabsf(boxEdgePlane.a);
			float pl_y = fabsf(boxEdgePlane.b);
			float pl_z = fabsf(boxEdgePlane.c);
			if (pl_x > pl_y && pl_x > pl_z) {
				a0 = 1;
				a1 = 2;
			}
			else {
				if (pl_y > pl_z) {
					a0 = 0;
					a1 = 2;
				}
			}

			float dist = v0[a0] * (ipt[a1] - boxEdge.p0[a1]) - v0[a1] * (ipt[a0] - boxEdge.p0[a0]);
			float denom = v0[a1] * -n_boxvel[a0] - v0[a0] * -n_boxvel[a1];
			if (!denom)
				continue;
			dist /= denom;

			if (dist < 0)
				continue;

			ipt -= dist * n_boxvel;

			Vec3f r1 = boxEdge.p0 - ipt;
			Vec3f r2 = boxEdge.p1 - ipt;
			if (r1.dot(r2) > 0)
				continue;

			if (dist < minDist) {
				minDist = dist;
				_reaction = v0 | v1;
				_reaction.normalize();
				if (_reaction.dot(n_boxvel) > 0)
					_reaction = -_reaction;
				col = 2;
			}
		}
	}

	if (col == -1)
		return false;

	_distTravel = minDist;

	return true;
}
Code Example #29
File: intersect.cpp  Project: dalibor-matura/mcl
FCL_REAL TriangleDistance::triDistance(const Vec3f S[3], const Vec3f T[3], Vec3f& P, Vec3f& Q)
{
  // Compute vectors along the 6 sides

  Vec3f Sv[3];
  Vec3f Tv[3];
  Vec3f VEC;

  Sv[0] = S[1] - S[0];
  Sv[1] = S[2] - S[1];
  Sv[2] = S[0] - S[2];

  Tv[0] = T[1] - T[0];
  Tv[1] = T[2] - T[1];
  Tv[2] = T[0] - T[2];

  // For each edge pair, the vector connecting the closest points
  // of the edges defines a slab (parallel planes at head and tail
  // enclose the slab). If we can show that the off-edge vertex of
  // each triangle is outside of the slab, then the closest points
  // of the edges are the closest points for the triangles.
  // Even if these tests fail, it may be helpful to know the closest
  // points found, and whether the triangles were shown disjoint

  Vec3f V, Z, minP, minQ;
  FCL_REAL mindd;
  int shown_disjoint = 0;

  mindd = (S[0] - T[0]).sqrLength() + 1; // Set first minimum safely high

  for(int i = 0; i < 3; ++i)
  {
    for(int j = 0; j < 3; ++j)
    {
      // Find closest points on edges i & j, plus the
      // vector (and distance squared) between these points
      segPoints(S[i], Sv[i], T[j], Tv[j], VEC, P, Q);

      V = Q - P;
      FCL_REAL dd = V.dot(V);

      // Verify this closest point pair only if the distance
      // squared is less than the minimum found thus far.

      if(dd <= mindd)
      {
        minP = P;
        minQ = Q;
        mindd = dd;

        Z = S[(i+2)%3] - P;
        FCL_REAL a = Z.dot(VEC);
        Z = T[(j+2)%3] - Q;
        FCL_REAL b = Z.dot(VEC);

        if((a <= 0) && (b >= 0)) return sqrt(dd);

        FCL_REAL p = V.dot(VEC);

        if(a < 0) a = 0;
        if(b > 0) b = 0;
        if((p - a + b) > 0) shown_disjoint = 1;
      }
    }
  }

  // No edge pairs contained the closest points.
  // either:
  // 1. one of the closest points is a vertex, and the
  //    other point is interior to a face.
  // 2. the triangles are overlapping.
  // 3. an edge of one triangle is parallel to the other's face. If
  //    cases 1 and 2 are not true, then the closest points from the 9
  //    edge pairs checks above can be taken as closest points for the
  //    triangles.
  // 4. possibly, the triangles were degenerate.  When the
  //    triangle points are nearly colinear or coincident, one
  //    of above tests might fail even though the edges tested
  //    contain the closest points.

  // First check for case 1

  Vec3f Sn;
  FCL_REAL Snl;

  Sn = Sv[0].cross(Sv[1]); // Compute normal to S triangle
  Snl = Sn.dot(Sn);        // Compute square of length of normal

  // If cross product is long enough,

  if(Snl > 1e-15)
  {
    // Get projection lengths of T points

    Vec3f Tp;

    V = S[0] - T[0];
    Tp[0] = V.dot(Sn);

    V = S[0] - T[1];
    Tp[1] = V.dot(Sn);

    V = S[0] - T[2];
    Tp[2] = V.dot(Sn);

    // If Sn is a separating direction,
    // find point with smallest projection

    int point = -1;
    if((Tp[0] > 0) && (Tp[1] > 0) && (Tp[2] > 0))
    {
      if(Tp[0] < Tp[1]) point = 0; else point = 1;
      if(Tp[2] < Tp[point]) point = 2;
    }
    else if((Tp[0] < 0) && (Tp[1] < 0) && (Tp[2] < 0))
    {
      if(Tp[0] > Tp[1]) point = 0; else point = 1;
      if(Tp[2] > Tp[point]) point = 2;
    }

    // If Sn is a separating direction,

    if(point >= 0)
    {
      shown_disjoint = 1;

      // Test whether the point found, when projected onto the
      // other triangle, lies within the face.

      V = T[point] - S[0];
      Z = Sn.cross(Sv[0]);
      if(V.dot(Z) > 0)
      {
        V = T[point] - S[1];
        Z = Sn.cross(Sv[1]);
        if(V.dot(Z) > 0)
        {
          V = T[point] - S[2];
          Z = Sn.cross(Sv[2]);
          if(V.dot(Z) > 0)
          {
            // T[point] passed the test - it's a closest point for
            // the T triangle; the other point is on the face of S
            P = T[point] + Sn * (Tp[point] / Snl);
            Q = T[point];
            return (P - Q).length();
          }
        }
      }
    }
  }

  Vec3f Tn;
  FCL_REAL Tnl;

  Tn = Tv[0].cross(Tv[1]);
  Tnl = Tn.dot(Tn);

  if(Tnl > 1e-15)
  {
    Vec3f Sp;

    V = T[0] - S[0];
    Sp[0] = V.dot(Tn);

    V = T[0] - S[1];
    Sp[1] = V.dot(Tn);

    V = T[0] - S[2];
    Sp[2] = V.dot(Tn);

    int point = -1;
    if((Sp[0] > 0) && (Sp[1] > 0) && (Sp[2] > 0))
    {
      if(Sp[0] < Sp[1]) point = 0; else point = 1;
      if(Sp[2] < Sp[point]) point = 2;
    }
    else if((Sp[0] < 0) && (Sp[1] < 0) && (Sp[2] < 0))
    {
      if(Sp[0] > Sp[1]) point = 0; else point = 1;
      if(Sp[2] > Sp[point]) point = 2;
    }

    if(point >= 0)
    {
      shown_disjoint = 1;

      V = S[point] - T[0];
      Z = Tn.cross(Tv[0]);
      if(V.dot(Z) > 0)
      {
        V = S[point] - T[1];
        Z = Tn.cross(Tv[1]);
        if(V.dot(Z) > 0)
        {
          V = S[point] - T[2];
          Z = Tn.cross(Tv[2]);
          if(V.dot(Z) > 0)
          {
            P = S[point];
            Q = S[point] + Tn * (Sp[point] / Tnl);
            return (P - Q).length();
          }
        }
      }
    }
  }

  // Case 1 can't be shown.
  // If one of these tests showed the triangles disjoint,
  // we assume case 3 or 4, otherwise we conclude case 2,
  // that the triangles overlap.

  if(shown_disjoint)
  {
    P = minP;
    Q = minQ;
    return sqrt(mindd);
  }
  else return 0;
}
Code Example #30
void HullShaper::setVoxel(
    const Vec3f&    voxelPos,
    int             numVoxelTris,
    const S32*      voxelTris,
    const S8*       numBary,
    const Vec2f*    bary,
    const S32*      dispIsect,
    int             numAuxContours,
    S32*            auxContours)
{
    F32 baryEps   = 1.0e-6f;
    FW_ASSERT(m_mesh);

    m_numVoxelTris  = numVoxelTris;
    m_voxelTris     = voxelTris;
    m_dispIsect     = dispIsect;
    m_mid           = voxelPos + m_voxelSize * 0.5f;

    // Just the interior of a single triangle => easy case.

    m_easyCase = false;
    if (numVoxelTris == 1 && numBary[0])
    {
        const BuilderMesh::Triangle& tri = m_mesh->getTri(voxelTris[0]);
        bool edge = false;
        for (int i = numBary[0] - 1; i >= 0 && !edge; i--)
        {
            const Vec2f& b = bary[i];
            edge = (min(abs(b.x), abs(b.y), abs(b.x + b.y - 1.0f)) <= baryEps);
        }

        if (!edge && !tri.dispTri)
        {
            // Encode normal.

            m_contour = encodeContourNormal(tri.geomNormal);
            Vec3f normal = decodeContourNormal(m_contour);

            // Encode bounds.

            F32 d = normal.dot(tri.p - m_mid);
            F32 du = normal.dot(tri.pu);
            F32 dv = normal.dot(tri.pv);
            Vec2f b = Vec2f(48.0f, -48.0f) * m_voxelSize;
            for (int i = numBary[0] - 1; i >= 0; i--)
            {
                F32 v = d + du * bary[i].x + dv * bary[i].y;
                b.x = fastMin(b.x, v);
                b.y = fastMax(b.y, v);
            }
            b *= m_voxelSizeRcp;
            m_contour = encodeContourBounds(m_contour, b.x, b.y);
            m_numAuxContours = 0;

            // Need to refine?

            Vec2f posThick = decodeContourPosThick(m_contour);
            F32 error = fastMax(posThick.x - b.x, b.y - posThick.x) + posThick.y * 0.5f;
            m_easyCase = true;
            m_easyRefine = (sqr(error * m_voxelSize) > m_devSqr * normal.lenSqr());
            return;
        }
    }

    // Pick reference planes.

    m_planes.clear();
    const Vec2f* baryPtr = bary;
    const S32* dispIsectPtr = dispIsect;
    int step = max(numVoxelTris >> 4, 1);

    for (int i = 0; i < numVoxelTris; i++)
    {
        const BuilderMesh::Triangle& tri = m_mesh->getTri(voxelTris[i]);

        // Add planes for every nth triangle.

        if (i % step == 0)
        {
            // Displacement => ask DisplacedTriangle.

            if (tri.dispTri)
            {
                m_dispNormals.clear();
                tri.dispTri->getPlaneNormals(m_dispNormals, dispIsectPtr);
                for (int j = 0; j < m_dispNormals.getSize(); j++)
                    addPlane(1.0f, m_dispNormals[j]);
            }
            else
            {
                // Determine boundary mask.

                U8 mask = tri.boundaryMask;
                if (numBary[i])
                {
                    mask = 0;
                    for (int j = numBary[i] - 1; j >= 0; j--)
                    {
                        const Vec2f& b = baryPtr[j];
                        if (abs(b.y) <= baryEps)              mask |= 1;
                        if (abs(b.x + b.y - 1.0f) <= baryEps) mask |= 2;
                        if (abs(b.x) <= baryEps)              mask |= 4;
                    }
                    mask &= tri.boundaryMask;
                }

                // Add planes.

                addPlane(1.0f, tri.geomNormal);
                if ((mask & 1) != 0) addPlane(0.5f, tri.geomNormal.cross(tri.pu));
                if ((mask & 2) != 0) addPlane(0.5f, tri.geomNormal.cross(tri.pv - tri.pu));
                if ((mask & 4) != 0) addPlane(0.5f, tri.geomNormal.cross(tri.pv));
            }
        }

        // Advance to the next triangle.

        if (tri.dispTri)
            dispIsectPtr = DisplacedTriangle::getNextIsect(dispIsectPtr);
        else
            baryPtr += numBary[i];
    }

    // Calculate geometry bounds for the planes.

    baryPtr = bary;
    dispIsectPtr = dispIsect;

    for (int triIdx = 0; triIdx < numVoxelTris; triIdx++)
    {
        const BuilderMesh::Triangle& tri = m_mesh->getTri(voxelTris[triIdx]);

        // Displacement => ask DisplacedTriangle.

        if (tri.dispTri)
        {
            for (int i = m_planes.getSize() - 1; i >= 0; i--)
            {
                Plane& p = m_planes[i];
                tri.dispTri->expandPlaneBounds(p.bounds, dispIsectPtr, p.normal, m_mid);
            }
            dispIsectPtr = DisplacedTriangle::getNextIsect(dispIsectPtr);
            continue;
        }

        // No displacement => check barys.

        Vec3f pp = tri.p - m_mid;
        int num = numBary[triIdx];

        for (int i = m_planes.getSize() - 1; i >= 0; i--)
        {
            Plane& p = m_planes[i];
            F32 d = p.normal.dot(pp);
            F32 du = p.normal.dot(tri.pu);
            F32 dv = p.normal.dot(tri.pv);

            if (!num)
            {
                p.bounds.x = fastMin(p.bounds.x, d + fastMin(0.0f, fastMin(du, dv)));
                p.bounds.y = fastMax(p.bounds.y, d + fastMax(0.0f, fastMax(du, dv)));
            }
            else
            {
                for (int j = num - 1; j >= 0; j--)
                {
                    F32 v = d + du * baryPtr[j].x + dv * baryPtr[j].y;
                    p.bounds.x = fastMin(p.bounds.x, v);
                    p.bounds.y = fastMax(p.bounds.y, v);
                }
            }
        }
        baryPtr += num;
    }

    // Build polyhedron of parent contours.

    m_polyhedron.setCube(-0.5f, 0.5f);
    for (int i = 0; i < numAuxContours; i++)
        isectPolyWithContour(m_polyhedron, auxContours[i], i);

    // Find the best plane and the corresponding contour.

    F32 bestScore = -FW_F32_MAX;
    for (int i = 0; i < m_planes.getSize(); i++)
    {
        const Plane& p = m_planes[i];
        Vec2f geomBounds = p.bounds * m_voxelSizeRcp;
        geomBounds.x = fastMax(geomBounds.x, -48.0f);
        geomBounds.y = fastMin(geomBounds.y, 48.0f);

        // Calculate polyhedron bounds.

        Vec2f polyBounds(48.0f, -48.0f);
        for (int j = 0; j < m_polyhedron.getNumVertices(); j++)
        {
            F32 v = p.normal.dot(m_polyhedron.getVertex(j));
            polyBounds.x = fastMin(polyBounds.x, v);
            polyBounds.y = fastMax(polyBounds.y, v);
        }

        // Evaluate score.

        F32 score = fastMax(polyBounds.y - geomBounds.y, geomBounds.x - polyBounds.x) / p.length * p.weight;
        if (score < bestScore)
            continue;

        // Encode contour.

        bestScore = score;
        m_contour = encodeContourBounds(p.encoded, geomBounds.x, geomBounds.y);
    }

    // Intersect polyhedron.

    isectPolyWithContour(m_polyhedron, m_contour, numAuxContours);

    // Output auxiliary contours.

    U32 auxMask = 0;
    for (int i = 0; i < m_polyhedron.getNumFaces(); i++)
        if (m_polyhedron.getFacePlaneID(i) != -1)
            auxMask |= 1 << m_polyhedron.getFacePlaneID(i);

    auxContours[numAuxContours] = m_contour;
    m_numAuxContours = 0;
    for (int i = 0; i <= numAuxContours; i++)
        if (auxMask & (1 << i))
            auxContours[m_numAuxContours++] = auxContours[i];
}