Example #1
File: util.cpp Project: gphysics/displaz
size_t closestPointToRay(const V3f* points, size_t nPoints,
                         const V3f& rayOrigin, const V3f& rayDirection,
                         double longitudinalScale, double* distance)
{
    const V3f T = rayDirection.normalized();
    const double f = longitudinalScale*longitudinalScale - 1;
    size_t nearestIdx = -1; // == size_t(-1), used as the "not found" sentinel
    double nearestDist2 = DBL_MAX;
    for(size_t i = 0; i < nPoints; ++i)
    {
        const V3f v = points[i] - rayOrigin; // vector from ray origin to point
        const double a = v.dot(T); // distance along ray to point of closest approach to test point
        const double r2 = v.length2() + f*a*a;

        if(r2 < nearestDist2)
        {
            // new closest angle to axis
            nearestDist2 = r2;
            nearestIdx = i;
        }
    }
    if(distance)
    {
        if(nPoints == 0)
            *distance = DBL_MAX;
        else
            *distance = sqrt(nearestDist2);
    }
    return nearestIdx;
}
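A minimal usage sketch, assuming V3f is Imath::V3f and linking against the function above; a longitudinalScale below 1 makes distance along the ray count for less than distance away from it, so the call favours points near the ray axis.
#include <ImathVec.h>
#include <cfloat>
#include <cstddef>
#include <cstdio>

typedef Imath::V3f V3f;

// Declaration of the function shown above (its definition is assumed to be linked in).
size_t closestPointToRay(const V3f* points, size_t nPoints,
                         const V3f& rayOrigin, const V3f& rayDirection,
                         double longitudinalScale, double* distance);

int main()
{
    const V3f pts[3] = { V3f(0,0,5), V3f(1,0,3), V3f(0,2,1) };
    double dist = DBL_MAX;
    // With longitudinalScale = 0.5, distance along the ray is weighted half as
    // much as distance perpendicular to it.
    size_t idx = closestPointToRay(pts, 3, V3f(0), V3f(0,0,1), 0.5, &dist);
    std::printf("nearest index %u, weighted distance %g\n", unsigned(idx), dist);
    return 0;
}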
Example #2
	bool intersectP(CRay& _ray) {
		if (_ray.m_id == m_id)
			return false; 
		V3f oc = _ray.m_pos - m_c; 
		float a = _ray.m_dir.dot(_ray.m_dir); // dir should be unit
		float b = _ray.m_dir.dot(oc);
		float c = oc.dot(oc) - m_r2; 
		float delta = b * b  - a * c; 
		if (delta < 0)   // no solution 
			return false; 
		else if (delta > -EPS && delta < EPS) {  // one solution
			float t = - b / a;
			if (t > _ray.m_t_max || t < _ray.m_t_min)  // out of range
				return false; 
		} else {   // two solutions 
			float deltasqrt = sqrt(delta);
			float t1 = (- b - deltasqrt) / a;
			float t2 = (- b + deltasqrt) / a;
			// report a hit if either root is in range (mirrors intersect() below)
			bool t1InRange = (t1 <= _ray.m_t_max && t1 >= _ray.m_t_min);
			bool t2InRange = (t2 <= _ray.m_t_max && t2 >= _ray.m_t_min);
			if (!t1InRange && !t2InRange)   // both out of range
				return false; 
		}

		return true; 
	}
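A short numeric sketch of why delta = b*b - a*c is the right discriminant here: the factor of 2 from the usual quadratic formula has been folded into b, so the hit parameters solve a*t^2 + 2*b*t + c = 0 and come out as t = (-b +/- sqrt(b*b - a*c)) / a. Plain floats only, no V3f or CRay needed.
#include <cmath>
#include <cstdio>

int main()
{
    // Unit ray along +z from the origin; sphere centred at (0,0,5) with radius 1.
    float a = 1.0f;           // dot(dir, dir)
    float b = -5.0f;          // dot(dir, oc), with oc = origin - centre = (0,0,-5)
    float c = 25.0f - 1.0f;   // dot(oc, oc) - r^2
    float delta = b*b - a*c;  // 25 - 24 = 1
    std::printf("t1 = %g, t2 = %g\n",
                (-b - std::sqrt(delta)) / a,   // 4: front of the sphere
                (-b + std::sqrt(delta)) / a);  // 6: back of the sphere
    return 0;
}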
Example #3
void PhongBrdf::randVonMisesFisher3(V3f mu, float kappa, int n, V3f* directions) {


	// Rodrigues rotation relating the canonical z-axis (0,0,1) and mu.
	// Note: if mu is (anti)parallel to (0,0,1), u below is the zero vector and
	// the normalize is degenerate; callers are assumed to avoid that case.
	V3f normal(0,0,1);
	V3f u = mu.cross(normal);
	float cost = dot(mu,normal);
	float sint = u.length();
	u = u.normalize();

	M33f rot(cost + u.x * u.x * (1 - cost),
			u.x * u.y * (1 - cost) - u.z * sint,
			u.x * u.z * (1 - cost) + u.y * sint,
			u.y * u.x * (1 - cost) + u.z * sint,
			cost + u.y * u.y * (1 - cost),
			u.y * u.z * (1 - cost) - u.x * sint,
			u.z * u.x * (1 - cost) - u.y * sint,
			u.z	* u.y * (1 - cost) + u.x * sint,
			cost + u.z * u.z * (1 - cost));

	float c = 2/kappa*(sinh(kappa)); // normalizing constant

	float y, w, v;
	for (int i=0; i < n; i++) {
		y = randomGenerator.RandomFloat();
		w = 1/kappa * log( exp(-kappa) + kappa * c * y );
		v = 2*M_PI*randomGenerator.RandomFloat();

		directions[i].x = sqrt(1-w*w)*cos(v);
		directions[i].y = sqrt(1-w*w)*sin(v);
		directions[i].z = w;

		directions[i] = directions[i]*rot;
	}

}
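A small standalone check of the inverse-CDF transform used above (independent of the V3f/M33f types): w = (1/kappa)*log(exp(-kappa) + 2*sinh(kappa)*y) maps y = 0 to w = -1 and y = 1 to w = +1, so the sampled z component always stays in [-1, 1] and sqrt(1 - w*w) is well defined.
#include <cmath>
#include <cstdio>

int main()
{
    const float kappa = 30.0f;                     // larger kappa -> sharper lobe
    const float c = 2.0f/kappa*std::sinh(kappa);   // same normalizing constant as above
    for (float y = 0.0f; y <= 1.0f; y += 0.25f)
        std::printf("y = %.2f  ->  w = %f\n",
                    y, 1.0f/kappa*std::log(std::exp(-kappa) + kappa*c*y));
    return 0;
}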
Example #4
void PhongBrdf::getRandomDirections(const V3f incoming,
			const V3f normal, int n, V3f* directions) {

	V3f R = -incoming - 2 * (dot(-incoming, normal)) * normal;
	R = R.normalize();

	randVonMisesFisher3(R, phongExponent, n, directions);
}
Example #5
//-*****************************************************************************
void MeshDrwHelper::updateNormals( V3fArraySamplePtr iN )
{
    if ( !m_valid || !m_meshP )
    {
        makeInvalid();
        return;
    }

//std::cout << "normals - " << m_name << std::endl;

    // Now see if we need to calculate normals.
    if ( ( m_meshN && iN == m_meshN ) )//||
//         ( !iN && m_customN.size() > 0 ) )
    {
        return;
    }

    size_t numPoints = m_meshP->size();
    m_meshN = iN;
    m_customN.clear();

    // Right now we only handle "vertex varying" normals,
    // which have the same cardinality as the points
    if ( !m_meshN || m_meshN->size() != numPoints )
    {
        // Make some custom normals.
        m_meshN.reset();
        m_customN.resize( numPoints );
        std::fill( m_customN.begin(), m_customN.end(), V3f( 0.0f ) );

        //std::cout << "Recalcing normals for object: "
        //          << m_host.name() << std::endl;

        for ( size_t tidx = 0; tidx < m_triangles.size(); ++tidx )
        {
            const Tri &tri = m_triangles[tidx];

            const V3f &A = (*m_meshP)[tri[0]];
            const V3f &B = (*m_meshP)[tri[1]];
            const V3f &C = (*m_meshP)[tri[2]];

            V3f AB = B - A;
            V3f AC = C - A;

            V3f wN = AB.cross( AC );
            m_customN[tri[0]] += wN;
            m_customN[tri[1]] += wN;
            m_customN[tri[2]] += wN;
        }

        // Normalize normals.
        for ( size_t nidx = 0; nidx < numPoints; ++nidx )
        {
            m_customN[nidx].normalize();
        }
    }
}
Example #6
//Splice edge with centered sphere.
bool SpliceEdgeWithSphere(const V3f & a, const V3f & b, float radius, V3f * out){

  float al = a.length(); 
  float bl = b.length();
  
  if( ( (al >= radius) && (bl >= radius) ) ||
      ( (al <= radius) && (bl <= radius) ) ) return false; 

  *out =  a * (bl-radius)/(bl-al) + b * (radius-al)/(bl-al);

  return true;
};
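A usage sketch, assuming V3f is Imath::V3f and linking against the definition above. The returned point interpolates a and b by their distances from the origin; when a and b are radially aligned it lies exactly on the sphere, and in general it approximates the true crossing point.
#include <ImathVec.h>
#include <cstdio>

typedef Imath::V3f V3f;

bool SpliceEdgeWithSphere(const V3f & a, const V3f & b, float radius, V3f * out);

int main()
{
    V3f hit;
    // The segment from (0.5,0,0) to (3,0,0) crosses the unit sphere at (1,0,0).
    if (SpliceEdgeWithSphere(V3f(0.5f, 0, 0), V3f(3.0f, 0, 0), 1.0f, &hit))
        std::printf("crossing at (%g, %g, %g), |hit| = %g\n",
                    hit.x, hit.y, hit.z, hit.length());
    return 0;
}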
Example #7
void
Camera::rotateVectorQuat(float angle, float x, float y, float z, V3f& vector) const
{
	bm::quaternion<float> temp(x * sin(angle / 2.0f), y * sin(angle / 2.0f), z
			* sin(angle / 2.0f), cos(angle / 2.0f));
	bm::quaternion<float> quat_vec(*vector.x, *vector.y, *vector.z, 0.0);
	bm::quaternion<float> result = (temp * quat_vec) * bm::conj(temp);

	result.real();
	vector.setX(result.R_component_1());
	vector.setY(result.R_component_2());
	vector.setZ(result.R_component_3());
}
Example #8
void
Camera::mouseRotate(float angleY, float angleZ)
{
	V3f vAxis = V3f::cross(mView - mEye, mUp);
	vAxis.normalize();

	// Rotate around our perpendicular axis and along the y-axis
//	rotateView(angleZ, vAxis);
//	rotateView(angleY, 0.0f, 1.0f, 0.0f);
	rotateView(angleZ, vAxis);
	rotateView(angleY, 0.0f, 1.0f,0.0f);
//	applyToGL();
}
Example #9
V2f	
latLong (const V3f &dir)
{
    float r = sqrt (dir.z * dir.z + dir.x * dir.x);

    float latitude = (r < abs (dir.y))?
			 acos (r / dir.length()) * sign (dir.y):
			 asin (dir.y / dir.length());

    float longitude = (dir.z == 0 && dir.x == 0)? 0: atan2 (dir.x, dir.z);

    return V2f (latitude, longitude);
}
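A usage sketch, assuming the Imath V2f/V3f types and linking against the definition above: +z maps to (latitude, longitude) = (0, 0), +y to (pi/2, 0), and +x, queried below, to (0, pi/2).
#include <ImathVec.h>
#include <cstdio>

Imath::V2f latLong (const Imath::V3f &dir);

int main()
{
    Imath::V2f ll = latLong(Imath::V3f(1.0f, 0.0f, 0.0f));
    std::printf("latitude %g, longitude %g\n", ll.x, ll.y);  // roughly 0 and pi/2
    return 0;
}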
Example #10
File: t_v3tools.cpp Project: korantu/vx2
int main(){
  V3f x(0,0,1); V3f xr(rot_x(x, 0.87)); same("x rotation", x.dot(xr), cos(0.87));
  V3f y(0,0,1); V3f yr(rot_y(y, 0.23)); same("y rotation", y.dot(yr), cos(0.23));
  V3f z(1,0,0); V3f zr(rot_z(z, 0.19)); same("z rotation", z.dot(zr), cos(0.19));

  V3f nx(3,2,5);
  V3f ny(-2,3,4);
  V3f nz(-4,4,3.8);

  V3f nnx(3,2,5);
  V3f nny(-2,3,4);
  V3f nnz(-4,4,3.8);

  ortoNormalize(nnx, nny, nnz);
  
  same("x unit", nnx.length(), 1.0);
  same("y unit", nny.length(), 1.0);
  same("z unit", nnz.length(), 1.0);

  V3f tmp; tmp.cross(nnx, nx);

  same("x colinear", tmp.length(), 0.0);
  
  tmp.cross(nnx, nny); tmp-=nnz; same("x orto", tmp.length(), 0);
  tmp.cross(nny, nnz); tmp-=nnx; same("y orto", tmp.length(), 0);
  tmp.cross(nnz, nnx); tmp-=nny; same("z orto", tmp.length(), 0);


};
Example #11
bool SpherePrimitiveEvaluator::closestPoint( const V3f &p, PrimitiveEvaluator::Result *result ) const
{
	assert( dynamic_cast<Result *>( result ) );

	Result *sr = static_cast<Result *>( result );

	sr->m_p = p.normalized() * m_sphere->radius();

	return true;
}
Example #12
File: v3tools.cpp Project: korantu/vx2
// make the basis orthonormal again.
void ortoNormalize(V3f & nnx, V3f & nny, V3f & nnz){
  V3f newy; newy.cross(nnz, nnx);
  V3f newz; newz.cross(nnx, newy);
  newy /= newy.length();
  newz /= newz.length();
  nnx /= nnx.length();
  nny = newy;
  nnz = newz;
};
Example #13
	bool intersect(CRay& _ray, float* _thit, CLocalGeo* _local, int& _id)  {
		if (_ray.m_id == m_id)
			return false; 
		float t; 
		V3f oc = _ray.m_pos - m_c; 
		float a = _ray.m_dir.dot(_ray.m_dir); // dir should be unit
		float b = _ray.m_dir.dot(oc);
		float c = oc.dot(oc) - m_r2; 
		float delta = b * b  - a * c; 
		if (delta < 0)   // no solution 
			return false; 
		else if (delta > -EPS && delta < EPS) {  // one solution
			t = - b / a;
			if (t > _ray.m_t_max || t < _ray.m_t_min)  // out of range
				return false; 
		} else {   // two solutions 
			float deltasqrt = sqrt(delta);
			float t1 = (- b - deltasqrt) / a;
			float t2 = (- b + deltasqrt) / a;
			bool flag = false; 
			t = _ray.m_t_max; 
			if (t1 <= _ray.m_t_max && t1 >= _ray.m_t_min) {
				flag = true; 
				t = min(t, t1);
			}

			if (t2 <= _ray.m_t_max && t2 >= _ray.m_t_min) {
				flag = true; 
				t = min(t, t2);
			}
			if (!flag)   // both out of range
				return false; 
		}

		// pass t, compute CLocalGeo
		*_thit = t; 
		_id = m_id; 
		_local->m_pos = _ray.Ray_t(t);
		_local->m_n = _local->m_pos - m_c; 
		_local->m_n = _local->m_n / _local->m_n.norm(); 
		return true; 
	} 
Example #14
void
Camera::strafeCamera(float speed)
{
	// Strafing is quite simple if you understand what the cross product is.
	// If you have 2 vectors (say the up vVector and the view vVector) you can
	// use the cross product formula to get a vVector that is 90 degrees from the 2 vectors.
	// For a better explanation on how this works, check out the OpenGL "Normals" tutorial at our site.
	// In our new Update() function, we set the strafing vector (m_vStrafe).  Due
	// to the fact that we need this vector for many things including the strafing
	// movement and camera rotation (up and down), we just calculate it once.
	//
	// Like our MoveCamera() function, we add the strafing vector to our current position
	// and view.  It's as simple as that.  It has already been calculated in Update().

	V3f vStrafe = V3f::cross(mView - mEye, mUp);
	vStrafe.normalize();

	// Add the strafe vector to our position
	*mEye.x += vStrafe.getX() * speed;
	*mEye.z += vStrafe.getZ() * speed;

	// Add the strafe vector to our view
	*mView.x += vStrafe.getX() * speed;
	*mView.z += vStrafe.getZ() * speed;
//	applyToGL();
}
Example #15
void PhongModelApprox::approximate(const Hemisphere& hemi) {

	N = hemi.getNormal();
	phong = hemi.getPhong();
	V3f* directions = hemi.getLobeDirections();
	C3f* radiosities = hemi.getLobeRadiosities();


	for (int i=0; i <lobeDirs.size(); i++) {
		if (i >= hemi.getNLobes()) {
			float nan = std::numeric_limits<float>::quiet_NaN();
			lobeDirs[i] = V3f(nan,nan,nan);
		} else {
			V3f L = directions[i];
			L = L/L.length();
			V3f R = -L - 2 * (dot(-L, N)) * N;

			lobeDirs[i] = R;
			lobeCols[i] = radiosities[i]*dot(N,L);
		}
	}
}
Example #16
/// \todo Use IECore::RadixSort (might still want to use std::sort for small numbers of points - profile to check this)
void PointsPrimitive::depthSort() const
{
	V3f cameraDirection = Camera::viewDirectionInObjectSpace();
	cameraDirection.normalize();

	const vector<V3f> &points = m_memberData->points->readable();
	if( !m_memberData->depthOrder.size() )
	{
		// never sorted before. initialize space.
		m_memberData->depthOrder.resize( points.size() );
		for( unsigned int i=0; i<m_memberData->depthOrder.size(); i++ )
		{
			m_memberData->depthOrder[i] = i;
		}
		m_memberData->depths.resize( points.size() );
	}
	else
	{
		// sorted before. see if the camera direction has changed enough
		// to warrant resorting.
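		// (For reference: a dot product of 0.95 corresponds to roughly an
		// 18 degree angle between the cached and the current view direction.)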
		if( cameraDirection.dot( m_memberData->depthCameraDirection ) > 0.95 )
		{
			return;
		}
	}

	m_memberData->depthCameraDirection = cameraDirection;

	// calculate all distances
	for( unsigned int i=0; i<m_memberData->depths.size(); i++ )
	{
		m_memberData->depths[i] = points[i].dot( m_memberData->depthCameraDirection );
	}

	// sort based on those distances
	SortFn sorter( m_memberData->depths );
	sort( m_memberData->depthOrder.begin(), m_memberData->depthOrder.end(), sorter );
}
Example #17
void CameraController::tumble( const Imath::V2f &p )
{
	V2f d = p - m_data->motionStart;

	V3f centreOfInterestInWorld = V3f( 0, 0, -m_data->centreOfInterest ) * m_data->motionMatrix;
	V3f xAxisInWorld = V3f( 1, 0, 0 );
	m_data->motionMatrix.multDirMatrix( xAxisInWorld, xAxisInWorld );
	xAxisInWorld.normalize();

	M44f t;
	t.translate( centreOfInterestInWorld );

	t.rotate( V3f( 0, -d.x / 100.0f, 0 ) );

	M44f xRotate;
	xRotate.setAxisAngle( xAxisInWorld, -d.y / 100.0f );

	t = xRotate * t;

	t.translate( -centreOfInterestInWorld );

	m_data->transform->matrix = m_data->motionMatrix * t;
}
Example #18
int ImagePrimitiveEvaluator::intersectionPoints( const V3f &origin, const V3f &direction,
                std::vector<PrimitiveEvaluator::ResultPtr> &results, float maxDistance ) const
{
	results.clear();

	V3f hitPoint;
	Box3f bound = m_image->bound();
	bool hit = boxIntersects( bound , origin, direction.normalized(), hitPoint );

	if ( hit )
	{
		if ( ( origin - hitPoint ).length2() < maxDistance * maxDistance )
		{
			ResultPtr result = staticPointerCast< Result >( createResult() );
			result->m_p = hitPoint;

			results.push_back( result );
		}
	}

	return results.size();
}
Example #19
static void renderDiskExact(IntegratorT& integrator, V3f p, V3f n, float r) {

    int faceRes = integrator.res();
    float plen2 = p.length2();
    if(plen2 == 0) // Sanity check
        return;
    // Angle from face normal to edge is acos(1/sqrt(3)).
    static float cosFaceAngle = 1.0f/sqrtf(3);
    static float sinFaceAngle = sqrtf(2.0f/3.0f);

    // iterate over all the faces.
    for(int iface = MicroBuf::Face_begin; iface < MicroBuf::Face_end; ++iface) {

        // Cast this back to a Face enum.
        MicroBuf::Face face = static_cast<MicroBuf::Face>(iface);

        // Avoid rendering to the current face if the disk definitely doesn't
        // touch it.  First check the cone angle
        if(sphereOutsideCone(p, plen2, r, MicroBuf::faceNormal(face),
                             cosFaceAngle, sinFaceAngle))
            continue;
        float dot_pFaceN = MicroBuf::dotFaceNormal(face, p);
        float dot_nFaceN = MicroBuf::dotFaceNormal(face, n);
        // If the disk is behind the camera and the disk normal is relatively
        // aligned with the face normal (to within the face cone angle), the
        // disk can't contribute to the face and may be culled.
        if(dot_pFaceN < 0 && fabs(dot_nFaceN) > cosFaceAngle)
            continue;
        // Check whether disk spans the perspective divide for the current
        // face.  Note: sin^2(angle(n, faceN)) = (1 - dot_nFaceN*dot_nFaceN)
        if((1 - dot_nFaceN*dot_nFaceN)*r*r >= dot_pFaceN*dot_pFaceN)
        {
            // When the disk spans the perspective divide, the shape of the
            // disk projected onto the face is a hyperbola.  Bounding a
            // hyperbola is a pain, so the easiest thing to do is trace a ray
            // for every pixel on the face and check whether it hits the disk.
            //
            // Note that all of the tricky rasterization rubbish further down
            // could probably be replaced by the following ray tracing code if
            // I knew a way to compute the tight raster bound.
            integrator.setFace(face);
            for(int iv = 0; iv < faceRes; ++iv)
                for(int iu = 0; iu < faceRes; ++iu)
                {
                    // V = ray through the pixel
                    V3f V = integrator.rayDirection(face, iu, iv);
                    // Signed distance to plane containing disk
                    float t = dot(p, n)/dot(V, n);
                    if(t > 0 && (t*V - p).length2() < r*r)
                    {
                        // The ray hit the disk, record the hit
                        integrator.addSample(iu, iv, t, 1.0f);
                    }
                }
            continue;
        }
        // If the disk didn't span the perspective divide and is behind the
        // camera, it may be culled.
        if(dot_pFaceN < 0)
            continue;
        // Having gone through all the checks above, we know that the disk
        // doesn't span the perspective divide, and that it is in front of the
        // camera.  Therefore, the disk projected onto the current face is an
        // ellipse, and we may compute a quadratic function
        //
        //   q(u,v) = a0*u*u + b0*u*v + c0*v*v + d0*u + e0*v + f0
        //
        // such that the disk lies in the region satisfying q(u,v) < 0.  To do
        // this, start with the implicit definition of the disk on the plane,
        //
        //   norm(dot(p,n)/dot(V,n) * V - p)^2 - r^2 < 0
        //
        // and compute coefficients A,B,C such that
        //
        //   A*dot(V,V) + B*dot(V,n)*dot(p,V) + C*dot(V,n)^2 < 0
        float dot_pn = dot(p,n);
        float A = dot_pn*dot_pn;
        float B = -2*dot_pn;
        float C = plen2 - r*r;
        // Project onto the current face to compute the coefficients a0 through
        // to f0 for q(u,v)
        V3f pp = MicroBuf::canonicalFaceCoords(face, p);
        V3f nn = MicroBuf::canonicalFaceCoords(face, n);
        float a0 = A + B*nn.x*pp.x + C*nn.x*nn.x;
        float b0 = B*(nn.x*pp.y + nn.y*pp.x) + 2*C*nn.x*nn.y;
        float c0 = A + B*nn.y*pp.y + C*nn.y*nn.y;
        float d0 = (B*(nn.x*pp.z + nn.z*pp.x) + 2*C*nn.x*nn.z);
        float e0 = (B*(nn.y*pp.z + nn.z*pp.y) + 2*C*nn.y*nn.z);
        float f0 = (A + B*nn.z*pp.z + C*nn.z*nn.z);
        // Finally, transform the coefficients so that they define the
        // quadratic function in *raster* face coordinates, (iu, iv)
        float scale = 2.0f/faceRes;
        float scale2 = scale*scale;
        float off = 0.5f*scale - 1.0f;
        float a = scale2*a0;
        float b = scale2*b0;
        float c = scale2*c0;
        float d = ((2*a0 + b0)*off + d0)*scale;
        float e = ((2*c0 + b0)*off + e0)*scale;
        float f = (a0 + b0 + c0)*off*off + (d0 + e0)*off + f0;
        // Construct a tight bound for the ellipse in raster coordinates.
        int ubegin = 0, uend = faceRes;
        int vbegin = 0, vend = faceRes;
        float det = 4*a*c - b*b;
        // Sanity check; a valid ellipse must have det > 0
        if(det <= 0)
        {
            // If we get here, the disk is probably edge on (det == 0) or we
            // have some hopefully small floating point errors (det < 0: the
            // hyperbolic case we've already ruled out).  Cull in either case.
            continue;
        }
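        // Why these particular coefficients bound the ellipse (sketch): for a
        // fixed u there is some v with q(u,v) < 0 exactly when the discriminant
        // of q in v is positive, i.e. (b*u + e)^2 - 4*c*(a*u*u + d*u + f) > 0.
        // Rearranged, u must lie between the roots of
        //     det*u*u + (4*d*c - 2*b*e)*u + (4*c*f - e*e) = 0,
        // which is the quadratic handed to solveQuadratic() below; the v bound
        // follows symmetrically.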
        float ub = 0, ue = 0;
        solveQuadratic(det, 4*d*c - 2*b*e, 4*c*f - e*e, ub, ue);
        ubegin = std::max(0, Imath::ceil(ub));
        uend   = std::min(faceRes, Imath::ceil(ue));
        float vb = 0, ve = 0;
        solveQuadratic(det, 4*a*e - 2*b*d, 4*a*f - d*d, vb, ve);
        vbegin = std::max(0, Imath::ceil(vb));
        vend   = std::min(faceRes, Imath::ceil(ve));
        // By the time we get here, we've expended perhaps 120 FLOPS + 2 sqrts
        // to set up the coefficients of q(iu,iv).  The setup is expensive, but
        // the bound is optimal so it will be worthwhile vs raytracing, unless
        // the raster faces are very small.
        integrator.setFace(face);
        for(int iv = vbegin; iv < vend; ++iv)
            for(int iu = ubegin; iu < uend; ++iu)
            {
                float q = a*(iu*iu) + b*(iu*iv) + c*(iv*iv) + d*iu + e*iv + f;
                if(q < 0)
                {
                    V3f V = integrator.rayDirection(face, iu, iv);
                    // compute distance to hit point
                    float z = dot_pn/dot(V, n);
                    integrator.addSample(iu, iv, z, 1.0f);
                }
            }
    }
}
Example #20
void
ObjModelLoader::secondPass()
{
	string lastType = "";
	int grpId = -1;
	mPriModelPtr->setCurrentGrp(mPriModelPtr->getGrpStart()->first);

	fs::path my_path(mPriFName);
	fs::ifstream objFile;
	objFile.open(my_path, ios::in);
	char line[200];
	vector<string> tokens;
	while (objFile.getline(line, 200)) {
		tokens = splitSpace(string(line));
		if (tokens.empty())   // guard against blank lines
			continue;
		if (tokens[0] == ("g")) {
//			removeSpecialCharsFromName(tokens[1]);
			++grpId;
			lastType = "g";
			string longname("");
			for (unsigned int i = 1; i < tokens.size(); ++i) {
				longname.append(tokens[i]);
				longname.append("_");
			}
			longname.erase(longname.end() - 1);

			mPriModelPtr->setCurrentGrp(longname);
			//cout << _model.getCurrentGrpPtr()->name << endl;
		}
		else if (tokens[0] == ("v")) {
			lastType = "v";
		}
		// moved material assignment to 2nd pass because it references a group.
		// But only at end of 1st pass we know our groups
		else if (tokens[0] == ("usemtl")) {
			//			cout << "found material reference " << tokens[1] << " in obj-file" << endl;
			string longName("");
			for (unsigned int i = 1; i < tokens.size(); ++i) {
				longName.append(tokens[i]);
				longName.append(" ");
			}
			longName.erase(longName.end() - 1);
			if (mPriMatMap.find(longName) != mPriMatMap.end())
				mPriModelPtr->getCurrentGrpPtr()->setMat(*mPriMatMap[longName]);
			lastType = "usemtl";
		}
		else if (tokens[0] == ("f")) {
			//cout << "Number of Components per face: " << tokens.size()-1 << endl;
			Face* f = new Face();
			f->norm = false;
			f->vert = false;
			f->matIdx = 0;
			f->tex = 0;
			f->fNormal = 0;

			string::size_type loc = tokens[1].find("/", 0);
			if (loc != string::npos) {
				for (string::size_type i = 1; i < tokens.size(); ++i) {
					vector<int> comp = extractFaceComponents(tokens[i]);
					// vertices
					if ((comp[0] & 4)) {
						V3f* vtx = mPriModelPtr->getVPtr(comp[1] - 1);
						f->vertexPtrList.push_back(vtx);
						vtx->addFaceRef(f);
						f->vert = true;
						mPriModelPtr->getCurrentGrpPtr()->nVertices++;
						mPriModelPtr->incVCount();
						mPriModelPtr->getCurrentGrpPtr()->bb->expand(*vtx);
					}
					// textures
					if ((comp[0] & 2)) {
						f->texturePtrList.push_back(mPriModelPtr->getTPtr(
								comp[2] - 1));
						++f->tex;
						mPriModelPtr->getCurrentGrpPtr()->nTextureCoords++;
						mPriModelPtr->incTCount();
					}
					// normals
					if ((comp[0] & 1)) {
						f->normalPtrList.push_back(mPriModelPtr->getNPtr(
								comp[3] - 1));
						f->norm = true;
						mPriModelPtr->getCurrentGrpPtr()->nNormals++;
						mPriModelPtr->incNCount();
					}
					comp.clear();
				}
			}
			else {
				V3f* vtx = mPriModelPtr->getVPtr(atoi(tokens[1].c_str()) - 1);
				f->vertexPtrList.push_back(vtx);
				mPriModelPtr->getCurrentGrpPtr()->bb->expand(*vtx);
				vtx = mPriModelPtr->getVPtr(atoi(tokens[2].c_str()) - 1);
				f->vertexPtrList.push_back(vtx);
				mPriModelPtr->getCurrentGrpPtr()->bb->expand(*vtx);
				vtx = mPriModelPtr->getVPtr(atoi(tokens[3].c_str()) - 1);
				f->vertexPtrList.push_back(vtx);
				mPriModelPtr->getCurrentGrpPtr()->bb->expand(*vtx);
				f->vert = true;
				mPriModelPtr->getCurrentGrpPtr()->nVertices += 3;
				mPriModelPtr->incVCount(3);

			}
			mPriModelPtr->addFPtrToCurrent(f);
			lastType = "f";
		}
	}
	objFile.close();
}
Example #21
//-*****************************************************************************
void MeshDrwHelper::draw( const DrawContext & iCtx ) const
{
    // Bail if invalid.
    if ( !m_valid || m_triangles.size() < 1 || !m_meshP )
    {
        return;
    }

    const V3f *points = m_meshP->get();
    const V3f *normals = NULL;
    if ( m_meshN  && ( m_meshN->size() == m_meshP->size() ) )
    {
        normals = m_meshN->get();
    }
    else if ( m_customN.size() == m_meshP->size() )
    {
        normals = &(m_customN.front());
    }

#ifndef SIMPLE_ABC_VIEWER_NO_GL_CLIENT_STATE
//#if 0
    {
        GL_NOISY( glEnableClientState( GL_VERTEX_ARRAY ) );
        if ( normals )
        {
            GL_NOISY( glEnableClientState( GL_NORMAL_ARRAY ) );
            GL_NOISY( glNormalPointer( GL_FLOAT, 0,
                                       ( const GLvoid * )normals ) );
        }

        GL_NOISY( glVertexPointer( 3, GL_FLOAT, 0,
                                   ( const GLvoid * )points ) );

        GL_NOISY( glDrawElements( GL_TRIANGLES,
                                  ( GLsizei )m_triangles.size() * 3,
                                  GL_UNSIGNED_INT,
                                  ( const GLvoid * )&(m_triangles[0]) ) );

        if ( normals )
        {
            GL_NOISY( glDisableClientState( GL_NORMAL_ARRAY ) );
        }
        GL_NOISY( glDisableClientState( GL_VERTEX_ARRAY ) );
    }
#else
    glBegin( GL_TRIANGLES );

    for ( size_t i = 0; i < m_triangles.size(); ++i )
    {
        const Tri &tri = m_triangles[i];
        const V3f &vertA = points[tri[0]];
        const V3f &vertB = points[tri[1]];
        const V3f &vertC = points[tri[2]];

        if ( normals )
        {
            const V3f &normA = normals[tri[0]];
            glNormal3fv( ( const GLfloat * )&normA );
            glVertex3fv( ( const GLfloat * )&vertA );

            const V3f &normB = normals[tri[1]];
            glNormal3fv( ( const GLfloat * )&normB );
            glVertex3fv( ( const GLfloat * )&vertB );

            const V3f &normC = normals[tri[2]];
            glNormal3fv( ( const GLfloat * )&normC );
            glVertex3fv( ( const GLfloat * )&vertC );
        }
        else
        {
            V3f AB = vertB - vertA;
            V3f AC = vertC - vertA;
            V3f N = AB.cross( AC );
            if ( N.length() > 1.0e-4f )
            {
                N.normalize();
                glNormal3fv( ( const GLfloat * )&N );
            }

            glVertex3fv( ( const GLfloat * )&vertA );

            glVertex3fv( ( const GLfloat * )&vertB );

            glVertex3fv( ( const GLfloat * )&vertC );
        }

    }

    glEnd();

#endif
}
Example #22
std::pair<PrimitiveVariable, PrimitiveVariable> IECoreScene::MeshAlgo::calculateTangents(
	const MeshPrimitive *mesh,
	const std::string &uvSet, /* = "uv" */
	bool orthoTangents, /* = true */
	const std::string &position /* = "P" */
)
{
	if( mesh->minVerticesPerFace() != 3 || mesh->maxVerticesPerFace() != 3 )
	{
		throw InvalidArgumentException( "MeshAlgo::calculateTangents : MeshPrimitive must only contain triangles" );
	}

	const V3fVectorData *positionData = mesh->variableData<V3fVectorData>( position );
	if( !positionData )
	{
		std::string e = boost::str( boost::format( "MeshAlgo::calculateTangents : MeshPrimitive has no Vertex \"%s\" primitive variable." ) % position );
		throw InvalidArgumentException( e );
	}

	const V3fVectorData::ValueType &points = positionData->readable();

	const IntVectorData *vertsPerFaceData = mesh->verticesPerFace();
	const IntVectorData::ValueType &vertsPerFace = vertsPerFaceData->readable();

	const IntVectorData *vertIdsData = mesh->vertexIds();
	const IntVectorData::ValueType &vertIds = vertIdsData->readable();

	const auto uvIt = mesh->variables.find( uvSet );
	if( uvIt == mesh->variables.end() || uvIt->second.interpolation != PrimitiveVariable::FaceVarying || uvIt->second.data->typeId() != V2fVectorDataTypeId )
	{
		throw InvalidArgumentException( ( boost::format( "MeshAlgo::calculateTangents : MeshPrimitive has no FaceVarying V2fVectorData primitive variable named \"%s\"."  ) % ( uvSet ) ).str() );
	}

	const V2fVectorData *uvData = runTimeCast<V2fVectorData>( uvIt->second.data.get() );
	const V2fVectorData::ValueType &uvs = uvData->readable();

	// I'm a little unsure about using the vertIds as a fallback for the stIndices.
	const IntVectorData::ValueType &uvIndices = uvIt->second.indices ? uvIt->second.indices->readable() : vertIds;

	size_t numUVs = uvs.size();

	std::vector<V3f> uTangents( numUVs, V3f( 0 ) );
	std::vector<V3f> vTangents( numUVs, V3f( 0 ) );
	std::vector<V3f> normals( numUVs, V3f( 0 ) );

	for( size_t faceIndex = 0; faceIndex < vertsPerFace.size(); faceIndex++ )
	{
		assert( vertsPerFace[faceIndex] == 3 );

		// indices into the facevarying data for this face
		size_t fvi0 = faceIndex * 3;
		size_t fvi1 = fvi0 + 1;
		size_t fvi2 = fvi1 + 1;
		assert( fvi2 < vertIds.size() );
		assert( fvi2 < uvIndices.size() );

		// positions for each vertex of this face
		const V3f &p0 = points[vertIds[fvi0]];
		const V3f &p1 = points[vertIds[fvi1]];
		const V3f &p2 = points[vertIds[fvi2]];

		// uv coordinates for each vertex of this face
		const V2f &uv0 = uvs[uvIndices[fvi0]];
		const V2f &uv1 = uvs[uvIndices[fvi1]];
		const V2f &uv2 = uvs[uvIndices[fvi2]];

		// compute tangents and normal for this face
		const V3f e0 = p1 - p0;
		const V3f e1 = p2 - p0;

		const V2f e0uv = uv1 - uv0;
		const V2f e1uv = uv2 - uv0;

		V3f tangent = ( e0 * -e1uv.y + e1 * e0uv.y ).normalized();
		V3f bitangent = ( e0 * -e1uv.x + e1 * e0uv.x ).normalized();

		V3f normal = ( p2 - p1 ).cross( p0 - p1 );
		normal.normalize();

		// and accumulate them into the computation so far
		uTangents[uvIndices[fvi0]] += tangent;
		uTangents[uvIndices[fvi1]] += tangent;
		uTangents[uvIndices[fvi2]] += tangent;

		vTangents[uvIndices[fvi0]] += bitangent;
		vTangents[uvIndices[fvi1]] += bitangent;
		vTangents[uvIndices[fvi2]] += bitangent;

		normals[uvIndices[fvi0]] += normal;
		normals[uvIndices[fvi1]] += normal;
		normals[uvIndices[fvi2]] += normal;

	}

	// normalize and orthogonalize everything
	for( size_t i = 0; i < uTangents.size(); i++ )
	{
		normals[i].normalize();

		uTangents[i].normalize();
		vTangents[i].normalize();

		// Make uTangent/vTangent orthogonal to normal
		uTangents[i] -= normals[i] * uTangents[i].dot( normals[i] );
		vTangents[i] -= normals[i] * vTangents[i].dot( normals[i] );

		uTangents[i].normalize();
		vTangents[i].normalize();

		if( orthoTangents )
		{
			vTangents[i] -= uTangents[i] * vTangents[i].dot( uTangents[i] );
			vTangents[i].normalize();
		}

		// Ensure we have a set of basis vectors (n, uT, vT) with the correct handedness.
		if( uTangents[i].cross( vTangents[i] ).dot( normals[i] ) < 0.0f )
		{
			uTangents[i] *= -1.0f;
		}
	}

	// convert the tangents back to facevarying data and add that to the mesh
	V3fVectorDataPtr fvUD = new V3fVectorData();
	V3fVectorDataPtr fvVD = new V3fVectorData();

	std::vector<V3f> &fvU = fvUD->writable();
	std::vector<V3f> &fvV = fvVD->writable();
	fvU.resize( uvIndices.size() );
	fvV.resize( uvIndices.size() );

	for( unsigned i = 0; i < uvIndices.size(); i++ )
	{
		fvU[i] = uTangents[uvIndices[i]];
		fvV[i] = vTangents[uvIndices[i]];
	}

	PrimitiveVariable tangentPrimVar( PrimitiveVariable::FaceVarying, fvUD );
	PrimitiveVariable bitangentPrimVar( PrimitiveVariable::FaceVarying, fvVD );

	return std::make_pair( tangentPrimVar, bitangentPrimVar );
}
Example #23
void
Camera::rotateView(float angle, const V3f& vec)
{
	rotateView(angle, vec.getX(), vec.getY(), vec.getZ());
}
Example #24
//-*****************************************************************************
void MeshDrwHelper::draw( const DrawContext & iCtx ) const
{

    // Bail if invalid.
    if ( !m_valid || m_triangles.size() < 1 || !m_meshP )
    {
        return;
    }

    const V3f *points = m_meshP->get();
    const V3f *normals = NULL;
    if ( m_meshN  && ( m_meshN->size() == m_meshP->size() ) )
    {
        normals = m_meshN->get();
    }
    else if ( m_customN.size() == m_meshP->size() )
    {
        normals = &(m_customN.front());
    }
    
    // colors
    const C4f *colors = NULL;
    if (m_colors.size() == m_meshP->size() )
    {
        colors = &(m_colors.front());

    }

    static MGLFunctionTable *gGLFT = NULL;
    if (gGLFT == NULL)
       gGLFT = MHardwareRenderer::theRenderer()->glFunctionTable();

    gGLFT->glBegin( MGL_TRIANGLES );

    for ( size_t i = 0; i < m_triangles.size(); ++i )
    {
        const Tri &tri = m_triangles[i];
        const V3f &vertA = points[tri[0]];
        const V3f &vertB = points[tri[1]];
        const V3f &vertC = points[tri[2]];

        if ( normals )
        {
            const V3f &normA = normals[tri[0]];
            gGLFT->glNormal3fv( ( const GLfloat * )&normA );
            gGLFT->glVertex3fv( ( const GLfloat * )&vertA );

            const V3f &normB = normals[tri[1]];
            gGLFT->glNormal3fv( ( const GLfloat * )&normB );
            gGLFT->glVertex3fv( ( const GLfloat * )&vertB );

            const V3f &normC = normals[tri[2]];
            gGLFT->glNormal3fv( ( const GLfloat * )&normC );
            gGLFT->glVertex3fv( ( const GLfloat * )&vertC );
        }
        else
        {
            V3f AB = vertB - vertA;
            V3f AC = vertC - vertA;
            V3f N = AB.cross( AC );
            if ( N.length() > 1.0e-4f )
            {
                N.normalize();
                gGLFT->glNormal3fv( ( const GLfloat * )&N );
            }

            gGLFT->glVertex3fv( ( const GLfloat * )&vertA );

            gGLFT->glVertex3fv( ( const GLfloat * )&vertB );

            gGLFT->glVertex3fv( ( const GLfloat * )&vertC );
        }

    }

    gGLFT->glEnd();

}
Example #25
	float det(V3f _V0, V3f _V1, V3f _V2) {
		float d = _V0.x() * _V1.y() * _V2.z() 
			+ _V1.x() * _V2.y() * _V0.z()
			+ _V2.x() * _V0.y() * _V1.z()
			- _V2.x() * _V1.y() * _V0.z()
			- _V1.x() * _V0.y() * _V2.z()
			- _V0.x() * _V2.y() * _V1.z();
		return d; 
	}
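The expression above is the scalar triple product _V0 . (_V1 x _V2), i.e. the determinant of the 3x3 matrix whose columns are _V0, _V1 and _V2. A quick standalone check with plain floats:
#include <cstdio>

int main()
{
    // det of the identity columns is 1; swapping any two columns flips the sign.
    float x[3] = {1, 0, 0}, y[3] = {0, 1, 0}, z[3] = {0, 0, 1};
    float d = x[0]*y[1]*z[2] + y[0]*z[1]*x[2] + z[0]*x[1]*y[2]
            - z[0]*y[1]*x[2] - y[0]*x[1]*z[2] - x[0]*z[1]*y[2];
    std::printf("det = %g\n", d);  // prints 1
    return 0;
}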
Example #26
void GraphGadget::calculateDragSnapOffsets( Gaffer::Set *nodes )
{
	m_dragSnapOffsets[0].clear();
	m_dragSnapOffsets[1].clear();
	
	std::vector<const ConnectionGadget *> connections;
	for( size_t i = 0, s = nodes->size(); i < s; ++i )
	{
		Gaffer::Node *node = runTimeCast<Gaffer::Node>( nodes->member( i ) );
		if( !node )
		{
			continue;
		}
	
		connections.clear();
		connectionGadgets( node, connections, nodes );
		
		for( std::vector<const ConnectionGadget *>::const_iterator it = connections.begin(), eIt = connections.end(); it != eIt; ++it )
		{
			// get the node gadgets at either end of the connection
			
			const ConnectionGadget *connection = *it;
			const Nodule *srcNodule = connection->srcNodule();
			const Nodule *dstNodule = connection->dstNodule();
			const NodeGadget *srcNodeGadget = srcNodule->ancestor<NodeGadget>();
			const NodeGadget *dstNodeGadget = dstNodule->ancestor<NodeGadget>();
			
			if( !srcNodeGadget || !dstNodeGadget )
			{
				continue;
			}
			
			// check that the connection tangents are opposed - if not we don't want to snap
			
			V3f srcTangent = srcNodeGadget->noduleTangent( srcNodule );
			V3f dstTangent = dstNodeGadget->noduleTangent( dstNodule );
				
			if( srcTangent.dot( dstTangent ) > -0.5f )
			{
				continue;
			}
			
			// compute an offset that will bring the src and destination nodules into line
			
			const int snapAxis = fabs( srcTangent.x ) > 0.5 ? 1 : 0;
						
			V3f srcPosition = V3f( 0 ) * srcNodule->fullTransform();
			V3f dstPosition = V3f( 0 ) * dstNodule->fullTransform();
			float offset = srcPosition[snapAxis] - dstPosition[snapAxis];
				
			if( dstNodule->plug()->node() != node )
			{
				offset *= -1;
			}
			
			m_dragSnapOffsets[snapAxis].push_back( offset );
			
			// compute an offset that will bring the src and destination nodes into line
			
			V3f srcNodePosition = V3f( 0 ) * srcNodeGadget->fullTransform();
			V3f dstNodePosition = V3f( 0 ) * dstNodeGadget->fullTransform();
			offset = srcNodePosition[snapAxis] - dstNodePosition[snapAxis];
				
			if( dstNodule->plug()->node() != node )
			{
				offset *= -1;
			}

			m_dragSnapOffsets[snapAxis].push_back( offset );

			// compute an offset that will position the node snugly next to its input
			// in the other axis.
			
			Box3f srcNodeBound = srcNodeGadget->transformedBound( 0 );
			Box3f dstNodeBound = dstNodeGadget->transformedBound( 0 );
			
			const int otherAxis = snapAxis == 1 ? 0 : 1;
			if( otherAxis == 1 )
			{
				offset = dstNodeBound.max[otherAxis] - srcNodeBound.min[otherAxis] + 1.0f;
			}
			else
			{
				offset = dstNodeBound.min[otherAxis] - srcNodeBound.max[otherAxis] - 1.0f;				
			}
			
			if( dstNodule->plug()->node() == node )
			{
				offset *= -1;
			}
			
			m_dragSnapOffsets[otherAxis].push_back( offset );
		}
	}
	
	// sort and remove duplicates so that we can use lower_bound() to find appropriate
	// snap points in dragMove().
	
	for( int axis = 0; axis <= 1; ++axis )
	{
		std::sort( m_dragSnapOffsets[axis].begin(), m_dragSnapOffsets[axis].end() );
		m_dragSnapOffsets[axis].erase( std::unique( m_dragSnapOffsets[axis].begin(), m_dragSnapOffsets[axis].end()), m_dragSnapOffsets[axis].end() );
	}
	
}
Example #27
void renderDisk(IntegratorT& integrator, V3f N, V3f p, V3f n, float r,
                float cosConeAngle, float sinConeAngle)
{
    float dot_pn = dot(p, n);
    // Cull back-facing points.  In conjunction with the oddball composition
    // rule below, this is very important for smoothness of the result:  If we
    // don't cull the back faces, coverage will be overestimated in every
    // microbuffer pixel which contains an edge.
    if(dot_pn > 0)
        return;
    float plen2 = p.length2();
    float plen = sqrtf(plen2);
    // If solid angle of bounding sphere is greater than exactRenderAngle,
    // resolve the visibility exactly rather than using a cheap approx.
    //
    // TODO: Adjust exactRenderAngle for best results!
    const float exactRenderAngle = 0.05f;
    float origArea = M_PI*r*r;
    // Solid angle of the bound
    if(exactRenderAngle*plen2 < origArea)
    {
        // Multiplier for radius to make the cracks a bit smaller.  We
        // can't do this too much, or sharp convex edges will be
        // over occluded (TODO: Adjust for best results!  Maybe the "too
        // large" problem could be worked around using a tracing offset?)
        const float radiusMultiplier = M_SQRT2;
        // Resolve visibility of very close surfels using ray tracing.
        // This is necessary to avoid artifacts where surfaces meet.
        renderDiskExact(integrator, p, n, radiusMultiplier*r);
        return;
    }
    // Figure out which face we're on and get u,v coordinates on that face,
    MicroBuf::Face faceIndex = MicroBuf::faceIndex(p);
    int faceRes = integrator.res();
    float u = 0, v = 0;
    MicroBuf::faceCoords(faceIndex, p, u, v);
    // Compute the area of the surfel when projected onto the env face.
    // This depends on several things:
    // 1) The area of the original disk
    // 2) The angles between the disk normal n, viewing vector p, and face
    // normal.  This is the area projected onto a plane parallel to the env
    // map face, and through the centre of the disk.
    float pDotFaceN = MicroBuf::dotFaceNormal(faceIndex, p);
    float angleFactor = fabs(dot_pn/pDotFaceN);
    // 3) Ratio of distance to the surfel vs distance to projected point on
    // the face.
    float distFactor = 1.0f/(pDotFaceN*pDotFaceN);
    // Putting these together gives the projected area
    float projArea = origArea * angleFactor * distFactor;
    // Half-width of a square with area projArea
    float wOn2 = sqrtf(projArea)*0.5f;
    // Transform width and position to face raster coords.
    float rasterScale = 0.5f*faceRes;
    u = rasterScale*(u + 1.0f);
    v = rasterScale*(v + 1.0f);
    wOn2 *= rasterScale;
    // Construct square box with the correct area.  This shape isn't
    // anything like the true projection of a disk onto the raster, but
    // it's much cheaper!  Note that points which are proxies for clusters
    // of smaller points aren't going to be accurately resolved no matter
    // what we do.
    struct BoundData
    {
        MicroBuf::Face faceIndex;
        float ubegin, uend;
        float vbegin, vend;
    };
    // The current surfel can cross up to three faces.
    int nfaces = 1;
    BoundData boundData[3];
    BoundData& bd0 = boundData[0];
    bd0.faceIndex = faceIndex;
    bd0.ubegin = u - wOn2;
    bd0.uend = u + wOn2;
    bd0.vbegin = v - wOn2;
    bd0.vend = v + wOn2;
    // Detect & handle overlap onto adjacent faces
    //
    // We assume that wOn2 is the same on the adjacent face, an assumption
    // which is true when the surfel is close to the corner of the cube.
    // We also assume that a surfel touches at most three faces.  This is
    // true as long as the surfels don't have a massive solid angle; for
    // such cases the axis-aligned box isn't going to be accurate anyway and
    // the code should have branched into the renderDiskExact function instead.
    if(bd0.ubegin < 0)
    {
        // left neighbour
        BoundData& b = boundData[nfaces++];
        b.faceIndex = MicroBuf::neighbourU(faceIndex, 0);
        MicroBuf::faceCoords(b.faceIndex, p, u, v);
        u = rasterScale*(u + 1.0f);
        v = rasterScale*(v + 1.0f);
        b.ubegin = u - wOn2;
        b.uend = u + wOn2;
        b.vbegin = v - wOn2;
        b.vend = v + wOn2;
    }
    else if(bd0.uend > faceRes)
    {
        // right neighbour
        BoundData& b = boundData[nfaces++];
        b.faceIndex = MicroBuf::neighbourU(faceIndex, 1);
        MicroBuf::faceCoords(b.faceIndex, p, u, v);
        u = rasterScale*(u + 1.0f);
        v = rasterScale*(v + 1.0f);
        b.ubegin = u - wOn2;
        b.uend = u + wOn2;
        b.vbegin = v - wOn2;
        b.vend = v + wOn2;
    }
    if(bd0.vbegin < 0)
    {
        // bottom neighbour
        BoundData& b = boundData[nfaces++];
        b.faceIndex = MicroBuf::neighbourV(faceIndex, 0);
        MicroBuf::faceCoords(b.faceIndex, p, u, v);
        u = rasterScale*(u + 1.0f);
        v = rasterScale*(v + 1.0f);
        b.ubegin = u - wOn2;
        b.uend = u + wOn2;
        b.vbegin = v - wOn2;
        b.vend = v + wOn2;
    }
    else if(bd0.vend > faceRes)
    {
        // top neighbour
        BoundData& b = boundData[nfaces++];
        b.faceIndex = MicroBuf::neighbourV(faceIndex, 1);
        MicroBuf::faceCoords(b.faceIndex, p, u, v);
        u = rasterScale*(u + 1.0f);
        v = rasterScale*(v + 1.0f);
        b.ubegin = u - wOn2;
        b.uend = u + wOn2;
        b.vbegin = v - wOn2;
        b.vend = v + wOn2;
    }
    for(int iface = 0; iface < nfaces; ++iface)
    {
        BoundData& bd = boundData[iface];
        // Range of pixels which the square touches (note, exclusive end)
        int ubeginRas = Imath::clamp(int(bd.ubegin),   0, faceRes);
        int uendRas   = Imath::clamp(int(bd.uend) + 1, 0, faceRes);
        int vbeginRas = Imath::clamp(int(bd.vbegin),   0, faceRes);
        int vendRas   = Imath::clamp(int(bd.vend) + 1, 0, faceRes);
        integrator.setFace(bd.faceIndex);
        for(int iv = vbeginRas; iv < vendRas; ++iv)
            for(int iu = ubeginRas; iu < uendRas; ++iu)
            {
                // Calculate the fraction coverage of the square over the current
                // pixel for antialiasing.  This estimate is what you'd get if you
                // filtered the square representing the surfel with a 1x1 box filter.
                float urange = std::min<float>(iu+1, bd.uend) -
                               std::max<float>(iu,   bd.ubegin);
                float vrange = std::min<float>(iv+1, bd.vend) -
                               std::max<float>(iv,   bd.vbegin);
                float coverage = urange*vrange;
                integrator.addSample(iu, iv, plen, coverage);
            }
    }
}
Example #28
Rgba
EnvmapImage::filteredLookup (V3f d, float r, int n) const
{
    //
    // Filtered environment map lookup: Take n by n point samples
    // from the environment map, clustered around direction d, and
    // combine the samples with a tent filter.
    //
    
    //
    // Depending on the type of map, pick an appropriate function
    // to convert 3D directions to 2D pixel positions.
    //

    V2f (* dirToPos) (const Box2i &, const V3f &);

    if (_type == ENVMAP_LATLONG)
	dirToPos = dirToPosLatLong;
    else
	dirToPos = dirToPosCube;

    //
    // Pick two vectors, dx and dy, of length r, that are orthogonal
    // to the lookup direction, d, and to each other.
    //

    d.normalize();
    V3f dx, dy;

    if (abs (d.x) > 0.707f)
	dx = (d % V3f (0, 1, 0)).normalized() * r;
    else
	dx = (d % V3f (1, 0, 0)).normalized() * r;

    dy = (d % dx).normalized() * r;

    //
    // Take n by n point samples from the map, and add them up.
    // The directions for the point samples are all within the pyramid
    // defined by the vectors d-dy-dx, d-dy+dx, d+dy-dx, d+dy+dx.
    //

    float wt = 0;

    float cr = 0;
    float cg = 0;
    float cb = 0;
    float ca = 0;

    for (int y = 0; y < n; ++y)
    {
	float ry = float (2 * y + 2) / float (n + 1) - 1;
	float wy = 1 - abs (ry);
	V3f ddy (ry * dy);

	for (int x = 0; x < n; ++x)
	{
	    float rx = float (2 * x + 2) / float (n + 1) - 1;
	    float wx = 1 - abs (rx);
	    V3f ddx (rx * dx);
	    
	    Rgba s = sample (dirToPos (_dataWindow, d + ddx + ddy));

	    float w = wx * wy;
	    wt += w;

	    cr += s.r * w;
	    cg += s.g * w;
	    cb += s.b * w;
	    ca += s.a * w;
	}
    }

    wt = 1 / wt;

    Rgba c;

    c.r = cr * wt;
    c.g = cg * wt;
    c.b = cb * wt;
    c.a = ca * wt;

    return c;
}
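A quick look at the tent weights used above (a standalone sketch): for n = 3 the sample offsets are -0.5, 0 and +0.5 with weights 0.5, 1 and 0.5, i.e. a one-dimensional tent centred on the lookup direction.
#include <cmath>
#include <cstdio>

int main()
{
    const int n = 3;
    for (int y = 0; y < n; ++y)
    {
        float ry = float (2 * y + 2) / float (n + 1) - 1;   // same offsets as above
        std::printf("offset %+.2f  weight %.2f\n", ry, 1 - std::fabs(ry));
    }
    return 0;
}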
Example #29
static void renderNode(IntegratorT& integrator, V3f P, V3f N, float cosConeAngle,
                       float sinConeAngle, float maxSolidAngle, int dataSize,
                       const DiffusePointOctree::Node* node)
{
    // This is an iterative traversal of the point hierarchy, since it's
    // slightly faster than a recursive traversal.
    //
    // The max required size for the explicit stack should be < 200, since
    // tree depth shouldn't be > 24, and we have a max of 8 children per node.
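    //
    // (Worked bound: popping a node and pushing up to 8 children grows the
    // stack by at most 7 per level of descent, so a depth-24 traversal needs
    // at most 1 + 7*24 = 169 entries, comfortably below 200.)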
    const DiffusePointOctree::Node* nodeStack[200];
    nodeStack[0] = node;
    int stackSize = 1;
    while(stackSize > 0)
    {
        node = nodeStack[--stackSize];
        {
            // Examine node bound and cull if possible
            // TODO: Reinvestigate using (node->aggP - P) with spherical harmonics
            V3f c = node->center - P;
            if(sphereOutsideCone(c, c.length2(), node->boundRadius, N,
                                 cosConeAngle, sinConeAngle))
                continue;
        }
        float r = node->aggR;
        V3f p = node->aggP - P;
        float plen2 = p.length2();
        // Examine solid angle of interior node bounding sphere to see whether we
        // can render it directly or not.
        //
        // TODO: Would be nice to use dot(node->aggN, p.normalized()) in the solid
        // angle estimation.  However, we get bad artifacts if we do this naively.
        // Perhaps with spherical harmonics it'll be better.
        float solidAngle = M_PI*r*r / plen2;
        if(solidAngle < maxSolidAngle)
        {
            integrator.setPointData(reinterpret_cast<const float*>(&node->aggCol));
            renderDisk(integrator, N, p, node->aggN, r, cosConeAngle, sinConeAngle);
        }
        else
        {
            // If we get here, the solid angle of the current node was too large
            // so we must consider the children of the node.
            //
            // The render order is sorted so that points are rendered front to
            // back.  This greatly improves the correctness of the hider.
            //
            // FIXME: The sorting procedure gets things wrong sometimes!  The
            // problem is that points may stick outside the bounds of their octree
            // nodes.  Probably we need to record all the points, sort, and
            // finally render them to get this right.
            if(node->npoints != 0)
            {
                // Leaf node: simply render each child point.
                std::pair<float, int> childOrder[8];
                // INDIRECT
                assert(node->npoints <= 8);
                for(int i = 0; i < node->npoints; ++i)
                {
                    const float* data = &node->data[i*dataSize];
                    V3f p = V3f(data[0], data[1], data[2]) - P;
                    childOrder[i].first = p.length2();
                    childOrder[i].second = i;
                }
                std::sort(childOrder, childOrder + node->npoints);
                for(int i = 0; i < node->npoints; ++i)
                {
                    const float* data = &node->data[childOrder[i].second*dataSize];
                    V3f p = V3f(data[0], data[1], data[2]) - P;
                    V3f n = V3f(data[3], data[4], data[5]);
                    float r = data[6];
                    integrator.setPointData(data+7);
                    renderDisk(integrator, N, p, n, r, cosConeAngle, sinConeAngle);
                }
                continue;
            }
            else
            {
                // Interior node: render children.
                std::pair<float, const DiffusePointOctree::Node*> children[8];
                int nchildren = 0;
                for(int i = 0; i < 8; ++i)
                {
                    DiffusePointOctree::Node* child = node->children[i];
                    if(!child)
                        continue;
                    children[nchildren].first = (child->center - P).length2();
                    children[nchildren].second = child;
                    ++nchildren;
                }
                std::sort(children, children + nchildren);
                // Interior node: render each non-null child.  Nodes we want to
                // render first must go onto the stack last.
                for(int i = nchildren-1; i >= 0; --i)
                    nodeStack[stackSize++] = children[i].second;
            }
        }
    }
}