Example no. 1
static bool set_attribute_float3(float3 f, TypeDesc type, bool derivatives, void *val)
{
	float3 fv[3];

	fv[0] = f;
	fv[1] = make_float3(0.0f, 0.0f, 0.0f);
	fv[2] = make_float3(0.0f, 0.0f, 0.0f);

	return set_attribute_float3(fv, type, derivatives, val);
}
Example no. 2
CCL_NAMESPACE_BEGIN

Camera::Camera()
{
	shuttertime = 1.0f;

	aperturesize = 0.0f;
	focaldistance = 10.0f;
	blades = 0;
	bladesrotation = 0.0f;

	matrix = transform_identity();

	motion.pre = transform_identity();
	motion.post = transform_identity();
	use_motion = false;

	aperture_ratio = 1.0f;

	type = CAMERA_PERSPECTIVE;
	panorama_type = PANORAMA_EQUIRECTANGULAR;
	fisheye_fov = M_PI_F;
	fisheye_lens = 10.5f;
	fov = M_PI_4_F;

	sensorwidth = 0.036f;
	sensorheight = 0.024f;

	nearclip = 1e-5f;
	farclip = 1e5f;

	width = 1024;
	height = 512;
	resolution = 1;

	viewplane.left = -((float)width/(float)height);
	viewplane.right = (float)width/(float)height;
	viewplane.bottom = -1.0f;
	viewplane.top = 1.0f;

	screentoworld = transform_identity();
	rastertoworld = transform_identity();
	ndctoworld = transform_identity();
	rastertocamera = transform_identity();
	cameratoworld = transform_identity();
	worldtoraster = transform_identity();

	dx = make_float3(0.0f, 0.0f, 0.0f);
	dy = make_float3(0.0f, 0.0f, 0.0f);

	need_update = true;
	need_device_update = true;
	previous_need_motion = -1;
}
Example no. 3
void
Projector::updateOrientation()
{
    float3 kNormal = make_float3(0.0f, 1.0f, 0.0f);

    Plane kBasePlane = Plane(make_float3(0.0f, 0.0f, 0.0f), kNormal);
    Plane kUpperPlane = Plane(kNormal, m_fUpperLimit);
    Plane kLowerPlane = Plane(kNormal, m_fLowerLimit);

    m_kUp = m_pkCamera->getUpDirection(); 
    m_kView = m_pkCamera->getViewDirection();
    m_kPosition = m_pkCamera->getPosition();
    m_kLookAt = m_kPosition + m_kView;

    if(m_bSimple)
    {
        m_kUp = make_float3(0.0f, 1.0f, 0.0f);
        m_kPosition = m_kPosition - m_kView * 5.0f;
        m_kLookAt = m_kPosition + m_kView;
    }
    else
    {   
        float fHeight = kLowerPlane.distance(m_kPosition);

        bool bBelow = fHeight < 0.0f;

        if(fHeight < (m_fStrength + m_fElevation))
        {
            if(bBelow)
            {
                m_kPosition += kNormal * ((m_fStrength + m_fElevation) - 2.0f * fHeight);
            }
            else
            {
                m_kPosition += kNormal * ((m_fStrength + m_fElevation) - fHeight);        
            }
        }
        
        if (!kBasePlane.findIntersection(m_kLookAt, m_kPosition, m_kPosition + m_kView))
        {
            float3 kFlip = m_kView - kNormal * dot(m_kView, kNormal) * 2.0f;
            kBasePlane.findIntersection(m_kLookAt, m_kPosition, m_kPosition + kFlip);
        }

        float fD = fabs( kBasePlane.dotNormal(m_kView) );
        float fZ = (m_pkCamera->getFarClip() - m_pkCamera->getNearClip()) * 0.5f;
        
        float3 kAim = (m_kPosition + m_kView * fZ);
        kAim = kAim - kNormal * dot(kAim, kNormal);
        
        m_kLookAt = ((m_kLookAt * fD + kAim * (1.0f - fD)));
        m_kView = (m_kLookAt - m_kPosition);
    }
}
Example no. 4
void 
OpenSteer::OpenSteerDemo::position3dCamera (AbstractVehicle& selected,
                                            float distance,
                                            float /*elevation*/)
{
    //selectedVehicle = &selected;
    // note: &selected is never null for a reference, so this check is always true
    if (&selected)
    {
        const float3 behind = float3_scalar_multiply(make_float3(selected.forward()), -distance);
        camera.setPosition ( make_float4(float3_add(make_float3(selected.position()), behind), 0.f) );
        camera.target = make_float3(selected.position());
    }
}
Example no. 5
void MeshViewer::initLights()
{
	// Lights buffer / Sun position
	BasicLight lights[] = { { make_float3( 200.0f, 260.0f, 220.0f ), make_float3( 1.0f, 1.0f, 1.0f ), 1 } };

	Buffer light_buffer = m_context->createBuffer(RT_BUFFER_INPUT);
	light_buffer->setFormat			(RT_FORMAT_USER);
	light_buffer->setElementSize		(sizeof(BasicLight));
	light_buffer->setSize			(sizeof(lights) / sizeof(lights[0]));
	memcpy							(light_buffer->map(), lights, sizeof(lights));
	light_buffer->unmap();

	m_context[ "lights" ]->set(light_buffer);
}
Example no. 6
void SPHSystem::add_particle(float3 pos, float3 vel)
{
	Particle *p=&(hMem[hParam->num_particle]);

	p->pos=pos;
	p->vel=vel;
	p->acc=make_float3(0.0f, 0.0f, 0.0f);
	p->ev=make_float3(0.0f, 0.0f, 0.0f);

	p->dens=hParam->rest_density;
	p->pres=0.0f;

	hParam->num_particle++;
}
Example no. 7
void 
OpenSteer::OpenSteerDemo::drawCircleHighlightOnVehicle (const AbstractVehicle& v,
                                                  const float radiusMultiplier,
                                                  const float3 color)
{
    // note: &v is never null for a reference, so this check is always true
    if (&v)
    {
        const float3& cPosition = make_float3(camera.position());
        draw3dCircle  (v.radius() * radiusMultiplier,  // adjusted radius
                       make_float3(v.position()),                   // center
                       float3_subtract(make_float3(v.position()), cPosition),       // view axis
                       color,                          // drawing color
                       20);                            // circle segments
    }
}
Example no. 8
LDEVICE float3 BeckmannDistribution::sample( float2 seed ) const
{
    if( m_alpha < 0.000001f )
        return make_float3( 0.0f, 0.0f, 1.0f );

    const float alpha_sqr     = m_alpha*m_alpha;
    const float tan_theta_sqr = -alpha_sqr*logf( 1.0f - seed.x );
    const float cos_theta     = 1.0f / sqrtf( 1.0f + tan_theta_sqr );
    const float sin_theta     = cos_theta*sqrtf( tan_theta_sqr );

    const float phi = 2.0f * legion::PI * seed.y;
    return make_float3( cosf( phi ) * sin_theta,
                        sinf( phi ) * sin_theta,
                        cos_theta );
}
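For reference, a minimal standalone sketch (plain C++, not using the legion types or the LDEVICE qualifier above) that repeats the same inverse-CDF arithmetic, tan^2(theta) = -alpha^2 * ln(1 - xi), and prints the length and z component of the resulting direction; the roughness value and seed loop are arbitrary assumptions chosen only for illustration.

#include <cmath>
#include <cstdio>

int main()
{
    const float alpha = 0.3f;               // assumed roughness
    const float pi    = 3.14159265358979f;

    for (float sx = 0.1f; sx < 1.0f; sx += 0.2f) {
        for (float sy = 0.1f; sy < 1.0f; sy += 0.2f) {
            // Same mapping as BeckmannDistribution::sample above
            const float alpha_sqr     = alpha * alpha;
            const float tan_theta_sqr = -alpha_sqr * std::log(1.0f - sx);
            const float cos_theta     = 1.0f / std::sqrt(1.0f + tan_theta_sqr);
            const float sin_theta     = cos_theta * std::sqrt(tan_theta_sqr);
            const float phi           = 2.0f * pi * sy;

            const float x = std::cos(phi) * sin_theta;
            const float y = std::sin(phi) * sin_theta;
            const float z = cos_theta;

            // The direction should be unit length and in the upper hemisphere (z > 0)
            std::printf("len = %f, z = %f\n",
                        std::sqrt(x * x + y * y + z * z), z);
        }
    }
    return 0;
}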
Example no. 9
  SchlierenRenderer::SchlierenRenderer()
: _initialized(false), _rot_x(0), _rot_y(0)
{
  _params.passes = 0;
  //_params.tex_data = NULL;
  _params.width = 512;
  _params.height = 512;
  _params.inout_rgb = NULL;
  _params.out_rgb = NULL;
  //        _params.camera_pos = make_float3(0,0,-5);
  //        _params.camera_x = make_float3(1,0,0);
  //        _params.camera_y = make_float3(0,1,0);
  //        _params.camera_z = make_float3(0,0,1);
  updateCamera();
  _params.raysPerPixel = 1;
  _params.numRenderPasses = 1;
  _params.cutoff = CUTOFF_NONE;
  _params.cutoff_dirty = true;
  _params.data = NULL;
  _params.projectionDistance = 0.4;
  _params.stepSize = 0.1;
  _params.threadSafe = false;
  _params.dataScalar = 1.0f;
  _params.cutoffScalar = 5000.0f;
  _params.useOctree = false;
  _params.useRefraction = false;
  _params.inout_rgb = new float4[_params.width*_params.height];
  _params.min_bound = make_float3(-.5,-.5,-.5);
  _params.max_bound = make_float3(.5,.5,.5);
  float3 camera_pos, camera_x, camera_y, camera_z, center = _params.max_bound/2.0;
  float rot_x = 0, rot_y = 0;
  //        camera_pos.z = center.z + cos(rot_x)*5.0;
  //        camera_pos.x = center.x + sin(rot_x)*5.0;
  //        camera_z = normalize(center-camera_pos);
  //        camera_z = make_float3(0,0,1);

  //        camera_y = make_float3(0,1,0);
  //        camera_x = normalize(cross(camera_y, camera_z*-1.0f));
  //        camera_y = normalize(cross(camera_x, camera_z));

  //        _params.camera_corner = _params.camera_pos-(_params.camera_x*.5+_params.camera_y*.5);

  //        _params.camera_x = camera_x;
  //        _params.camera_y = camera_y;
  //        _params.camera_z = camera_z;

  _center = make_float3(0,0,0);
}
Example no. 10
static void mikk_get_texture_coordinate(const SMikkTSpaceContext *context, float uv[2], const int face_num, const int vert_num)
{
	MikkUserData *userdata = (MikkUserData*)context->m_pUserData;
	if(userdata->layer != NULL) {
		BL::MeshTextureFace tf = userdata->layer->data[face_num];
		float3 tfuv;

		switch(vert_num) {
			case 0:
				tfuv = get_float3(tf.uv1());
				break;
			case 1:
				tfuv = get_float3(tf.uv2());
				break;
			case 2:
				tfuv = get_float3(tf.uv3());
				break;
			default:
				tfuv = get_float3(tf.uv4());
				break;
		}

		uv[0] = tfuv.x;
		uv[1] = tfuv.y;
	}
	else {
		int vert_idx = userdata->mesh.tessfaces[face_num].vertices()[vert_num];
		float3 orco =
			get_float3(userdata->mesh.vertices[vert_idx].undeformed_co());
		float2 tmp = map_to_sphere(make_float3(orco[0], orco[1], orco[2]));
		uv[0] = tmp.x;
		uv[1] = tmp.y;
	}
}
Example no. 11
void SPHSystem::init_system()
{
	float3 pos;
	float3 vel=make_float3(0.0f);

	for(pos.x=world_size.x*0.0f; pos.x<world_size.x*0.6f; pos.x+=(kernel[LEVEL1]*0.5f))
	{
		for(pos.y=world_size.y*0.0f; pos.y<world_size.y*1.0f; pos.y+=(kernel[LEVEL1]*0.5f))
		{
			for(pos.z=world_size.z*0.0f; pos.z<world_size.z*1.0f; pos.z+=(kernel[LEVEL1]*0.5f))
			{
				add_particle(pos, vel, LEVEL1);
			}
		}
	}

	/*for(pos.x=world_size.x*0.7f; pos.x<world_size.x*1.0f; pos.x+=(kernel[LEVEL2]*0.5f))
	{
		for(pos.y=world_size.y*0.0f; pos.y<world_size.y*1.0f; pos.y+=(kernel[LEVEL2]*0.5f))
		{
			for(pos.z=world_size.z*0.0f; pos.z<world_size.z*1.0f; pos.z+=(kernel[LEVEL2]*0.5f))
			{
				add_particle(pos, vel, 1);
			}
		}
	}*/

	printf("Init Particle: %u\n", num_particle);
}
Example no. 12
float Camera::world_to_raster_size(float3 P)
{
	if(type == CAMERA_ORTHOGRAPHIC) {
		return min(len(full_dx), len(full_dy));
	}
	else if(type == CAMERA_PERSPECTIVE) {
		/* Calculate as if point is directly ahead of the camera. */
		float3 raster = make_float3(0.5f*width, 0.5f*height, 0.0f);
		float3 Pcamera = transform_perspective(&rastertocamera, raster);

		/* dDdx */
		float3 Ddiff = transform_direction(&cameratoworld, Pcamera);
		float3 dx = len_squared(full_dx) < len_squared(full_dy) ? full_dx : full_dy;
		float3 dDdx = normalize(Ddiff + dx) - normalize(Ddiff);

		/* dPdx */
		float dist = len(transform_point(&worldtocamera, P));
		float3 D = normalize(Ddiff);
		return len(dist*dDdx - dot(dist*dDdx, D)*D);
	}
	else {
		// TODO(mai): implement for CAMERA_PANORAMA
		assert(!"pixel width calculation for panoramic projection not implemented yet");
	}

	return 1.0f;
}
Example no. 13
void InteractiveCamera::buildRenderCamera(Camera* renderCamera){
	float xDirection = sin(yaw) * cos(pitch);
	float yDirection = sin(pitch);
	float zDirection = cos(yaw) * cos(pitch);
	glm::vec3 directionToCamera = glm::vec3(xDirection, yDirection, zDirection);
	glm::vec3 viewDirection = -directionToCamera;
	glm::vec3 eyePosition = centerPosition + directionToCamera * radius;

	renderCamera->position = make_float3(eyePosition[0], eyePosition[1], eyePosition[2]);
	renderCamera->view = make_float3(viewDirection[0], viewDirection[1], viewDirection[2]);
	renderCamera->up = make_float3(0, 1, 0);
	renderCamera->resolution = make_float2(resolution.x, resolution.y);
	renderCamera->fov = make_float2(fov.x, fov.y);
	renderCamera->apertureRadius = apertureRadius;
	renderCamera->focalDistance = radius;
}
Example no. 14
void PzPTest::setUp()
{
	holder.resize(3);

	float3 * pos = holder.pos.map( MEM::MISC::BUF_H );
	pos[0] = make_float3( 0 , 0 , 1 );
	pos[1] = make_float3( 0 , 0 ,-1 );
	pos[2] = make_float3( 0 , 0 ,-2 );
	holder.pos.unmap();

	float * rad = holder.radius.map( MEM::MISC::BUF_H );
	rad[0] = 1;
	rad[1] = 1;
	rad[2] = 1;
	holder.radius.unmap();
}
Example no. 15
static void flatten_background_closure_tree(ShaderData *sd,
                                            const OSL::ClosureColor *closure,
                                            float3 weight = make_float3(1.0f, 1.0f, 1.0f))
{
	/* OSL gives us a closure tree; when shading the background there is only
	 * one supported closure type at the moment, which has no evaluation
	 * functions, so we just sum the weights */

	switch(closure->id) {
		case OSL::ClosureColor::MUL: {
			OSL::ClosureMul *mul = (OSL::ClosureMul *)closure;
			flatten_background_closure_tree(sd, mul->closure, weight * TO_FLOAT3(mul->weight));
			break;
		}
		case OSL::ClosureColor::ADD: {
			OSL::ClosureAdd *add = (OSL::ClosureAdd *)closure;

			flatten_background_closure_tree(sd, add->closureA, weight);
			flatten_background_closure_tree(sd, add->closureB, weight);
			break;
		}
		default: {
			OSL::ClosureComponent *comp = (OSL::ClosureComponent *)closure;
			CClosurePrimitive *prim = (CClosurePrimitive *)comp->data();

			if(prim) {
#ifdef OSL_SUPPORTS_WEIGHTED_CLOSURE_COMPONENTS
				weight = weight*TO_FLOAT3(comp->w);
#endif
				prim->setup(sd, 0, weight);
			}
			break;
		}
	}
}
Example no. 16
  //! Function to transform a location
  __device__ float3 transform( const float3& rIn ) const
  {
    /*!
      Performs the transformation described by
      the class to the given input location
    */
    float r1[kVectorSize], r2[kVectorSize];

    // Set up input affine vector
    r1[0] = rIn.x;
    r1[1] = rIn.y;
    r1[2] = rIn.z;
    r1[3] = 1;

    // Do the multiplication
#pragma unroll 4
    for( unsigned int i=0; i<kVectorSize; i++ )
    {
      r2[i] = 0;
#pragma unroll 4
      for( unsigned int j=0; j<kVectorSize; j++ )
      {
        r2[i] += this->operator() ( i, j ) * r1[j];
      }
    }

    // Construct float3 return value
    return( make_float3( r2[0], r2[1], r2[2] ) );
  }
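To make the role of r1[3] = 1 concrete, here is a hedged standalone sketch that uses a plain 4x4 array in place of the class's operator(): with the homogeneous coordinate set to 1, the fourth matrix column acts as a translation, so the point (1, 2, 3) below is moved by (10, 20, 30). The matrix and input point are made-up values for illustration only.

#include <cstdio>

int main()
{
    const unsigned int kVectorSize = 4;

    // Identity rotation/scale with a translation of (10, 20, 30) in the last column
    const float m[4][4] = {
        { 1.0f, 0.0f, 0.0f, 10.0f },
        { 0.0f, 1.0f, 0.0f, 20.0f },
        { 0.0f, 0.0f, 1.0f, 30.0f },
        { 0.0f, 0.0f, 0.0f,  1.0f },
    };

    float r1[4] = { 1.0f, 2.0f, 3.0f, 1.0f };  // homogeneous input point
    float r2[4];

    // Same row-times-vector multiplication as in transform() above
    for( unsigned int i = 0; i < kVectorSize; i++ )
    {
        r2[i] = 0.0f;
        for( unsigned int j = 0; j < kVectorSize; j++ )
        {
            r2[i] += m[i][j] * r1[j];
        }
    }

    std::printf( "transformed point = (%f, %f, %f)\n", r2[0], r2[1], r2[2] );
    return 0;
}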
Example no. 17
ParticleSystem::ParticleSystem(uint numParticles, bool bUseVBO, bool bUseGL) :
    m_bInitialized(false),
    m_bUseVBO(bUseVBO),
    m_numParticles(numParticles),
    m_particleRadius(0.1),
    m_doDepthSort(false),
    m_timer(NULL),
    m_time(0.0f)
{
    m_params.gravity = make_float3(0.0f, 0.0f, 0.0f);
    m_params.globalDamping = 1.0f;
    m_params.noiseSpeed = make_float3(0.0f, 0.0f, 0.0f);

    _initialize(numParticles, bUseGL);

}
Example no. 18
	void eval(float3 *P_, float3 *dPdu_, float3 *dPdv_, float u, float v)
	{
		OsdEvalCoords coords;
		coords.u = u;
		coords.v = v;
		coords.face = face_id;

		evalctrl.EvalLimitSample<OsdCpuVertexBuffer,OsdCpuVertexBuffer>(coords, evalctx, 0);

		*P_ = make_float3(P[0], P[1], P[2]);
		if (dPdu_) *dPdu_ = make_float3(dPdv[0], dPdv[1], dPdv[2]);
		if (dPdv_) *dPdv_ = make_float3(dPdu[0], dPdu[1], dPdu[2]);

		/* optimize: skip evaluating derivatives when not needed */
		/* todo: swapped derivatives, different winding convention? */
	}
Example no. 19
/******************************************************************************
 * Set light
 *
 * @param theta ...
 * @param phi ...
 ******************************************************************************/
void SampleViewer::setLight( float theta, float phi )
{
	// Retrieve Cartesian coordinates from spherical ones
	_light[ 0 ] = sinf( theta ) * cosf( phi );
	_light[ 1 ] = cosf( theta );
	_light[ 2 ] = sinf( theta ) * sinf( phi );

	// Store theta and phi parameters
	_light[ 3 ] = theta;
	_light[ 4 ] = phi;

	// Express the light direction in the view coordinate system
	float3 lightDirInView = make_float3( _light[ 0 ], _light[ 1 ], _light[ 2 ] );

	// Retrieve the model view matrix
	float4x4 modelViewMatrix;
	glGetFloatv( GL_MODELVIEW_MATRIX, modelViewMatrix._array );

	// Reset translation part
	modelViewMatrix._array[ 12 ] = 0.f;
	modelViewMatrix._array[ 13 ] = 0.f;
	modelViewMatrix._array[ 14 ] = 0.f;
	modelViewMatrix._array[ 15 ] = 1.f;

	// Compute inverse matrix (for the rotation-only matrix, the transpose is its inverse)
	float4x4 invModelViewMatrix;
	invModelViewMatrix = transpose( modelViewMatrix );

	// Retrieve light direction in world coordinate system then normalize
	float3 lightDirInWorld = mulRot( invModelViewMatrix, lightDirInView );
	float3 lightDir = normalize( lightDirInWorld );

	// Update the GigaVoxels pipeline
	_sampleCore->setLightPosition( lightDir.x, lightDir.y, lightDir.z );
}
Example no. 20
float3
OpenSteer::SteerLibrary::
steerForCohesion (const AbstractVehicle& v, 
				  const float maxDistance,
                  const float cosMaxAngle,
                  const AVGroup& flock)
{
    // steering accumulator and count of neighbors, both initially zero
    float3 steering = float3_zero();
    int neighbors = 0;

    // for each of the other vehicles...
    for (AVIterator other = flock.begin(); other != flock.end(); other++)
    {
        if (inBoidNeighborhood (v, **other, v.radius() * 3, maxDistance, cosMaxAngle))
        {
            // accumulate sum of neighbor's positions
			steering = float3_add(steering, make_float3((**other).position()));

            // count neighbors
            neighbors++;
        }
    }

    // divide by neighbors, subtract off current position to get error-
    // correcting direction, then normalize to pure direction
    if (neighbors > 0)
		steering = float3_normalize(float3_subtract(float3_scalar_divide(steering, (float)neighbors), make_float3(v.position())));

    return steering;
}
Example no. 21
float3
OpenSteer::SteerLibrary::
steerForFlee (const AbstractVehicle& v, const float3& target)
{
    const float3 desiredVelocity = float3_subtract(make_float3(v.position()), target);
    return float3_subtract(desiredVelocity, v.velocity());
}
Example no. 22
void SetPixels(ScreenParams& params)
{
    float3 dir = params.look_at - make_float3(EYEX0, EYEY0, EYEZ0);
    dir = normalize(dir);

    float3 up = {Upx,Upy,Upz};
    float3 u = normalize(cross(up, dir));

    float3 v = cross(dir, u);

    float3 midScr = {EYEX0 + ScrToEye * dir.x, EYEY0 + ScrToEye * dir.y, EYEZ0 + ScrToEye * dir.z};

    float3 downScr;
    int shijiW = 16;
    int shijiH = 9;
    downScr = midScr - shijiH * v;

    params.screen_left_down = downScr - shijiW * u;
    params.screen_right_down = downScr + shijiW * u;

    float3 upScr = midScr + shijiH * v;

    params.screen_left_up = upScr - shijiW * u;
    params.screen_right_up = upScr + shijiW * u;

    params.pixel_right = (params.screen_right_down - params.screen_left_down) / ScrWidth;
    params.pixel_up = (params.screen_left_up - params.screen_left_down) / ScrHeight;
}
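As a hedged illustration of how the corner and step vectors computed above could be used, the standalone sketch below builds a primary ray direction through pixel (i, j) by stepping from screen_left_down with pixel_right and pixel_up; the small vec3 type, the eye position, and all numeric values are illustrative stand-ins rather than the project's real ScreenParams, EYEX0/EYEY0/EYEZ0, or ScrWidth/ScrHeight definitions.

#include <cmath>
#include <cstdio>

struct vec3 { float x, y, z; };

static vec3 add(vec3 a, vec3 b)    { return { a.x + b.x, a.y + b.y, a.z + b.z }; }
static vec3 sub(vec3 a, vec3 b)    { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static vec3 scale(vec3 a, float s) { return { a.x * s, a.y * s, a.z * s }; }

static vec3 normalize(vec3 a)
{
    float len = std::sqrt(a.x * a.x + a.y * a.y + a.z * a.z);
    return scale(a, 1.0f / len);
}

int main()
{
    // Assumed eye position and screen description (stand-ins for ScreenParams)
    vec3 eye              = {   0.0f,  0.0f,  0.0f };
    vec3 screen_left_down = { -16.0f, -9.0f, 10.0f };
    vec3 pixel_right      = { 32.0f / 1920.0f, 0.0f, 0.0f };
    vec3 pixel_up         = { 0.0f, 18.0f / 1080.0f, 0.0f };

    int i = 960, j = 540;  // pixel indices (column, row)

    // Centre of pixel (i, j) on the screen plane
    vec3 p = add(screen_left_down,
                 add(scale(pixel_right, i + 0.5f), scale(pixel_up, j + 0.5f)));

    // Primary ray direction from the eye through that pixel
    vec3 dir = normalize(sub(p, eye));
    std::printf("ray dir = (%f, %f, %f)\n", dir.x, dir.y, dir.z);
    return 0;
}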
Example no. 23
  //! Routine to transform a float3 vector
  __device__ float3 transform( const float3& a ) const
  {
    float r1[kVectorSize], r2[kVectorSize];

    // Set up input affine vector
    r1[0] = a.x;
    r1[1] = a.y;
    r1[2] = a.z;
    r1[3] = 1;

    // Do the multiplication
#pragma unroll 4
    for( unsigned int i=0; i<kVectorSize; i++ )
    {
      r2[i] = 0;
#pragma unroll 4
      for( unsigned int j=0; j<kVectorSize; j++ )
      {
        r2[i] += (*this)( i, j ) * r1[j];
      }
    }

    // Construct float3 return value
    return( make_float3( r2[0], r2[1], r2[2] ) );
  }
Example no. 24
void uploadToGPU()
{//upload scene to GPU
	std::vector<float4> triangles;
	std::vector<float4> spheres;
	std::vector<float4> boxes;
	world.updateToArray(triangles, spheres, boxes);

	size_t triangle_size = triangles.size() * sizeof(float4);
	size_t sphere_size = spheres.size() * sizeof(float4);
	size_t boxes_size = boxes.size() * sizeof(float4);

	//merge into one scene
	std::vector<float4> sceneObj;
	sceneObj.insert(sceneObj.end(), triangles.begin(), triangles.end());
	sceneObj.insert(sceneObj.end(), spheres.begin(), spheres.end());
	sceneObj.insert(sceneObj.end(), boxes.begin(), boxes.end());

	//if the scene is dynamic, this function might have to be updated per frame;
	//in that case, avoid mallocing every frame.
	cutilSafeCall(cudaMalloc((void**)&dev_scene_pointer, triangle_size + sphere_size + boxes_size));
	cudaMemcpy(dev_scene_pointer, &sceneObj[0], triangle_size + sphere_size + boxes_size, cudaMemcpyHostToDevice);//&triangles[0]
	bindTexture(dev_scene_pointer, triangles.size()/4, spheres.size()/3, boxes.size()/4);//change the denominator for more information to bind 

	//add a device framebuffer for last frame.
	std::vector<float3> clean(r_width * r_height, make_float3(0.0));
	//float zero = 0;
	cutilSafeCall(cudaMalloc((void**)&dev_lastframe_ptr, r_width * r_height * sizeof(float3)));
	cudaMemcpy(dev_lastframe_ptr, &clean[0], r_width * r_height * sizeof(float3), cudaMemcpyHostToDevice);
	cutilSafeCall(cudaMalloc((void**)&dev_num_layers, sizeof(int)));
	cudaMemcpy(dev_num_layers, &frame_num, sizeof(int), cudaMemcpyHostToDevice);
}
Example no. 25
BoundBox Camera::viewplane_bounds_get()
{
	/* TODO(sergey): This is all rather stupid, but is there a way to perform
	 * the checks we need in a clearer and smarter fashion?
	 */
	BoundBox bounds = BoundBox::empty;

	if(type == CAMERA_PANORAMA) {
		bounds.grow(make_float3(cameratoworld.w.x,
		                        cameratoworld.w.y,
		                        cameratoworld.w.z));
	}
	else {
		bounds.grow(transform_raster_to_world(0.0f, 0.0f));
		bounds.grow(transform_raster_to_world(0.0f, (float)height));
		bounds.grow(transform_raster_to_world((float)width, (float)height));
		bounds.grow(transform_raster_to_world((float)width, 0.0f));
		if(type == CAMERA_PERSPECTIVE) {
			/* Center point has the most distance in local Z axis,
			 * use it to construct the bounding box.
			 */
			bounds.grow(transform_raster_to_world(0.5f*width, 0.5f*height));
		}
	}
	return bounds;
}
Example no. 26
bool EnhancedIKSolver::solve_chain(int root_bone, int end_bone,
		const float3& position, int current_frame) {
	std::vector<int> indices;
	fill_chain(root_bone, end_bone, current_frame, indices);

	//Calculate in root_bone coordinate system where is the goal position
	osg::Matrix m;
	skeleton->get_node(root_bone)->get_node_world_matrix_origin(current_frame,
			m);
	osg::Vec3 position_osg(position.x, position.y, position.z);
	osg::Vec3 goal_position = position_osg * m;

	//Solve and update the rotations
	if (ik_solver.solve_chain(make_float3(goal_position._v))) {
		int j = 0;
		for (unsigned int i = 0; i < indices.size(); i++) {
			Node * node = skeleton->get_node(indices[i]);
			float4 new_rot;
			ik_solver.get_rotation_joint(j, new_rot);
			node->quat_arr.at(current_frame).set(new_rot.x, new_rot.y,
					new_rot.z, new_rot.w);
			j++;
		}
		return true;
	} else {
		return false;
	}
}
Example no. 27
static float3 get_node_output_vector(BL::Node b_node, const string& name)
{
	BL::NodeSocket b_sock = get_node_output(b_node, name);
	float value[3];
	RNA_float_get_array(&b_sock.ptr, "default_value", value);
	return make_float3(value[0], value[1], value[2]);
}
Example no. 28
optix::float3 DiffuseAmbientOccluded::doWeightedCosineSamplingWithEnvironmentSampling(const optix::Ray& r, HitInfo& hit, bool emit) const
{
	float3 rho_a = get_emission(hit);
	float3 rho_d = get_diffuse(hit);

	float3 avgUnoccluded = make_float3(0,0,0);
	int numOfUnoccluded = 0;

	const int numOfRays = 200;
	for(int i = 0 ; i < numOfRays ; i++)
	{
		Ray hemiRay = r;
		HitInfo hemiHit;
		if(sampleCosineWeightedHemisphere(hemiRay, hemiHit, hit))
		{
			avgUnoccluded += hemiRay.direction;
			numOfUnoccluded++;
		}
	}

	// note: assumes numOfUnoccluded > 0; otherwise this divides by zero
	float3 averageNormal = normalize(avgUnoccluded / numOfUnoccluded);
	float accessability = ((float)numOfUnoccluded) / numOfRays;

	// sample the environment in the average unoccluded direction
	float3 hdrValue = tracer -> get_background(averageNormal);

	return M_1_PIf * accessability * rho_d * hdrValue;
}
Example no. 29
int DiagSplit::T(Patch *patch, float2 Pstart, float2 Pend)
{
	float3 Plast = make_float3(0.0f, 0.0f, 0.0f);
	float Lsum = 0.0f;
	float Lmax = 0.0f;

	for(int i = 0; i < params.test_steps; i++) {
		float t = i/(float)(params.test_steps-1);

		float3 P = project(patch, Pstart + t*(Pend - Pstart));

		if(i > 0) {
			float L = len(P - Plast);
			Lsum += L;
			Lmax = max(L, Lmax);
		}

		Plast = P;
	}

	int tmin = (int)ceil(Lsum/params.dicing_rate);
	int tmax = (int)ceil((params.test_steps-1)*Lmax/params.dicing_rate); // XXX paper says N instead of N-1, seems wrong?

	if(tmax - tmin > params.split_threshold)
		return DSPLIT_NON_UNIFORM;
	
	return tmax;
}
Example no. 30
static void flatten_volume_closure_tree(ShaderData *sd,
                                        const OSL::ClosureColor *closure,
                                        float3 weight = make_float3(1.0f, 1.0f, 1.0f))
{
	/* OSL gives us a closure tree; we flatten it into arrays per
	 * closure type for evaluation, sampling, etc. later on. */

	switch(closure->id) {
		case OSL::ClosureColor::MUL: {
			OSL::ClosureMul *mul = (OSL::ClosureMul *)closure;
			flatten_volume_closure_tree(sd, mul->closure, TO_FLOAT3(mul->weight) * weight);
			break;
		}
		case OSL::ClosureColor::ADD: {
			OSL::ClosureAdd *add = (OSL::ClosureAdd *)closure;
			flatten_volume_closure_tree(sd, add->closureA, weight);
			flatten_volume_closure_tree(sd, add->closureB, weight);
			break;
		}
		default: {
			OSL::ClosureComponent *comp = (OSL::ClosureComponent *)closure;
			CClosurePrimitive *prim = (CClosurePrimitive *)comp->data();

			if(prim) {
#ifdef OSL_SUPPORTS_WEIGHTED_CLOSURE_COMPONENTS
				weight = weight*TO_FLOAT3(comp->w);
#endif
				prim->setup(sd, 0, weight);
			}
		}
	}
}