Example No. 1
void BVH::pack_triangle(int idx, float4 tri_verts[3])
{
	/* Look up the object this primitive belongs to, and its mesh. */
	int tob = pack.prim_object[idx];
	assert(tob >= 0 && tob < objects.size());
	const Mesh *mesh = objects[tob]->mesh;

	/* Fetch the three vertex positions of the triangle. */
	int tidx = pack.prim_index[idx];
	Mesh::Triangle t = mesh->get_triangle(tidx);
	const float3 *vpos = &mesh->verts[0];
	float3 v0 = vpos[t.v[0]];
	float3 v1 = vpos[t.v[1]];
	float3 v2 = vpos[t.v[2]];

	/* Widen to float4 for aligned storage in the packed BVH arrays. */
	tri_verts[0] = float3_to_float4(v0);
	tri_verts[1] = float3_to_float4(v1);
	tri_verts[2] = float3_to_float4(v2);
}
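
For reference, a minimal sketch of the `float3_to_float4` conversion these examples rely on, assuming plain struct vector types and a default `w` of 1.0f (the real Cycles helpers live in the util headers and may be SSE-backed; callers that need a radius, as in the curve examples below, overwrite `w` afterwards):

/* Sketch only: stand-alone equivalents of the vector helpers used above. */
struct float3 { float x, y, z; };
struct float4 { float x, y, z, w; };

static inline float4 make_float4(float x, float y, float z, float w)
{
	float4 a = {x, y, z, w};
	return a;
}

static inline float4 float3_to_float4(const float3 a)
{
	/* Assumption: w defaults to 1.0f and is overwritten by callers
	 * that pack extra data such as the curve radius. */
	return make_float4(a.x, a.y, a.z, 1.0f);
}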
Example No. 2
static float4 CurveSegmentMotionCV(ParticleCurveData *CData, int sys, int curve, int curvekey)
{
  const float3 ickey_loc = CData->curvekey_co[curvekey];
  const float curve_time = CData->curvekey_time[curvekey];
  const float curve_length = CData->curve_length[curve];
  float time = (curve_length > 0.0f) ? curve_time / curve_length : 0.0f;
  float radius = shaperadius(
      CData->psys_shape[sys], CData->psys_rootradius[sys], CData->psys_tipradius[sys], time);

  if (CData->psys_closetip[sys] &&
      (curvekey == CData->curve_firstkey[curve] + CData->curve_keynum[curve] - 1))
    radius = 0.0f;

  /* curve motion keys store both position and radius in float4 */
  float4 mP = float3_to_float4(ickey_loc);
  mP.w = radius;
  return mP;
}
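
`CurveSegmentMotionCV` leaves the actual root-to-tip falloff to `shaperadius`. A sketch of how that interpolation can work, assuming `shape` in [-1, 1] remaps the normalized `time` with a power curve (math.h's `powf`) before blending between the root and tip radii:

/* Sketch: shape == 0 gives a linear falloff from root to tip;
 * shape < 0 uses an exponent below 1 (strand stays thick longer),
 * shape > 0 an exponent above 1 (strand thins out sooner). */
static float shaperadius(float shape, float root, float tip, float time)
{
  float radius = 1.0f - time;

  if (shape != 0.0f) {
    if (shape < 0.0f)
      radius = powf(radius, 1.0f + shape);
    else
      radius = powf(radius, 1.0f / (1.0f - shape));
  }
  return (radius * (root - tip)) + tip;
}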
Example No. 3
void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
{
	Scene::MotionType need_motion = scene->need_motion(device->info.advanced_shading);

	update();

	if(previous_need_motion != need_motion) {
		/* scene's motion model could have changed since the previous
		 * device camera update; this can happen, for example, when one
		 * render layer has a motion pass and another does not */
		need_device_update = true;
	}

	if(!need_device_update)
		return;
	
	KernelCamera *kcam = &dscene->data.cam;

	/* store matrices */
	kcam->screentoworld = screentoworld;
	kcam->rastertoworld = rastertoworld;
	kcam->rastertocamera = rastertocamera;
	kcam->cameratoworld = cameratoworld;
	kcam->worldtocamera = worldtocamera;
	kcam->worldtoscreen = worldtoscreen;
	kcam->worldtoraster = worldtoraster;
	kcam->worldtondc = worldtondc;

	/* camera motion */
	kcam->have_motion = 0;

	if(need_motion == Scene::MOTION_PASS) {
		if(type == CAMERA_PANORAMA) {
			if(use_motion) {
				kcam->motion.pre = transform_inverse(motion.pre);
				kcam->motion.post = transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = kcam->worldtocamera;
				kcam->motion.post = kcam->worldtocamera;
			}
		}
		else {
			if(use_motion) {
				kcam->motion.pre = cameratoraster * transform_inverse(motion.pre);
				kcam->motion.post = cameratoraster * transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = worldtoraster;
				kcam->motion.post = worldtoraster;
			}
		}
	}
#ifdef __CAMERA_MOTION__
	else if(need_motion == Scene::MOTION_BLUR) {
		if(use_motion) {
			transform_motion_decompose((DecompMotionTransform*)&kcam->motion, &motion, &matrix);
			kcam->have_motion = 1;
		}
	}
#endif

	/* depth of field */
	kcam->aperturesize = aperturesize;
	kcam->focaldistance = focaldistance;
	kcam->blades = (blades < 3)? 0.0f: blades;
	kcam->bladesrotation = bladesrotation;

	/* motion blur */
#ifdef __CAMERA_MOTION__
	kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime: -1.0f;
#else
	kcam->shuttertime = -1.0f;
#endif

	/* type */
	kcam->type = type;

	/* anamorphic lens bokeh */
	kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

	/* panorama */
	kcam->panorama_type = panorama_type;
	kcam->fisheye_fov = fisheye_fov;
	kcam->fisheye_lens = fisheye_lens;
	kcam->equirectangular_range = make_float4(longitude_min - longitude_max, -longitude_min,
	                                          latitude_min - latitude_max, -latitude_min + M_PI_2_F);

	/* sensor size */
	kcam->sensorwidth = sensorwidth;
	kcam->sensorheight = sensorheight;

	/* render size */
	kcam->width = width;
	kcam->height = height;
	kcam->resolution = resolution;

	/* store differentials */
	kcam->dx = float3_to_float4(dx);
	kcam->dy = float3_to_float4(dy);

	/* clipping */
	kcam->nearclip = nearclip;
	kcam->cliplength = (farclip == FLT_MAX)? FLT_MAX: farclip - nearclip;

	/* Camera in volume. */
	kcam->is_inside_volume = 0;

	previous_need_motion = need_motion;
}
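
A rough, hypothetical sketch of how the kernel side could consume the motion-pass matrices stored above: for the non-panoramic case, `motion.pre` and `motion.post` are world-to-raster transforms at the shutter boundaries, so projecting one world position through both yields its raster-space motion (`transform_perspective` follows the Cycles projection-helper convention; the function itself is illustrative, not the actual kernel code):

/* Illustrative only: raster-space motion of a world position P. */
static float2 motion_pass_delta(const KernelCamera *kcam, float3 P)
{
	float3 raster_pre = transform_perspective(&kcam->motion.pre, P);
	float3 raster_post = transform_perspective(&kcam->motion.post, P);
	return make_float2(raster_post.x - raster_pre.x,
	                   raster_post.y - raster_pre.y);
}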
Example No. 4
static void ExportCurveSegmentsMotion(Mesh *mesh, ParticleCurveData *CData, int motion_step)
{
  VLOG(1) << "Exporting curve motion segments for mesh " << mesh->name << ", motion step "
          << motion_step;

  /* find attribute */
  Attribute *attr_mP = mesh->curve_attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
  bool new_attribute = false;

  /* add new attribute if it doesn't exist already */
  if (!attr_mP) {
    VLOG(1) << "Creating new motion vertex position attribute";
    attr_mP = mesh->curve_attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
    new_attribute = true;
  }

  /* export motion vectors for curve keys */
  size_t numkeys = mesh->curve_keys.size();
  float4 *mP = attr_mP->data_float4() + motion_step * numkeys;
  bool have_motion = false;
  int i = 0;
  int num_curves = 0;

  for (int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
    for (int curve = CData->psys_firstcurve[sys];
         curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys];
         curve++) {
      /* Curve lengths may not match! Curves can be clipped. */
      int curve_key_end = (num_curves + 1 < (int)mesh->curve_first_key.size() ?
                               mesh->curve_first_key[num_curves + 1] :
                               (int)mesh->curve_keys.size());
      const int num_center_curve_keys = curve_key_end - mesh->curve_first_key[num_curves];
      const int is_num_keys_different = CData->curve_keynum[curve] - num_center_curve_keys;

      if (!is_num_keys_different) {
        for (int curvekey = CData->curve_firstkey[curve];
             curvekey < CData->curve_firstkey[curve] + CData->curve_keynum[curve];
             curvekey++) {
          if (i < mesh->curve_keys.size()) {
            mP[i] = CurveSegmentMotionCV(CData, sys, curve, curvekey);
            if (!have_motion) {
              /* unlike mesh coordinates, these tend to be slightly different
               * between frames due to particle transforms into/out of object
               * space, so we use an epsilon to detect actual changes */
              float4 curve_key = float3_to_float4(mesh->curve_keys[i]);
              curve_key.w = mesh->curve_radius[i];
              if (len_squared(mP[i] - curve_key) > 1e-5f * 1e-5f)
                have_motion = true;
            }
          }
          i++;
        }
      }
      else {
        /* Number of keys has changed. Generate an interpolated version
         * to preserve motion blur. */
        const float step_size = num_center_curve_keys > 1 ? 1.0f / (num_center_curve_keys - 1) :
                                                            0.0f;
        for (int step_index = 0; step_index < num_center_curve_keys; ++step_index) {
          const float step = step_index * step_size;
          mP[i] = LerpCurveSegmentMotionCV(CData, sys, curve, step);
          i++;
        }
        have_motion = true;
      }
      num_curves++;
    }
  }

  /* in case of new attribute, we verify if there really was any motion */
  if (new_attribute) {
    if (i != numkeys || !have_motion) {
      /* No motion or hair "topology" changed, remove attributes again. */
      if (i != numkeys) {
        VLOG(1) << "Hair topology changed, removing attribute.";
      }
      else {
        VLOG(1) << "No motion, removing attribute.";
      }
      mesh->curve_attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
    }
    else if (motion_step > 0) {
      VLOG(1) << "Filling in new motion vertex position for motion_step " << motion_step;
      /* motion, fill up previous steps that we might have skipped because
       * they had no motion, but we need them anyway now */
      for (int step = 0; step < motion_step; step++) {
        float4 *mP = attr_mP->data_float4() + step * numkeys;

        for (int key = 0; key < numkeys; key++) {
          mP[key] = float3_to_float4(mesh->curve_keys[key]);
          mP[key].w = mesh->curve_radius[key];
        }
      }
    }
  }
}
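
Example No. 4 calls `LerpCurveSegmentMotionCV` on the changed-topology path but does not show it. A sketch of such an interpolated lookup, assuming the `CData` layout above and a componentwise float4 `lerp`: map the normalized `step` to a fractional key index, then blend the two neighboring motion keys.

/* Sketch: sample a motion key at normalized position step in [0, 1]
 * along the curve by blending the two nearest exported keys. */
static float4 LerpCurveSegmentMotionCV(ParticleCurveData *CData, int sys, int curve, float step)
{
  const int first_curve_key = CData->curve_firstkey[curve];
  const float curve_key_f = step * (CData->curve_keynum[curve] - 1);
  int curvekey = (int)floorf(curve_key_f);
  const float remainder = curve_key_f - curvekey;
  if (remainder == 0.0f) {
    return CurveSegmentMotionCV(CData, sys, curve, first_curve_key + curvekey);
  }
  int curvekey_next = curvekey + 1;
  if (curvekey_next >= CData->curve_keynum[curve]) {
    /* Clamp at the tip: blend the last two keys instead. */
    curvekey_next = CData->curve_keynum[curve] - 1;
    curvekey -= 1;
  }
  const float4 mP = CurveSegmentMotionCV(CData, sys, curve, first_curve_key + curvekey);
  const float4 mP_next = CurveSegmentMotionCV(CData, sys, curve, first_curve_key + curvekey_next);
  return lerp(mP, mP_next, remainder);
}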
Example No. 5
static void ExportCurveSegmentsMotion(Mesh *mesh, ParticleCurveData *CData, int motion_step)
{
	VLOG(1) << "Exporting curve motion segments for mesh " << mesh->name
	        << ", motion step " << motion_step;

	/* find attribute */
	Attribute *attr_mP = mesh->curve_attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
	bool new_attribute = false;

	/* add new attribute if it doesn't exist already */
	if(!attr_mP) {
		VLOG(1) << "Creating new motion vertex position attribute";
		attr_mP = mesh->curve_attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
		new_attribute = true;
	}

	/* export motion vectors for curve keys */
	size_t numkeys = mesh->curve_keys.size();
	float4 *mP = attr_mP->data_float4() + motion_step*numkeys;
	bool have_motion = false;
	int i = 0;

	for(int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
		if(CData->psys_curvenum[sys] == 0)
			continue;

		for(int curve = CData->psys_firstcurve[sys]; curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys]; curve++) {
			if(CData->curve_keynum[curve] <= 1 || CData->curve_length[curve] == 0.0f)
				continue;

			for(int curvekey = CData->curve_firstkey[curve]; curvekey < CData->curve_firstkey[curve] + CData->curve_keynum[curve]; curvekey++) {
				if(i < mesh->curve_keys.size()) {
					float3 ickey_loc = CData->curvekey_co[curvekey];
					float time = CData->curvekey_time[curvekey]/CData->curve_length[curve];
					float radius = shaperadius(CData->psys_shape[sys], CData->psys_rootradius[sys], CData->psys_tipradius[sys], time);

					if(CData->psys_closetip[sys] && (curvekey == CData->curve_firstkey[curve] + CData->curve_keynum[curve] - 1))
						radius = 0.0f;

					/* curve motion keys store both position and radius in float4 */
					mP[i] = float3_to_float4(ickey_loc);
					mP[i].w = radius;

					/* unlike mesh coordinates, these tend to be slightly different
					 * between frames due to particle transforms into/out of object
					 * space, so we use an epsilon to detect actual changes */
					float4 curve_key = float3_to_float4(mesh->curve_keys[i]);
					curve_key.w = mesh->curve_radius[i];
					if(len_squared(mP[i] - curve_key) > 1e-5f*1e-5f)
						have_motion = true;
				}

				i++;
			}
		}
	}

	/* in case of new attribute, we verify if there really was any motion */
	if(new_attribute) {
		if(i != numkeys || !have_motion) {
			/* No motion or hair "topology" changed, remove attributes again. */
			if(i != numkeys) {
				VLOG(1) << "Hair topology changed, removing attribute.";
			}
			else {
				VLOG(1) << "No motion, removing attribute.";
			}
			mesh->curve_attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
		}
		else if(motion_step > 0) {
			VLOG(1) << "Filling in new motion vertex position for motion_step "
			        << motion_step;
			/* motion, fill up previous steps that we might have skipped because
			 * they had no motion, but we need them anyway now */
			for(int step = 0; step < motion_step; step++) {
				float4 *mP = attr_mP->data_float4() + step*numkeys;

				for(int key = 0; key < numkeys; key++) {
					mP[key] = float3_to_float4(mesh->curve_keys[key]);
					mP[key].w = mesh->curve_radius[key];
				}
			}
		}
	}
}
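
Both versions detect motion by comparing full float4 keys against an epsilon, so a change in the stored radius (the `w` component) counts as motion too. A minimal sketch of the float4 helpers that comparison assumes:

/* Sketch of the float4 helpers assumed by the epsilon test above. */
static inline float4 operator-(const float4 a, const float4 b)
{
	return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}

static inline float len_squared(const float4 a)
{
	/* Includes w, so a radius-only change also registers as motion. */
	return a.x * a.x + a.y * a.y + a.z * a.z + a.w * a.w;
}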
Example No. 6
void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
{
	Scene::MotionType need_motion = scene->need_motion(device->info.advanced_shading);

	update();

	if(previous_need_motion != need_motion) {
		/* scene's motion model could have changed since the previous
		 * device camera update; this can happen, for example, when one
		 * render layer has a motion pass and another does not */
		need_device_update = true;
	}

	if(!need_device_update)
		return;
	
	KernelCamera *kcam = &dscene->data.cam;

	/* store matrices */
	kcam->screentoworld = screentoworld;
	kcam->rastertoworld = rastertoworld;
	kcam->rastertocamera = rastertocamera;
	kcam->cameratoworld = cameratoworld;
	kcam->worldtocamera = worldtocamera;
	kcam->worldtoscreen = worldtoscreen;
	kcam->worldtoraster = worldtoraster;
	kcam->worldtondc = worldtondc;

	/* camera motion */
	kcam->have_motion = 0;
	kcam->have_perspective_motion = 0;

	if(need_motion == Scene::MOTION_PASS) {
		/* TODO(sergey): Support perspective (zoom, fov) motion. */
		if(type == CAMERA_PANORAMA) {
			if(use_motion) {
				kcam->motion.pre = transform_inverse(motion.pre);
				kcam->motion.post = transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = kcam->worldtocamera;
				kcam->motion.post = kcam->worldtocamera;
			}
		}
		else {
			if(use_motion) {
				kcam->motion.pre = cameratoraster * transform_inverse(motion.pre);
				kcam->motion.post = cameratoraster * transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = worldtoraster;
				kcam->motion.post = worldtoraster;
			}
		}
	}
#ifdef __CAMERA_MOTION__
	else if(need_motion == Scene::MOTION_BLUR) {
		if(use_motion) {
			transform_motion_decompose((DecompMotionTransform*)&kcam->motion, &motion, &matrix);
			kcam->have_motion = 1;
		}
		if(use_perspective_motion) {
			kcam->perspective_motion = perspective_motion;
			kcam->have_perspective_motion = 1;
		}
	}
#endif

	/* depth of field */
	kcam->aperturesize = aperturesize;
	kcam->focaldistance = focaldistance;
	kcam->blades = (blades < 3)? 0.0f: blades;
	kcam->bladesrotation = bladesrotation;

	/* motion blur */
#ifdef __CAMERA_MOTION__
	kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime: -1.0f;

	if(need_motion == Scene::MOTION_BLUR) {
		vector<float> shutter_table;
		util_cdf_inverted(SHUTTER_TABLE_SIZE,
		                  0.0f,
		                  1.0f,
		                  function_bind(shutter_curve_eval, _1, shutter_curve),
		                  false,
		                  shutter_table);
		shutter_table_offset = scene->lookup_tables->add_table(dscene,
		                                                       shutter_table);
		kcam->shutter_table_offset = (int)shutter_table_offset;
	}
	else if(shutter_table_offset != TABLE_OFFSET_INVALID) {
		scene->lookup_tables->remove_table(shutter_table_offset);
		shutter_table_offset = TABLE_OFFSET_INVALID;
	}
#else
	kcam->shuttertime = -1.0f;
#endif

	/* type */
	kcam->type = type;

	/* anamorphic lens bokeh */
	kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

	/* panorama */
	kcam->panorama_type = panorama_type;
	kcam->fisheye_fov = fisheye_fov;
	kcam->fisheye_lens = fisheye_lens;
	kcam->equirectangular_range = make_float4(longitude_min - longitude_max, -longitude_min,
	                                          latitude_min - latitude_max, -latitude_min + M_PI_2_F);

	switch(stereo_eye) {
		case STEREO_LEFT:
			kcam->interocular_offset = -interocular_distance * 0.5f;
			break;
		case STEREO_RIGHT:
			kcam->interocular_offset = interocular_distance * 0.5f;
			break;
		case STEREO_NONE:
		default:
			kcam->interocular_offset = 0.0f;
			break;
	}

	kcam->convergence_distance = convergence_distance;

	/* sensor size */
	kcam->sensorwidth = sensorwidth;
	kcam->sensorheight = sensorheight;

	/* render size */
	kcam->width = width;
	kcam->height = height;
	kcam->resolution = resolution;

	/* store differentials */
	kcam->dx = float3_to_float4(dx);
	kcam->dy = float3_to_float4(dy);

	/* clipping */
	kcam->nearclip = nearclip;
	kcam->cliplength = (farclip == FLT_MAX)? FLT_MAX: farclip - nearclip;

	/* Camera in volume. */
	kcam->is_inside_volume = 0;

	/* Rolling shutter effect */
	kcam->rolling_shutter_type = rolling_shutter_type;
	kcam->rolling_shutter_duration = rolling_shutter_duration;

	previous_need_motion = need_motion;
}
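
The `util_cdf_inverted` call above stores the inverse CDF of the shutter curve in a lookup table, so the kernel can importance-sample time according to how open the shutter was at each moment. A hypothetical sketch of that lookup (the table-read helper here is illustrative; Cycles uses its own kernel-side table access):

/* Illustrative only: map a uniform random number through the inverted
 * CDF so that sampled times follow the shutter curve's distribution. */
static float sample_shutter_time(const float *shutter_table, int size, float randu)
{
	float f = randu * (size - 1);
	int i = (int)f;
	float t = f - i;
	int j = (i + 1 < size) ? i + 1 : size - 1;
	/* Linear interpolation between adjacent table entries. */
	return shutter_table[i] * (1.0f - t) + shutter_table[j] * t;
}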