コード例 #1
0
ファイル: blender_object.cpp プロジェクト: wisaac407/blender
bool BlenderSync::object_is_mesh(BL::Object& b_ob)
{
	/* An object can produce a mesh when its data is one of the
	 * mesh-convertible ID types. */
	BL::ID b_ob_data = b_ob.data();

	if(!b_ob_data) {
		return false;
	}

	if(b_ob.type() != BL::Object::type_CURVE) {
		return (b_ob_data.is_a(&RNA_Mesh) ||
		        b_ob_data.is_a(&RNA_Curve) ||
		        b_ob_data.is_a(&RNA_MetaBall));
	}

	/* Skip exporting curves without faces, overhead can be
	 * significant if there are many for path animation. */
	BL::Curve b_curve(b_ob.data());

	return (b_curve.bevel_object() ||
	        b_curve.extrude() != 0.0f ||
	        b_curve.bevel_depth() != 0.0f ||
	        b_curve.dimensions() == BL::Curve::dimensions_2D ||
	        b_ob.modifiers.length());
}
コード例 #2
0
/* Decide whether an object should be hidden at render time.
 *
 * Returns true when the object itself must be hidden; `hide_triangles`
 * is an additional output telling the caller to hide the triangle
 * geometry (a hair emitter whose settings do not render the emitter). */
static bool object_render_hide(BL::Object b_ob, bool top_level, bool parent_hide, bool& hide_triangles)
{
    /* check if we should render or hide particle emitter */
    BL::Object::particle_systems_iterator b_psys;

    bool hair_present = false;
    bool show_emitter = false;
    bool hide = false;

    for(b_ob.particle_systems.begin(b_psys); b_psys != b_ob.particle_systems.end(); ++b_psys) {
        /* Hair is present when any particle system renders hair paths. */
        if((b_psys->settings().render_type() == BL::ParticleSettings::render_type_PATH) &&
                (b_psys->settings().type()==BL::ParticleSettings::type_HAIR))
            hair_present = true;

        /* A single system with "render emitter" enabled forces the
         * emitter mesh to be shown. */
        if(b_psys->settings().use_render_emitter()) {
            hide = false;
            show_emitter = true;
        }
    }

    /* duplicators hidden by default, except dupliframes which duplicate self */
    if(b_ob.is_duplicator())
        if(top_level || b_ob.dupli_type() != BL::Object::dupli_type_FRAMES)
            hide = true;

    /* hide original object for duplis */
    BL::Object parent = b_ob.parent();
    if(parent && object_render_hide_original(parent.dupli_type()))
        if(parent_hide)
            hide = true;

    /* Triangles are hidden when hair is rendered but the emitter is not;
     * show_emitter always overrides any hide decision above. */
    hide_triangles = (hair_present && !show_emitter);
    return hide && !show_emitter;
}
コード例 #3
0
/* Sync camera motion blur steps.
 *
 * Called once per motion time (-1 = shutter open step, +1 = shutter close
 * step); stores the camera transform, and for perspective cameras also the
 * field of view, for that step when they differ from the current frame. */
void BlenderSync::sync_camera_motion(BL::RenderSettings& b_render,
                                     BL::Object& b_ob,
                                     int width, int height,
                                     float motion_time)
{
	if(!b_ob)
		return;

	Camera *cam = scene->camera;
	/* Matrix is taken from the render engine, so view overrides apply. */
	BL::Array<float, 16> b_ob_matrix;
	b_engine.camera_model_matrix(b_ob, b_ob_matrix);
	Transform tfm = get_transform(b_ob_matrix);
	tfm = blender_camera_matrix(tfm, cam->type, cam->panorama_type);

	if(tfm != cam->matrix) {
		VLOG(1) << "Camera " << b_ob.name() << " motion detected.";
		if(motion_time == -1.0f) {
			cam->motion.pre = tfm;
			cam->use_motion = true;
		}
		else if(motion_time == 1.0f) {
			cam->motion.post = tfm;
			cam->use_motion = true;
		}
	}

	/* Perspective motion: detect FOV changes between motion steps too. */
	if(cam->type == CAMERA_PERSPECTIVE) {
		BlenderCamera bcam;
		float aspectratio, sensor_size;
		blender_camera_init(&bcam, b_render);

		blender_camera_from_object(&bcam, b_engine, b_ob);
		blender_camera_viewplane(&bcam,
		                         width, height,
		                         NULL,
		                         &aspectratio,
		                         &sensor_size);
		/* TODO(sergey): De-duplicate calculation with camera sync. */
		float fov = 2.0f * atanf((0.5f * sensor_size) / bcam.lens / aspectratio);
		if(fov != cam->fov) {
			VLOG(1) << "Camera " << b_ob.name() << " FOV change detected.";
			if(motion_time == -1.0f) {
				cam->fov_pre = fov;
				cam->use_perspective_motion = true;
			}
			else if(motion_time == 1.0f) {
				cam->fov_post = fov;
				cam->use_perspective_motion = true;
			}
		}
	}
}
コード例 #4
0
ファイル: blender_object.cpp プロジェクト: dezelin/blender
bool BlenderSync::object_is_mesh(BL::Object& b_ob)
{
	/* Object can be meshed when its data exists and is one of the
	 * mesh-convertible ID types. */
	BL::ID b_ob_data = b_ob.data();

	if(!b_ob_data) {
		return false;
	}

	return b_ob_data.is_a(&RNA_Mesh) ||
	       b_ob_data.is_a(&RNA_Curve) ||
	       b_ob_data.is_a(&RNA_MetaBall);
}
コード例 #5
0
ファイル: blender_object.cpp プロジェクト: wisaac407/blender
CCL_NAMESPACE_BEGIN

/* Utilities */

bool BlenderSync::BKE_object_is_modified(BL::Object& b_ob)
{
	/* Test if we can instance the object, or if it is modified and
	 * therefore needs its own unique mesh. */
	if(b_ob.type() == BL::Object::type_META) {
		/* Multi-user and dupli metaballs are fused, can't instance. */
		return true;
	}

	if(ccl::BKE_object_is_modified(b_ob, b_scene, preview)) {
		/* Modifiers make the evaluated mesh unique. */
		return true;
	}

	/* Object level material links also make the object unique. */
	BL::Object::material_slots_iterator slot;
	for(b_ob.material_slots.begin(slot); slot != b_ob.material_slots.end(); ++slot) {
		if(slot->link() == BL::MaterialSlot::link_OBJECT) {
			return true;
		}
	}

	return false;
}
コード例 #6
0
/* Bake a pass for the given object into `result`.
 *
 * Synchronizes the scene, locates the object by name, then evaluates the
 * requested shader pass for the provided pixel array. `result` receives
 * num_pixels * depth floats. Frees device memory and the sync object on
 * exit, so the session cannot be reused for interactive render afterwards. */
void BlenderSession::bake(BL::Object b_object, const string& pass_type, BL::BakePixel pixel_array, int num_pixels, int depth, float result[])
{
	ShaderEvalType shader_type = get_shader_type(pass_type);
	/* ~0 marks "not found"; see the non-instanced convention below. */
	size_t object_index = ~0;
	int tri_offset = 0;

	if(shader_type == SHADER_EVAL_UV) {
		/* force UV to be available */
		Pass::add(PASS_UV, scene->film->passes);
	}

	if(is_light_pass(shader_type)) {
		/* force use_light_pass to be true */
		Pass::add(PASS_LIGHT, scene->film->passes);
	}

	/* create device and update scene */
	scene->film->tag_update(scene);
	scene->integrator->tag_update(scene);

	/* update scene */
	sync->sync_camera(b_render, b_engine.camera_override(), width, height);
	sync->sync_data(b_v3d, b_engine.camera_override(), &python_thread_state);

	/* get buffer parameters */
	SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_scene, b_v3d, b_rv3d, scene->camera, width, height);

	/* set number of samples */
	session->tile_manager.set_samples(session_params.samples);
	session->reset(buffer_params, session_params.samples);
	session->update_scene();

	/* find object index. todo: is arbitrary - copied from mesh_displace.cpp */
	for(size_t i = 0; i < scene->objects.size(); i++) {
		/* NOTE(review): matching by name — presumably names are unique
		 * here; verify against how the scene is synced. */
		if(strcmp(scene->objects[i]->name.c_str(), b_object.name().c_str()) == 0) {
			object_index = i;
			tri_offset = scene->objects[i]->mesh->tri_offset;
			break;
		}
	}

	/* when used, non-instanced convention: object = ~object */
	int object = ~object_index;

	BakeData *bake_data = scene->bake_init(object, tri_offset, num_pixels);

	populate_bake_data(bake_data, pixel_array, num_pixels);

	scene->bake(shader_type, bake_data, result);

	/* free all memory used (host and device), so we wouldn't leave render
	 * engine with extra memory allocated
	 */

	session->device_free();

	delete sync;
	sync = NULL;
}
コード例 #7
0
ファイル: blender_object.cpp プロジェクト: mik0001/Blender
void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint visibility)
{
	/* Lights are synced through their own code path. */
	if(object_is_light(b_ob)) {
		sync_light(b_parent, b_index, b_ob, tfm);
		return;
	}

	/* Only interested in objects that we can create meshes from. */
	if(!object_is_mesh(b_ob))
		return;

	/* Look up (or create) the object, noting whether it needs syncing. */
	ObjectKey key(b_parent, b_index, b_ob);
	Object *ob = NULL;
	bool updated = object_map.sync(&ob, b_ob, b_parent, key);

	/* Mesh sync. */
	ob->mesh = sync_mesh(b_ob, updated);

	/* Object sync: also run when the mesh itself changed. */
	if(updated || (ob->mesh && ob->mesh->need_update)) {
		ob->name = b_ob.name().c_str();
		ob->tfm = tfm;

		/* Ray visibility is combined with the parent's for duplis. */
		ob->visibility = object_ray_visibility(b_ob) & visibility;
		if(b_parent.ptr.data != b_ob.ptr.data)
			ob->visibility &= object_ray_visibility(b_parent);

		ob->tag_update(scene);
	}
}
コード例 #8
0
/* Sync one Blender object into a Cycles Object.
 *
 * Lights take a separate path; non-meshable objects are skipped. The
 * layer_flag is used both for holdout detection and to encode the render
 * layer into the visibility mask. */
void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint layer_flag)
{
	/* light is handled separately */
	if(object_is_light(b_ob)) {
		sync_light(b_parent, b_index, b_ob, tfm);
		return;
	}

	/* only interested in object that we can create meshes from */
	if(!object_is_mesh(b_ob))
		return;

	/* test if we need to sync */
	ObjectKey key(b_parent, b_index, b_ob);
	Object *object;
	bool object_updated = false;

	if(object_map.sync(&object, b_ob, b_parent, key))
		object_updated = true;
	
	/* holdout? object is on a layer marked as holdout */
	bool holdout = (layer_flag & render_layer.holdout_layer) != 0;

	/* mesh sync */
	object->mesh = sync_mesh(b_ob, holdout, object_updated);

	/* object sync: also needed when the mesh itself changed */
	if(object_updated || (object->mesh && object->mesh->need_update)) {
		object->name = b_ob.name().c_str();
		object->pass_id = b_ob.pass_index();
		object->tfm = tfm;

		/* visibility flags, combined with the parent's for duplis */
		object->visibility = object_ray_visibility(b_ob) & PATH_RAY_ALL;
		if(b_parent.ptr.data != b_ob.ptr.data)
			object->visibility &= object_ray_visibility(b_parent);

		/* camera flag is not actually used, instead is tested
		   against render layer flags */
		if(object->visibility & PATH_RAY_CAMERA) {
			object->visibility |= layer_flag << PATH_RAY_LAYER_SHIFT;
			object->visibility &= ~PATH_RAY_CAMERA;
		}

		object->tag_update(scene);
	}
}
コード例 #9
0
ファイル: blender_mesh.cpp プロジェクト: diekev/blender
/* Create a subdivision mesh from a Blender mesh.
 *
 * Reads UV-subdivision settings from the object's last modifier
 * (assumed to be a Subsurf modifier — TODO confirm the caller guarantees
 * this and that at least one modifier exists), exports edge creases, and
 * fills in the mesh's SubdParams from the object's "cycles" properties. */
static void create_subd_mesh(Scene *scene,
                             Mesh *mesh,
                             BL::Object& b_ob,
                             BL::Mesh& b_mesh,
                             const vector<Shader*>& used_shaders,
                             float dicing_rate,
                             int max_subdivisions)
{
	BL::SubsurfModifier subsurf_mod(b_ob.modifiers[b_ob.modifiers.length()-1]);
	bool subdivide_uvs = subsurf_mod.use_subsurf_uv();

	create_mesh(scene, mesh, b_mesh, used_shaders, true, subdivide_uvs);

	/* export creases: first pass counts them to size the array */
	size_t num_creases = 0;
	BL::Mesh::edges_iterator e;

	for(b_mesh.edges.begin(e); e != b_mesh.edges.end(); ++e) {
		if(e->crease() != 0.0f) {
			num_creases++;
		}
	}

	mesh->subd_creases.resize(num_creases);

	/* second pass fills the crease array in edge order */
	Mesh::SubdEdgeCrease* crease = mesh->subd_creases.data();
	for(b_mesh.edges.begin(e); e != b_mesh.edges.end(); ++e) {
		if(e->crease() != 0.0f) {
			crease->v[0] = e->vertices()[0];
			crease->v[1] = e->vertices()[1];
			crease->crease = e->crease();

			crease++;
		}
	}

	/* set subd params */
	if(!mesh->subd_params) {
		mesh->subd_params = new SubdParams(mesh);
	}
	SubdParams& sdparams = *mesh->subd_params;

	PointerRNA cobj = RNA_pointer_get(&b_ob.ptr, "cycles");

	/* dicing rate is clamped to keep tessellation bounded */
	sdparams.dicing_rate = max(0.1f, RNA_float_get(&cobj, "dicing_rate") * dicing_rate);
	sdparams.max_level = max_subdivisions;

	scene->camera->update();
	sdparams.camera = scene->camera;
	sdparams.objecttoworld = get_transform(b_ob.matrix_world());
}
コード例 #10
0
static float blender_camera_focal_distance(BL::Object b_ob, BL::Camera b_camera)
{
	/* Without a DOF object, use the distance set on the camera itself. */
	BL::Object b_dof_object = b_camera.dof_object();
	if(!b_dof_object) {
		return b_camera.dof_distance();
	}

	/* For a DOF object, return the distance along the camera Z direction. */
	Transform cam_no_scale = transform_clear_scale(get_transform(b_ob.matrix_world()));
	Transform dof_world = get_transform(b_dof_object.matrix_world());
	Transform dof_in_cam = transform_inverse(cam_no_scale) * dof_world;

	return fabsf(transform_get_column(&dof_in_cam, 3).z);
}
コード例 #11
0
void BlenderSync::sync_camera_motion(BL::Object b_ob, int motion)
{
	Camera *cam = scene->camera;

	/* Convert the object matrix into Cycles camera space. */
	Transform tfm = blender_camera_matrix(get_transform(b_ob.matrix_world()), cam->type);

	if(tfm != cam->matrix) {
		/* Store as the pre (-1) or post (otherwise) motion transform. */
		if(motion == -1) {
			cam->motion.pre = tfm;
		}
		else {
			cam->motion.post = tfm;
		}
		cam->use_motion = true;
	}
}
コード例 #12
0
ファイル: blender_object.cpp プロジェクト: diekev/blender
/* TODO(sergey): Not really optimal, consider approaches based on k-DOP in order
 * to reduce number of objects which are wrongly considered visible.
 */
static bool object_boundbox_clip(Scene *scene,
                                 BL::Object& b_ob,
                                 Transform& tfm,
                                 float margin)
{
	Camera *cam = scene->camera;
	Transform& worldtondc = cam->worldtondc;
	BL::Array<float, 24> boundbox = b_ob.bound_box();
	float3 bb_min = make_float3(FLT_MAX, FLT_MAX, FLT_MAX),
	       bb_max = make_float3(-FLT_MAX, -FLT_MAX, -FLT_MAX);
	bool all_behind = true;
	for(int i = 0; i < 8; ++i) {
		float3 p = make_float3(boundbox[3 * i + 0],
		                       boundbox[3 * i + 1],
		                       boundbox[3 * i + 2]);
		p = transform_point(&tfm, p);

		float4 b = make_float4(p.x, p.y, p.z, 1.0f);
		float4 c = make_float4(dot(worldtondc.x, b),
		                       dot(worldtondc.y, b),
		                       dot(worldtondc.z, b),
		                       dot(worldtondc.w, b));
		p = float4_to_float3(c / c.w);
		if(c.z < 0.0f) {
			p.x = 1.0f - p.x;
			p.y = 1.0f - p.y;
		}
		if(c.z >= -margin) {
			all_behind = false;
		}
		bb_min = min(bb_min, p);
		bb_max = max(bb_max, p);
	}
	if(!all_behind) {
		if(bb_min.x >= 1.0f + margin ||
		   bb_min.y >= 1.0f + margin ||
		   bb_max.x <= -margin ||
		   bb_max.y <= -margin)
		{
			return true;
		}
		return false;
	}
	return true;
}
コード例 #13
0
OCT_NAMESPACE_BEGIN

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sync hair data
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void BlenderSync::sync_hair(Mesh *mesh, BL::Mesh b_mesh, BL::Object b_ob, bool motion, int time_index) {
    /* Fresh (non-motion) sync: drop all previously stored hair data. */
    if(!motion) {
        mesh->hair_points.clear();
        mesh->vert_per_hair.clear();
        mesh->hair_thickness.clear();
        mesh->hair_mat_indices.clear();
        mesh->hair_uvs.clear();
    }

    /* Do not export hair while the user is in particle edit mode. */
    if(b_ob.mode() == b_ob.mode_PARTICLE_EDIT) {
        return;
    }

    fill_mesh_hair_data(mesh, &b_mesh, &b_ob);
} //sync_hair()
コード例 #14
0
void BlenderSync::sync_camera_motion(BL::Object b_ob, float motion_time)
{
	Camera *cam = scene->camera;

	/* Matrix comes from the render engine so view overrides apply. */
	BL::Array<float, 16> b_ob_matrix;
	b_engine.camera_model_matrix(b_ob, b_ob_matrix);
	Transform tfm = get_transform(b_ob_matrix);
	tfm = blender_camera_matrix(tfm, cam->type);

	if(tfm != cam->matrix) {
		VLOG(1) << "Camera " << b_ob.name() << " motion detected.";

		/* -1 is the shutter-open step, +1 the shutter-close step. */
		Transform *target = NULL;
		if(motion_time == -1.0f) {
			target = &cam->motion.pre;
		}
		else if(motion_time == 1.0f) {
			target = &cam->motion.post;
		}

		if(target) {
			*target = tfm;
			cam->use_motion = true;
		}
	}
}
コード例 #15
0
bool BlenderObjectCulling::test(Scene *scene, BL::Object& b_ob, Transform& tfm)
{
	/* Nothing to do when no culling method is enabled. */
	if(!use_camera_cull_ && !use_distance_cull_) {
		return false;
	}

	/* Compute world space bounding box corners. */
	BL::Array<float, 24> boundbox = b_ob.bound_box();
	float3 bb[8];
	for(int corner = 0; corner < 8; ++corner) {
		float3 local = make_float3(boundbox[3 * corner + 0],
		                           boundbox[3 * corner + 1],
		                           boundbox[3 * corner + 2]);
		bb[corner] = transform_point(&tfm, local);
	}

	const bool camera_culled = use_camera_cull_ && test_camera(scene, bb);
	const bool distance_culled = use_distance_cull_ && test_distance(scene, bb);

	/* Cull only when every enabled test agrees the object is culled. */
	if(camera_culled && distance_culled) {
		return true;
	}
	if(camera_culled && !use_distance_cull_) {
		return true;
	}
	if(distance_culled && !use_camera_cull_) {
		return true;
	}
	return false;
}
コード例 #16
0
ファイル: blender_object.cpp プロジェクト: Moguri/blender
/* TODO(sergey): Not really optimal, consider approaches based on k-DOP in order
 * to reduce number of objects which are wrongly considered visible.
 */
/* Returns true when the object's bounding box is fully outside the camera
 * view (with `margin` slack) and can be culled. */
static bool object_boundbox_clip(Scene *scene,
                                 BL::Object b_ob,
                                 Transform& tfm,
                                 float margin)
{
	Camera *cam = scene->camera;
	Transform& worldtondc = cam->worldtondc;
	/* 8 bounding box corners, 3 floats each, in object space. */
	BL::Array<float, 24> boundbox = b_ob.bound_box();
	float3 bb_min = make_float3(FLT_MAX, FLT_MAX, FLT_MAX),
	       bb_max = make_float3(-FLT_MAX, -FLT_MAX, -FLT_MAX);
	bool all_behind = true;
	for(int i = 0; i < 8; ++i) {
		float3 p = make_float3(boundbox[3 * i + 0],
		                       boundbox[3 * i + 1],
		                       boundbox[3 * i + 2]);
		p = transform_point(&tfm, p);
		p = transform_point(&worldtondc, p);
		if(p.z >= -margin) {
			all_behind = false;
		}
		/* NOTE(review): perspective divide by p.z with no guard against
		 * p.z == 0 — confirm worldtondc guarantees non-zero z here. */
		p /= p.z;
		bb_min = min(bb_min, p);
		bb_max = max(bb_max, p);
	}
	/* Everything behind the camera: cull. */
	if(!all_behind) {
		/* Cull only when the projected box lies entirely outside [0,1]
		 * extended by margin on each side. */
		if(bb_min.x >= 1.0f + margin ||
		   bb_min.y >= 1.0f + margin ||
		   bb_max.x <= -margin ||
		   bb_max.y <= -margin)
		{
			return true;
		}
		return false;
	}
	return true;
}
コード例 #17
0
static void create_subd_mesh(Scene *scene,
                             Mesh *mesh,
                             BL::Object& b_ob,
                             BL::Mesh& b_mesh,
                             PointerRNA *cmesh,
                             const vector<Shader*>& used_shaders,
                             float dicing_rate,
                             int max_subdivisions)
{
	/* Build a base mesh first, then tessellate it into the target mesh. */
	Mesh base_mesh;
	create_mesh(scene, &base_mesh, b_mesh, used_shaders);

	/* Configure subdivision parameters; the dicing rate is clamped so
	 * tessellation stays bounded. */
	SubdParams subd_params(mesh, 0, true, false);
	subd_params.dicing_rate = max(0.1f, RNA_float_get(cmesh, "dicing_rate") * dicing_rate);
	subd_params.max_level = max_subdivisions;

	scene->camera->update();
	subd_params.camera = scene->camera;
	subd_params.objecttoworld = get_transform(b_ob.matrix_world());

	/* Tessellate the base mesh. */
	DiagSplit split(subd_params);
	base_mesh.tessellate(&split);
}
コード例 #18
0
ファイル: blender_curves.cpp プロジェクト: dfelinto/blender
/* Sync particle hair curves into the mesh.
 *
 * When `motion` is false this is a full sync: curve geometry, vertex color
 * and UV attributes are rebuilt. When `motion` is true only motion data for
 * the given `motion_step` is exported. */
void BlenderSync::sync_curves(
    Mesh *mesh, BL::Mesh &b_mesh, BL::Object &b_ob, bool motion, int motion_step)
{
  if (!motion) {
    /* Clear stored curve data */
    mesh->curve_keys.clear();
    mesh->curve_radius.clear();
    mesh->curve_first_key.clear();
    mesh->curve_shader.clear();
    mesh->curve_attributes.clear();
  }

  /* obtain general settings */
  const bool use_curves = scene->curve_system_manager->use_curves;

  /* skip when curves are disabled or the object is being edited */
  if (!(use_curves && b_ob.mode() != b_ob.mode_PARTICLE_EDIT && b_ob.mode() != b_ob.mode_EDIT)) {
    if (!motion)
      mesh->compute_bounds();
    return;
  }

  const int primitive = scene->curve_system_manager->primitive;
  const int triangle_method = scene->curve_system_manager->triangle_method;
  const int resolution = scene->curve_system_manager->resolution;
  /* remember sizes before hair is added, used for attribute offsets below */
  const size_t vert_num = mesh->verts.size();
  const size_t tri_num = mesh->num_triangles();
  int used_res = 1;

  /* extract particle hair data - should be combined with connecting to mesh later*/

  ParticleCurveData CData;

  ObtainCacheParticleData(mesh, &b_mesh, &b_ob, &CData, !preview);

  /* add hair geometry to mesh */
  if (primitive == CURVE_TRIANGLES) {
    if (triangle_method == CURVE_CAMERA_TRIANGLES) {
      /* obtain camera parameters */
      float3 RotCam;
      Camera *camera = scene->camera;
      Transform &ctfm = camera->matrix;
      if (camera->type == CAMERA_ORTHOGRAPHIC) {
        /* orthographic: use the camera view direction */
        RotCam = -make_float3(ctfm.x.z, ctfm.y.z, ctfm.z.z);
      }
      else {
        /* perspective: camera position in object space */
        Transform tfm = get_transform(b_ob.matrix_world());
        Transform itfm = transform_quick_inverse(tfm);
        RotCam = transform_point(&itfm, make_float3(ctfm.x.w, ctfm.y.w, ctfm.z.w));
      }
      bool is_ortho = camera->type == CAMERA_ORTHOGRAPHIC;
      ExportCurveTrianglePlanes(mesh, &CData, RotCam, is_ortho);
    }
    else {
      ExportCurveTriangleGeometry(mesh, &CData, resolution);
      used_res = resolution;
    }
  }
  else {
    if (motion)
      ExportCurveSegmentsMotion(mesh, &CData, motion_step);
    else
      ExportCurveSegments(scene, mesh, &CData);
  }

  /* generated coordinates from first key. we should ideally get this from
   * blender to handle deforming objects */
  if (!motion) {
    if (mesh->need_attribute(scene, ATTR_STD_GENERATED)) {
      float3 loc, size;
      mesh_texture_space(b_mesh, loc, size);

      if (primitive == CURVE_TRIANGLES) {
        Attribute *attr_generated = mesh->attributes.add(ATTR_STD_GENERATED);
        float3 *generated = attr_generated->data_float3();

        /* only the vertices added by hair export, starting at vert_num */
        for (size_t i = vert_num; i < mesh->verts.size(); i++)
          generated[i] = mesh->verts[i] * size - loc;
      }
      else {
        Attribute *attr_generated = mesh->curve_attributes.add(ATTR_STD_GENERATED);
        float3 *generated = attr_generated->data_float3();

        for (size_t i = 0; i < mesh->num_curves(); i++) {
          float3 co = mesh->curve_keys[mesh->get_curve(i).first_key];
          generated[i] = co * size - loc;
        }
      }
    }
  }

  /* create vertex color attributes */
  if (!motion) {
    BL::Mesh::vertex_colors_iterator l;
    int vcol_num = 0;

    for (b_mesh.vertex_colors.begin(l); l != b_mesh.vertex_colors.end(); ++l, vcol_num++) {
      if (!mesh->need_attribute(scene, ustring(l->name().c_str())))
        continue;

      ObtainCacheParticleVcol(mesh, &b_mesh, &b_ob, &CData, !preview, vcol_num);

      if (primitive == CURVE_TRIANGLES) {
        Attribute *attr_vcol = mesh->attributes.add(
            ustring(l->name().c_str()), TypeDesc::TypeColor, ATTR_ELEMENT_CORNER_BYTE);

        uchar4 *cdata = attr_vcol->data_uchar4();

        ExportCurveTriangleVcol(&CData, tri_num * 3, used_res, cdata);
      }
      else {
        Attribute *attr_vcol = mesh->curve_attributes.add(
            ustring(l->name().c_str()), TypeDesc::TypeColor, ATTR_ELEMENT_CURVE);

        float3 *fdata = attr_vcol->data_float3();

        if (fdata) {
          size_t i = 0;

          /* Encode vertex color using the sRGB curve. */
          for (size_t curve = 0; curve < CData.curve_vcol.size(); curve++) {
            fdata[i++] = color_srgb_to_linear_v3(CData.curve_vcol[curve]);
          }
        }
      }
    }
  }

  /* create UV attributes */
  if (!motion) {
    BL::Mesh::uv_layers_iterator l;
    int uv_num = 0;

    for (b_mesh.uv_layers.begin(l); l != b_mesh.uv_layers.end(); ++l, uv_num++) {
      bool active_render = l->active_render();
      AttributeStandard std = (active_render) ? ATTR_STD_UV : ATTR_STD_NONE;
      ustring name = ustring(l->name().c_str());

      /* UV map */
      if (mesh->need_attribute(scene, name) || mesh->need_attribute(scene, std)) {
        Attribute *attr_uv;

        ObtainCacheParticleUV(mesh, &b_mesh, &b_ob, &CData, !preview, uv_num);

        if (primitive == CURVE_TRIANGLES) {
          if (active_render)
            attr_uv = mesh->attributes.add(std, name);
          else
            attr_uv = mesh->attributes.add(name, TypeFloat2, ATTR_ELEMENT_CORNER);

          float2 *uv = attr_uv->data_float2();

          ExportCurveTriangleUV(&CData, tri_num * 3, used_res, uv);
        }
        else {
          if (active_render)
            attr_uv = mesh->curve_attributes.add(std, name);
          else
            attr_uv = mesh->curve_attributes.add(name, TypeFloat2, ATTR_ELEMENT_CURVE);

          float2 *uv = attr_uv->data_float2();

          if (uv) {
            size_t i = 0;

            for (size_t curve = 0; curve < CData.curve_uv.size(); curve++) {
              uv[i++] = CData.curve_uv[curve];
            }
          }
        }
      }
    }
  }

  mesh->compute_bounds();
}
コード例 #19
0
/* Fill a BlenderCamera from a Blender camera object.
 *
 * Reads built-in camera settings plus the "cycles" custom properties.
 * `skip_panorama` forces panorama cameras to be treated as perspective
 * (used e.g. for viewport border rendering). Non-camera objects (lamps)
 * are not handled yet. */
static void blender_camera_from_object(BlenderCamera *bcam,
                                       BL::RenderEngine& b_engine,
                                       BL::Object& b_ob,
                                       bool skip_panorama = false)
{
	BL::ID b_ob_data = b_ob.data();

	if(b_ob_data.is_a(&RNA_Camera)) {
		BL::Camera b_camera(b_ob_data);
		PointerRNA ccamera = RNA_pointer_get(&b_camera.ptr, "cycles");

		bcam->nearclip = b_camera.clip_start();
		bcam->farclip = b_camera.clip_end();

		switch(b_camera.type())
		{
			case BL::Camera::type_ORTHO:
				bcam->type = CAMERA_ORTHOGRAPHIC;
				break;
			case BL::Camera::type_PANO:
				if(!skip_panorama)
					bcam->type = CAMERA_PANORAMA;
				else
					bcam->type = CAMERA_PERSPECTIVE;
				break;
			case BL::Camera::type_PERSP:
			default:
				bcam->type = CAMERA_PERSPECTIVE;
				break;
		}	

		/* map the "panorama_type" enum of the cycles properties */
		switch(RNA_enum_get(&ccamera, "panorama_type"))
		{
			case 1:
				bcam->panorama_type = PANORAMA_FISHEYE_EQUIDISTANT;
				break;
			case 2:
				bcam->panorama_type = PANORAMA_FISHEYE_EQUISOLID;
				break;
			case 3:
				bcam->panorama_type = PANORAMA_MIRRORBALL;
				break;
			case 0:
			default:
				bcam->panorama_type = PANORAMA_EQUIRECTANGULAR;
				break;
		}	

		bcam->fisheye_fov = RNA_float_get(&ccamera, "fisheye_fov");
		bcam->fisheye_lens = RNA_float_get(&ccamera, "fisheye_lens");
		bcam->latitude_min = RNA_float_get(&ccamera, "latitude_min");
		bcam->latitude_max = RNA_float_get(&ccamera, "latitude_max");
		bcam->longitude_min = RNA_float_get(&ccamera, "longitude_min");
		bcam->longitude_max = RNA_float_get(&ccamera, "longitude_max");

		bcam->ortho_scale = b_camera.ortho_scale();

		bcam->lens = b_camera.lens();

		/* allow f/stop number to change aperture_size but still
		 * give manual control over aperture radius */
		int aperture_type = RNA_enum_get(&ccamera, "aperture_type");

		if(aperture_type == 1) {
			float fstop = RNA_float_get(&ccamera, "aperture_fstop");
			/* clamp to avoid division by (near) zero */
			fstop = max(fstop, 1e-5f);

			if(bcam->type == CAMERA_ORTHOGRAPHIC)
				bcam->aperturesize = 1.0f/(2.0f*fstop);
			else
				bcam->aperturesize = (bcam->lens*1e-3f)/(2.0f*fstop);
		}
		else
			bcam->aperturesize = RNA_float_get(&ccamera, "aperture_size");

		bcam->apertureblades = RNA_int_get(&ccamera, "aperture_blades");
		bcam->aperturerotation = RNA_float_get(&ccamera, "aperture_rotation");
		bcam->focaldistance = blender_camera_focal_distance(b_engine, b_ob, b_camera);
		bcam->aperture_ratio = RNA_float_get(&ccamera, "aperture_ratio");

		/* shift x goes through the engine so view overrides apply */
		bcam->shift.x = b_engine.camera_shift_x(b_ob);
		bcam->shift.y = b_camera.shift_y();

		bcam->sensor_width = b_camera.sensor_width();
		bcam->sensor_height = b_camera.sensor_height();

		if(b_camera.sensor_fit() == BL::Camera::sensor_fit_AUTO)
			bcam->sensor_fit = BlenderCamera::AUTO;
		else if(b_camera.sensor_fit() == BL::Camera::sensor_fit_HORIZONTAL)
			bcam->sensor_fit = BlenderCamera::HORIZONTAL;
		else
			bcam->sensor_fit = BlenderCamera::VERTICAL;
	}
	else {
		/* from lamp not implemented yet */
	}
}
コード例 #20
0
ファイル: blender_mesh.cpp プロジェクト: dfelinto/blender
/* Sync deformation motion data for a mesh at the given motion time.
 *
 * Fills the motion vertex position/normal (and curve key) attributes for
 * the motion step matching `motion_time`. Falls back to copying the rest
 * pose when the object has no deformation on this frame. */
void BlenderSync::sync_mesh_motion(BL::Depsgraph &b_depsgraph,
                                   BL::Object &b_ob,
                                   Object *object,
                                   float motion_time)
{
  /* ensure we only sync instanced meshes once */
  Mesh *mesh = object->mesh;

  if (mesh_motion_synced.find(mesh) != mesh_motion_synced.end())
    return;

  mesh_motion_synced.insert(mesh);

  /* ensure we only motion sync meshes that also had mesh synced, to avoid
   * unnecessary work and to ensure that its attributes were clear */
  if (mesh_synced.find(mesh) == mesh_synced.end())
    return;

  /* Find time matching motion step required by mesh. */
  int motion_step = mesh->motion_step(motion_time);
  if (motion_step < 0) {
    return;
  }

  /* skip empty meshes */
  const size_t numverts = mesh->verts.size();
  const size_t numkeys = mesh->curve_keys.size();

  if (!numverts && !numkeys)
    return;

  /* skip objects without deforming modifiers. this is not totally reliable,
   * would need a more extensive check to see which objects are animated */
  BL::Mesh b_mesh(PointerRNA_NULL);

  /* fluid motion is exported immediately with the mesh, skip here */
  BL::DomainFluidSettings b_fluid_domain = object_fluid_domain_find(b_ob);
  if (b_fluid_domain)
    return;

  if (ccl::BKE_object_is_deform_modified(b_ob, b_scene, preview)) {
    /* get derived mesh */
    b_mesh = object_to_mesh(b_data, b_ob, b_depsgraph, false, Mesh::SUBDIVISION_NONE);
  }

  if (!b_mesh) {
    /* if we have no motion blur on this frame, but on other frames, copy */
    if (numverts) {
      /* triangles */
      Attribute *attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);

      if (attr_mP) {
        Attribute *attr_mN = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_NORMAL);
        Attribute *attr_N = mesh->attributes.find(ATTR_STD_VERTEX_NORMAL);
        float3 *P = &mesh->verts[0];
        float3 *N = (attr_N) ? attr_N->data_float3() : NULL;

        /* copy the rest pose into this motion step */
        memcpy(attr_mP->data_float3() + motion_step * numverts, P, sizeof(float3) * numverts);
        if (attr_mN)
          memcpy(attr_mN->data_float3() + motion_step * numverts, N, sizeof(float3) * numverts);
      }
    }

    if (numkeys) {
      /* curves */
      Attribute *attr_mP = mesh->curve_attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);

      if (attr_mP) {
        float3 *keys = &mesh->curve_keys[0];
        memcpy(attr_mP->data_float3() + motion_step * numkeys, keys, sizeof(float3) * numkeys);
      }
    }

    return;
  }

  /* TODO(sergey): Perform preliminary check for number of vertices. */
  if (numverts) {
    /* Find attributes. */
    Attribute *attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
    Attribute *attr_mN = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_NORMAL);
    Attribute *attr_N = mesh->attributes.find(ATTR_STD_VERTEX_NORMAL);
    bool new_attribute = false;
    /* Add new attributes if they don't exist already. */
    if (!attr_mP) {
      attr_mP = mesh->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
      if (attr_N)
        attr_mN = mesh->attributes.add(ATTR_STD_MOTION_VERTEX_NORMAL);

      new_attribute = true;
    }
    /* Load vertex data from mesh. */
    float3 *mP = attr_mP->data_float3() + motion_step * numverts;
    float3 *mN = (attr_mN) ? attr_mN->data_float3() + motion_step * numverts : NULL;
    /* NOTE: We don't copy more that existing amount of vertices to prevent
     * possible memory corruption.
     */
    BL::Mesh::vertices_iterator v;
    int i = 0;
    for (b_mesh.vertices.begin(v); v != b_mesh.vertices.end() && i < numverts; ++v, ++i) {
      mP[i] = get_float3(v->co());
      if (mN)
        mN[i] = get_float3(v->normal());
    }
    if (new_attribute) {
      /* In case of new attribute, we verify if there really was any motion. */
      if (b_mesh.vertices.length() != numverts ||
          memcmp(mP, &mesh->verts[0], sizeof(float3) * numverts) == 0) {
        /* no motion, remove attributes again */
        if (b_mesh.vertices.length() != numverts) {
          VLOG(1) << "Topology differs, disabling motion blur for object " << b_ob.name();
        }
        else {
          VLOG(1) << "No actual deformation motion for object " << b_ob.name();
        }
        mesh->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
        if (attr_mN)
          mesh->attributes.remove(ATTR_STD_MOTION_VERTEX_NORMAL);
      }
      else if (motion_step > 0) {
        VLOG(1) << "Filling deformation motion for object " << b_ob.name();
        /* motion, fill up previous steps that we might have skipped because
         * they had no motion, but we need them anyway now */
        float3 *P = &mesh->verts[0];
        float3 *N = (attr_N) ? attr_N->data_float3() : NULL;
        for (int step = 0; step < motion_step; step++) {
          memcpy(attr_mP->data_float3() + step * numverts, P, sizeof(float3) * numverts);
          if (attr_mN)
            memcpy(attr_mN->data_float3() + step * numverts, N, sizeof(float3) * numverts);
        }
      }
    }
    else {
      /* Existing attribute but topology changed: fall back to rest pose
       * for this step so the data stays consistent. */
      if (b_mesh.vertices.length() != numverts) {
        VLOG(1) << "Topology differs, discarding motion blur for object " << b_ob.name()
                << " at time " << motion_step;
        memcpy(mP, &mesh->verts[0], sizeof(float3) * numverts);
        if (mN != NULL) {
          memcpy(mN, attr_N->data_float3(), sizeof(float3) * numverts);
        }
      }
    }
  }

  /* hair motion */
  if (numkeys)
    sync_curves(mesh, b_mesh, b_ob, true, motion_step);

  /* free derived mesh */
  free_object_to_mesh(b_data, b_ob, b_mesh);
}
コード例 #21
0
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Fill the Octane Camera properties from Blender data
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Fill the Octane Camera properties from Blender data
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void BlenderSync::load_camera_from_object(Camera* cam, BL::Object b_ob, int width, int height, float2& offset, bool skip_panorama) {
    BL::ID b_ob_data = b_ob.data();

    if(!b_ob_data.is_a(&RNA_Camera)) {
        //TODO: Implement it for Lamp
        return;
    }

    BL::Camera b_camera(b_ob_data);
    PointerRNA oct_camera = RNA_pointer_get(&b_camera.ptr, "octane");

    // Projection type: orthographic cameras keep the perspective type with the
    // ortho flag raised; panoramic cameras may be forced back to perspective.
    if(b_camera.type() == BL::Camera::type_ORTHO) {
        cam->type  = CAMERA_PERSPECTIVE;
        cam->ortho = true;
    }
    else if(b_camera.type() == BL::Camera::type_PANO) {
        cam->type  = skip_panorama ? CAMERA_PERSPECTIVE : CAMERA_PANORAMA;
        cam->ortho = false;
    }
    else {
        // type_PERSP and any unknown type
        cam->type  = CAMERA_PERSPECTIVE;
        cam->ortho = false;
    }
    cam->near_clip_depth = b_camera.clip_start();
    cam->far_clip_depth  = b_camera.clip_end();
    cam->set_focal_depth(b_ob, b_camera);

    // Must run before any use of cam->zoom below.
    get_cam_settings(cam, oct_camera);

    cam->lens_shift_x = b_camera.shift_x() / cam->zoom;
    cam->lens_shift_y = b_camera.shift_y() / cam->zoom;

    cam->sensorwidth  = b_camera.sensor_width();
    cam->sensorheight = b_camera.sensor_height();

    cam->offset_x = offset.x * 2.0f / cam->zoom;
    cam->offset_y = offset.y * 2.0f / cam->zoom;

    switch(b_camera.sensor_fit()) {
        case BL::Camera::sensor_fit_AUTO:       cam->sensor_fit = Camera::AUTO;       break;
        case BL::Camera::sensor_fit_HORIZONTAL: cam->sensor_fit = Camera::HORIZONTAL; break;
        default:                                cam->sensor_fit = Camera::VERTICAL;   break;
    }

    if(cam->ortho) {
        float ortho_scale;
        get_camera_ortho_scale(cam, b_camera, width, height, &ortho_scale);
        cam->fov = ortho_scale * cam->zoom;
    }
    else {
        // Field of view in degrees from sensor size and focal length.
        float sensor_size;
        get_camera_sensor_size(cam, width, height, &sensor_size);
        cam->fov = 2.0f * atanf((0.5f * sensor_size * cam->zoom) / b_camera.lens()) *180.0f / M_PI_F;
    }

    // Eye position comes from the translation column of the camera matrix.
    cam->eye_point.x = cam->matrix.x.w;
    cam->eye_point.y = cam->matrix.y.w;
    cam->eye_point.z = cam->matrix.z.w;

    // Look-at point: one unit along the view direction (camera looks down -Z).
    float3 view_dir = transform_direction(&cam->matrix, make_float3(0.0f, 0.0f, -1.0f));
    cam->look_at.x = cam->eye_point.x + view_dir.x;
    cam->look_at.y = cam->eye_point.y + view_dir.y;
    cam->look_at.z = cam->eye_point.z + view_dir.z;

    cam->up = normalize(transform_direction(&cam->matrix, make_float3(0.0f, 1.0f, 0.0f)));
} //camera_from_object()
コード例 #22
0
ファイル: blender_object.cpp プロジェクト: wisaac407/blender
bool BlenderSync::object_is_light(BL::Object& b_ob)
{
	/* An object is a light when its datablock exists and is a Lamp. */
	BL::ID b_data = b_ob.data();

	if(!b_data)
		return false;

	return b_data.is_a(&RNA_Lamp);
}
コード例 #23
0
ファイル: blender_object.cpp プロジェクト: nttputus/blensor
/* Sync one Blender object (or dupli instance) into the Cycles scene.
 * b_index identifies the dupli instance under b_parent; motion is 0 for a
 * regular sync pass, and -1/+1 when sampling the transform before/after
 * the current frame for motion blur. */
void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint layer_flag, int motion)
{
	/* light is handled separately */
	if(object_is_light(b_ob)) {
		if(!motion)
			sync_light(b_parent, b_index, b_ob, tfm);
		return;
	}

	/* only interested in object that we can create meshes from */
	if(!object_is_mesh(b_ob))
		return;

	/* key to lookup object */
	ObjectKey key(b_parent, b_index, b_ob);
	Object *object;

	/* motion vector case */
	if(motion) {
		/* only record motion for objects already created by the main pass */
		object = object_map.find(key);

		if(object) {
			if(tfm != object->tfm) {
				/* store transform at the pre (-1) or post (+1) motion step */
				if(motion == -1)
					object->motion.pre = tfm;
				else
					object->motion.post = tfm;

				object->use_motion = true;
			}

			/* mesh deformation motion is handled separately */
			sync_mesh_motion(b_ob, object->mesh, motion);
		}

		return;
	}

	/* test if we need to sync */
	bool object_updated = false;

	if(object_map.sync(&object, b_ob, b_parent, key))
		object_updated = true;
	
	bool use_holdout = (layer_flag & render_layer.holdout_layer) != 0;
	
	/* mesh sync */
	object->mesh = sync_mesh(b_ob, object_updated);

	/* holdout flips are not covered by the object update flag, so tag the
	 * manager explicitly when the flag changes */
	if(use_holdout != object->use_holdout) {
		object->use_holdout = use_holdout;
		scene->object_manager->tag_update(scene);
	}

	/* object sync */
	if(object_updated || (object->mesh && object->mesh->need_update)) {
		object->name = b_ob.name().c_str();
		object->pass_id = b_ob.pass_index();
		object->tfm = tfm;
		/* reset motion to the static transform; motion passes fill pre/post */
		object->motion.pre = tfm;
		object->motion.post = tfm;
		object->use_motion = false;

		/* visibility flags for both parent */
		object->visibility = object_ray_visibility(b_ob) & PATH_RAY_ALL;
		if(b_parent.ptr.data != b_ob.ptr.data)
			object->visibility &= object_ray_visibility(b_parent);

		/* camera flag is not actually used, instead is tested
		   against render layer flags */
		if(object->visibility & PATH_RAY_CAMERA) {
			object->visibility |= layer_flag << PATH_RAY_LAYER_SHIFT;
			object->visibility &= ~PATH_RAY_CAMERA;
		}

		object->tag_update(scene);
	}
}
コード例 #24
0
static bool object_render_hide_duplis(BL::Object b_ob)
{
	/* The original of a dupli is hidden depending on how its parent
	 * duplicates it. */
	BL::Object b_parent = b_ob.parent();

	if(!b_parent)
		return false;

	return object_render_hide_original(b_ob.type(), b_parent.dupli_type());
}
コード例 #25
0
ファイル: blender_object.cpp プロジェクト: wisaac407/blender
/* Sync one Blender lamp object into a Cycles light.
 * Early-outs when the light is unchanged, but still reports existing portal
 * lights through *use_portal so portal sampling stays enabled. */
void BlenderSync::sync_light(BL::Object& b_parent,
                             int persistent_id[OBJECT_PERSISTENT_ID_SIZE],
                             BL::Object& b_ob,
                             BL::DupliObject& b_dupli_ob,
                             Transform& tfm,
                             bool *use_portal)
{
	/* test if we need to sync */
	Light *light;
	ObjectKey key(b_parent, persistent_id, b_ob);

	if(!light_map.sync(&light, b_ob, b_parent, key)) {
		/* unchanged, but portals must still be flagged for this sync */
		if(light->is_portal)
			*use_portal = true;
		return;
	}
	
	BL::Lamp b_lamp(b_ob.data());

	/* type */
	switch(b_lamp.type()) {
		case BL::Lamp::type_POINT: {
			BL::PointLamp b_point_lamp(b_lamp);
			light->size = b_point_lamp.shadow_soft_size();
			light->type = LIGHT_POINT;
			break;
		}
		case BL::Lamp::type_SPOT: {
			BL::SpotLamp b_spot_lamp(b_lamp);
			light->size = b_spot_lamp.shadow_soft_size();
			light->type = LIGHT_SPOT;
			light->spot_angle = b_spot_lamp.spot_size();
			light->spot_smooth = b_spot_lamp.spot_blend();
			break;
		}
		case BL::Lamp::type_HEMI: {
			/* hemi is exported as a distant light with zero size */
			light->type = LIGHT_DISTANT;
			light->size = 0.0f;
			break;
		}
		case BL::Lamp::type_SUN: {
			BL::SunLamp b_sun_lamp(b_lamp);
			light->size = b_sun_lamp.shadow_soft_size();
			light->type = LIGHT_DISTANT;
			break;
		}
		case BL::Lamp::type_AREA: {
			BL::AreaLamp b_area_lamp(b_lamp);
			light->size = 1.0f;
			/* area light axes come from the object transform columns */
			light->axisu = transform_get_column(&tfm, 0);
			light->axisv = transform_get_column(&tfm, 1);
			light->sizeu = b_area_lamp.size();
			/* square area lamps reuse the u size for v */
			if(b_area_lamp.shape() == BL::AreaLamp::shape_RECTANGLE)
				light->sizev = b_area_lamp.size_y();
			else
				light->sizev = light->sizeu;
			light->type = LIGHT_AREA;
			break;
		}
	}

	/* location and (inverted!) direction */
	light->co = transform_get_column(&tfm, 3);
	light->dir = -transform_get_column(&tfm, 2);
	light->tfm = tfm;

	/* shader */
	vector<Shader*> used_shaders;
	find_shader(b_lamp, used_shaders, scene->default_light);
	light->shader = used_shaders[0];

	/* shadow */
	PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
	PointerRNA clamp = RNA_pointer_get(&b_lamp.ptr, "cycles");
	light->cast_shadow = get_boolean(clamp, "cast_shadow");
	light->use_mis = get_boolean(clamp, "use_multiple_importance_sampling");
	
	/* with the square-samples scene option, sample counts are squared */
	int samples = get_int(clamp, "samples");
	if(get_boolean(cscene, "use_square_samples"))
		light->samples = samples * samples;
	else
		light->samples = samples;

	light->max_bounces = get_int(clamp, "max_bounces");

	/* random id: from the dupli instance when present, otherwise hashed
	 * from the object name */
	if(b_dupli_ob) {
		light->random_id = b_dupli_ob.random_id();
	}
	else {
		light->random_id = hash_int_2d(hash_string(b_ob.name().c_str()), 0);
	}

	/* only area lights can act as portals */
	if(light->type == LIGHT_AREA)
		light->is_portal = get_boolean(clamp, "is_portal");
	else
		light->is_portal = false;

	if(light->is_portal)
		*use_portal = true;

	/* visibility */
	uint visibility = object_ray_visibility(b_ob);
	light->use_diffuse = (visibility & PATH_RAY_DIFFUSE) != 0;
	light->use_glossy = (visibility & PATH_RAY_GLOSSY) != 0;
	light->use_transmission = (visibility & PATH_RAY_TRANSMIT) != 0;
	light->use_scatter = (visibility & PATH_RAY_VOLUME_SCATTER) != 0;

	/* tag */
	light->tag_update(scene);
}
コード例 #26
0
ファイル: blender_object.cpp プロジェクト: nttputus/blensor
/* Sync one Blender lamp object into a Cycles light (early variant keyed by
 * dupli index; no portal/MIS/visibility handling). */
void BlenderSync::sync_light(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm)
{
	/* test if we need to sync */
	Light *light;
	ObjectKey key(b_parent, b_index, b_ob);

	if(!light_map.sync(&light, b_ob, b_parent, key))
		return;
	
	BL::Lamp b_lamp(b_ob.data());

	/* type */
	switch(b_lamp.type()) {
		case BL::Lamp::type_POINT: {
			BL::PointLamp b_point_lamp(b_lamp);
			light->size = b_point_lamp.shadow_soft_size();
			light->type = LIGHT_POINT;
			break;
		}
		case BL::Lamp::type_SPOT: {
			BL::SpotLamp b_spot_lamp(b_lamp);
			light->size = b_spot_lamp.shadow_soft_size();
			/* NOTE(review): spot lamps are exported as point lights here
			 * (no spot angle/blend) — presumably spot support did not exist
			 * in this version; confirm before reusing this code. */
			light->type = LIGHT_POINT;
			break;
		}
		case BL::Lamp::type_HEMI: {
			/* hemi is exported as a distant light with zero size */
			light->type = LIGHT_DISTANT;
			light->size = 0.0f;
			break;
		}
		case BL::Lamp::type_SUN: {
			BL::SunLamp b_sun_lamp(b_lamp);
			light->size = b_sun_lamp.shadow_soft_size();
			light->type = LIGHT_DISTANT;
			break;
		}
		case BL::Lamp::type_AREA: {
			BL::AreaLamp b_area_lamp(b_lamp);
			light->size = 1.0f;
			/* area light axes are the first two columns of the transform */
			light->axisu = make_float3(tfm.x.x, tfm.y.x, tfm.z.x);
			light->axisv = make_float3(tfm.x.y, tfm.y.y, tfm.z.y);
			light->sizeu = b_area_lamp.size();
			/* square area lamps reuse the u size for v */
			if(b_area_lamp.shape() == BL::AreaLamp::shape_RECTANGLE)
				light->sizev = b_area_lamp.size_y();
			else
				light->sizev = light->sizeu;
			light->type = LIGHT_AREA;
			break;
		}
	}

	/* location and (inverted!) direction */
	light->co = make_float3(tfm.x.w, tfm.y.w, tfm.z.w);
	light->dir = -make_float3(tfm.x.z, tfm.y.z, tfm.z.z);

	/* shader */
	vector<uint> used_shaders;

	find_shader(b_lamp, used_shaders, scene->default_light);

	/* fall back to the default light shader when none was found */
	if(used_shaders.size() == 0)
		used_shaders.push_back(scene->default_light);

	light->shader = used_shaders[0];

	/* shadow */
	PointerRNA clamp = RNA_pointer_get(&b_lamp.ptr, "cycles");
	light->cast_shadow = get_boolean(clamp, "cast_shadow");

	/* tag */
	light->tag_update(scene);
}
コード例 #27
0
ファイル: blender_object.cpp プロジェクト: sobotka/blender
/* Sync one depsgraph object instance into a Cycles object.
 * Returns the synced Cycles object, or NULL when the instance is a light,
 * is not meshable, is culled, or is completely invisible. motion_time == 0
 * is the main sync pass; any other value records a motion step. */
Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
                                 BL::ViewLayer &b_view_layer,
                                 BL::DepsgraphObjectInstance &b_instance,
                                 float motion_time,
                                 bool show_self,
                                 bool show_particles,
                                 BlenderObjectCulling &culling,
                                 bool *use_portal)
{
  const bool is_instance = b_instance.is_instance();
  BL::Object b_ob = b_instance.object();
  /* for instances the parent is the instancing object; otherwise self */
  BL::Object b_parent = is_instance ? b_instance.parent() : b_instance.object();
  BL::Object b_ob_instance = is_instance ? b_instance.instance_object() : b_ob;
  const bool motion = motion_time != 0.0f;
  /*const*/ Transform tfm = get_transform(b_ob.matrix_world());
  int *persistent_id = NULL;
  BL::Array<int, OBJECT_PERSISTENT_ID_SIZE> persistent_id_array;
  if (is_instance) {
    /* persistent id identifies this instance across depsgraph evaluations */
    persistent_id_array = b_instance.persistent_id();
    persistent_id = persistent_id_array.data;
  }

  /* light is handled separately */
  if (!motion && object_is_light(b_ob)) {
    /* TODO: don't use lights for excluded layers used as mask layer,
     * when dynamic overrides are back. */
#if 0
    if (!((layer_flag & view_layer.holdout_layer) && (layer_flag & view_layer.exclude_layer)))
#endif
    {
      sync_light(b_parent,
                 persistent_id,
                 b_ob,
                 b_ob_instance,
                 is_instance ? b_instance.random_id() : 0,
                 tfm,
                 use_portal);
    }

    return NULL;
  }

  /* only interested in object that we can create meshes from */
  if (!object_is_mesh(b_ob)) {
    return NULL;
  }

  /* Perform object culling. */
  if (culling.test(scene, b_ob, tfm)) {
    return NULL;
  }

  /* Visibility flags for both parent and child. */
  PointerRNA cobject = RNA_pointer_get(&b_ob.ptr, "cycles");
  bool use_holdout = get_boolean(cobject, "is_holdout") ||
                     b_parent.holdout_get(PointerRNA_NULL, b_view_layer);
  uint visibility = object_ray_visibility(b_ob) & PATH_RAY_ALL_VISIBILITY;

  if (b_parent.ptr.data != b_ob.ptr.data) {
    visibility &= object_ray_visibility(b_parent);
  }

  /* TODO: make holdout objects on excluded layer invisible for non-camera rays. */
#if 0
  if (use_holdout && (layer_flag & view_layer.exclude_layer)) {
    visibility &= ~(PATH_RAY_ALL_VISIBILITY - PATH_RAY_CAMERA);
  }
#endif

  /* Clear camera visibility for indirect only objects. */
  bool use_indirect_only = b_parent.indirect_only_get(PointerRNA_NULL, b_view_layer);
  if (use_indirect_only) {
    visibility &= ~PATH_RAY_CAMERA;
  }

  /* Don't export completely invisible objects. */
  if (visibility == 0) {
    return NULL;
  }

  /* key to lookup object */
  ObjectKey key(b_parent, persistent_id, b_ob_instance);
  Object *object;

  /* motion vector case */
  if (motion) {
    /* only record motion for objects already created by the main pass */
    object = object_map.find(key);

    if (object && object->use_motion()) {
      /* Set transform at matching motion time step. */
      int time_index = object->motion_step(motion_time);
      if (time_index >= 0) {
        object->motion[time_index] = tfm;
      }

      /* mesh deformation */
      if (object->mesh)
        sync_mesh_motion(b_depsgraph, b_ob, object, motion_time);
    }

    return object;
  }

  /* test if we need to sync */
  bool object_updated = false;

  if (object_map.sync(&object, b_ob, b_parent, key))
    object_updated = true;

  /* mesh sync */
  object->mesh = sync_mesh(
      b_depsgraph, b_ob, b_ob_instance, object_updated, show_self, show_particles);

  /* special case not tracked by object update flags */

  /* holdout */
  if (use_holdout != object->use_holdout) {
    object->use_holdout = use_holdout;
    scene->object_manager->tag_update(scene);
    object_updated = true;
  }

  if (visibility != object->visibility) {
    object->visibility = visibility;
    object_updated = true;
  }

  bool is_shadow_catcher = get_boolean(cobject, "is_shadow_catcher");
  if (is_shadow_catcher != object->is_shadow_catcher) {
    object->is_shadow_catcher = is_shadow_catcher;
    object_updated = true;
  }

  /* sync the asset name for Cryptomatte */
  BL::Object parent = b_ob.parent();
  ustring parent_name;
  if (parent) {
    /* walk up to the top-most parent for a stable asset name */
    while (parent.parent()) {
      parent = parent.parent();
    }
    parent_name = parent.name();
  }
  else {
    parent_name = b_ob.name();
  }
  if (object->asset_name != parent_name) {
    object->asset_name = parent_name;
    object_updated = true;
  }

  /* object sync
   * transform comparison should not be needed, but duplis don't work perfect
   * in the depsgraph and may not signal changes, so this is a workaround */
  if (object_updated || (object->mesh && object->mesh->need_update) || tfm != object->tfm) {
    object->name = b_ob.name().c_str();
    object->pass_id = b_ob.pass_index();
    object->tfm = tfm;
    object->motion.clear();

    /* motion blur */
    Scene::MotionType need_motion = scene->need_motion();
    if (need_motion != Scene::MOTION_NONE && object->mesh) {
      Mesh *mesh = object->mesh;
      mesh->use_motion_blur = false;
      mesh->motion_steps = 0;

      uint motion_steps;

      if (need_motion == Scene::MOTION_BLUR) {
        motion_steps = object_motion_steps(b_parent, b_ob);
        mesh->motion_steps = motion_steps;
        if (motion_steps && object_use_deform_motion(b_parent, b_ob)) {
          mesh->use_motion_blur = true;
        }
      }
      else {
        /* motion pass / vector pass: fixed three steps */
        motion_steps = 3;
        mesh->motion_steps = motion_steps;
      }

      object->motion.clear();
      object->motion.resize(motion_steps, transform_empty());

      if (motion_steps) {
        /* center step is the current frame; other steps are filled later
         * via the motion_times registered here */
        object->motion[motion_steps / 2] = tfm;

        for (size_t step = 0; step < motion_steps; step++) {
          motion_times.insert(object->motion_time(step));
        }
      }
    }

    /* dupli texture coordinates and random_id */
    if (is_instance) {
      object->dupli_generated = 0.5f * get_float3(b_instance.orco()) -
                                make_float3(0.5f, 0.5f, 0.5f);
      object->dupli_uv = get_float2(b_instance.uv());
      object->random_id = b_instance.random_id();
    }
    else {
      object->dupli_generated = make_float3(0.0f, 0.0f, 0.0f);
      object->dupli_uv = make_float2(0.0f, 0.0f);
      /* non-instanced objects hash their name for a stable random id */
      object->random_id = hash_int_2d(hash_string(object->name.c_str()), 0);
    }

    object->tag_update(scene);
  }

  if (is_instance) {
    /* Sync possible particle data. */
    sync_dupli_particle(b_parent, b_instance, object);
  }

  return object;
}
コード例 #28
0
/* Sync one Blender lamp object into a Cycles light (variant keyed by
 * persistent id; supports spot lights and MIS, no portals/visibility). */
void BlenderSync::sync_light(BL::Object b_parent, int persistent_id[OBJECT_PERSISTENT_ID_SIZE], BL::Object b_ob, Transform& tfm)
{
    /* test if we need to sync */
    Light *light;
    ObjectKey key(b_parent, persistent_id, b_ob);

    if(!light_map.sync(&light, b_ob, b_parent, key))
        return;

    BL::Lamp b_lamp(b_ob.data());

    /* type */
    switch(b_lamp.type()) {
    case BL::Lamp::type_POINT: {
        BL::PointLamp b_point_lamp(b_lamp);
        light->size = b_point_lamp.shadow_soft_size();
        light->type = LIGHT_POINT;
        break;
    }
    case BL::Lamp::type_SPOT: {
        BL::SpotLamp b_spot_lamp(b_lamp);
        light->size = b_spot_lamp.shadow_soft_size();
        light->type = LIGHT_SPOT;
        light->spot_angle = b_spot_lamp.spot_size();
        light->spot_smooth = b_spot_lamp.spot_blend();
        break;
    }
    case BL::Lamp::type_HEMI: {
        /* hemi is exported as a distant light with zero size */
        light->type = LIGHT_DISTANT;
        light->size = 0.0f;
        break;
    }
    case BL::Lamp::type_SUN: {
        BL::SunLamp b_sun_lamp(b_lamp);
        light->size = b_sun_lamp.shadow_soft_size();
        light->type = LIGHT_DISTANT;
        break;
    }
    case BL::Lamp::type_AREA: {
        BL::AreaLamp b_area_lamp(b_lamp);
        light->size = 1.0f;
        /* area light axes are the first two columns of the transform */
        light->axisu = transform_get_column(&tfm, 0);
        light->axisv = transform_get_column(&tfm, 1);
        light->sizeu = b_area_lamp.size();
        /* square area lamps reuse the u size for v */
        if(b_area_lamp.shape() == BL::AreaLamp::shape_RECTANGLE)
            light->sizev = b_area_lamp.size_y();
        else
            light->sizev = light->sizeu;
        light->type = LIGHT_AREA;
        break;
    }
    }

    /* location and (inverted!) direction */
    light->co = transform_get_column(&tfm, 3);
    light->dir = -transform_get_column(&tfm, 2);

    /* shader */
    vector<uint> used_shaders;

    find_shader(b_lamp, used_shaders, scene->default_light);

    /* fall back to the default light shader when none was found */
    if(used_shaders.size() == 0)
        used_shaders.push_back(scene->default_light);

    light->shader = used_shaders[0];

    /* shadow */
    PointerRNA clamp = RNA_pointer_get(&b_lamp.ptr, "cycles");
    light->cast_shadow = get_boolean(clamp, "cast_shadow");
    light->use_mis = get_boolean(clamp, "use_multiple_importance_sampling");
    light->samples = get_int(clamp, "samples");

    /* tag */
    light->tag_update(scene);
}
コード例 #29
0
ファイル: blender_session.cpp プロジェクト: eirikhex/blender
/* Bake the requested pass for one object into result[], one value per entry
 * of pixel_array. Synchronizes the scene, locates the object by name, then
 * runs the bake manager over the prepared bake data. */
void BlenderSession::bake(BL::Object b_object, const string& pass_type, const int object_id, BL::BakePixel pixel_array, const size_t num_pixels, const int /*depth*/, float result[])
{
	ShaderEvalType shader_type = get_shader_type(pass_type);
	size_t object_index = OBJECT_NONE;
	int tri_offset = 0;

	/* Set baking flag in advance, so kernel loading can check if we need
	 * any baking capabilities.
	 */
	scene->bake_manager->set_baking(true);

	/* ensure kernels are loaded before we do any scene updates */
	session->load_kernels();

	if(session->progress.get_cancel())
		return;

	if(shader_type == SHADER_EVAL_UV) {
		/* force UV to be available */
		Pass::add(PASS_UV, scene->film->passes);
	}

	if(BakeManager::is_light_pass(shader_type)) {
		/* force use_light_pass to be true */
		Pass::add(PASS_LIGHT, scene->film->passes);
	}

	/* create device and update scene */
	scene->film->tag_update(scene);
	scene->integrator->tag_update(scene);

	/* update scene */
	sync->sync_camera(b_render, b_engine.camera_override(), width, height);
	sync->sync_data(b_render,
	                b_v3d,
	                b_engine.camera_override(),
	                width, height,
	                &python_thread_state,
	                b_rlay_name.c_str());

	/* get buffer parameters */
	SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_v3d, b_rv3d, scene->camera, width, height);

	/* limit shader evaluation batches to the configured tile size */
	scene->bake_manager->set_shader_limit((size_t)b_engine.tile_x(), (size_t)b_engine.tile_y());

	/* set number of samples */
	session->tile_manager.set_samples(session_params.samples);
	session->reset(buffer_params, session_params.samples);
	session->update_scene();

	/* find object index. todo: is arbitrary - copied from mesh_displace.cpp */
	for(size_t i = 0; i < scene->objects.size(); i++) {
		if(strcmp(scene->objects[i]->name.c_str(), b_object.name().c_str()) == 0) {
			object_index = i;
			tri_offset = scene->objects[i]->mesh->tri_offset;
			break;
		}
	}

	/* when used, non-instanced convention: object = ~object */
	int object = ~object_index;

	BakeData *bake_data = scene->bake_manager->init(object, tri_offset, num_pixels);

	populate_bake_data(bake_data, object_id, pixel_array, num_pixels);

	/* set number of samples */
	/* NOTE(review): this repeats the set_samples/reset/update_scene block
	 * from above verbatim -- looks like a duplicated paste; confirm whether
	 * a second reset after populate_bake_data is actually required. */
	session->tile_manager.set_samples(session_params.samples);
	session->reset(buffer_params, session_params.samples);
	session->update_scene();

	session->progress.set_update_callback(function_bind(&BlenderSession::update_bake_progress, this));

	scene->bake_manager->bake(scene->device, &scene->dscene, scene, session->progress, shader_type, bake_data, result);

	/* free all memory used (host and device), so we wouldn't leave render
	 * engine with extra memory allocated
	 */

	session->device_free();

	delete sync;
	sync = NULL;
}