Code example #1
File: rigidbody.c, Project: benkahle/blendertests
/* Sync rigid body and object transformations */
void BKE_rigidbody_sync_transforms(RigidBodyWorld *rbw, Object *ob, float ctime)
{
	RigidBodyOb *rbo = ob->rigidbody_object;

	/* keep original transform for kinematic and passive objects */
	if (ELEM(NULL, rbw, rbo) || rbo->flag & RBO_FLAG_KINEMATIC || rbo->type == RBO_TYPE_PASSIVE)
		return;

	/* use rigid body transform after cache start frame if the object is not being transformed */
	if (BKE_rigidbody_check_sim_running(rbw, ctime) && !(ob->flag & SELECT && G.moving & G_TRANSFORM_OBJ)) {
		float mat[4][4], size_mat[4][4], size[3];

		normalize_qt(rbo->orn); // RB_TODO investigate why quaternion isn't normalized at this point
		quat_to_mat4(mat, rbo->orn);
		copy_v3_v3(mat[3], rbo->pos);

		mat4_to_size(size, ob->obmat);
		size_to_mat4(size_mat, size);
		mul_m4_m4m4(mat, mat, size_mat);

		copy_m4_m4(ob->obmat, mat);
	}
	/* otherwise set rigid body transform to current obmat */
	else {
		mat4_to_loc_quat(rbo->pos, rbo->orn, ob->obmat);
	}
}
Code example #2
void TransformBase::decompose(float mat[4][4], float *loc, float eul[3], float quat[4], float *size)
{
	mat4_to_size(size, mat);
	if (eul) {
		mat4_to_eul(eul, mat);
	}
	if (quat) {
		mat4_to_quat(quat, mat);
	}
	copy_v3_v3(loc, mat[3]);
}
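
The decompose() helper above is the standard basis-vector decomposition: in Blender's matrix layout, mat[0..2] hold the (scaled) basis axes and mat[3] holds the translation, so the per-axis scale is simply the length of each axis. The following standalone sketch (plain C, no Blender headers; the demo_* names are made up for illustration) shows what mat4_to_size() and the copy_v3_v3(loc, mat[3]) call amount to.

#include <math.h>
#include <stdio.h>
#include <string.h>

/* Standalone illustration of the decomposition above. */
static float demo_len_v3(const float v[3])
{
	return sqrtf(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
}

static void demo_mat4_to_loc_size(const float mat[4][4], float loc[3], float size[3])
{
	/* what mat4_to_size() computes: the length of each basis axis */
	size[0] = demo_len_v3(mat[0]);
	size[1] = demo_len_v3(mat[1]);
	size[2] = demo_len_v3(mat[2]);
	/* what copy_v3_v3(loc, mat[3]) copies: the translation */
	memcpy(loc, mat[3], sizeof(float[3]));
}

int main(void)
{
	/* uniform scale of 2 and a translation of (1, 2, 3) */
	const float mat[4][4] = {
		{2.0f, 0.0f, 0.0f, 0.0f},
		{0.0f, 2.0f, 0.0f, 0.0f},
		{0.0f, 0.0f, 2.0f, 0.0f},
		{1.0f, 2.0f, 3.0f, 1.0f},
	};
	float loc[3], size[3];

	demo_mat4_to_loc_size(mat, loc, size);
	printf("size = (%.1f, %.1f, %.1f), loc = (%.1f, %.1f, %.1f)\n",
	       size[0], size[1], size[2], loc[0], loc[1], loc[2]);
	return 0;
}
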
Code example #3
void AnimationExporter::sample_animation(float *v, std::vector<float> &frames, int type, Bone *bone, Object *ob_arm, bPoseChannel *pchan)
{
	bPoseChannel *parchan = NULL;
	bPose *pose = ob_arm->pose;

	pchan = BKE_pose_channel_find_name(pose, bone->name);

	if (!pchan)
		return;

	parchan = pchan->parent;

	enable_fcurves(ob_arm->adt->action, bone->name);

	std::vector<float>::iterator it;
	for (it = frames.begin(); it != frames.end(); it++) {
		float mat[4][4], ipar[4][4];

		float ctime = BKE_scene_frame_get_from_ctime(scene, *it);


		BKE_animsys_evaluate_animdata(scene, &ob_arm->id, ob_arm->adt, ctime, ADT_RECALC_ANIM);
		BKE_pose_where_is_bone(scene, ob_arm, pchan, ctime, 1);

		// compute bone local mat
		if (bone->parent) {
			invert_m4_m4(ipar, parchan->pose_mat);
			mult_m4_m4m4(mat, ipar, pchan->pose_mat);
		}
		else
			copy_m4_m4(mat, pchan->pose_mat);

		switch (type) {
			case 0:
				mat4_to_eul(v, mat);
				break;
			case 1:
				mat4_to_size(v, mat);
				break;
			case 2:
				copy_v3_v3(v, mat[3]);
				break;
		}

		v += 3;
	}

	enable_fcurves(ob_arm->adt->action, NULL);
}
Code example #4
File: object_dupli.c, Project: DrangPo/blender
/* OB_DUPLIPARTS */
static void make_duplis_particle_system(const DupliContext *ctx, ParticleSystem *psys)
{
	Scene *scene = ctx->scene;
	Object *par = ctx->object;
	bool for_render = ctx->eval_ctx->mode == DAG_EVAL_RENDER;
	bool use_texcoords = ELEM(ctx->eval_ctx->mode, DAG_EVAL_RENDER, DAG_EVAL_PREVIEW);

	GroupObject *go;
	Object *ob = NULL, **oblist = NULL, obcopy, *obcopylist = NULL;
	DupliObject *dob;
	ParticleDupliWeight *dw;
	ParticleSettings *part;
	ParticleData *pa;
	ChildParticle *cpa = NULL;
	ParticleKey state;
	ParticleCacheKey *cache;
	float ctime, pa_time, scale = 1.0f;
	float tmat[4][4], mat[4][4], pamat[4][4], vec[3], size = 0.0;
	float (*obmat)[4];
	int a, b, hair = 0;
	int totpart, totchild, totgroup = 0 /*, pa_num */;
	const bool dupli_type_hack = !BKE_scene_use_new_shading_nodes(scene);

	int no_draw_flag = PARS_UNEXIST;

	if (psys == NULL) return;

	part = psys->part;

	if (part == NULL)
		return;

	if (!psys_check_enabled(par, psys))
		return;

	if (!for_render)
		no_draw_flag |= PARS_NO_DISP;

	ctime = BKE_scene_frame_get(scene); /* NOTE: in old animsys, used parent object's timeoffset... */

	totpart = psys->totpart;
	totchild = psys->totchild;

	BLI_srandom((unsigned int)(31415926 + psys->seed));

	if ((psys->renderdata || part->draw_as == PART_DRAW_REND) && ELEM(part->ren_as, PART_DRAW_OB, PART_DRAW_GR)) {
		ParticleSimulationData sim = {NULL};
		sim.scene = scene;
		sim.ob = par;
		sim.psys = psys;
		sim.psmd = psys_get_modifier(par, psys);
		/* make sure emitter imat is in global coordinates instead of render view coordinates */
		invert_m4_m4(par->imat, par->obmat);

		/* first check for loops (particle system object used as dupli object) */
		if (part->ren_as == PART_DRAW_OB) {
			if (ELEM(part->dup_ob, NULL, par))
				return;
		}
		else { /*PART_DRAW_GR */
			if (part->dup_group == NULL || BLI_listbase_is_empty(&part->dup_group->gobject))
				return;

			if (BLI_findptr(&part->dup_group->gobject, par, offsetof(GroupObject, ob))) {
				return;
			}
		}

		/* if we have a hair particle system, use the path cache */
		if (part->type == PART_HAIR) {
			if (psys->flag & PSYS_HAIR_DONE)
				hair = (totchild == 0 || psys->childcache) && psys->pathcache;
			if (!hair)
				return;

			/* we use cache, update totchild according to cached data */
			totchild = psys->totchildcache;
			totpart = psys->totcached;
		}

		psys_check_group_weights(part);

		psys->lattice_deform_data = psys_create_lattice_deform_data(&sim);

		/* gather list of objects or single object */
		if (part->ren_as == PART_DRAW_GR) {
			if (ctx->do_update) {
				BKE_group_handle_recalc_and_update(ctx->eval_ctx, scene, par, part->dup_group);
			}

			if (part->draw & PART_DRAW_COUNT_GR) {
				for (dw = part->dupliweights.first; dw; dw = dw->next)
					totgroup += dw->count;
			}
			else {
				for (go = part->dup_group->gobject.first; go; go = go->next)
					totgroup++;
			}

			/* we also copy the actual objects to restore afterwards, since
			 * BKE_object_where_is_calc_time will change the object which breaks transform */
			oblist = MEM_callocN((size_t)totgroup * sizeof(Object *), "dupgroup object list");
			obcopylist = MEM_callocN((size_t)totgroup * sizeof(Object), "dupgroup copy list");

			if (part->draw & PART_DRAW_COUNT_GR && totgroup) {
				dw = part->dupliweights.first;

				for (a = 0; a < totgroup; dw = dw->next) {
					for (b = 0; b < dw->count; b++, a++) {
						oblist[a] = dw->ob;
						obcopylist[a] = *dw->ob;
					}
				}
			}
			else {
				go = part->dup_group->gobject.first;
				for (a = 0; a < totgroup; a++, go = go->next) {
					oblist[a] = go->ob;
					obcopylist[a] = *go->ob;
				}
			}
		}
		else {
			ob = part->dup_ob;
			obcopy = *ob;
		}

		if (totchild == 0 || part->draw & PART_DRAW_PARENT)
			a = 0;
		else
			a = totpart;

		for (pa = psys->particles; a < totpart + totchild; a++, pa++) {
			if (a < totpart) {
				/* handle parent particle */
				if (pa->flag & no_draw_flag)
					continue;

				/* pa_num = pa->num; */ /* UNUSED */
				pa_time = pa->time;
				size = pa->size;
			}
			else {
				/* handle child particle */
				cpa = &psys->child[a - totpart];

				/* pa_num = a; */ /* UNUSED */
				pa_time = psys->particles[cpa->parent].time;
				size = psys_get_child_size(psys, cpa, ctime, NULL);
			}

			/* some hair paths might be non-existent so they can't be used for duplication */
			if (hair && psys->pathcache &&
			    ((a < totpart && psys->pathcache[a]->segments < 0) ||
			     (a >= totpart && psys->childcache[a - totpart]->segments < 0)))
			{
				continue;
			}

			if (part->ren_as == PART_DRAW_GR) {
				/* prevent divide by zero below [#28336] */
				if (totgroup == 0)
					continue;

				/* for groups, pick the object based on settings */
				if (part->draw & PART_DRAW_RAND_GR)
					b = BLI_rand() % totgroup;
				else
					b = a % totgroup;

				ob = oblist[b];
				obmat = oblist[b]->obmat;
			}
			else {
				obmat = ob->obmat;
			}

			if (hair) {
				/* hair is handled separately: compute the transform from the hair keys */
				if (a < totpart) {
					cache = psys->pathcache[a];
					psys_get_dupli_path_transform(&sim, pa, NULL, cache, pamat, &scale);
				}
				else {
					cache = psys->childcache[a - totpart];
					psys_get_dupli_path_transform(&sim, NULL, cpa, cache, pamat, &scale);
				}

				copy_v3_v3(pamat[3], cache->co);
				pamat[3][3] = 1.0f;

			}
			else {
				/* first key */
				state.time = ctime;
				if (psys_get_particle_state(&sim, a, &state, 0) == 0) {
					continue;
				}
				else {
					float tquat[4];
					normalize_qt_qt(tquat, state.rot);
					quat_to_mat4(pamat, tquat);
					copy_v3_v3(pamat[3], state.co);
					pamat[3][3] = 1.0f;
				}
			}

			if (part->ren_as == PART_DRAW_GR && psys->part->draw & PART_DRAW_WHOLE_GR) {
				for (go = part->dup_group->gobject.first, b = 0; go; go = go->next, b++) {

					copy_m4_m4(tmat, oblist[b]->obmat);
					/* apply particle scale */
					mul_mat3_m4_fl(tmat, size * scale);
					mul_v3_fl(tmat[3], size * scale);
					/* group dupli offset, should apply after everything else */
					if (!is_zero_v3(part->dup_group->dupli_ofs))
						sub_v3_v3(tmat[3], part->dup_group->dupli_ofs);
					/* individual particle transform */
					mul_m4_m4m4(mat, pamat, tmat);

					dob = make_dupli(ctx, go->ob, mat, a, false, false);
					dob->particle_system = psys;
					if (use_texcoords)
						psys_get_dupli_texture(psys, part, sim.psmd, pa, cpa, dob->uv, dob->orco);
				}
			}
			else {
				/* to give ipos in object correct offset */
				BKE_object_where_is_calc_time(scene, ob, ctime - pa_time);

				copy_v3_v3(vec, obmat[3]);
				obmat[3][0] = obmat[3][1] = obmat[3][2] = 0.0f;

				/* particle rotation uses x-axis as the aligned axis, so pre-rotate the object accordingly */
				if ((part->draw & PART_DRAW_ROTATE_OB) == 0) {
					float xvec[3], q[4], size_mat[4][4], original_size[3];

					mat4_to_size(original_size, obmat);
					size_to_mat4(size_mat, original_size);

					xvec[0] = -1.f;
					xvec[1] = xvec[2] = 0;
					vec_to_quat(q, xvec, ob->trackflag, ob->upflag);
					quat_to_mat4(obmat, q);
					obmat[3][3] = 1.0f;

					/* add scaling if requested */
					if ((part->draw & PART_DRAW_NO_SCALE_OB) == 0)
						mul_m4_m4m4(obmat, obmat, size_mat);
				}
				else if (part->draw & PART_DRAW_NO_SCALE_OB) {
					/* remove scaling */
					float size_mat[4][4], original_size[3];

					mat4_to_size(original_size, obmat);
					size_to_mat4(size_mat, original_size);
					invert_m4(size_mat);

					mul_m4_m4m4(obmat, obmat, size_mat);
				}

				mul_m4_m4m4(tmat, pamat, obmat);
				mul_mat3_m4_fl(tmat, size * scale);

				copy_m4_m4(mat, tmat);

				if (part->draw & PART_DRAW_GLOBAL_OB)
					add_v3_v3v3(mat[3], mat[3], vec);

				dob = make_dupli(ctx, ob, mat, a, false, false);
				dob->particle_system = psys;
				if (use_texcoords)
					psys_get_dupli_texture(psys, part, sim.psmd, pa, cpa, dob->uv, dob->orco);
				/* XXX blender internal needs this to be set to dupligroup to render
				 * groups correctly, but we don't want this hack for cycles */
				if (dupli_type_hack && ctx->group)
					dob->type = OB_DUPLIGROUP;
			}
		}

		/* restore objects since they were changed in BKE_object_where_is_calc_time */
		if (part->ren_as == PART_DRAW_GR) {
			for (a = 0; a < totgroup; a++)
				*(oblist[a]) = obcopylist[a];
		}
		else
			*ob = obcopy;
	}

	/* clean up */
	if (oblist)
		MEM_freeN(oblist);
	if (obcopylist)
		MEM_freeN(obcopylist);

	if (psys->lattice_deform_data) {
		end_latt_deform(psys->lattice_deform_data);
		psys->lattice_deform_data = NULL;
	}
}
Code example #5
File: MOD_array.c, Project: akonneker/blensor
static DerivedMesh *arrayModifier_doArray(
        ArrayModifierData *amd,
        Scene *scene, Object *ob, DerivedMesh *dm,
        ModifierApplyFlag flag)
{
	const float eps = 1e-6f;
	const MVert *src_mvert;
	MVert *mv, *mv_prev, *result_dm_verts;

	MEdge *me;
	MLoop *ml;
	MPoly *mp;
	int i, j, c, count;
	float length = amd->length;
	/* offset matrix */
	float offset[4][4];
	float scale[3];
	bool offset_has_scale;
	float current_offset[4][4];
	float final_offset[4][4];
	int *full_doubles_map = NULL;
	int tot_doubles;

	const bool use_merge = (amd->flags & MOD_ARR_MERGE) != 0;
	const bool use_recalc_normals = (dm->dirty & DM_DIRTY_NORMALS) || use_merge;
	const bool use_offset_ob = ((amd->offset_type & MOD_ARR_OFF_OBJ) && amd->offset_ob);
	/* allow pole vertices to be used by many faces */
	const bool with_follow = use_offset_ob;

	int start_cap_nverts = 0, start_cap_nedges = 0, start_cap_npolys = 0, start_cap_nloops = 0;
	int end_cap_nverts = 0, end_cap_nedges = 0, end_cap_npolys = 0, end_cap_nloops = 0;
	int result_nverts = 0, result_nedges = 0, result_npolys = 0, result_nloops = 0;
	int chunk_nverts, chunk_nedges, chunk_nloops, chunk_npolys;
	int first_chunk_start, first_chunk_nverts, last_chunk_start, last_chunk_nverts;

	DerivedMesh *result, *start_cap_dm = NULL, *end_cap_dm = NULL;

	chunk_nverts = dm->getNumVerts(dm);
	chunk_nedges = dm->getNumEdges(dm);
	chunk_nloops = dm->getNumLoops(dm);
	chunk_npolys = dm->getNumPolys(dm);

	count = amd->count;

	if (amd->start_cap && amd->start_cap != ob && amd->start_cap->type == OB_MESH) {
		start_cap_dm = get_dm_for_modifier(amd->start_cap, flag);
		if (start_cap_dm) {
			start_cap_nverts = start_cap_dm->getNumVerts(start_cap_dm);
			start_cap_nedges = start_cap_dm->getNumEdges(start_cap_dm);
			start_cap_nloops = start_cap_dm->getNumLoops(start_cap_dm);
			start_cap_npolys = start_cap_dm->getNumPolys(start_cap_dm);
		}
	}
	if (amd->end_cap && amd->end_cap != ob && amd->end_cap->type == OB_MESH) {
		end_cap_dm = get_dm_for_modifier(amd->end_cap, flag);
		if (end_cap_dm) {
			end_cap_nverts = end_cap_dm->getNumVerts(end_cap_dm);
			end_cap_nedges = end_cap_dm->getNumEdges(end_cap_dm);
			end_cap_nloops = end_cap_dm->getNumLoops(end_cap_dm);
			end_cap_npolys = end_cap_dm->getNumPolys(end_cap_dm);
		}
	}

	/* Build up offset array, accumulating all settings options */

	unit_m4(offset);
	src_mvert = dm->getVertArray(dm);

	if (amd->offset_type & MOD_ARR_OFF_CONST)
		add_v3_v3v3(offset[3], offset[3], amd->offset);

	if (amd->offset_type & MOD_ARR_OFF_RELATIVE) {
		for (j = 0; j < 3; j++)
			offset[3][j] += amd->scale[j] * vertarray_size(src_mvert, chunk_nverts, j);
	}

	if (use_offset_ob) {
		float obinv[4][4];
		float result_mat[4][4];

		if (ob)
			invert_m4_m4(obinv, ob->obmat);
		else
			unit_m4(obinv);

		mul_m4_series(result_mat, offset,
		              obinv, amd->offset_ob->obmat);
		copy_m4_m4(offset, result_mat);
	}

	/* Check if there is some scaling.  If scaling, then we will not translate mapping */
	mat4_to_size(scale, offset);
	offset_has_scale = !is_one_v3(scale);

	if (amd->fit_type == MOD_ARR_FITCURVE && amd->curve_ob) {
		Curve *cu = amd->curve_ob->data;
		if (cu) {
#ifdef CYCLIC_DEPENDENCY_WORKAROUND
			if (amd->curve_ob->curve_cache == NULL) {
				BKE_displist_make_curveTypes(scene, amd->curve_ob, false);
			}
#endif

			if (amd->curve_ob->curve_cache && amd->curve_ob->curve_cache->path) {
				float scale = mat4_to_scale(amd->curve_ob->obmat);
				length = scale * amd->curve_ob->curve_cache->path->totdist;
			}
		}
	}

	/* calculate the maximum number of copies which will fit within the
	 * prescribed length */
	if (amd->fit_type == MOD_ARR_FITLENGTH || amd->fit_type == MOD_ARR_FITCURVE) {
		float dist = len_v3(offset[3]);

		if (dist > eps) {
			/* this gives length = first copy start to last copy end
			 * add a tiny offset for floating point rounding errors */
			count = (length + eps) / dist;
		}
		else {
			/* if the offset has no translation, just make one copy */
			count = 1;
		}
	}

	if (count < 1)
		count = 1;

	/* The number of verts, edges, loops, polys, before eventually merging doubles */
	result_nverts = chunk_nverts * count + start_cap_nverts + end_cap_nverts;
	result_nedges = chunk_nedges * count + start_cap_nedges + end_cap_nedges;
	result_nloops = chunk_nloops * count + start_cap_nloops + end_cap_nloops;
	result_npolys = chunk_npolys * count + start_cap_npolys + end_cap_npolys;

	/* Initialize a result dm */
	result = CDDM_from_template(dm, result_nverts, result_nedges, 0, result_nloops, result_npolys);
	result_dm_verts = CDDM_get_verts(result);

	if (use_merge) {
		/* Will need full_doubles_map for handling merge */
		full_doubles_map = MEM_mallocN(sizeof(int) * result_nverts, "mod array doubles map");
		fill_vn_i(full_doubles_map, result_nverts, -1);
	}

	/* copy customdata to original geometry */
	DM_copy_vert_data(dm, result, 0, 0, chunk_nverts);
	DM_copy_edge_data(dm, result, 0, 0, chunk_nedges);
	DM_copy_loop_data(dm, result, 0, 0, chunk_nloops);
	DM_copy_poly_data(dm, result, 0, 0, chunk_npolys);

	/* Subsurf for eg won't have mesh data in the custom data arrays,
	 * now add mvert/medge/mface layers. */

	if (!CustomData_has_layer(&dm->vertData, CD_MVERT)) {
		dm->copyVertArray(dm, result_dm_verts);
	}
	if (!CustomData_has_layer(&dm->edgeData, CD_MEDGE)) {
		dm->copyEdgeArray(dm, CDDM_get_edges(result));
	}
	if (!CustomData_has_layer(&dm->polyData, CD_MPOLY)) {
		dm->copyLoopArray(dm, CDDM_get_loops(result));
		dm->copyPolyArray(dm, CDDM_get_polys(result));
	}

	/* Remember first chunk, in case of cap merge */
	first_chunk_start = 0;
	first_chunk_nverts = chunk_nverts;

	unit_m4(current_offset);
	for (c = 1; c < count; c++) {
		/* copy customdata to new geometry */
		DM_copy_vert_data(result, result, 0, c * chunk_nverts, chunk_nverts);
		DM_copy_edge_data(result, result, 0, c * chunk_nedges, chunk_nedges);
		DM_copy_loop_data(result, result, 0, c * chunk_nloops, chunk_nloops);
		DM_copy_poly_data(result, result, 0, c * chunk_npolys, chunk_npolys);

		mv_prev = result_dm_verts;
		mv = mv_prev + c * chunk_nverts;

		/* recalculate cumulative offset here */
		mul_m4_m4m4(current_offset, current_offset, offset);

		/* apply offset to all new verts */
		for (i = 0; i < chunk_nverts; i++, mv++, mv_prev++) {
			mul_m4_v3(current_offset, mv->co);

			/* We have to correct normals too, if we do not tag them as dirty! */
			if (!use_recalc_normals) {
				float no[3];
				normal_short_to_float_v3(no, mv->no);
				mul_mat3_m4_v3(current_offset, no);
				normalize_v3(no);
				normal_float_to_short_v3(mv->no, no);
			}
		}

		/* adjust edge vertex indices */
		me = CDDM_get_edges(result) + c * chunk_nedges;
		for (i = 0; i < chunk_nedges; i++, me++) {
			me->v1 += c * chunk_nverts;
			me->v2 += c * chunk_nverts;
		}

		mp = CDDM_get_polys(result) + c * chunk_npolys;
		for (i = 0; i < chunk_npolys; i++, mp++) {
			mp->loopstart += c * chunk_nloops;
		}

		/* adjust loop vertex and edge indices */
		ml = CDDM_get_loops(result) + c * chunk_nloops;
		for (i = 0; i < chunk_nloops; i++, ml++) {
			ml->v += c * chunk_nverts;
			ml->e += c * chunk_nedges;
		}

		/* Handle merge between chunk n and n-1 */
		if (use_merge && (c >= 1)) {
			if (!offset_has_scale && (c >= 2)) {
				/* Mapping chunk 3 to chunk 2 is a translation of mapping 2 to 1
				 * ... that is except if scaling makes the distance grow */
				int k;
				int this_chunk_index = c * chunk_nverts;
				int prev_chunk_index = (c - 1) * chunk_nverts;
				for (k = 0; k < chunk_nverts; k++, this_chunk_index++, prev_chunk_index++) {
					int target = full_doubles_map[prev_chunk_index];
					if (target != -1) {
						target += chunk_nverts; /* translate mapping */
						if (full_doubles_map[target] != -1) {
							if (with_follow) {
								target = full_doubles_map[target];
							}
							else {
								/* The rule here is to not follow mapping to chunk N-2, which could be too far
								 * so if target vertex was itself mapped, then this vertex is not mapped */
								target = -1;
							}
						}
					}
					full_doubles_map[this_chunk_index] = target;
				}
			}
			else {
				dm_mvert_map_doubles(
				        full_doubles_map,
				        result_dm_verts,
				        (c - 1) * chunk_nverts,
				        chunk_nverts,
				        c * chunk_nverts,
				        chunk_nverts,
				        amd->merge_dist,
				        with_follow);
			}
		}
	}

	last_chunk_start = (count - 1) * chunk_nverts;
	last_chunk_nverts = chunk_nverts;

	copy_m4_m4(final_offset, current_offset);

	if (use_merge && (amd->flags & MOD_ARR_MERGEFINAL) && (count > 1)) {
		/* Merge first and last copies */
		dm_mvert_map_doubles(
		        full_doubles_map,
		        result_dm_verts,
		        last_chunk_start,
		        last_chunk_nverts,
		        first_chunk_start,
		        first_chunk_nverts,
		        amd->merge_dist,
		        with_follow);
	}

	/* start capping */
	if (start_cap_dm) {
		float start_offset[4][4];
		int start_cap_start = result_nverts - start_cap_nverts - end_cap_nverts;
		invert_m4_m4(start_offset, offset);
		dm_merge_transform(
		        result, start_cap_dm, start_offset,
		        result_nverts - start_cap_nverts - end_cap_nverts,
		        result_nedges - start_cap_nedges - end_cap_nedges,
		        result_nloops - start_cap_nloops - end_cap_nloops,
		        result_npolys - start_cap_npolys - end_cap_npolys,
		        start_cap_nverts, start_cap_nedges, start_cap_nloops, start_cap_npolys);
		/* Identify doubles with first chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        first_chunk_start,
			        first_chunk_nverts,
			        start_cap_start,
			        start_cap_nverts,
			        amd->merge_dist,
			        false);
		}
	}

	if (end_cap_dm) {
		float end_offset[4][4];
		int end_cap_start = result_nverts - end_cap_nverts;
		mul_m4_m4m4(end_offset, current_offset, offset);
		dm_merge_transform(
		        result, end_cap_dm, end_offset,
		        result_nverts - end_cap_nverts,
		        result_nedges - end_cap_nedges,
		        result_nloops - end_cap_nloops,
		        result_npolys - end_cap_npolys,
		        end_cap_nverts, end_cap_nedges, end_cap_nloops, end_cap_npolys);
		/* Identify doubles with last chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        last_chunk_start,
			        last_chunk_nverts,
			        end_cap_start,
			        end_cap_nverts,
			        amd->merge_dist,
			        false);
		}
	}
	/* done capping */

	/* Handle merging */
	tot_doubles = 0;
	if (use_merge) {
		for (i = 0; i < result_nverts; i++) {
			if (full_doubles_map[i] != -1) {
				if (i == full_doubles_map[i]) {
					full_doubles_map[i] = -1;
				}
				else {
					tot_doubles++;
				}
			}
		}
		if (tot_doubles > 0) {
			result = CDDM_merge_verts(result, full_doubles_map, tot_doubles, CDDM_MERGE_VERTS_DUMP_IF_EQUAL);
		}
		MEM_freeN(full_doubles_map);
	}

	/* In case org dm has dirty normals, or we made some merging, mark normals as dirty in new dm!
	 * TODO: we may need to set other dirty flags as well?
	 */
	if (use_recalc_normals) {
		result->dirty |= DM_DIRTY_NORMALS;
	}

	return result;
}
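
The array modifier above calls mat4_to_size() on the offset matrix only to decide whether the offset scales at all (offset_has_scale): when it does not, the doubles map from chunk N-1 to N can be reused for chunk N to N+1 by a simple index translation. Below is a standalone sketch of that check (plain C; demo_mat4_has_scale and its epsilon tolerance are illustrative choices, whereas Blender's is_one_v3() compares the mat4_to_size() result against 1.0f exactly).

#include <math.h>
#include <stdbool.h>
#include <stdio.h>

/* A 4x4 offset scales if any basis axis (mat[0..2] in Blender's layout)
 * has non-unit length. */
static bool demo_mat4_has_scale(const float mat[4][4])
{
	const float eps = 1e-6f;
	int i;

	for (i = 0; i < 3; i++) {
		const float len = sqrtf(mat[i][0] * mat[i][0] +
		                        mat[i][1] * mat[i][1] +
		                        mat[i][2] * mat[i][2]);
		if (fabsf(len - 1.0f) > eps) {
			return true;
		}
	}
	return false;
}

int main(void)
{
	/* pure translation along X: no scaling, so the doubles map can be reused */
	const float offset[4][4] = {
		{1.0f, 0.0f, 0.0f, 0.0f},
		{0.0f, 1.0f, 0.0f, 0.0f},
		{0.0f, 0.0f, 1.0f, 0.0f},
		{4.0f, 0.0f, 0.0f, 1.0f},
	};

	printf("offset has scale: %s\n", demo_mat4_has_scale(offset) ? "yes" : "no");
	return 0;
}
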
Code example #6
static void camera_stereo3d_model_matrix(Object *camera, const bool is_left, float r_modelmat[4][4])
{
	Camera *data = (Camera *)camera->data;
	float interocular_distance, convergence_distance;
	short convergence_mode, pivot;
	float sizemat[4][4];

	float fac = 1.0f;
	float fac_signed;

	interocular_distance = data->stereo.interocular_distance;
	convergence_distance = data->stereo.convergence_distance;
	convergence_mode = data->stereo.convergence_mode;
	pivot = data->stereo.pivot;

	if (((pivot == CAM_S3D_PIVOT_LEFT) && is_left) ||
	    ((pivot == CAM_S3D_PIVOT_RIGHT) && !is_left))
	{
		camera_model_matrix(camera, r_modelmat);
		return;
	}
	else {
		float size[3];
		mat4_to_size(size, camera->obmat);
		size_to_mat4(sizemat, size);
	}

	if (pivot == CAM_S3D_PIVOT_CENTER)
		fac = 0.5f;

	fac_signed = is_left ? fac : -fac;

	/* rotation */
	if (convergence_mode == CAM_S3D_TOE) {
		float angle;
		float angle_sin, angle_cos;
		float toeinmat[4][4];
		float rotmat[4][4];

		unit_m4(rotmat);

		if (pivot == CAM_S3D_PIVOT_CENTER) {
			fac = -fac;
			fac_signed = -fac_signed;
		}

		angle = atanf((interocular_distance * 0.5f) / convergence_distance) / fac;

		angle_cos = cosf(angle * fac_signed);
		angle_sin = sinf(angle * fac_signed);

		rotmat[0][0] =  angle_cos;
		rotmat[2][0] = -angle_sin;
		rotmat[0][2] =  angle_sin;
		rotmat[2][2] =  angle_cos;

		if (pivot == CAM_S3D_PIVOT_CENTER) {
			/* set the rotation */
			copy_m4_m4(toeinmat, rotmat);
			/* set the translation */
			toeinmat[3][0] = interocular_distance * fac_signed;

			/* transform */
			normalize_m4_m4(r_modelmat, camera->obmat);
			mul_m4_m4m4(r_modelmat, r_modelmat, toeinmat);

			/* scale back to the original size */
			mul_m4_m4m4(r_modelmat, r_modelmat, sizemat);
		}
		else { /* CAM_S3D_PIVOT_LEFT, CAM_S3D_PIVOT_RIGHT */
			/* rotate perpendicular to the interocular line */
			normalize_m4_m4(r_modelmat, camera->obmat);
			mul_m4_m4m4(r_modelmat, r_modelmat, rotmat);

			/* translate along the interocular line */
			unit_m4(toeinmat);
			toeinmat[3][0] = -interocular_distance * fac_signed;
			mul_m4_m4m4(r_modelmat, r_modelmat, toeinmat);

			/* rotate to toe-in angle */
			mul_m4_m4m4(r_modelmat, r_modelmat, rotmat);

			/* scale back to the original size */
			mul_m4_m4m4(r_modelmat, r_modelmat, sizemat);
		}
	}
	else {
		normalize_m4_m4(r_modelmat, camera->obmat);

		/* translate - no rotation in CAM_S3D_OFFAXIS, CAM_S3D_PARALLEL */
		translate_m4(r_modelmat, -interocular_distance * fac_signed, 0.0f, 0.0f);

		/* scale back to the original size */
		mul_m4_m4m4(r_modelmat, r_modelmat, sizemat);
	}
}
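
The stereo camera code above uses the same mat4_to_size()/size_to_mat4() pair for a different reason: the interocular shift has to be applied to a scale-free copy of the camera matrix (normalize_m4_m4), and the original scale is multiplied back in afterwards so a scaled camera object does not also scale the eye offset. A condensed sketch of that strip-scale / transform / restore-scale pattern follows; it assumes the Blender source tree (BLI_math.h), and shifted_model_matrix / offset_x are illustrative names rather than Blender API.

#include "BLI_math.h"  /* assumed available inside the Blender source tree */

/* Apply a local-space X translation to a scale-free copy of obmat, then
 * restore the original scale (the pattern used by the stereo camera code). */
static void shifted_model_matrix(float obmat[4][4], float offset_x, float r_mat[4][4])
{
	float size[3], sizemat[4][4];

	/* remember the object's scale */
	mat4_to_size(size, obmat);
	size_to_mat4(sizemat, size);

	/* work on a scale-free matrix and translate along local X */
	normalize_m4_m4(r_mat, obmat);
	translate_m4(r_mat, offset_x, 0.0f, 0.0f);

	/* scale back to the original size */
	mul_m4_m4m4(r_mat, r_mat, sizemat);
}
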
Code example #7
File: MOD_array.c, Project: Ichthyostega/blender
static DerivedMesh *arrayModifier_doArray(
        ArrayModifierData *amd,
        Scene *scene, Object *ob, DerivedMesh *dm,
        ModifierApplyFlag flag)
{
	const float eps = 1e-6f;
	const MVert *src_mvert;
	MVert *mv, *mv_prev, *result_dm_verts;

	MEdge *me;
	MLoop *ml;
	MPoly *mp;
	int i, j, c, count;
	float length = amd->length;
	/* offset matrix */
	float offset[4][4];
	float scale[3];
	bool offset_has_scale;
	float current_offset[4][4];
	float final_offset[4][4];
	int *full_doubles_map = NULL;
	int tot_doubles;

	const bool use_merge = (amd->flags & MOD_ARR_MERGE) != 0;
	const bool use_recalc_normals = (dm->dirty & DM_DIRTY_NORMALS) || use_merge;
	const bool use_offset_ob = ((amd->offset_type & MOD_ARR_OFF_OBJ) && amd->offset_ob);

	int start_cap_nverts = 0, start_cap_nedges = 0, start_cap_npolys = 0, start_cap_nloops = 0;
	int end_cap_nverts = 0, end_cap_nedges = 0, end_cap_npolys = 0, end_cap_nloops = 0;
	int result_nverts = 0, result_nedges = 0, result_npolys = 0, result_nloops = 0;
	int chunk_nverts, chunk_nedges, chunk_nloops, chunk_npolys;
	int first_chunk_start, first_chunk_nverts, last_chunk_start, last_chunk_nverts;

	DerivedMesh *result, *start_cap_dm = NULL, *end_cap_dm = NULL;

	int *vgroup_start_cap_remap = NULL;
	int vgroup_start_cap_remap_len = 0;
	int *vgroup_end_cap_remap = NULL;
	int vgroup_end_cap_remap_len = 0;

	chunk_nverts = dm->getNumVerts(dm);
	chunk_nedges = dm->getNumEdges(dm);
	chunk_nloops = dm->getNumLoops(dm);
	chunk_npolys = dm->getNumPolys(dm);

	count = amd->count;

	if (amd->start_cap && amd->start_cap != ob && amd->start_cap->type == OB_MESH) {
		vgroup_start_cap_remap = BKE_object_defgroup_index_map_create(amd->start_cap, ob, &vgroup_start_cap_remap_len);

		start_cap_dm = get_dm_for_modifier(amd->start_cap, flag);
		if (start_cap_dm) {
			start_cap_nverts = start_cap_dm->getNumVerts(start_cap_dm);
			start_cap_nedges = start_cap_dm->getNumEdges(start_cap_dm);
			start_cap_nloops = start_cap_dm->getNumLoops(start_cap_dm);
			start_cap_npolys = start_cap_dm->getNumPolys(start_cap_dm);
		}
	}
	if (amd->end_cap && amd->end_cap != ob && amd->end_cap->type == OB_MESH) {
		vgroup_end_cap_remap = BKE_object_defgroup_index_map_create(amd->end_cap, ob, &vgroup_end_cap_remap_len);

		end_cap_dm = get_dm_for_modifier(amd->end_cap, flag);
		if (end_cap_dm) {
			end_cap_nverts = end_cap_dm->getNumVerts(end_cap_dm);
			end_cap_nedges = end_cap_dm->getNumEdges(end_cap_dm);
			end_cap_nloops = end_cap_dm->getNumLoops(end_cap_dm);
			end_cap_npolys = end_cap_dm->getNumPolys(end_cap_dm);
		}
	}

	/* Build up offset array, accumulating all settings options */

	unit_m4(offset);
	src_mvert = dm->getVertArray(dm);

	if (amd->offset_type & MOD_ARR_OFF_CONST) {
		add_v3_v3(offset[3], amd->offset);
	}

	if (amd->offset_type & MOD_ARR_OFF_RELATIVE) {
		float min[3], max[3];
		const MVert *src_mv;

		INIT_MINMAX(min, max);
		for (src_mv = src_mvert, j = chunk_nverts; j--; src_mv++) {
			minmax_v3v3_v3(min, max, src_mv->co);
		}

		for (j = 3; j--; ) {
			offset[3][j] += amd->scale[j] * (max[j] - min[j]);
		}
	}

	if (use_offset_ob) {
		float obinv[4][4];
		float result_mat[4][4];

		if (ob)
			invert_m4_m4(obinv, ob->obmat);
		else
			unit_m4(obinv);

		mul_m4_series(result_mat, offset,
		              obinv, amd->offset_ob->obmat);
		copy_m4_m4(offset, result_mat);
	}

	/* Check if there is some scaling.  If scaling, then we will not translate mapping */
	mat4_to_size(scale, offset);
	offset_has_scale = !is_one_v3(scale);

	if (amd->fit_type == MOD_ARR_FITCURVE && amd->curve_ob) {
		Curve *cu = amd->curve_ob->data;
		if (cu) {
#ifdef CYCLIC_DEPENDENCY_WORKAROUND
			if (amd->curve_ob->curve_cache == NULL) {
				BKE_displist_make_curveTypes(scene, amd->curve_ob, false);
			}
#endif

			if (amd->curve_ob->curve_cache && amd->curve_ob->curve_cache->path) {
				float scale_fac = mat4_to_scale(amd->curve_ob->obmat);
				length = scale_fac * amd->curve_ob->curve_cache->path->totdist;
			}
		}
	}

	/* calculate the maximum number of copies which will fit within the
	 * prescribed length */
	if (amd->fit_type == MOD_ARR_FITLENGTH || amd->fit_type == MOD_ARR_FITCURVE) {
		float dist = len_v3(offset[3]);

		if (dist > eps) {
			/* this gives length = first copy start to last copy end
			 * add a tiny offset for floating point rounding errors */
			count = (length + eps) / dist + 1;
		}
		else {
			/* if the offset has no translation, just make one copy */
			count = 1;
		}
	}

	if (count < 1)
		count = 1;

	/* The number of verts, edges, loops, polys, before eventually merging doubles */
	result_nverts = chunk_nverts * count + start_cap_nverts + end_cap_nverts;
	result_nedges = chunk_nedges * count + start_cap_nedges + end_cap_nedges;
	result_nloops = chunk_nloops * count + start_cap_nloops + end_cap_nloops;
	result_npolys = chunk_npolys * count + start_cap_npolys + end_cap_npolys;

	/* Initialize a result dm */
	result = CDDM_from_template(dm, result_nverts, result_nedges, 0, result_nloops, result_npolys);
	result_dm_verts = CDDM_get_verts(result);

	if (use_merge) {
		/* Will need full_doubles_map for handling merge */
		full_doubles_map = MEM_malloc_arrayN(result_nverts, sizeof(int), "mod array doubles map");
		copy_vn_i(full_doubles_map, result_nverts, -1);
	}

	/* copy customdata to original geometry */
	DM_copy_vert_data(dm, result, 0, 0, chunk_nverts);
	DM_copy_edge_data(dm, result, 0, 0, chunk_nedges);
	DM_copy_loop_data(dm, result, 0, 0, chunk_nloops);
	DM_copy_poly_data(dm, result, 0, 0, chunk_npolys);

	/* Subsurf for eg won't have mesh data in the custom data arrays.
	 * now add mvert/medge/mpoly layers. */

	if (!CustomData_has_layer(&dm->vertData, CD_MVERT)) {
		dm->copyVertArray(dm, result_dm_verts);
	}
	if (!CustomData_has_layer(&dm->edgeData, CD_MEDGE)) {
		dm->copyEdgeArray(dm, CDDM_get_edges(result));
	}
	if (!CustomData_has_layer(&dm->polyData, CD_MPOLY)) {
		dm->copyLoopArray(dm, CDDM_get_loops(result));
		dm->copyPolyArray(dm, CDDM_get_polys(result));
	}

	/* Remember first chunk, in case of cap merge */
	first_chunk_start = 0;
	first_chunk_nverts = chunk_nverts;

	unit_m4(current_offset);
	for (c = 1; c < count; c++) {
		/* copy customdata to new geometry */
		DM_copy_vert_data(result, result, 0, c * chunk_nverts, chunk_nverts);
		DM_copy_edge_data(result, result, 0, c * chunk_nedges, chunk_nedges);
		DM_copy_loop_data(result, result, 0, c * chunk_nloops, chunk_nloops);
		DM_copy_poly_data(result, result, 0, c * chunk_npolys, chunk_npolys);

		mv_prev = result_dm_verts;
		mv = mv_prev + c * chunk_nverts;

		/* recalculate cumulative offset here */
		mul_m4_m4m4(current_offset, current_offset, offset);

		/* apply offset to all new verts */
		for (i = 0; i < chunk_nverts; i++, mv++, mv_prev++) {
			mul_m4_v3(current_offset, mv->co);

			/* We have to correct normals too, if we do not tag them as dirty! */
			if (!use_recalc_normals) {
				float no[3];
				normal_short_to_float_v3(no, mv->no);
				mul_mat3_m4_v3(current_offset, no);
				normalize_v3(no);
				normal_float_to_short_v3(mv->no, no);
			}
		}

		/* adjust edge vertex indices */
		me = CDDM_get_edges(result) + c * chunk_nedges;
		for (i = 0; i < chunk_nedges; i++, me++) {
			me->v1 += c * chunk_nverts;
			me->v2 += c * chunk_nverts;
		}

		mp = CDDM_get_polys(result) + c * chunk_npolys;
		for (i = 0; i < chunk_npolys; i++, mp++) {
			mp->loopstart += c * chunk_nloops;
		}

		/* adjust loop vertex and edge indices */
		ml = CDDM_get_loops(result) + c * chunk_nloops;
		for (i = 0; i < chunk_nloops; i++, ml++) {
			ml->v += c * chunk_nverts;
			ml->e += c * chunk_nedges;
		}

		/* Handle merge between chunk n and n-1 */
		if (use_merge && (c >= 1)) {
			if (!offset_has_scale && (c >= 2)) {
				/* Mapping chunk 3 to chunk 2 is a translation of mapping 2 to 1
				 * ... that is except if scaling makes the distance grow */
				int k;
				int this_chunk_index = c * chunk_nverts;
				int prev_chunk_index = (c - 1) * chunk_nverts;
				for (k = 0; k < chunk_nverts; k++, this_chunk_index++, prev_chunk_index++) {
					int target = full_doubles_map[prev_chunk_index];
					if (target != -1) {
						target += chunk_nverts; /* translate mapping */
						while (target != -1 && !ELEM(full_doubles_map[target], -1, target)) {
							/* If target is already mapped, we only follow that mapping if final target remains
							 * close enough from current vert (otherwise no mapping at all). */
							if (compare_len_v3v3(result_dm_verts[this_chunk_index].co,
							                     result_dm_verts[full_doubles_map[target]].co,
							                     amd->merge_dist))
							{
								target = full_doubles_map[target];
							}
							else {
								target = -1;
							}
						}
					}
					full_doubles_map[this_chunk_index] = target;
				}
			}
			else {
				dm_mvert_map_doubles(
				        full_doubles_map,
				        result_dm_verts,
				        (c - 1) * chunk_nverts,
				        chunk_nverts,
				        c * chunk_nverts,
				        chunk_nverts,
				        amd->merge_dist);
			}
		}
	}

	/* handle UVs */
	if (chunk_nloops > 0 && is_zero_v2(amd->uv_offset) == false) {
		const int totuv = CustomData_number_of_layers(&result->loopData, CD_MLOOPUV);
		for (i = 0; i < totuv; i++) {
			MLoopUV *dmloopuv = CustomData_get_layer_n(&result->loopData, CD_MLOOPUV, i);
			dmloopuv += chunk_nloops;
			for (c = 1; c < count; c++) {
				const float uv_offset[2] = {
					amd->uv_offset[0] * (float)c,
					amd->uv_offset[1] * (float)c,
				};
				int l_index = chunk_nloops;
				for (; l_index-- != 0; dmloopuv++) {
					dmloopuv->uv[0] += uv_offset[0];
					dmloopuv->uv[1] += uv_offset[1];
				}
			}
		}
	}

	last_chunk_start = (count - 1) * chunk_nverts;
	last_chunk_nverts = chunk_nverts;

	copy_m4_m4(final_offset, current_offset);

	if (use_merge && (amd->flags & MOD_ARR_MERGEFINAL) && (count > 1)) {
		/* Merge first and last copies */
		dm_mvert_map_doubles(
		        full_doubles_map,
		        result_dm_verts,
		        last_chunk_start,
		        last_chunk_nverts,
		        first_chunk_start,
		        first_chunk_nverts,
		        amd->merge_dist);
	}

	/* start capping */
	if (start_cap_dm) {
		float start_offset[4][4];
		int start_cap_start = result_nverts - start_cap_nverts - end_cap_nverts;
		invert_m4_m4(start_offset, offset);
		dm_merge_transform(
		        result, start_cap_dm, start_offset,
		        result_nverts - start_cap_nverts - end_cap_nverts,
		        result_nedges - start_cap_nedges - end_cap_nedges,
		        result_nloops - start_cap_nloops - end_cap_nloops,
		        result_npolys - start_cap_npolys - end_cap_npolys,
		        start_cap_nverts, start_cap_nedges, start_cap_nloops, start_cap_npolys,
		        vgroup_start_cap_remap, vgroup_start_cap_remap_len);
		/* Identify doubles with first chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        first_chunk_start,
			        first_chunk_nverts,
			        start_cap_start,
			        start_cap_nverts,
			        amd->merge_dist);
		}
	}

	if (end_cap_dm) {
		float end_offset[4][4];
		int end_cap_start = result_nverts - end_cap_nverts;
		mul_m4_m4m4(end_offset, current_offset, offset);
		dm_merge_transform(
		        result, end_cap_dm, end_offset,
		        result_nverts - end_cap_nverts,
		        result_nedges - end_cap_nedges,
		        result_nloops - end_cap_nloops,
		        result_npolys - end_cap_npolys,
		        end_cap_nverts, end_cap_nedges, end_cap_nloops, end_cap_npolys,
		        vgroup_end_cap_remap, vgroup_end_cap_remap_len);
		/* Identify doubles with last chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        last_chunk_start,
			        last_chunk_nverts,
			        end_cap_start,
			        end_cap_nverts,
			        amd->merge_dist);
		}
	}
	/* done capping */

	/* Handle merging */
	tot_doubles = 0;
	if (use_merge) {
		for (i = 0; i < result_nverts; i++) {
			int new_i = full_doubles_map[i];
			if (new_i != -1) {
				/* We have to follow chains of doubles (merge start/end especially is likely to create some),
				 * those are not supported at all by CDDM_merge_verts! */
				while (!ELEM(full_doubles_map[new_i], -1, new_i)) {
					new_i = full_doubles_map[new_i];
				}
				if (i == new_i) {
					full_doubles_map[i] = -1;
				}
				else {
					full_doubles_map[i] = new_i;
					tot_doubles++;
				}
			}
		}
		if (tot_doubles > 0) {
			result = CDDM_merge_verts(result, full_doubles_map, tot_doubles, CDDM_MERGE_VERTS_DUMP_IF_EQUAL);
		}
		MEM_freeN(full_doubles_map);
	}

	/* In case org dm has dirty normals, or we made some merging, mark normals as dirty in new dm!
	 * TODO: we may need to set other dirty flags as well?
	 */
	if (use_recalc_normals) {
		result->dirty |= DM_DIRTY_NORMALS;
	}

	if (vgroup_start_cap_remap) {
		MEM_freeN(vgroup_start_cap_remap);
	}
	if (vgroup_end_cap_remap) {
		MEM_freeN(vgroup_end_cap_remap);
	}

	return result;
}