Example #1
bool BKE_mball_minmax_ex(MetaBall *mb, float min[3], float max[3],
                         float obmat[4][4], const short flag)
{
    const float scale = obmat ? mat4_to_scale(obmat) : 1.0f;
    MetaElem *ml;
    bool changed = false;
    float centroid[3], vec[3];

    INIT_MINMAX(min, max);

    for (ml = mb->elems.first; ml; ml = ml->next) {
        if ((ml->flag & flag) == flag) {
            const float scale_mb = (ml->rad * 0.5f) * scale;
            int i;

            if (obmat) {
                mul_v3_m4v3(centroid, obmat, &ml->x);
            }
            else {
                copy_v3_v3(centroid, &ml->x);
            }

            /* TODO: non-circle shapes (cubes etc.), probably nobody notices - campbell */
            for (i = -1; i != 3; i += 2) {
                copy_v3_v3(vec, centroid);
                add_v3_fl(vec, scale_mb * i);
                minmax_v3v3_v3(min, max, vec);
            }
            changed = true;
        }
    }

    return changed;
}
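
Every example on this page leans on the same helper. For reference, here is a minimal sketch of what mat4_to_scale computes, modeled on the approach of Blender's BLI math library (an illustration, not the verbatim library source): transform a unit-length vector by the 3x3 part of the matrix and return the length of the result, i.e. an average uniform scale factor.

/* Sketch: average uniform scale of a 4x4 matrix (illustrative, not verbatim). */
static float sketch_mat4_to_scale(const float mat[4][4])
{
    /* a unit-length vector: (1, 1, 1) / sqrt(3) */
    float unit_vec[3] = {0.57735027f, 0.57735027f, 0.57735027f};
    float tmat[3][3];

    copy_m3_m4(tmat, mat);     /* take the rotation/scale part */
    mul_m3_v3(tmat, unit_vec); /* transform the unit vector */
    return len_v3(unit_vec);   /* its new length is the average scale */
}

For a pure uniform scale s this returns exactly s; for non-uniform scales it is a compromise value, which is why the callers below use it only for scalar properties such as radii and envelope distances.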
Example #2
void ED_armature_transform_bones(struct bArmature *arm, float mat[4][4])
{
	EditBone *ebone;
	float scale = mat4_to_scale(mat);   /* store the scale of the matrix here to use on envelopes */
	float mat3[3][3];

	copy_m3_m4(mat3, mat);
	normalize_m3(mat3);
	/* Do the rotations */
	for (ebone = arm->edbo->first; ebone; ebone = ebone->next) {
		float tmat[3][3];
		
		/* find the current bone's roll matrix */
		ED_armature_ebone_to_mat3(ebone, tmat);
		
		/* transform the roll matrix */
		mul_m3_m3m3(tmat, mat3, tmat);
		
		/* transform the bone */
		mul_m4_v3(mat, ebone->head);
		mul_m4_v3(mat, ebone->tail);

		/* apply the transformed roll back */
		mat3_to_vec_roll(tmat, NULL, &ebone->roll);
		
		ebone->rad_head *= scale;
		ebone->rad_tail *= scale;
		ebone->dist     *= scale;
		
		/* we could be smarter and scale by the matrix along the x & z axes */
		ebone->xwidth   *= scale;
		ebone->zwidth   *= scale;
	}
}
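
Examples #2 and #5 share one decomposition pattern: the full matrix moves the head/tail points, its normalized 3x3 part rotates the roll matrix, and mat4_to_scale supplies a single scalar for radii and envelope distances. A hypothetical helper (not part of Blender's API) makes the split explicit:

/* Hypothetical helper: split a transform into a pure rotation and a
 * uniform scale factor, as the armature code above does inline. */
static void sketch_mat4_split_rot_scale(const float mat[4][4],
                                        float r_rot[3][3], float *r_scale)
{
	copy_m3_m4(r_rot, mat);        /* 3x3 rotation + scale part */
	normalize_m3(r_rot);           /* strip the scale -> pure rotation */
	*r_scale = mat4_to_scale(mat); /* scalar for radii and envelopes */
}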
Example #3
/*
 * This function raycasts a single vertex and updates "hit" if the hit is considered valid.
 * Returns TRUE if "hit" was updated.
 * Options control whether a hit is valid or not.
 * Supported options are:
 *	MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE (front face hits are ignored)
 *	MOD_SHRINKWRAP_CULL_TARGET_BACKFACE (back face hits are ignored)
 */
int normal_projection_project_vertex(char options, const float *vert, const float *dir, const SpaceTransform *transf, BVHTree *tree, BVHTreeRayHit *hit, BVHTree_RayCastCallback callback, void *userdata)
{
	float tmp_co[3], tmp_no[3];
	const float *co, *no;
	BVHTreeRayHit hit_tmp;

	//Copy from hit (we need to convert hit rays from one coordinate space to the other)
	memcpy(&hit_tmp, hit, sizeof(hit_tmp));

	//Apply space transform (TODO readjust dist)
	if (transf) {
		copy_v3_v3(tmp_co, vert);
		space_transform_apply(transf, tmp_co);
		co = tmp_co;

		copy_v3_v3(tmp_no, dir);
		space_transform_apply_normal(transf, tmp_no);
		no = tmp_no;

		hit_tmp.dist *= mat4_to_scale(((SpaceTransform*)transf)->local2target);
	}
	else {
		co = vert;
		no = dir;
	}

	hit_tmp.index = -1;

	BLI_bvhtree_ray_cast(tree, co, no, 0.0f, &hit_tmp, callback, userdata);

	if (hit_tmp.index != -1) {
		/* invert the normal first so face culling works on rotated objects */
		if (transf) {
			space_transform_invert_normal(transf, hit_tmp.no);
		}

		if (options & (MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE|MOD_SHRINKWRAP_CULL_TARGET_BACKFACE)) {
			/* apply backface */
			const float dot = dot_v3v3(dir, hit_tmp.no);
			if (((options & MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE) && dot <= 0.0f) ||
			    ((options & MOD_SHRINKWRAP_CULL_TARGET_BACKFACE) && dot >= 0.0f))
			{
				return FALSE; /* Ignore hit */
			}
		}

		if (transf) {
			/* Inverting space transform (TODO: make coherent with the initial dist readjust) */
			space_transform_invert(transf, hit_tmp.co);
			hit_tmp.dist = len_v3v3((float *)vert, hit_tmp.co);
		}

		memcpy(hit, &hit_tmp, sizeof(hit_tmp));
		return TRUE;
	}
	return FALSE;
}
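
The dist adjustment above follows from how lengths behave under a transform: for a matrix M with uniform scale s = mat4_to_scale(M), len(M*a - M*b) ≈ s * len(a - b). Converting the ray into target space therefore multiplies the comparable hit distance by the scale of local2target, and converting the hit point back recomputes the distance with len_v3v3 in the original space (Example #8 below guards both steps behind USE_DIST_CORRECT).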
Example #4
float paint_calc_object_space_radius(ViewContext *vc, const float center[3],
                                     float pixel_radius)
{
	Object *ob = vc->obact;
	float delta[3], scale, loc[3];
	const float mval_f[2] = {pixel_radius, 0.0f};
	float zfac;

	mul_v3_m4v3(loc, ob->obmat, center);

	zfac = ED_view3d_calc_zfac(vc->rv3d, loc, NULL);
	ED_view3d_win_to_delta(vc->ar, mval_f, delta, zfac);

	scale = fabsf(mat4_to_scale(ob->obmat));
	scale = (scale == 0.0f) ? 1.0f : scale;

	return len_v3(delta) / scale;
}
Example #5
void ED_armature_apply_transform(Object *ob, float mat[4][4])
{
	EditBone *ebone;
	bArmature *arm = ob->data;
	float scale = mat4_to_scale(mat);   /* store the scale of the matrix here to use on envelopes */
	float mat3[3][3];
	
	copy_m3_m4(mat3, mat);
	normalize_m3(mat3);
	
	/* Put the armature into editmode */
	ED_armature_to_edit(ob);
	
	/* Do the rotations */
	for (ebone = arm->edbo->first; ebone; ebone = ebone->next) {
		float delta[3], tmat[3][3];
		
		/* find the current bone's roll matrix */
		sub_v3_v3v3(delta, ebone->tail, ebone->head);
		vec_roll_to_mat3(delta, ebone->roll, tmat);
		
		/* transform the roll matrix */
		mul_m3_m3m3(tmat, mat3, tmat);
		
		/* transform the bone */
		mul_m4_v3(mat, ebone->head);
		mul_m4_v3(mat, ebone->tail);
		
		/* apply the transformed roll back */
		mat3_to_vec_roll(tmat, NULL, &ebone->roll);
		
		ebone->rad_head *= scale;
		ebone->rad_tail *= scale;
		ebone->dist     *= scale;
		
		/* we could be smarter and scale by the matrix along the x & z axes */
		ebone->xwidth   *= scale;
		ebone->zwidth   *= scale;
	}
	
	/* Turn the list into an armature */
	ED_armature_from_edit(ob);
	ED_armature_edit_free(ob);
}
Example #6
float paint_calc_object_space_radius(ViewContext *vc, float center[3],
				     float pixel_radius)
{
	Object *ob = vc->obact;
	float delta[3], scale, loc[3];
	float mval_f[2];

	mul_v3_m4v3(loc, ob->obmat, center);

	initgrabz(vc->rv3d, loc[0], loc[1], loc[2]);

	mval_f[0]= pixel_radius;
	mval_f[1]= 0.0f;
	ED_view3d_win_to_delta(vc->ar, mval_f, delta);

	scale= fabsf(mat4_to_scale(ob->obmat));
	scale= (scale == 0.0f)? 1.0f: scale;

	return len_v3(delta)/scale;
}
Example #7
void BKE_mball_transform(MetaBall *mb, float mat[4][4])
{
    MetaElem *me;
    float quat[4];
    const float scale = mat4_to_scale(mat);
    const float scale_sqrt = sqrtf(scale);

    mat4_to_quat(quat, mat);

    for (me = mb->elems.first; me; me = me->next) {
        mul_m4_v3(mat, &me->x);
        mul_qt_qtqt(me->quat, quat, me->quat);
        me->rad *= scale;
        /* hrmf, probably elems shouldn't be
         * treating scale differently - campbell */
        if (!MB_TYPE_SIZE_SQUARED(me->type)) {
            mul_v3_fl(&me->expx, scale);
        }
        else {
            mul_v3_fl(&me->expx, scale_sqrt);
        }
    }
}
Example #8
/*
 * This function raycasts a single vertex and updates "hit" if the hit is considered valid.
 * Returns true if "hit" was updated.
 * Options control whether a hit is valid or not.
 * Supported options are:
 *	MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE (front face hits are ignored)
 *	MOD_SHRINKWRAP_CULL_TARGET_BACKFACE (back face hits are ignored)
 */
bool BKE_shrinkwrap_project_normal(
        char options, const float vert[3],
        const float dir[3], const SpaceTransform *transf,
        BVHTree *tree, BVHTreeRayHit *hit,
        BVHTree_RayCastCallback callback, void *userdata)
{
	/* don't use this, because the corrected dist value could be incompatible:
	 * the callback uses this value for comparing prev/new dist values.
	 * also, at the moment there is no need to have a corrected 'dist' value */
// #define USE_DIST_CORRECT

	float tmp_co[3], tmp_no[3];
	const float *co, *no;
	BVHTreeRayHit hit_tmp;

	/* Copy from hit (we need to convert hit rays from one coordinate space to the other) */
	memcpy(&hit_tmp, hit, sizeof(hit_tmp));

	/* Apply space transform (TODO readjust dist) */
	if (transf) {
		copy_v3_v3(tmp_co, vert);
		space_transform_apply(transf, tmp_co);
		co = tmp_co;

		copy_v3_v3(tmp_no, dir);
		space_transform_apply_normal(transf, tmp_no);
		no = tmp_no;

#ifdef USE_DIST_CORRECT
		hit_tmp.dist *= mat4_to_scale(((SpaceTransform *)transf)->local2target);
#endif
	}
	else {
		co = vert;
		no = dir;
	}

	hit_tmp.index = -1;

	BLI_bvhtree_ray_cast(tree, co, no, 0.0f, &hit_tmp, callback, userdata);

	if (hit_tmp.index != -1) {
		/* invert the normal first so face culling works on rotated objects */
		if (transf) {
			space_transform_invert_normal(transf, hit_tmp.no);
		}

		if (options & (MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE | MOD_SHRINKWRAP_CULL_TARGET_BACKFACE)) {
			/* apply backface */
			const float dot = dot_v3v3(dir, hit_tmp.no);
			if (((options & MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE) && dot <= 0.0f) ||
			    ((options & MOD_SHRINKWRAP_CULL_TARGET_BACKFACE)  && dot >= 0.0f))
			{
				return false;  /* Ignore hit */
			}
		}

		if (transf) {
			/* Inverting space transform (TODO: make coherent with the initial dist readjust) */
			space_transform_invert(transf, hit_tmp.co);
#ifdef USE_DIST_CORRECT
			hit_tmp.dist = len_v3v3(vert, hit_tmp.co);
#endif
		}

		BLI_assert(hit_tmp.dist <= hit->dist);

		memcpy(hit, &hit_tmp, sizeof(hit_tmp));
		return true;
	}
	return false;
}
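
A minimal caller sketch for the function above, culling back-face hits. The BVH tree, callback and vertex data are assumed to be set up elsewhere, and BVH_RAYCAST_DIST_MAX is used here as the usual "no hit yet" sentinel; all surrounding names are illustrative.

/* Sketch: project one vertex along its normal onto a target BVH
 * (setup of tree/callback/userdata assumed, names illustrative). */
static bool sketch_project_one_vert(BVHTree *tree, BVHTree_RayCastCallback callback,
                                    void *userdata, const float vert_co[3],
                                    const float vert_no[3], float r_co[3])
{
	BVHTreeRayHit hit;
	hit.index = -1;
	hit.dist = BVH_RAYCAST_DIST_MAX; /* start with "no hit yet" */

	if (BKE_shrinkwrap_project_normal(MOD_SHRINKWRAP_CULL_TARGET_BACKFACE,
	                                  vert_co, vert_no, NULL /* same space */,
	                                  tree, &hit, callback, userdata))
	{
		copy_v3_v3(r_co, hit.co); /* projected point on the target */
		return true;
	}
	return false;
}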
Example #9
static void add_verts_to_dgroups(ReportList *reports, Scene *scene, Object *ob, Object *par,
                                 int heat, const bool mirror)
{
	/* This function implements the automatic computation of vertex group
	 * weights, either through envelopes or using a heat equilibrium.
	 *
	 * This function can be called both when parenting a mesh to an armature,
	 * or in weightpaint + posemode. In the latter case selection is taken
	 * into account and vertex weights can be mirrored.
	 *
	 * The mesh vertex positions used are either the final deformed coords
	 * from the derivedmesh in weightpaint mode, the final subsurf coords
	 * when parenting, or simply the original mesh coords.
	 */

	bArmature *arm = par->data;
	Bone **bonelist, *bone;
	bDeformGroup **dgrouplist, **dgroupflip;
	bDeformGroup *dgroup;
	bPoseChannel *pchan;
	Mesh *mesh;
	Mat4 bbone_array[MAX_BBONE_SUBDIV], *bbone = NULL;
	float (*root)[3], (*tip)[3], (*verts)[3];
	int *selected;
	int numbones, vertsfilled = 0, i, j, segments = 0;
	int wpmode = (ob->mode & OB_MODE_WEIGHT_PAINT);
	struct { Object *armob; void *list; int heat; } looper_data;

	looper_data.armob = par;
	looper_data.heat = heat;
	looper_data.list = NULL;

	/* count the number of skinnable bones */
	numbones = bone_looper(ob, arm->bonebase.first, &looper_data, bone_skinnable_cb);
	
	if (numbones == 0)
		return;
	
	if (BKE_object_defgroup_data_create(ob->data) == NULL)
		return;

	/* create an array of pointers to bones that are skinnable
	 * and fill it with all of the skinnable bones */
	bonelist = MEM_callocN(numbones * sizeof(Bone *), "bonelist");
	looper_data.list = bonelist;
	bone_looper(ob, arm->bonebase.first, &looper_data, bone_skinnable_cb);

	/* create an array of pointers to the deform groups that
	 * correspond to the skinnable bones (creating them
	 * as necessary). */
	dgrouplist = MEM_callocN(numbones * sizeof(bDeformGroup *), "dgrouplist");
	dgroupflip = MEM_callocN(numbones * sizeof(bDeformGroup *), "dgroupflip");

	looper_data.list = dgrouplist;
	bone_looper(ob, arm->bonebase.first, &looper_data, dgroup_skinnable_cb);

	/* create an array of root and tip positions transformed into
	 * global coords */
	root = MEM_callocN(numbones * sizeof(float) * 3, "root");
	tip = MEM_callocN(numbones * sizeof(float) * 3, "tip");
	selected = MEM_callocN(numbones * sizeof(int), "selected");

	for (j = 0; j < numbones; ++j) {
		bone = bonelist[j];
		dgroup = dgrouplist[j];
		
		/* handle bbone */
		if (heat) {
			if (segments == 0) {
				segments = 1;
				bbone = NULL;
				
				if ((par->pose) && (pchan = BKE_pose_channel_find_name(par->pose, bone->name))) {
					if (bone->segments > 1) {
						segments = bone->segments;
						b_bone_spline_setup(pchan, 1, bbone_array);
						bbone = bbone_array;
					}
				}
			}
			
			segments--;
		}
		
		/* compute root and tip */
		if (bbone) {
			mul_v3_m4v3(root[j], bone->arm_mat, bbone[segments].mat[3]);
			if ((segments + 1) < bone->segments) {
				mul_v3_m4v3(tip[j], bone->arm_mat, bbone[segments + 1].mat[3]);
			}
			else {
				copy_v3_v3(tip[j], bone->arm_tail);
			}
		}
		else {
			copy_v3_v3(root[j], bone->arm_head);
			copy_v3_v3(tip[j], bone->arm_tail);
		}
		
		mul_m4_v3(par->obmat, root[j]);
		mul_m4_v3(par->obmat, tip[j]);
		
		/* set selected */
		if (wpmode) {
			if ((arm->layer & bone->layer) && (bone->flag & BONE_SELECTED))
				selected[j] = 1;
		}
		else
			selected[j] = 1;
		
		/* find flipped group */
		if (dgroup && mirror) {
			char name_flip[MAXBONENAME];

			BKE_deform_flip_side_name(name_flip, dgroup->name, false);
			dgroupflip[j] = defgroup_find_name(ob, name_flip);
		}
	}

	/* create verts */
	mesh = (Mesh *)ob->data;
	verts = MEM_callocN(mesh->totvert * sizeof(*verts), "closestboneverts");

	if (wpmode) {
		/* if in weight paint mode, use final verts from derivedmesh */
		DerivedMesh *dm = mesh_get_derived_final(scene, ob, CD_MASK_BAREMESH);
		
		if (dm->foreachMappedVert) {
			mesh_get_mapped_verts_coords(dm, verts, mesh->totvert);
			vertsfilled = 1;
		}
		
		dm->release(dm);
	}
	else if (modifiers_findByType(ob, eModifierType_Subsurf)) {
		/* Is subsurf on? Let's use the verts on the limit surface then.
		 * Same number of vertices as the mesh, but moved to the
		 * subsurfed position, as for 'optimal'. */
		subsurf_calculate_limit_positions(mesh, verts);
		vertsfilled = 1;
	}

	/* transform verts to global space */
	for (i = 0; i < mesh->totvert; i++) {
		if (!vertsfilled)
			copy_v3_v3(verts[i], mesh->mvert[i].co);
		mul_m4_v3(ob->obmat, verts[i]);
	}

	/* compute the weights based on gathered vertices and bones */
	if (heat) {
		const char *error = NULL;

		heat_bone_weighting(ob, mesh, verts, numbones, dgrouplist, dgroupflip,
		                    root, tip, selected, &error);
		if (error) {
			BKE_report(reports, RPT_WARNING, error);
		}
	}
	else {
		envelope_bone_weighting(ob, mesh, verts, numbones, bonelist, dgrouplist,
		                        dgroupflip, root, tip, selected, mat4_to_scale(par->obmat));
	}

	/* only generated in some cases but can call anyway */
	ED_mesh_mirror_spatial_table(ob, NULL, NULL, NULL, 'e');

	/* free the memory allocated */
	MEM_freeN(bonelist);
	MEM_freeN(dgrouplist);
	MEM_freeN(dgroupflip);
	MEM_freeN(root);
	MEM_freeN(tip);
	MEM_freeN(selected);
	MEM_freeN(verts);
}
Example #10
static bool edbm_bevel_init(bContext *C, wmOperator *op, const bool is_modal)
{
  Scene *scene = CTX_data_scene(C);
  BevelData *opdata;
  ViewLayer *view_layer = CTX_data_view_layer(C);
  float pixels_per_inch;
  int i, otype;

  if (is_modal) {
    RNA_float_set(op->ptr, "offset", 0.0f);
    RNA_float_set(op->ptr, "offset_pct", 0.0f);
  }

  op->customdata = opdata = MEM_mallocN(sizeof(BevelData), "beveldata_mesh_operator");
  uint objects_used_len = 0;
  opdata->max_obj_scale = FLT_MIN;

  {
    uint ob_store_len = 0;
    Object **objects = BKE_view_layer_array_from_objects_in_edit_mode_unique_data(
        view_layer, CTX_wm_view3d(C), &ob_store_len);
    opdata->ob_store = MEM_malloc_arrayN(ob_store_len, sizeof(*opdata->ob_store), __func__);
    for (uint ob_index = 0; ob_index < ob_store_len; ob_index++) {
      Object *obedit = objects[ob_index];
      float scale = mat4_to_scale(obedit->obmat);
      opdata->max_obj_scale = max_ff(opdata->max_obj_scale, scale);
      BMEditMesh *em = BKE_editmesh_from_object(obedit);
      if (em->bm->totvertsel > 0) {
        opdata->ob_store[objects_used_len].em = em;
        objects_used_len++;
      }
    }
    MEM_freeN(objects);
    opdata->ob_store_len = objects_used_len;
  }

  opdata->is_modal = is_modal;
  otype = RNA_enum_get(op->ptr, "offset_type");
  opdata->value_mode = (otype == BEVEL_AMT_PERCENT) ? OFFSET_VALUE_PERCENT : OFFSET_VALUE;
  opdata->segments = (float)RNA_int_get(op->ptr, "segments");
  pixels_per_inch = U.dpi * U.pixelsize;

  for (i = 0; i < NUM_VALUE_KINDS; i++) {
    opdata->shift_value[i] = -1.0f;
    opdata->initial_length[i] = -1.0f;
    /* note: scale for OFFSET_VALUE will get overwritten in edbm_bevel_invoke */
    opdata->scale[i] = value_scale_per_inch[i] / pixels_per_inch;

    initNumInput(&opdata->num_input[i]);
    opdata->num_input[i].idx_max = 0;
    opdata->num_input[i].val_flag[0] |= NUM_NO_NEGATIVE;
    if (i == SEGMENTS_VALUE) {
      opdata->num_input[i].val_flag[0] |= NUM_NO_FRACTION | NUM_NO_ZERO;
    }
    if (i == OFFSET_VALUE) {
      opdata->num_input[i].unit_sys = scene->unit.system;
    }
    /* Not sure this is a factor or a unit? */
    opdata->num_input[i].unit_type[0] = B_UNIT_NONE;
  }

  /* avoid the cost of allocating a bm copy */
  if (is_modal) {
    View3D *v3d = CTX_wm_view3d(C);
    ARegion *ar = CTX_wm_region(C);

    for (uint ob_index = 0; ob_index < opdata->ob_store_len; ob_index++) {
      opdata->ob_store[ob_index].mesh_backup = EDBM_redo_state_store(
          opdata->ob_store[ob_index].em);
    }
    opdata->draw_handle_pixel = ED_region_draw_cb_activate(
        ar->type, ED_region_draw_mouse_line_cb, opdata->mcenter, REGION_DRAW_POST_PIXEL);
    G.moving = G_TRANSFORM_EDIT;

    if (v3d) {
      opdata->gizmo_flag = v3d->gizmo_flag;
      v3d->gizmo_flag = V3D_GIZMO_HIDE;
    }
  }

  return true;
}
Example #11
static DerivedMesh *arrayModifier_doArray(
        ArrayModifierData *amd,
        Scene *scene, Object *ob, DerivedMesh *dm,
        ModifierApplyFlag flag)
{
	const float eps = 1e-6f;
	const MVert *src_mvert;
	MVert *mv, *mv_prev, *result_dm_verts;

	MEdge *me;
	MLoop *ml;
	MPoly *mp;
	int i, j, c, count;
	float length = amd->length;
	/* offset matrix */
	float offset[4][4];
	float scale[3];
	bool offset_has_scale;
	float current_offset[4][4];
	float final_offset[4][4];
	int *full_doubles_map = NULL;
	int tot_doubles;

	const bool use_merge = (amd->flags & MOD_ARR_MERGE) != 0;
	const bool use_recalc_normals = (dm->dirty & DM_DIRTY_NORMALS) || use_merge;
	const bool use_offset_ob = ((amd->offset_type & MOD_ARR_OFF_OBJ) && amd->offset_ob);
	/* allow pole vertices to be used by many faces */
	const bool with_follow = use_offset_ob;

	int start_cap_nverts = 0, start_cap_nedges = 0, start_cap_npolys = 0, start_cap_nloops = 0;
	int end_cap_nverts = 0, end_cap_nedges = 0, end_cap_npolys = 0, end_cap_nloops = 0;
	int result_nverts = 0, result_nedges = 0, result_npolys = 0, result_nloops = 0;
	int chunk_nverts, chunk_nedges, chunk_nloops, chunk_npolys;
	int first_chunk_start, first_chunk_nverts, last_chunk_start, last_chunk_nverts;

	DerivedMesh *result, *start_cap_dm = NULL, *end_cap_dm = NULL;

	chunk_nverts = dm->getNumVerts(dm);
	chunk_nedges = dm->getNumEdges(dm);
	chunk_nloops = dm->getNumLoops(dm);
	chunk_npolys = dm->getNumPolys(dm);

	count = amd->count;

	if (amd->start_cap && amd->start_cap != ob && amd->start_cap->type == OB_MESH) {
		start_cap_dm = get_dm_for_modifier(amd->start_cap, flag);
		if (start_cap_dm) {
			start_cap_nverts = start_cap_dm->getNumVerts(start_cap_dm);
			start_cap_nedges = start_cap_dm->getNumEdges(start_cap_dm);
			start_cap_nloops = start_cap_dm->getNumLoops(start_cap_dm);
			start_cap_npolys = start_cap_dm->getNumPolys(start_cap_dm);
		}
	}
	if (amd->end_cap && amd->end_cap != ob && amd->end_cap->type == OB_MESH) {
		end_cap_dm = get_dm_for_modifier(amd->end_cap, flag);
		if (end_cap_dm) {
			end_cap_nverts = end_cap_dm->getNumVerts(end_cap_dm);
			end_cap_nedges = end_cap_dm->getNumEdges(end_cap_dm);
			end_cap_nloops = end_cap_dm->getNumLoops(end_cap_dm);
			end_cap_npolys = end_cap_dm->getNumPolys(end_cap_dm);
		}
	}

	/* Build up offset array, accumulating all settings options */

	unit_m4(offset);
	src_mvert = dm->getVertArray(dm);

	if (amd->offset_type & MOD_ARR_OFF_CONST)
		add_v3_v3v3(offset[3], offset[3], amd->offset);

	if (amd->offset_type & MOD_ARR_OFF_RELATIVE) {
		for (j = 0; j < 3; j++)
			offset[3][j] += amd->scale[j] * vertarray_size(src_mvert, chunk_nverts, j);
	}

	if (use_offset_ob) {
		float obinv[4][4];
		float result_mat[4][4];

		if (ob)
			invert_m4_m4(obinv, ob->obmat);
		else
			unit_m4(obinv);

		mul_m4_series(result_mat, offset,
		              obinv, amd->offset_ob->obmat);
		copy_m4_m4(offset, result_mat);
	}

	/* Check if there is some scaling. If so, we will not translate the mapping */
	mat4_to_size(scale, offset);
	offset_has_scale = !is_one_v3(scale);

	if (amd->fit_type == MOD_ARR_FITCURVE && amd->curve_ob) {
		Curve *cu = amd->curve_ob->data;
		if (cu) {
#ifdef CYCLIC_DEPENDENCY_WORKAROUND
			if (amd->curve_ob->curve_cache == NULL) {
				BKE_displist_make_curveTypes(scene, amd->curve_ob, false);
			}
#endif

			if (amd->curve_ob->curve_cache && amd->curve_ob->curve_cache->path) {
				float scale = mat4_to_scale(amd->curve_ob->obmat);
				length = scale * amd->curve_ob->curve_cache->path->totdist;
			}
		}
	}

	/* calculate the maximum number of copies which will fit within the
	 * prescribed length */
	if (amd->fit_type == MOD_ARR_FITLENGTH || amd->fit_type == MOD_ARR_FITCURVE) {
		float dist = len_v3(offset[3]);

		if (dist > eps) {
			/* this gives length = first copy start to last copy end
			 * add a tiny offset for floating point rounding errors */
			count = (length + eps) / dist;
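			/* e.g. length = 3.0 with a 1.0-unit offset gives count = 3 */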
		}
		else {
			/* if the offset has no translation, just make one copy */
			count = 1;
		}
	}

	if (count < 1)
		count = 1;

	/* The number of verts, edges, loops, polys, before eventually merging doubles */
	result_nverts = chunk_nverts * count + start_cap_nverts + end_cap_nverts;
	result_nedges = chunk_nedges * count + start_cap_nedges + end_cap_nedges;
	result_nloops = chunk_nloops * count + start_cap_nloops + end_cap_nloops;
	result_npolys = chunk_npolys * count + start_cap_npolys + end_cap_npolys;

	/* Initialize a result dm */
	result = CDDM_from_template(dm, result_nverts, result_nedges, 0, result_nloops, result_npolys);
	result_dm_verts = CDDM_get_verts(result);

	if (use_merge) {
		/* Will need full_doubles_map for handling merge */
		full_doubles_map = MEM_mallocN(sizeof(int) * result_nverts, "mod array doubles map");
		fill_vn_i(full_doubles_map, result_nverts, -1);
	}

	/* copy customdata to original geometry */
	DM_copy_vert_data(dm, result, 0, 0, chunk_nverts);
	DM_copy_edge_data(dm, result, 0, 0, chunk_nedges);
	DM_copy_loop_data(dm, result, 0, 0, chunk_nloops);
	DM_copy_poly_data(dm, result, 0, 0, chunk_npolys);

	/* Subsurf for eg won't have mesh data in the custom data arrays;
	 * now add mvert/medge/mface layers. */

	if (!CustomData_has_layer(&dm->vertData, CD_MVERT)) {
		dm->copyVertArray(dm, result_dm_verts);
	}
	if (!CustomData_has_layer(&dm->edgeData, CD_MEDGE)) {
		dm->copyEdgeArray(dm, CDDM_get_edges(result));
	}
	if (!CustomData_has_layer(&dm->polyData, CD_MPOLY)) {
		dm->copyLoopArray(dm, CDDM_get_loops(result));
		dm->copyPolyArray(dm, CDDM_get_polys(result));
	}

	/* Remember first chunk, in case of cap merge */
	first_chunk_start = 0;
	first_chunk_nverts = chunk_nverts;

	unit_m4(current_offset);
	for (c = 1; c < count; c++) {
		/* copy customdata to new geometry */
		DM_copy_vert_data(result, result, 0, c * chunk_nverts, chunk_nverts);
		DM_copy_edge_data(result, result, 0, c * chunk_nedges, chunk_nedges);
		DM_copy_loop_data(result, result, 0, c * chunk_nloops, chunk_nloops);
		DM_copy_poly_data(result, result, 0, c * chunk_npolys, chunk_npolys);

		mv_prev = result_dm_verts;
		mv = mv_prev + c * chunk_nverts;

		/* recalculate cumulative offset here */
		mul_m4_m4m4(current_offset, current_offset, offset);

		/* apply offset to all new verts */
		for (i = 0; i < chunk_nverts; i++, mv++, mv_prev++) {
			mul_m4_v3(current_offset, mv->co);

			/* We have to correct normals too, if we do not tag them as dirty! */
			if (!use_recalc_normals) {
				float no[3];
				normal_short_to_float_v3(no, mv->no);
				mul_mat3_m4_v3(current_offset, no);
				normalize_v3(no);
				normal_float_to_short_v3(mv->no, no);
			}
		}

		/* adjust edge vertex indices */
		me = CDDM_get_edges(result) + c * chunk_nedges;
		for (i = 0; i < chunk_nedges; i++, me++) {
			me->v1 += c * chunk_nverts;
			me->v2 += c * chunk_nverts;
		}

		mp = CDDM_get_polys(result) + c * chunk_npolys;
		for (i = 0; i < chunk_npolys; i++, mp++) {
			mp->loopstart += c * chunk_nloops;
		}

		/* adjust loop vertex and edge indices */
		ml = CDDM_get_loops(result) + c * chunk_nloops;
		for (i = 0; i < chunk_nloops; i++, ml++) {
			ml->v += c * chunk_nverts;
			ml->e += c * chunk_nedges;
		}

		/* Handle merge between chunk n and n-1 */
		if (use_merge && (c >= 1)) {
			if (!offset_has_scale && (c >= 2)) {
				/* Mapping chunk 3 to chunk 2 is a translation of mapping 2 to 1
				 * ... that is except if scaling makes the distance grow */
				int k;
				int this_chunk_index = c * chunk_nverts;
				int prev_chunk_index = (c - 1) * chunk_nverts;
				for (k = 0; k < chunk_nverts; k++, this_chunk_index++, prev_chunk_index++) {
					int target = full_doubles_map[prev_chunk_index];
					if (target != -1) {
						target += chunk_nverts; /* translate mapping */
						if (full_doubles_map[target] != -1) {
							if (with_follow) {
								target = full_doubles_map[target];
							}
							else {
								/* The rule here is to not follow mapping to chunk N-2, which could be too far
								 * so if target vertex was itself mapped, then this vertex is not mapped */
								target = -1;
							}
						}
					}
					full_doubles_map[this_chunk_index] = target;
				}
			}
			else {
				dm_mvert_map_doubles(
				        full_doubles_map,
				        result_dm_verts,
				        (c - 1) * chunk_nverts,
				        chunk_nverts,
				        c * chunk_nverts,
				        chunk_nverts,
				        amd->merge_dist,
				        with_follow);
			}
		}
	}

	last_chunk_start = (count - 1) * chunk_nverts;
	last_chunk_nverts = chunk_nverts;

	copy_m4_m4(final_offset, current_offset);

	if (use_merge && (amd->flags & MOD_ARR_MERGEFINAL) && (count > 1)) {
		/* Merge first and last copies */
		dm_mvert_map_doubles(
		        full_doubles_map,
		        result_dm_verts,
		        last_chunk_start,
		        last_chunk_nverts,
		        first_chunk_start,
		        first_chunk_nverts,
		        amd->merge_dist,
		        with_follow);
	}

	/* start capping */
	if (start_cap_dm) {
		float start_offset[4][4];
		int start_cap_start = result_nverts - start_cap_nverts - end_cap_nverts;
		invert_m4_m4(start_offset, offset);
		dm_merge_transform(
		        result, start_cap_dm, start_offset,
		        result_nverts - start_cap_nverts - end_cap_nverts,
		        result_nedges - start_cap_nedges - end_cap_nedges,
		        result_nloops - start_cap_nloops - end_cap_nloops,
		        result_npolys - start_cap_npolys - end_cap_npolys,
		        start_cap_nverts, start_cap_nedges, start_cap_nloops, start_cap_npolys);
		/* Identify doubles with first chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        first_chunk_start,
			        first_chunk_nverts,
			        start_cap_start,
			        start_cap_nverts,
			        amd->merge_dist,
			        false);
		}
	}

	if (end_cap_dm) {
		float end_offset[4][4];
		int end_cap_start = result_nverts - end_cap_nverts;
		mul_m4_m4m4(end_offset, current_offset, offset);
		dm_merge_transform(
		        result, end_cap_dm, end_offset,
		        result_nverts - end_cap_nverts,
		        result_nedges - end_cap_nedges,
		        result_nloops - end_cap_nloops,
		        result_npolys - end_cap_npolys,
		        end_cap_nverts, end_cap_nedges, end_cap_nloops, end_cap_npolys);
		/* Identify doubles with last chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        last_chunk_start,
			        last_chunk_nverts,
			        end_cap_start,
			        end_cap_nverts,
			        amd->merge_dist,
			        false);
		}
	}
	/* done capping */

	/* Handle merging */
	tot_doubles = 0;
	if (use_merge) {
		for (i = 0; i < result_nverts; i++) {
			if (full_doubles_map[i] != -1) {
				if (i == full_doubles_map[i]) {
					full_doubles_map[i] = -1;
				}
				else {
					tot_doubles++;
				}
			}
		}
		if (tot_doubles > 0) {
			result = CDDM_merge_verts(result, full_doubles_map, tot_doubles, CDDM_MERGE_VERTS_DUMP_IF_EQUAL);
		}
		MEM_freeN(full_doubles_map);
	}

	/* In case org dm has dirty normals, or we made some merging, mark normals as dirty in new dm!
	 * TODO: we may need to set other dirty flags as well?
	 */
	if (use_recalc_normals) {
		result->dirty |= DM_DIRTY_NORMALS;
	}

	return result;
}
Example #12
/* add a gpencil object to cache to defer drawing */
tGPencilObjectCache *gpencil_object_cache_add(tGPencilObjectCache *cache_array,
                                              Object *ob,
                                              int *gp_cache_size,
                                              int *gp_cache_used)
{
  const DRWContextState *draw_ctx = DRW_context_state_get();
  tGPencilObjectCache *cache_elem = NULL;
  RegionView3D *rv3d = draw_ctx->rv3d;
  View3D *v3d = draw_ctx->v3d;
  tGPencilObjectCache *p = NULL;

  /* By default a cache is created with one block with a predefined number of free slots,
   * if the size is not enough, the cache is reallocated adding a new block of free slots.
   * This is done in order to keep cache small. */
  if (*gp_cache_used + 1 > *gp_cache_size) {
    if ((*gp_cache_size == 0) || (cache_array == NULL)) {
      p = MEM_callocN(sizeof(struct tGPencilObjectCache) * GP_CACHE_BLOCK_SIZE,
                      "tGPencilObjectCache");
      *gp_cache_size = GP_CACHE_BLOCK_SIZE;
    }
    else {
      *gp_cache_size += GP_CACHE_BLOCK_SIZE;
      p = MEM_recallocN(cache_array, sizeof(struct tGPencilObjectCache) * *gp_cache_size);
    }
    cache_array = p;
  }
  /* zero out all pointers */
  cache_elem = &cache_array[*gp_cache_used];
  memset(cache_elem, 0, sizeof(*cache_elem));

  cache_elem->ob = ob;
  cache_elem->gpd = (bGPdata *)ob->data;
  cache_elem->name = BKE_id_to_unique_string_key(&ob->id);

  copy_v3_v3(cache_elem->loc, ob->obmat[3]);
  copy_m4_m4(cache_elem->obmat, ob->obmat);
  cache_elem->idx = *gp_cache_used;

  /* object is duplicated (particle) */
  if (ob->base_flag & BASE_FROM_DUPLI) {
    /* Check if the original object is not in the view layer,
     * and so cannot be managed as a dupli. This is slower, but required to keep
     * the particle drawing FPS and to display instanced objects in the scene
     * without the original object. */
    bool has_original = gpencil_has_noninstanced_object(ob);
    cache_elem->is_dup_ob = (has_original) ? ob->base_flag & BASE_FROM_DUPLI : false;
  }
  else {
    cache_elem->is_dup_ob = false;
  }

  cache_elem->scale = mat4_to_scale(ob->obmat);

  /* save FXs */
  cache_elem->pixfactor = cache_elem->gpd->pixfactor;
  cache_elem->shader_fx = ob->shader_fx;

  /* save wire mode (object mode is always primary option) */
  if (ob->dt == OB_WIRE) {
    cache_elem->shading_type[0] = (int)OB_WIRE;
  }
  else {
    if (v3d) {
      cache_elem->shading_type[0] = (int)v3d->shading.type;
    }
  }

  /* shgrp array */
  cache_elem->tot_layers = 0;
  int totgpl = BLI_listbase_count(&cache_elem->gpd->layers);
  if (totgpl > 0) {
    cache_elem->shgrp_array = MEM_callocN(sizeof(tGPencilObjectCache_shgrp) * totgpl, __func__);
  }

  /* calculate zdepth from point of view */
  float zdepth = 0.0;
  if (rv3d) {
    if (rv3d->is_persp) {
      zdepth = ED_view3d_calc_zfac(rv3d, ob->obmat[3], NULL);
    }
    else {
      zdepth = -dot_v3v3(rv3d->viewinv[2], ob->obmat[3]);
    }
  }
  else {
    /* In render mode, rv3d is not available, so use the distance to the camera.
     * The real distance is not important; what matters is the relative distance
     * to the camera plane, in order to sort objects by z-depth.
     */
    float vn[3] = {0.0f, 0.0f, -1.0f}; /* always face down */
    float plane_cam[4];
    struct Object *camera = draw_ctx->scene->camera;
    if (camera) {
      mul_m4_v3(camera->obmat, vn);
      normalize_v3(vn);
      plane_from_point_normal_v3(plane_cam, camera->loc, vn);
      zdepth = dist_squared_to_plane_v3(ob->obmat[3], plane_cam);
    }
  }
  cache_elem->zdepth = zdepth;
  /* increase slots used in cache */
  (*gp_cache_used)++;

  return cache_array;
}
Example #13
static DerivedMesh *arrayModifier_doArray(
        ArrayModifierData *amd,
        Scene *scene, Object *ob, DerivedMesh *dm,
        ModifierApplyFlag flag)
{
	const float eps = 1e-6f;
	const MVert *src_mvert;
	MVert *mv, *mv_prev, *result_dm_verts;

	MEdge *me;
	MLoop *ml;
	MPoly *mp;
	int i, j, c, count;
	float length = amd->length;
	/* offset matrix */
	float offset[4][4];
	float scale[3];
	bool offset_has_scale;
	float current_offset[4][4];
	float final_offset[4][4];
	int *full_doubles_map = NULL;
	int tot_doubles;

	const bool use_merge = (amd->flags & MOD_ARR_MERGE) != 0;
	const bool use_recalc_normals = (dm->dirty & DM_DIRTY_NORMALS) || use_merge;
	const bool use_offset_ob = ((amd->offset_type & MOD_ARR_OFF_OBJ) && amd->offset_ob);

	int start_cap_nverts = 0, start_cap_nedges = 0, start_cap_npolys = 0, start_cap_nloops = 0;
	int end_cap_nverts = 0, end_cap_nedges = 0, end_cap_npolys = 0, end_cap_nloops = 0;
	int result_nverts = 0, result_nedges = 0, result_npolys = 0, result_nloops = 0;
	int chunk_nverts, chunk_nedges, chunk_nloops, chunk_npolys;
	int first_chunk_start, first_chunk_nverts, last_chunk_start, last_chunk_nverts;

	DerivedMesh *result, *start_cap_dm = NULL, *end_cap_dm = NULL;

	int *vgroup_start_cap_remap = NULL;
	int vgroup_start_cap_remap_len = 0;
	int *vgroup_end_cap_remap = NULL;
	int vgroup_end_cap_remap_len = 0;

	chunk_nverts = dm->getNumVerts(dm);
	chunk_nedges = dm->getNumEdges(dm);
	chunk_nloops = dm->getNumLoops(dm);
	chunk_npolys = dm->getNumPolys(dm);

	count = amd->count;

	if (amd->start_cap && amd->start_cap != ob && amd->start_cap->type == OB_MESH) {
		vgroup_start_cap_remap = BKE_object_defgroup_index_map_create(amd->start_cap, ob, &vgroup_start_cap_remap_len);

		start_cap_dm = get_dm_for_modifier(amd->start_cap, flag);
		if (start_cap_dm) {
			start_cap_nverts = start_cap_dm->getNumVerts(start_cap_dm);
			start_cap_nedges = start_cap_dm->getNumEdges(start_cap_dm);
			start_cap_nloops = start_cap_dm->getNumLoops(start_cap_dm);
			start_cap_npolys = start_cap_dm->getNumPolys(start_cap_dm);
		}
	}
	if (amd->end_cap && amd->end_cap != ob && amd->end_cap->type == OB_MESH) {
		vgroup_end_cap_remap = BKE_object_defgroup_index_map_create(amd->end_cap, ob, &vgroup_end_cap_remap_len);

		end_cap_dm = get_dm_for_modifier(amd->end_cap, flag);
		if (end_cap_dm) {
			end_cap_nverts = end_cap_dm->getNumVerts(end_cap_dm);
			end_cap_nedges = end_cap_dm->getNumEdges(end_cap_dm);
			end_cap_nloops = end_cap_dm->getNumLoops(end_cap_dm);
			end_cap_npolys = end_cap_dm->getNumPolys(end_cap_dm);
		}
	}

	/* Build up offset array, accumulating all settings options */

	unit_m4(offset);
	src_mvert = dm->getVertArray(dm);

	if (amd->offset_type & MOD_ARR_OFF_CONST) {
		add_v3_v3(offset[3], amd->offset);
	}

	if (amd->offset_type & MOD_ARR_OFF_RELATIVE) {
		float min[3], max[3];
		const MVert *src_mv;

		INIT_MINMAX(min, max);
		for (src_mv = src_mvert, j = chunk_nverts; j--; src_mv++) {
			minmax_v3v3_v3(min, max, src_mv->co);
		}

		for (j = 3; j--; ) {
			offset[3][j] += amd->scale[j] * (max[j] - min[j]);
		}
	}

	if (use_offset_ob) {
		float obinv[4][4];
		float result_mat[4][4];

		if (ob)
			invert_m4_m4(obinv, ob->obmat);
		else
			unit_m4(obinv);

		mul_m4_series(result_mat, offset,
		              obinv, amd->offset_ob->obmat);
		copy_m4_m4(offset, result_mat);
	}

	/* Check if there is some scaling. If so, we will not translate the mapping */
	mat4_to_size(scale, offset);
	offset_has_scale = !is_one_v3(scale);

	if (amd->fit_type == MOD_ARR_FITCURVE && amd->curve_ob) {
		Curve *cu = amd->curve_ob->data;
		if (cu) {
#ifdef CYCLIC_DEPENDENCY_WORKAROUND
			if (amd->curve_ob->curve_cache == NULL) {
				BKE_displist_make_curveTypes(scene, amd->curve_ob, false);
			}
#endif

			if (amd->curve_ob->curve_cache && amd->curve_ob->curve_cache->path) {
				float scale_fac = mat4_to_scale(amd->curve_ob->obmat);
				length = scale_fac * amd->curve_ob->curve_cache->path->totdist;
			}
		}
	}

	/* calculate the maximum number of copies which will fit within the
	 * prescribed length */
	if (amd->fit_type == MOD_ARR_FITLENGTH || amd->fit_type == MOD_ARR_FITCURVE) {
		float dist = len_v3(offset[3]);

		if (dist > eps) {
			/* this gives length = first copy start to last copy end
			 * add a tiny offset for floating point rounding errors */
			count = (length + eps) / dist + 1;
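			/* e.g. length = 3.0 with a 1.0-unit offset gives count = 4 */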
		}
		else {
			/* if the offset has no translation, just make one copy */
			count = 1;
		}
	}

	if (count < 1)
		count = 1;

	/* The number of verts, edges, loops, polys, before eventually merging doubles */
	result_nverts = chunk_nverts * count + start_cap_nverts + end_cap_nverts;
	result_nedges = chunk_nedges * count + start_cap_nedges + end_cap_nedges;
	result_nloops = chunk_nloops * count + start_cap_nloops + end_cap_nloops;
	result_npolys = chunk_npolys * count + start_cap_npolys + end_cap_npolys;

	/* Initialize a result dm */
	result = CDDM_from_template(dm, result_nverts, result_nedges, 0, result_nloops, result_npolys);
	result_dm_verts = CDDM_get_verts(result);

	if (use_merge) {
		/* Will need full_doubles_map for handling merge */
		full_doubles_map = MEM_malloc_arrayN(result_nverts, sizeof(int), "mod array doubles map");
		copy_vn_i(full_doubles_map, result_nverts, -1);
	}

	/* copy customdata to original geometry */
	DM_copy_vert_data(dm, result, 0, 0, chunk_nverts);
	DM_copy_edge_data(dm, result, 0, 0, chunk_nedges);
	DM_copy_loop_data(dm, result, 0, 0, chunk_nloops);
	DM_copy_poly_data(dm, result, 0, 0, chunk_npolys);

	/* Subsurf for eg won't have mesh data in the custom data arrays.
	 * now add mvert/medge/mpoly layers. */

	if (!CustomData_has_layer(&dm->vertData, CD_MVERT)) {
		dm->copyVertArray(dm, result_dm_verts);
	}
	if (!CustomData_has_layer(&dm->edgeData, CD_MEDGE)) {
		dm->copyEdgeArray(dm, CDDM_get_edges(result));
	}
	if (!CustomData_has_layer(&dm->polyData, CD_MPOLY)) {
		dm->copyLoopArray(dm, CDDM_get_loops(result));
		dm->copyPolyArray(dm, CDDM_get_polys(result));
	}

	/* Remember first chunk, in case of cap merge */
	first_chunk_start = 0;
	first_chunk_nverts = chunk_nverts;

	unit_m4(current_offset);
	for (c = 1; c < count; c++) {
		/* copy customdata to new geometry */
		DM_copy_vert_data(result, result, 0, c * chunk_nverts, chunk_nverts);
		DM_copy_edge_data(result, result, 0, c * chunk_nedges, chunk_nedges);
		DM_copy_loop_data(result, result, 0, c * chunk_nloops, chunk_nloops);
		DM_copy_poly_data(result, result, 0, c * chunk_npolys, chunk_npolys);

		mv_prev = result_dm_verts;
		mv = mv_prev + c * chunk_nverts;

		/* recalculate cumulative offset here */
		mul_m4_m4m4(current_offset, current_offset, offset);

		/* apply offset to all new verts */
		for (i = 0; i < chunk_nverts; i++, mv++, mv_prev++) {
			mul_m4_v3(current_offset, mv->co);

			/* We have to correct normals too, if we do not tag them as dirty! */
			if (!use_recalc_normals) {
				float no[3];
				normal_short_to_float_v3(no, mv->no);
				mul_mat3_m4_v3(current_offset, no);
				normalize_v3(no);
				normal_float_to_short_v3(mv->no, no);
			}
		}

		/* adjust edge vertex indices */
		me = CDDM_get_edges(result) + c * chunk_nedges;
		for (i = 0; i < chunk_nedges; i++, me++) {
			me->v1 += c * chunk_nverts;
			me->v2 += c * chunk_nverts;
		}

		mp = CDDM_get_polys(result) + c * chunk_npolys;
		for (i = 0; i < chunk_npolys; i++, mp++) {
			mp->loopstart += c * chunk_nloops;
		}

		/* adjust loop vertex and edge indices */
		ml = CDDM_get_loops(result) + c * chunk_nloops;
		for (i = 0; i < chunk_nloops; i++, ml++) {
			ml->v += c * chunk_nverts;
			ml->e += c * chunk_nedges;
		}

		/* Handle merge between chunk n and n-1 */
		if (use_merge && (c >= 1)) {
			if (!offset_has_scale && (c >= 2)) {
				/* Mapping chunk 3 to chunk 2 is a translation of mapping 2 to 1
				 * ... that is except if scaling makes the distance grow */
				int k;
				int this_chunk_index = c * chunk_nverts;
				int prev_chunk_index = (c - 1) * chunk_nverts;
				for (k = 0; k < chunk_nverts; k++, this_chunk_index++, prev_chunk_index++) {
					int target = full_doubles_map[prev_chunk_index];
					if (target != -1) {
						target += chunk_nverts; /* translate mapping */
						while (target != -1 && !ELEM(full_doubles_map[target], -1, target)) {
							/* If target is already mapped, we only follow that mapping if final target remains
							 * close enough from current vert (otherwise no mapping at all). */
							if (compare_len_v3v3(result_dm_verts[this_chunk_index].co,
							                     result_dm_verts[full_doubles_map[target]].co,
							                     amd->merge_dist))
							{
								target = full_doubles_map[target];
							}
							else {
								target = -1;
							}
						}
					}
					full_doubles_map[this_chunk_index] = target;
				}
			}
			else {
				dm_mvert_map_doubles(
				        full_doubles_map,
				        result_dm_verts,
				        (c - 1) * chunk_nverts,
				        chunk_nverts,
				        c * chunk_nverts,
				        chunk_nverts,
				        amd->merge_dist);
			}
		}
	}

	/* handle UVs */
	if (chunk_nloops > 0 && is_zero_v2(amd->uv_offset) == false) {
		const int totuv = CustomData_number_of_layers(&result->loopData, CD_MLOOPUV);
		for (i = 0; i < totuv; i++) {
			MLoopUV *dmloopuv = CustomData_get_layer_n(&result->loopData, CD_MLOOPUV, i);
			dmloopuv += chunk_nloops;
			for (c = 1; c < count; c++) {
				const float uv_offset[2] = {
					amd->uv_offset[0] * (float)c,
					amd->uv_offset[1] * (float)c,
				};
				int l_index = chunk_nloops;
				for (; l_index-- != 0; dmloopuv++) {
					dmloopuv->uv[0] += uv_offset[0];
					dmloopuv->uv[1] += uv_offset[1];
				}
			}
		}
	}

	last_chunk_start = (count - 1) * chunk_nverts;
	last_chunk_nverts = chunk_nverts;

	copy_m4_m4(final_offset, current_offset);

	if (use_merge && (amd->flags & MOD_ARR_MERGEFINAL) && (count > 1)) {
		/* Merge first and last copies */
		dm_mvert_map_doubles(
		        full_doubles_map,
		        result_dm_verts,
		        last_chunk_start,
		        last_chunk_nverts,
		        first_chunk_start,
		        first_chunk_nverts,
		        amd->merge_dist);
	}

	/* start capping */
	if (start_cap_dm) {
		float start_offset[4][4];
		int start_cap_start = result_nverts - start_cap_nverts - end_cap_nverts;
		invert_m4_m4(start_offset, offset);
		dm_merge_transform(
		        result, start_cap_dm, start_offset,
		        result_nverts - start_cap_nverts - end_cap_nverts,
		        result_nedges - start_cap_nedges - end_cap_nedges,
		        result_nloops - start_cap_nloops - end_cap_nloops,
		        result_npolys - start_cap_npolys - end_cap_npolys,
		        start_cap_nverts, start_cap_nedges, start_cap_nloops, start_cap_npolys,
		        vgroup_start_cap_remap, vgroup_start_cap_remap_len);
		/* Identify doubles with first chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        first_chunk_start,
			        first_chunk_nverts,
			        start_cap_start,
			        start_cap_nverts,
			        amd->merge_dist);
		}
	}

	if (end_cap_dm) {
		float end_offset[4][4];
		int end_cap_start = result_nverts - end_cap_nverts;
		mul_m4_m4m4(end_offset, current_offset, offset);
		dm_merge_transform(
		        result, end_cap_dm, end_offset,
		        result_nverts - end_cap_nverts,
		        result_nedges - end_cap_nedges,
		        result_nloops - end_cap_nloops,
		        result_npolys - end_cap_npolys,
		        end_cap_nverts, end_cap_nedges, end_cap_nloops, end_cap_npolys,
		        vgroup_end_cap_remap, vgroup_end_cap_remap_len);
		/* Identify doubles with last chunk */
		if (use_merge) {
			dm_mvert_map_doubles(
			        full_doubles_map,
			        result_dm_verts,
			        last_chunk_start,
			        last_chunk_nverts,
			        end_cap_start,
			        end_cap_nverts,
			        amd->merge_dist);
		}
	}
	/* done capping */

	/* Handle merging */
	tot_doubles = 0;
	if (use_merge) {
		for (i = 0; i < result_nverts; i++) {
			int new_i = full_doubles_map[i];
			if (new_i != -1) {
				/* We have to follow chains of doubles (merge start/end especially is likely to create some),
				 * those are not supported at all by CDDM_merge_verts! */
				while (!ELEM(full_doubles_map[new_i], -1, new_i)) {
					new_i = full_doubles_map[new_i];
				}
				if (i == new_i) {
					full_doubles_map[i] = -1;
				}
				else {
					full_doubles_map[i] = new_i;
					tot_doubles++;
				}
			}
		}
		if (tot_doubles > 0) {
			result = CDDM_merge_verts(result, full_doubles_map, tot_doubles, CDDM_MERGE_VERTS_DUMP_IF_EQUAL);
		}
		MEM_freeN(full_doubles_map);
	}

	/* In case org dm has dirty normals, or we made some merging, mark normals as dirty in new dm!
	 * TODO: we may need to set other dirty flags as well?
	 */
	if (use_recalc_normals) {
		result->dirty |= DM_DIRTY_NORMALS;
	}

	if (vgroup_start_cap_remap) {
		MEM_freeN(vgroup_start_cap_remap);
	}
	if (vgroup_end_cap_remap) {
		MEM_freeN(vgroup_end_cap_remap);
	}

	return result;
}