static DerivedMesh *arrayModifier_doArray(
        ArrayModifierData *amd, Scene *scene, Object *ob, DerivedMesh *dm, ModifierApplyFlag flag)
{
    const float eps = 1e-6f;
    const MVert *src_mvert;
    MVert *mv, *mv_prev, *result_dm_verts;

    MEdge *me;
    MLoop *ml;
    MPoly *mp;
    int i, j, c, count;
    float length = amd->length;
    /* offset matrix */
    float offset[4][4];
    float scale[3];
    bool offset_has_scale;
    float current_offset[4][4];
    float final_offset[4][4];
    int *full_doubles_map = NULL;
    int tot_doubles;

    const bool use_merge = (amd->flags & MOD_ARR_MERGE) != 0;
    const bool use_recalc_normals = (dm->dirty & DM_DIRTY_NORMALS) || use_merge;
    const bool use_offset_ob = ((amd->offset_type & MOD_ARR_OFF_OBJ) && amd->offset_ob);
    /* allow pole vertices to be used by many faces */
    const bool with_follow = use_offset_ob;

    int start_cap_nverts = 0, start_cap_nedges = 0, start_cap_npolys = 0, start_cap_nloops = 0;
    int end_cap_nverts = 0, end_cap_nedges = 0, end_cap_npolys = 0, end_cap_nloops = 0;
    int result_nverts = 0, result_nedges = 0, result_npolys = 0, result_nloops = 0;
    int chunk_nverts, chunk_nedges, chunk_nloops, chunk_npolys;
    int first_chunk_start, first_chunk_nverts, last_chunk_start, last_chunk_nverts;

    DerivedMesh *result, *start_cap_dm = NULL, *end_cap_dm = NULL;

    chunk_nverts = dm->getNumVerts(dm);
    chunk_nedges = dm->getNumEdges(dm);
    chunk_nloops = dm->getNumLoops(dm);
    chunk_npolys = dm->getNumPolys(dm);

    count = amd->count;

    if (amd->start_cap && amd->start_cap != ob && amd->start_cap->type == OB_MESH) {
        start_cap_dm = get_dm_for_modifier(amd->start_cap, flag);
        if (start_cap_dm) {
            start_cap_nverts = start_cap_dm->getNumVerts(start_cap_dm);
            start_cap_nedges = start_cap_dm->getNumEdges(start_cap_dm);
            start_cap_nloops = start_cap_dm->getNumLoops(start_cap_dm);
            start_cap_npolys = start_cap_dm->getNumPolys(start_cap_dm);
        }
    }
    if (amd->end_cap && amd->end_cap != ob && amd->end_cap->type == OB_MESH) {
        end_cap_dm = get_dm_for_modifier(amd->end_cap, flag);
        if (end_cap_dm) {
            end_cap_nverts = end_cap_dm->getNumVerts(end_cap_dm);
            end_cap_nedges = end_cap_dm->getNumEdges(end_cap_dm);
            end_cap_nloops = end_cap_dm->getNumLoops(end_cap_dm);
            end_cap_npolys = end_cap_dm->getNumPolys(end_cap_dm);
        }
    }

    /* Build up offset array, accumulating all settings options. */
    unit_m4(offset);
    src_mvert = dm->getVertArray(dm);

    if (amd->offset_type & MOD_ARR_OFF_CONST)
        add_v3_v3v3(offset[3], offset[3], amd->offset);

    if (amd->offset_type & MOD_ARR_OFF_RELATIVE) {
        for (j = 0; j < 3; j++)
            offset[3][j] += amd->scale[j] * vertarray_size(src_mvert, chunk_nverts, j);
    }

    if (use_offset_ob) {
        float obinv[4][4];
        float result_mat[4][4];

        if (ob)
            invert_m4_m4(obinv, ob->obmat);
        else
            unit_m4(obinv);

        mul_m4_series(result_mat, offset, obinv, amd->offset_ob->obmat);
        copy_m4_m4(offset, result_mat);
    }

    /* Check if there is some scaling; if so, we will not translate the doubles mapping. */
    mat4_to_size(scale, offset);
    offset_has_scale = !is_one_v3(scale);

    if (amd->fit_type == MOD_ARR_FITCURVE && amd->curve_ob) {
        Curve *cu = amd->curve_ob->data;
        if (cu) {
#ifdef CYCLIC_DEPENDENCY_WORKAROUND
            if (amd->curve_ob->curve_cache == NULL) {
                BKE_displist_make_curveTypes(scene, amd->curve_ob, false);
            }
#endif
            if (amd->curve_ob->curve_cache && amd->curve_ob->curve_cache->path) {
                float scale_fac = mat4_to_scale(amd->curve_ob->obmat);
                length = scale_fac * amd->curve_ob->curve_cache->path->totdist;
            }
        }
    }

    /* Calculate the maximum number of copies which will fit within the
     * prescribed length. */
    if (amd->fit_type == MOD_ARR_FITLENGTH || amd->fit_type == MOD_ARR_FITCURVE) {
        float dist = len_v3(offset[3]);

        if (dist > eps) {
            /* this gives length = first copy start to last copy end
             * add a tiny offset for floating point rounding errors */
            count = (length + eps) / dist;
        }
        else {
            /* if the offset has no translation, just make one copy */
            count = 1;
        }
    }

    if (count < 1)
        count = 1;

    /* The number of verts, edges, loops, polys, before possibly merging doubles. */
    result_nverts = chunk_nverts * count + start_cap_nverts + end_cap_nverts;
    result_nedges = chunk_nedges * count + start_cap_nedges + end_cap_nedges;
    result_nloops = chunk_nloops * count + start_cap_nloops + end_cap_nloops;
    result_npolys = chunk_npolys * count + start_cap_npolys + end_cap_npolys;

    /* Initialize a result dm */
    result = CDDM_from_template(dm, result_nverts, result_nedges, 0, result_nloops, result_npolys);
    result_dm_verts = CDDM_get_verts(result);

    if (use_merge) {
        /* Will need full_doubles_map for handling merge */
        full_doubles_map = MEM_mallocN(sizeof(int) * result_nverts, "mod array doubles map");
        fill_vn_i(full_doubles_map, result_nverts, -1);
    }

    /* copy customdata to original geometry */
    DM_copy_vert_data(dm, result, 0, 0, chunk_nverts);
    DM_copy_edge_data(dm, result, 0, 0, chunk_nedges);
    DM_copy_loop_data(dm, result, 0, 0, chunk_nloops);
    DM_copy_poly_data(dm, result, 0, 0, chunk_npolys);

    /* Subsurf for eg won't have mesh data in the custom data arrays,
     * now add mvert/medge/mpoly layers. */
    if (!CustomData_has_layer(&dm->vertData, CD_MVERT)) {
        dm->copyVertArray(dm, result_dm_verts);
    }
    if (!CustomData_has_layer(&dm->edgeData, CD_MEDGE)) {
        dm->copyEdgeArray(dm, CDDM_get_edges(result));
    }
    if (!CustomData_has_layer(&dm->polyData, CD_MPOLY)) {
        dm->copyLoopArray(dm, CDDM_get_loops(result));
        dm->copyPolyArray(dm, CDDM_get_polys(result));
    }

    /* Remember first chunk, in case of cap merge */
    first_chunk_start = 0;
    first_chunk_nverts = chunk_nverts;

    unit_m4(current_offset);
    for (c = 1; c < count; c++) {
        /* copy customdata to new geometry */
        DM_copy_vert_data(result, result, 0, c * chunk_nverts, chunk_nverts);
        DM_copy_edge_data(result, result, 0, c * chunk_nedges, chunk_nedges);
        DM_copy_loop_data(result, result, 0, c * chunk_nloops, chunk_nloops);
        DM_copy_poly_data(result, result, 0, c * chunk_npolys, chunk_npolys);

        mv_prev = result_dm_verts;
        mv = mv_prev + c * chunk_nverts;

        /* recalculate cumulative offset here */
        mul_m4_m4m4(current_offset, current_offset, offset);

        /* apply offset to all new verts */
        for (i = 0; i < chunk_nverts; i++, mv++, mv_prev++) {
            mul_m4_v3(current_offset, mv->co);

            /* We have to correct normals too, if we do not tag them as dirty! */
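            /* Note: rotating the normal by the offset's 3x3 part and then
             * renormalizing (below) is exact for rotation/translation offsets
             * and harmless under uniform scale; a non-uniform scale would
             * strictly require the inverse-transpose of that 3x3 block. */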
            if (!use_recalc_normals) {
                float no[3];
                normal_short_to_float_v3(no, mv->no);
                mul_mat3_m4_v3(current_offset, no);
                normalize_v3(no);
                normal_float_to_short_v3(mv->no, no);
            }
        }

        /* adjust edge vertex indices */
        me = CDDM_get_edges(result) + c * chunk_nedges;
        for (i = 0; i < chunk_nedges; i++, me++) {
            me->v1 += c * chunk_nverts;
            me->v2 += c * chunk_nverts;
        }

        mp = CDDM_get_polys(result) + c * chunk_npolys;
        for (i = 0; i < chunk_npolys; i++, mp++) {
            mp->loopstart += c * chunk_nloops;
        }

        /* adjust loop vertex and edge indices */
        ml = CDDM_get_loops(result) + c * chunk_nloops;
        for (i = 0; i < chunk_nloops; i++, ml++) {
            ml->v += c * chunk_nverts;
            ml->e += c * chunk_nedges;
        }

        /* Handle merge between chunk n and n-1 */
        if (use_merge && (c >= 1)) {
            if (!offset_has_scale && (c >= 2)) {
                /* Mapping chunk 3 to chunk 2 is a translation of mapping 2 to 1
                 * ... that is, except if scaling makes the distance grow. */
                int k;
                int this_chunk_index = c * chunk_nverts;
                int prev_chunk_index = (c - 1) * chunk_nverts;
                for (k = 0; k < chunk_nverts; k++, this_chunk_index++, prev_chunk_index++) {
                    int target = full_doubles_map[prev_chunk_index];
                    if (target != -1) {
                        target += chunk_nverts;  /* translate mapping */
                        if (full_doubles_map[target] != -1) {
                            if (with_follow) {
                                target = full_doubles_map[target];
                            }
                            else {
                                /* The rule here is to not follow the mapping to chunk N-2,
                                 * which could be too far away; so if the target vertex was
                                 * itself mapped, then this vertex is not mapped. */
                                target = -1;
                            }
                        }
                    }
                    full_doubles_map[this_chunk_index] = target;
                }
            }
            else {
                dm_mvert_map_doubles(
                        full_doubles_map,
                        result_dm_verts,
                        (c - 1) * chunk_nverts,
                        chunk_nverts,
                        c * chunk_nverts,
                        chunk_nverts,
                        amd->merge_dist,
                        with_follow);
            }
        }
    }

    last_chunk_start = (count - 1) * chunk_nverts;
    last_chunk_nverts = chunk_nverts;

    copy_m4_m4(final_offset, current_offset);

    if (use_merge && (amd->flags & MOD_ARR_MERGEFINAL) && (count > 1)) {
        /* Merge first and last copies */
        dm_mvert_map_doubles(
                full_doubles_map,
                result_dm_verts,
                last_chunk_start,
                last_chunk_nverts,
                first_chunk_start,
                first_chunk_nverts,
                amd->merge_dist,
                with_follow);
    }

    /* start capping */
    if (start_cap_dm) {
        float start_offset[4][4];
        int start_cap_start = result_nverts - start_cap_nverts - end_cap_nverts;
        invert_m4_m4(start_offset, offset);
        dm_merge_transform(
                result, start_cap_dm, start_offset,
                result_nverts - start_cap_nverts - end_cap_nverts,
                result_nedges - start_cap_nedges - end_cap_nedges,
                result_nloops - start_cap_nloops - end_cap_nloops,
                result_npolys - start_cap_npolys - end_cap_npolys,
                start_cap_nverts, start_cap_nedges, start_cap_nloops, start_cap_npolys);
        /* Identify doubles with first chunk */
        if (use_merge) {
            dm_mvert_map_doubles(
                    full_doubles_map,
                    result_dm_verts,
                    first_chunk_start,
                    first_chunk_nverts,
                    start_cap_start,
                    start_cap_nverts,
                    amd->merge_dist,
                    false);
        }
    }

    if (end_cap_dm) {
        float end_offset[4][4];
        int end_cap_start = result_nverts - end_cap_nverts;
        mul_m4_m4m4(end_offset, current_offset, offset);
        dm_merge_transform(
                result, end_cap_dm, end_offset,
                result_nverts - end_cap_nverts,
                result_nedges - end_cap_nedges,
                result_nloops - end_cap_nloops,
                result_npolys - end_cap_npolys,
                end_cap_nverts, end_cap_nedges, end_cap_nloops, end_cap_npolys);
        /* Identify doubles with last chunk */
        if (use_merge) {
            dm_mvert_map_doubles(
                    full_doubles_map,
                    result_dm_verts,
                    last_chunk_start,
                    last_chunk_nverts,
                    end_cap_start,
                    end_cap_nverts,
                    amd->merge_dist,
                    false);
        }
    }
    /* done capping */

    /* Handle merging */
    tot_doubles = 0;
    if (use_merge) {
        for (i = 0; i < result_nverts; i++) {
            if (full_doubles_map[i] != -1) {
                if (i == full_doubles_map[i]) {
                    full_doubles_map[i] = -1;
                }
                else {
                    tot_doubles++;
                }
            }
        }
        if (tot_doubles > 0) {
            result = CDDM_merge_verts(result, full_doubles_map, tot_doubles, CDDM_MERGE_VERTS_DUMP_IF_EQUAL);
        }
        MEM_freeN(full_doubles_map);
    }

    /* In case the org dm has dirty normals, or we made some merging,
     * mark normals as dirty in the new dm!
     * TODO: we may need to set other dirty flags as well?
     */
    if (use_recalc_normals) {
        result->dirty |= DM_DIRTY_NORMALS;
    }

    return result;
}
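/*
 * A minimal standalone sketch (illustrative only, not part of the modifier
 * code, kept under #if 0 so it never builds; only libc is assumed) of the
 * fit-length count computation above: the copy count is the prescribed
 * length divided by the per-copy translation distance, with a tiny epsilon
 * absorbing floating-point rounding. Note that the later revision of this
 * function (below) computes count = (length + eps) / dist + 1 instead.
 */
#if 0
#include <math.h>
#include <stdio.h>

int main(void)
{
    const float eps = 1e-6f;
    const float offset[3] = {0.0f, 2.0f, 0.0f};  /* per-copy translation */
    const float length = 7.0f;                   /* prescribed fit length */

    const float dist = sqrtf(offset[0] * offset[0] +
                             offset[1] * offset[1] +
                             offset[2] * offset[2]);
    int count = (dist > eps) ? (int)((length + eps) / dist) : 1;

    if (count < 1) {
        count = 1;
    }

    printf("count = %d\n", count);  /* 3: three copies of stride 2 fit in 7 */
    return 0;
}
#endif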
static DerivedMesh *doMirrorOnAxis(MirrorModifierData *mmd, Object *ob, DerivedMesh *dm, int axis)
{
    const float tolerance_sq = mmd->tolerance * mmd->tolerance;
    const int do_vtargetmap = !(mmd->flag & MOD_MIR_NO_MERGE);
    int is_vtargetmap = FALSE; /* true when it should be used */

    DerivedMesh *result;
    const int maxVerts = dm->getNumVerts(dm);
    const int maxEdges = dm->getNumEdges(dm);
    const int maxLoops = dm->getNumLoops(dm);
    const int maxPolys = dm->getNumPolys(dm);
    MVert *mv, *mv_prev;
    MEdge *me;
    MLoop *ml;
    MPoly *mp;
    float mtx[4][4];
    int i, j;
    int a, totshape;
    int *vtargetmap = NULL, *vtmap_a = NULL, *vtmap_b = NULL;

    /* mtx is the mirror transformation */
    unit_m4(mtx);
    mtx[axis][axis] = -1.0f;

    if (mmd->mirror_ob) {
        float tmp[4][4];
        float itmp[4][4];

        /* tmp is a transform from coords relative to the object's own origin,
         * to coords relative to the mirror object origin */
        invert_m4_m4(tmp, mmd->mirror_ob->obmat);
        mult_m4_m4m4(tmp, tmp, ob->obmat);

        /* itmp is the reverse transform back to origin-relative coordinates */
        invert_m4_m4(itmp, tmp);

        /* combine matrices to get a single matrix that translates coordinates into
         * mirror-object-relative space, does the mirror, and translates back to
         * origin-relative space */
        mult_m4_m4m4(mtx, mtx, tmp);
        mult_m4_m4m4(mtx, itmp, mtx);
    }

    result = CDDM_from_template(dm, maxVerts * 2, maxEdges * 2, 0, maxLoops * 2, maxPolys * 2);

    /* copy customdata to original geometry */
    DM_copy_vert_data(dm, result, 0, 0, maxVerts);
    DM_copy_edge_data(dm, result, 0, 0, maxEdges);
    DM_copy_loop_data(dm, result, 0, 0, maxLoops);
    DM_copy_poly_data(dm, result, 0, 0, maxPolys);

    /* Subsurf for eg won't have mesh data in the custom data arrays,
     * now add mvert/medge/mpoly layers. */
    if (!CustomData_has_layer(&dm->vertData, CD_MVERT)) {
        dm->copyVertArray(dm, CDDM_get_verts(result));
    }
    if (!CustomData_has_layer(&dm->edgeData, CD_MEDGE)) {
        dm->copyEdgeArray(dm, CDDM_get_edges(result));
    }
    if (!CustomData_has_layer(&dm->polyData, CD_MPOLY)) {
        dm->copyLoopArray(dm, CDDM_get_loops(result));
        dm->copyPolyArray(dm, CDDM_get_polys(result));
    }

    /* copy customdata to new geometry,
     * copy from itself because this data may have been created in the checks above */
    DM_copy_vert_data(result, result, 0, maxVerts, maxVerts);
    DM_copy_edge_data(result, result, 0, maxEdges, maxEdges);
    /* loops are copied later */
    DM_copy_poly_data(result, result, 0, maxPolys, maxPolys);

    if (do_vtargetmap) {
        /* second half is filled with -1 */
        vtargetmap = MEM_mallocN(sizeof(int) * maxVerts * 2, "MOD_mirror tarmap");
        vtmap_a = vtargetmap;
        vtmap_b = vtargetmap + maxVerts;
    }

    /* mirror vertex coordinates */
    mv_prev = CDDM_get_verts(result);
    mv = mv_prev + maxVerts;
    for (i = 0; i < maxVerts; i++, mv++, mv_prev++) {
        mul_m4_v3(mtx, mv->co);

        if (do_vtargetmap) {
            /* compare location of the original and mirrored vertex, to see if they
             * should be mapped for merging */
            if (UNLIKELY(len_squared_v3v3(mv_prev->co, mv->co) < tolerance_sq)) {
                *vtmap_a = maxVerts + i;
                is_vtargetmap = TRUE;
            }
            else {
                *vtmap_a = -1;
            }
            *vtmap_b = -1;  /* fill here to avoid 2x loops */

            vtmap_a++;
            vtmap_b++;
        }
    }

    /* handle shape keys */
    totshape = CustomData_number_of_layers(&result->vertData, CD_SHAPEKEY);
    for (a = 0; a < totshape; a++) {
        float (*cos)[3] = CustomData_get_layer_n(&result->vertData, CD_SHAPEKEY, a);
        for (i = maxVerts; i < result->numVertData; i++) {
            mul_m4_v3(mtx, cos[i]);
        }
    }

    /* adjust mirrored edge vertex indices */
    me = CDDM_get_edges(result) + maxEdges;
    for (i = 0; i < maxEdges; i++, me++) {
        me->v1 += maxVerts;
        me->v2 += maxVerts;
    }

    /* adjust mirrored poly loopstart indices, and reverse loop order (normals) */
    mp = CDDM_get_polys(result) + maxPolys;
    ml = CDDM_get_loops(result);
    for (i = 0; i < maxPolys; i++, mp++) {
        MLoop *ml2;
        int e;

        /* reverse the loop, but we keep the first vertex in the face the same,
         * to ensure that quads are split the same way as on the other side */
        DM_copy_loop_data(result, result, mp->loopstart, mp->loopstart + maxLoops, 1);
        for (j = 1; j < mp->totloop; j++)
            DM_copy_loop_data(result, result, mp->loopstart + j, mp->loopstart + maxLoops + mp->totloop - j, 1);

        ml2 = ml + mp->loopstart + maxLoops;
        e = ml2[0].e;
        for (j = 0; j < mp->totloop - 1; j++) {
            ml2[j].e = ml2[j + 1].e;
        }
        ml2[mp->totloop - 1].e = e;

        mp->loopstart += maxLoops;
    }

    /* adjust mirrored loop vertex and edge indices */
    ml = CDDM_get_loops(result) + maxLoops;
    for (i = 0; i < maxLoops; i++, ml++) {
        ml->v += maxVerts;
        ml->e += maxEdges;
    }

    /* handle uvs,
     * let tessface recalc handle updating the MTFace data */
    if (mmd->flag & (MOD_MIR_MIRROR_U | MOD_MIR_MIRROR_V)) {
        const int do_mirr_u = (mmd->flag & MOD_MIR_MIRROR_U) != 0;
        const int do_mirr_v = (mmd->flag & MOD_MIR_MIRROR_V) != 0;

        const int totuv = CustomData_number_of_layers(&result->loopData, CD_MLOOPUV);

        for (a = 0; a < totuv; a++) {
            MLoopUV *dmloopuv = CustomData_get_layer_n(&result->loopData, CD_MLOOPUV, a);
            int j = maxLoops;
            dmloopuv += j; /* second set of loops only */
            for (; j-- > 0; dmloopuv++) {
                if (do_mirr_u) dmloopuv->uv[0] = 1.0f - dmloopuv->uv[0];
                if (do_mirr_v) dmloopuv->uv[1] = 1.0f - dmloopuv->uv[1];
            }
        }
    }

    /* handle vgroup stuff */
    if ((mmd->flag & MOD_MIR_VGROUP) && CustomData_has_layer(&result->vertData, CD_MDEFORMVERT)) {
        MDeformVert *dvert = (MDeformVert *)CustomData_get_layer(&result->vertData, CD_MDEFORMVERT) + maxVerts;
        int *flip_map = NULL, flip_map_len = 0;

        flip_map = defgroup_flip_map(ob, &flip_map_len, FALSE);

        if (flip_map) {
            for (i = 0; i < maxVerts; dvert++, i++) {
                /* merged vertices get both groups, others get flipped */
                if (do_vtargetmap && (vtargetmap[i] != -1))
                    defvert_flip_merged(dvert, flip_map, flip_map_len);
                else
                    defvert_flip(dvert, flip_map, flip_map_len);
            }

            MEM_freeN(flip_map);
        }
    }

    if (do_vtargetmap) {
        /* slow - so only call if one or more merge verts are found,
         * users may leave this on and not realize there is nothing to merge - campbell */
        if (is_vtargetmap) {
            result = CDDM_merge_verts(result, vtargetmap);
        }
        MEM_freeN(vtargetmap);
    }

    return result;
}
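/*
 * A minimal standalone sketch (illustrative only, not part of the modifier
 * code, kept under #if 0 so it never builds) of what the mirror matrix above
 * does in the simple case: with no mirror object, mtx is the identity with
 * -1 at [axis][axis], so applying it just negates one coordinate. With a
 * mirror object, the same negation happens in mirror-object-relative space,
 * via the composition mtx = itmp * mirror * tmp built above.
 */
#if 0
#include <stdio.h>

int main(void)
{
    const int axis = 0;                 /* X axis, as in mtx[0][0] = -1.0f */
    float co[3] = {1.5f, 2.0f, -3.0f};  /* a vertex coordinate */

    co[axis] = -co[axis];  /* what mul_m4_v3(mtx, co) reduces to here */

    printf("mirrored: (%.1f, %.1f, %.1f)\n", co[0], co[1], co[2]);
    return 0;
}
#endif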
static DerivedMesh *arrayModifier_doArray(
        ArrayModifierData *amd, Scene *scene, Object *ob, DerivedMesh *dm, ModifierApplyFlag flag)
{
    const float eps = 1e-6f;
    const MVert *src_mvert;
    MVert *mv, *mv_prev, *result_dm_verts;

    MEdge *me;
    MLoop *ml;
    MPoly *mp;
    int i, j, c, count;
    float length = amd->length;
    /* offset matrix */
    float offset[4][4];
    float scale[3];
    bool offset_has_scale;
    float current_offset[4][4];
    float final_offset[4][4];
    int *full_doubles_map = NULL;
    int tot_doubles;

    const bool use_merge = (amd->flags & MOD_ARR_MERGE) != 0;
    const bool use_recalc_normals = (dm->dirty & DM_DIRTY_NORMALS) || use_merge;
    const bool use_offset_ob = ((amd->offset_type & MOD_ARR_OFF_OBJ) && amd->offset_ob);

    int start_cap_nverts = 0, start_cap_nedges = 0, start_cap_npolys = 0, start_cap_nloops = 0;
    int end_cap_nverts = 0, end_cap_nedges = 0, end_cap_npolys = 0, end_cap_nloops = 0;
    int result_nverts = 0, result_nedges = 0, result_npolys = 0, result_nloops = 0;
    int chunk_nverts, chunk_nedges, chunk_nloops, chunk_npolys;
    int first_chunk_start, first_chunk_nverts, last_chunk_start, last_chunk_nverts;

    DerivedMesh *result, *start_cap_dm = NULL, *end_cap_dm = NULL;

    int *vgroup_start_cap_remap = NULL;
    int vgroup_start_cap_remap_len = 0;
    int *vgroup_end_cap_remap = NULL;
    int vgroup_end_cap_remap_len = 0;

    chunk_nverts = dm->getNumVerts(dm);
    chunk_nedges = dm->getNumEdges(dm);
    chunk_nloops = dm->getNumLoops(dm);
    chunk_npolys = dm->getNumPolys(dm);

    count = amd->count;

    if (amd->start_cap && amd->start_cap != ob && amd->start_cap->type == OB_MESH) {
        vgroup_start_cap_remap = BKE_object_defgroup_index_map_create(
                amd->start_cap, ob, &vgroup_start_cap_remap_len);

        start_cap_dm = get_dm_for_modifier(amd->start_cap, flag);
        if (start_cap_dm) {
            start_cap_nverts = start_cap_dm->getNumVerts(start_cap_dm);
            start_cap_nedges = start_cap_dm->getNumEdges(start_cap_dm);
            start_cap_nloops = start_cap_dm->getNumLoops(start_cap_dm);
            start_cap_npolys = start_cap_dm->getNumPolys(start_cap_dm);
        }
    }
    if (amd->end_cap && amd->end_cap != ob && amd->end_cap->type == OB_MESH) {
        vgroup_end_cap_remap = BKE_object_defgroup_index_map_create(
                amd->end_cap, ob, &vgroup_end_cap_remap_len);

        end_cap_dm = get_dm_for_modifier(amd->end_cap, flag);
        if (end_cap_dm) {
            end_cap_nverts = end_cap_dm->getNumVerts(end_cap_dm);
            end_cap_nedges = end_cap_dm->getNumEdges(end_cap_dm);
            end_cap_nloops = end_cap_dm->getNumLoops(end_cap_dm);
            end_cap_npolys = end_cap_dm->getNumPolys(end_cap_dm);
        }
    }

    /* Build up offset array, accumulating all settings options. */
    unit_m4(offset);
    src_mvert = dm->getVertArray(dm);

    if (amd->offset_type & MOD_ARR_OFF_CONST) {
        add_v3_v3(offset[3], amd->offset);
    }

    if (amd->offset_type & MOD_ARR_OFF_RELATIVE) {
        float min[3], max[3];
        const MVert *src_mv;

        INIT_MINMAX(min, max);
        for (src_mv = src_mvert, j = chunk_nverts; j--; src_mv++) {
            minmax_v3v3_v3(min, max, src_mv->co);
        }

        for (j = 3; j--; ) {
            offset[3][j] += amd->scale[j] * (max[j] - min[j]);
        }
    }

    if (use_offset_ob) {
        float obinv[4][4];
        float result_mat[4][4];

        if (ob)
            invert_m4_m4(obinv, ob->obmat);
        else
            unit_m4(obinv);

        mul_m4_series(result_mat, offset, obinv, amd->offset_ob->obmat);
        copy_m4_m4(offset, result_mat);
    }

    /* Check if there is some scaling; if so, we will not translate the doubles mapping. */
    mat4_to_size(scale, offset);
    offset_has_scale = !is_one_v3(scale);

    if (amd->fit_type == MOD_ARR_FITCURVE && amd->curve_ob) {
        Curve *cu = amd->curve_ob->data;
        if (cu) {
#ifdef CYCLIC_DEPENDENCY_WORKAROUND
            if (amd->curve_ob->curve_cache == NULL) {
                BKE_displist_make_curveTypes(scene, amd->curve_ob, false);
            }
#endif
            if (amd->curve_ob->curve_cache && amd->curve_ob->curve_cache->path) {
                float scale_fac = mat4_to_scale(amd->curve_ob->obmat);
                length = scale_fac * amd->curve_ob->curve_cache->path->totdist;
            }
        }
    }

    /* Calculate the maximum number of copies which will fit within the
     * prescribed length. */
    if (amd->fit_type == MOD_ARR_FITLENGTH || amd->fit_type == MOD_ARR_FITCURVE) {
        float dist = len_v3(offset[3]);

        if (dist > eps) {
            /* this gives length = first copy start to last copy end
             * add a tiny offset for floating point rounding errors */
            count = (length + eps) / dist + 1;
        }
        else {
            /* if the offset has no translation, just make one copy */
            count = 1;
        }
    }

    if (count < 1)
        count = 1;

    /* The number of verts, edges, loops, polys, before possibly merging doubles. */
    result_nverts = chunk_nverts * count + start_cap_nverts + end_cap_nverts;
    result_nedges = chunk_nedges * count + start_cap_nedges + end_cap_nedges;
    result_nloops = chunk_nloops * count + start_cap_nloops + end_cap_nloops;
    result_npolys = chunk_npolys * count + start_cap_npolys + end_cap_npolys;

    /* Initialize a result dm */
    result = CDDM_from_template(dm, result_nverts, result_nedges, 0, result_nloops, result_npolys);
    result_dm_verts = CDDM_get_verts(result);

    if (use_merge) {
        /* Will need full_doubles_map for handling merge */
        full_doubles_map = MEM_malloc_arrayN(result_nverts, sizeof(int), "mod array doubles map");
        copy_vn_i(full_doubles_map, result_nverts, -1);
    }

    /* copy customdata to original geometry */
    DM_copy_vert_data(dm, result, 0, 0, chunk_nverts);
    DM_copy_edge_data(dm, result, 0, 0, chunk_nedges);
    DM_copy_loop_data(dm, result, 0, 0, chunk_nloops);
    DM_copy_poly_data(dm, result, 0, 0, chunk_npolys);

    /* Subsurf for eg won't have mesh data in the custom data arrays.
     * Now add mvert/medge/mpoly layers. */
    if (!CustomData_has_layer(&dm->vertData, CD_MVERT)) {
        dm->copyVertArray(dm, result_dm_verts);
    }
    if (!CustomData_has_layer(&dm->edgeData, CD_MEDGE)) {
        dm->copyEdgeArray(dm, CDDM_get_edges(result));
    }
    if (!CustomData_has_layer(&dm->polyData, CD_MPOLY)) {
        dm->copyLoopArray(dm, CDDM_get_loops(result));
        dm->copyPolyArray(dm, CDDM_get_polys(result));
    }

    /* Remember first chunk, in case of cap merge */
    first_chunk_start = 0;
    first_chunk_nverts = chunk_nverts;

    unit_m4(current_offset);
    for (c = 1; c < count; c++) {
        /* copy customdata to new geometry */
        DM_copy_vert_data(result, result, 0, c * chunk_nverts, chunk_nverts);
        DM_copy_edge_data(result, result, 0, c * chunk_nedges, chunk_nedges);
        DM_copy_loop_data(result, result, 0, c * chunk_nloops, chunk_nloops);
        DM_copy_poly_data(result, result, 0, c * chunk_npolys, chunk_npolys);

        mv_prev = result_dm_verts;
        mv = mv_prev + c * chunk_nverts;

        /* recalculate cumulative offset here */
        mul_m4_m4m4(current_offset, current_offset, offset);

        /* apply offset to all new verts */
        for (i = 0; i < chunk_nverts; i++, mv++, mv_prev++) {
            mul_m4_v3(current_offset, mv->co);

            /* We have to correct normals too, if we do not tag them as dirty! */
            if (!use_recalc_normals) {
                float no[3];
                normal_short_to_float_v3(no, mv->no);
                mul_mat3_m4_v3(current_offset, no);
                normalize_v3(no);
                normal_float_to_short_v3(mv->no, no);
            }
        }

        /* adjust edge vertex indices */
        me = CDDM_get_edges(result) + c * chunk_nedges;
        for (i = 0; i < chunk_nedges; i++, me++) {
            me->v1 += c * chunk_nverts;
            me->v2 += c * chunk_nverts;
        }

        mp = CDDM_get_polys(result) + c * chunk_npolys;
        for (i = 0; i < chunk_npolys; i++, mp++) {
            mp->loopstart += c * chunk_nloops;
        }

        /* adjust loop vertex and edge indices */
        ml = CDDM_get_loops(result) + c * chunk_nloops;
        for (i = 0; i < chunk_nloops; i++, ml++) {
            ml->v += c * chunk_nverts;
            ml->e += c * chunk_nedges;
        }

        /* Handle merge between chunk n and n-1 */
        if (use_merge && (c >= 1)) {
            if (!offset_has_scale && (c >= 2)) {
                /* Mapping chunk 3 to chunk 2 is a translation of mapping 2 to 1
                 * ... that is, except if scaling makes the distance grow. */
                int k;
                int this_chunk_index = c * chunk_nverts;
                int prev_chunk_index = (c - 1) * chunk_nverts;
                for (k = 0; k < chunk_nverts; k++, this_chunk_index++, prev_chunk_index++) {
                    int target = full_doubles_map[prev_chunk_index];
                    if (target != -1) {
                        target += chunk_nverts;  /* translate mapping */
                        while (target != -1 && !ELEM(full_doubles_map[target], -1, target)) {
                            /* If target is already mapped, we only follow that mapping if the
                             * final target remains close enough from the current vert
                             * (otherwise no mapping at all). */
                            if (compare_len_v3v3(result_dm_verts[this_chunk_index].co,
                                                 result_dm_verts[full_doubles_map[target]].co,
                                                 amd->merge_dist))
                            {
                                target = full_doubles_map[target];
                            }
                            else {
                                target = -1;
                            }
                        }
                    }
                    full_doubles_map[this_chunk_index] = target;
                }
            }
            else {
                dm_mvert_map_doubles(
                        full_doubles_map,
                        result_dm_verts,
                        (c - 1) * chunk_nverts,
                        chunk_nverts,
                        c * chunk_nverts,
                        chunk_nverts,
                        amd->merge_dist);
            }
        }
    }

    /* handle UVs */
    if (chunk_nloops > 0 && is_zero_v2(amd->uv_offset) == false) {
        const int totuv = CustomData_number_of_layers(&result->loopData, CD_MLOOPUV);
        for (i = 0; i < totuv; i++) {
            MLoopUV *dmloopuv = CustomData_get_layer_n(&result->loopData, CD_MLOOPUV, i);
            dmloopuv += chunk_nloops;
            for (c = 1; c < count; c++) {
                const float uv_offset[2] = {
                    amd->uv_offset[0] * (float)c,
                    amd->uv_offset[1] * (float)c,
                };
                int l_index = chunk_nloops;
                for (; l_index-- != 0; dmloopuv++) {
                    dmloopuv->uv[0] += uv_offset[0];
                    dmloopuv->uv[1] += uv_offset[1];
                }
            }
        }
    }

    last_chunk_start = (count - 1) * chunk_nverts;
    last_chunk_nverts = chunk_nverts;

    copy_m4_m4(final_offset, current_offset);

    if (use_merge && (amd->flags & MOD_ARR_MERGEFINAL) && (count > 1)) {
        /* Merge first and last copies */
        dm_mvert_map_doubles(
                full_doubles_map,
                result_dm_verts,
                last_chunk_start,
                last_chunk_nverts,
                first_chunk_start,
                first_chunk_nverts,
                amd->merge_dist);
    }

    /* start capping */
    if (start_cap_dm) {
        float start_offset[4][4];
        int start_cap_start = result_nverts - start_cap_nverts - end_cap_nverts;
        invert_m4_m4(start_offset, offset);
        dm_merge_transform(
                result, start_cap_dm, start_offset,
                result_nverts - start_cap_nverts - end_cap_nverts,
                result_nedges - start_cap_nedges - end_cap_nedges,
                result_nloops - start_cap_nloops - end_cap_nloops,
                result_npolys - start_cap_npolys - end_cap_npolys,
                start_cap_nverts, start_cap_nedges, start_cap_nloops, start_cap_npolys,
                vgroup_start_cap_remap, vgroup_start_cap_remap_len);
        /* Identify doubles with first chunk */
        if (use_merge) {
            dm_mvert_map_doubles(
                    full_doubles_map,
                    result_dm_verts,
                    first_chunk_start,
                    first_chunk_nverts,
                    start_cap_start,
                    start_cap_nverts,
                    amd->merge_dist);
        }
    }

    if (end_cap_dm) {
        float end_offset[4][4];
        int end_cap_start = result_nverts - end_cap_nverts;
        mul_m4_m4m4(end_offset, current_offset, offset);
        dm_merge_transform(
                result, end_cap_dm, end_offset,
                result_nverts - end_cap_nverts,
                result_nedges - end_cap_nedges,
                result_nloops - end_cap_nloops,
                result_npolys - end_cap_npolys,
                end_cap_nverts, end_cap_nedges, end_cap_nloops, end_cap_npolys,
                vgroup_end_cap_remap, vgroup_end_cap_remap_len);
        /* Identify doubles with last chunk */
        if (use_merge) {
            dm_mvert_map_doubles(
                    full_doubles_map,
                    result_dm_verts,
                    last_chunk_start,
                    last_chunk_nverts,
                    end_cap_start,
                    end_cap_nverts,
                    amd->merge_dist);
        }
    }
    /* done capping */

    /* Handle merging */
    tot_doubles = 0;
    if (use_merge) {
        for (i = 0; i < result_nverts; i++) {
            int new_i = full_doubles_map[i];
            if (new_i != -1) {
                /* We have to follow chains of doubles (merge start/end especially is likely
                 * to create some), those are not supported at all by CDDM_merge_verts! */
                while (!ELEM(full_doubles_map[new_i], -1, new_i)) {
                    new_i = full_doubles_map[new_i];
                }
                if (i == new_i) {
                    full_doubles_map[i] = -1;
                }
                else {
                    full_doubles_map[i] = new_i;
                    tot_doubles++;
                }
            }
        }
        if (tot_doubles > 0) {
            result = CDDM_merge_verts(result, full_doubles_map, tot_doubles, CDDM_MERGE_VERTS_DUMP_IF_EQUAL);
        }
        MEM_freeN(full_doubles_map);
    }

    /* In case the org dm has dirty normals, or we made some merging,
     * mark normals as dirty in the new dm!
     * TODO: we may need to set other dirty flags as well?
     */
    if (use_recalc_normals) {
        result->dirty |= DM_DIRTY_NORMALS;
    }

    if (vgroup_start_cap_remap) {
        MEM_freeN(vgroup_start_cap_remap);
    }
    if (vgroup_end_cap_remap) {
        MEM_freeN(vgroup_end_cap_remap);
    }

    return result;
}
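/*
 * A minimal standalone sketch (illustrative only, not part of the modifier
 * code, kept under #if 0 so it never builds; only libc is assumed) of the
 * doubles-map chain collapsing done just before CDDM_merge_verts() above:
 * entries hold the merge-target index or -1, and chains such as 4 -> 2 -> 0
 * must be flattened because CDDM_merge_verts() does not follow them.
 */
#if 0
#include <stdio.h>

int main(void)
{
    /* vertex 4 maps to 2, vertex 2 maps to 0, the others are unmapped */
    int map[6] = {-1, -1, 0, -1, 2, -1};
    int tot_doubles = 0;
    int i;

    for (i = 0; i < 6; i++) {
        int new_i = map[i];
        if (new_i != -1) {
            /* follow the chain until it ends or loops back on itself */
            while (map[new_i] != -1 && map[new_i] != new_i) {
                new_i = map[new_i];
            }
            if (i == new_i) {
                map[i] = -1;  /* self-mapping: not a double */
            }
            else {
                map[i] = new_i;
                tot_doubles++;
            }
        }
    }

    printf("tot_doubles = %d, map[4] = %d\n", tot_doubles, map[4]);  /* 2, 0 */
    return 0;
}
#endif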