/* addition: obj + obj */

/**
 * Number-protocol add slot for Color: component-wise addition of two colors.
 *
 * \param v1, v2: operands; both must pass ColorObject_Check, otherwise a
 * TypeError naming both operand types is raised (consistent with the other
 * Color number-protocol slots).
 * \return a new Color of the same subtype as \a v1, or NULL on error.
 */
static PyObject *Color_add(PyObject *v1, PyObject *v2)
{
	ColorObject *color1 = NULL, *color2 = NULL;
	float col[COLOR_SIZE];

	if (!ColorObject_Check(v1) || !ColorObject_Check(v2)) {
		/* Include both operand type names so mixed-type errors are diagnosable. */
		PyErr_Format(PyExc_TypeError,
		             "Color addition: (%s + %s) "
		             "invalid type for this operation",
		             Py_TYPE(v1)->tp_name, Py_TYPE(v2)->tp_name);
		return NULL;
	}
	color1 = (ColorObject *)v1;
	color2 = (ColorObject *)v2;

	/* Wrapped data may be owned elsewhere; refresh before reading. */
	if (BaseMath_ReadCallback(color1) == -1 || BaseMath_ReadCallback(color2) == -1)
		return NULL;

	add_vn_vnvn(col, color1->col, color2->col, COLOR_SIZE);

	return newColorObject(col, Py_NEW, Py_TYPE(v1));
}
/* addition: obj + obj */

/**
 * Number-protocol add slot for Color: adds two colors component-wise.
 *
 * Both operands must be Color instances; otherwise a TypeError that names
 * the two operand types is raised. On success a new Color of the same
 * subtype as the left operand is returned.
 */
static PyObject *Color_add(PyObject *v1, PyObject *v2)
{
	float rgb[COLOR_SIZE];

	/* Reject anything that is not a Color on either side. */
	if (!(ColorObject_Check(v1) && ColorObject_Check(v2))) {
		PyErr_Format(PyExc_TypeError,
		             "Color addition: (%s + %s) "
		             "invalid type for this operation",
		             Py_TYPE(v1)->tp_name, Py_TYPE(v2)->tp_name);
		return NULL;
	}

	ColorObject *lhs = (ColorObject *)v1;
	ColorObject *rhs = (ColorObject *)v2;

	/* Sync wrapped data before reading either operand. */
	if (BaseMath_ReadCallback(lhs) == -1) {
		return NULL;
	}
	if (BaseMath_ReadCallback(rhs) == -1) {
		return NULL;
	}

	add_vn_vnvn(rgb, lhs->col, rhs->col, COLOR_SIZE);

	return Color_CreatePyObject(rgb, Py_NEW, Py_TYPE(v1));
}
/**
 * Fit a sequence of cubic bezier segments to \a points by incremental
 * simplification/refitting of a doubly-linked knot list (one knot per input
 * point), removing knots while the fit error stays under \a error_threshold.
 *
 * \param points: flat array of points_len * dims doubles (repointed to an
 * internal doubled copy for cyclic curves; the caller's array is not modified).
 * \param calc_flag: CURVE_FIT_CALC_CYCLIC makes the curve wrap (only honored
 * when points_len > 2).
 * \param corners, corners_len: unused here (explicitly voided below).
 * \param corner_angle: angle threshold for corner detection; values >= M_PI
 * disable corner handling (only with USE_CORNER_DETECT).
 * \param r_cubic_array: receives a malloc'd array of knots * 3 * dims doubles
 * laid out as [handle_l, knot, handle_r] per remaining knot; caller owns it.
 * \param r_cubic_array_len: receives the remaining knot count.
 * \param r_cubic_orig_index: optional; receives a malloc'd map from output
 * knot index to original point index.
 * \param r_corner_index_array, r_corner_index_len: corner output (only with
 * USE_CORNER_DETECT).
 * \return 0 (no failure paths are reported).
 *
 * NOTE(review): the malloc/alloca results below are used unchecked — on OOM
 * this will crash rather than fail gracefully; confirm whether that is
 * acceptable policy for this library.
 */
int curve_fit_cubic_to_points_refit_db(
        const double *points,
        const uint points_len,
        const uint dims,
        const double error_threshold,
        const uint calc_flag,
        const uint *corners,
        const uint corners_len,
        const double corner_angle,
        double **r_cubic_array, uint *r_cubic_array_len,
        uint **r_cubic_orig_index,
        uint **r_corner_index_array, uint *r_corner_index_len)
{
	/* One knot per input point; knots are removed logically (is_removed),
	 * never reallocated. */
	const uint knots_len = points_len;
	struct Knot *knots = malloc(sizeof(Knot) * knots_len);
#ifndef USE_CORNER_DETECT
	(void)r_corner_index_array;
	(void)r_corner_index_len;
#endif
	/* Corner inputs are not used by this entry point. */
	(void)corners;
	(void)corners_len;

	/* Cyclic handling needs at least 3 points to be meaningful. */
	const bool is_cyclic = (calc_flag & CURVE_FIT_CALC_CYCLIC) != 0 && (points_len > 2);
#ifdef USE_CORNER_DETECT
	/* corner_angle >= PI means "no corner is sharp enough" -> disabled. */
	const bool use_corner = (corner_angle < M_PI);
#else
	(void)corner_angle;
#endif

	/* Over alloc the list x2 for cyclic curves,
	 * so we can evaluate across the start/end */
	double *points_alloc = NULL;
	if (is_cyclic) {
		points_alloc = malloc((sizeof(double) * points_len * dims) * 2);
		memcpy(points_alloc, points, sizeof(double) * points_len * dims);
		memcpy(points_alloc + (points_len * dims), points_alloc, sizeof(double) * points_len * dims);
		/* From here on 'points' refers to the doubled copy. */
		points = points_alloc;
	}

	/* Backing storage for every knot's two tangent vectors (tan[0]/tan[1]). */
	double *tangents = malloc(sizeof(double) * knots_len * 2 * dims);

	{
		/* Initialize the knot list: linked in array order, tangent pointers
		 * carved out of the shared 'tangents' allocation. */
		double *t_step = tangents;
		for (uint i = 0; i < knots_len; i++) {
			knots[i].next = (knots + i) + 1;
			knots[i].prev = (knots + i) - 1;
			knots[i].heap_node = NULL;
			knots[i].index = i;
			knots[i].can_remove = true;
			knots[i].is_removed = false;
			knots[i].is_corner = false;
			knots[i].error_sq_next = 0.0;
			knots[i].tan[0] = t_step; t_step += dims;
			knots[i].tan[1] = t_step; t_step += dims;
		}
		assert(t_step == &tangents[knots_len * 2 * dims]);
	}

	if (is_cyclic) {
		/* Close the ring: last.next -> first, first.prev -> last. */
		knots[0].prev = &knots[knots_len - 1];
		knots[knots_len - 1].next = &knots[0];
	}
	else {
		knots[0].prev = NULL;
		knots[knots_len - 1].next = NULL;

		/* always keep end-points */
		knots[0].can_remove = false;
		knots[knots_len - 1].can_remove = false;
	}

#ifdef USE_LENGTH_CACHE
	/* Cache of per-segment lengths; doubled for cyclic wrap-around reads. */
	double *points_length_cache = malloc(sizeof(double) * points_len * (is_cyclic ? 2 : 1));
#endif

	/* Initialize tangents,
	 * also set the values for knot handles since some may not collapse. */
	{
#ifdef USE_VLA
		double tan_prev[dims];
		double tan_next[dims];
#else
		double *tan_prev = alloca(sizeof(double) * dims);
		double *tan_next = alloca(sizeof(double) * dims);
#endif
		double len_prev, len_next;

#if 0
		/* 2x normalize calculations, but correct */
		for (uint i = 0; i < knots_len; i++) {
			Knot *k = &knots[i];
			if (k->prev) {
				sub_vn_vnvn(tan_prev, &points[k->prev->index * dims], &points[k->index * dims], dims);
#ifdef USE_LENGTH_CACHE
				points_length_cache[i] =
#endif
				len_prev = normalize_vn(tan_prev, dims);
			}
			else {
				zero_vn(tan_prev, dims);
				len_prev = 0.0;
			}

			if (k->next) {
				sub_vn_vnvn(tan_next, &points[k->index * dims], &points[k->next->index * dims], dims);
				len_next = normalize_vn(tan_next, dims);
			}
			else {
				zero_vn(tan_next, dims);
				len_next = 0.0;
			}

			add_vn_vnvn(k->tan[0], tan_prev, tan_next, dims);
			normalize_vn(k->tan[0], dims);
			copy_vnvn(k->tan[1], k->tan[0], dims);
			k->handles[0] = len_prev / 3;
			k->handles[1] = len_next / -3;
		}
#else
		if (knots_len < 2) {
			/* NOP, set dummy values */
			for (uint i = 0; i < knots_len; i++) {
				struct Knot *k = &knots[i];
				zero_vn(k->tan[0], dims);
				zero_vn(k->tan[1], dims);
				k->handles[0] = 0.0;
				k->handles[1] = 0.0;
#ifdef USE_LENGTH_CACHE
				points_length_cache[i] = 0.0;
#endif
			}
		}
		else if (is_cyclic) {
			/* Walk the ring once, carrying the previous segment's normalized
			 * direction/length forward so each segment is normalized only once. */
			len_prev = normalize_vn_vnvn(tan_prev, &points[(knots_len - 2) * dims], &points[(knots_len - 1) * dims], dims);
			for (uint i_curr = knots_len - 1, i_next = 0; i_next < knots_len; i_curr = i_next++) {
				struct Knot *k = &knots[i_curr];
#ifdef USE_LENGTH_CACHE
				/* Piggy-back the cache store on the assignment below. */
				points_length_cache[i_next] =
#endif
				len_next = normalize_vn_vnvn(tan_next, &points[i_curr * dims], &points[i_next * dims], dims);

				/* Knot tangent = normalized average of adjacent segment directions;
				 * handle lengths are 1/3 of the adjacent segment lengths. */
				add_vn_vnvn(k->tan[0], tan_prev, tan_next, dims);
				normalize_vn(k->tan[0], dims);
				copy_vnvn(k->tan[1], k->tan[0], dims);
				k->handles[0] = len_prev / 3;
				k->handles[1] = len_next / -3;

				copy_vnvn(tan_prev, tan_next, dims);
				len_prev = len_next;
			}
		}
		else {
			/* Open curve: end-point tangents come from the single adjacent
			 * segment; interior knots average their two segments as above. */
#ifdef USE_LENGTH_CACHE
			points_length_cache[0] = 0.0;
			points_length_cache[1] =
#endif
			len_prev = normalize_vn_vnvn(tan_prev, &points[0 * dims], &points[1 * dims], dims);
			copy_vnvn(knots[0].tan[0], tan_prev, dims);
			copy_vnvn(knots[0].tan[1], tan_prev, dims);
			knots[0].handles[0] = len_prev / 3;
			knots[0].handles[1] = len_prev / -3;

			for (uint i_curr = 1, i_next = 2; i_next < knots_len; i_curr = i_next++) {
				struct Knot *k = &knots[i_curr];
#ifdef USE_LENGTH_CACHE
				points_length_cache[i_next] =
#endif
				len_next = normalize_vn_vnvn(tan_next, &points[i_curr * dims], &points[i_next * dims], dims);

				add_vn_vnvn(k->tan[0], tan_prev, tan_next, dims);
				normalize_vn(k->tan[0], dims);
				copy_vnvn(k->tan[1], k->tan[0], dims);
				k->handles[0] = len_prev / 3;
				k->handles[1] = len_next / -3;

				copy_vnvn(tan_prev, tan_next, dims);
				len_prev = len_next;
			}
			/* NOTE(review): when knots_len == 2 the loop above never runs, so
			 * tan_next/len_next look uninitialized here — confirm whether that
			 * case is excluded by callers or handled upstream. */
			copy_vnvn(knots[knots_len - 1].tan[0], tan_next, dims);
			copy_vnvn(knots[knots_len - 1].tan[1], tan_next, dims);
			knots[knots_len - 1].handles[0] = len_next / 3;
			knots[knots_len - 1].handles[1] = len_next / -3;
		}
#endif
	}

#ifdef USE_LENGTH_CACHE
	if (is_cyclic) {
		/* Mirror the cache into the second half for wrap-around indexing. */
		memcpy(&points_length_cache[points_len], points_length_cache, sizeof(double) * points_len);
	}
#endif

#if 0
	/* Debug dump of the computed tangents. */
	for (uint i = 0; i < knots_len; i++) {
		Knot *k = &knots[i];
		printf("TAN %.8f %.8f %.8f %.8f\n", k->tan[0][0], k->tan[0][1], k->tan[1][0], k->tan[0][1]);
	}
#endif

	/* Read-only bundle passed to the simplify/refit passes. */
	const struct PointData pd = {
		.points = points,
		.points_len = points_len,
#ifdef USE_LENGTH_CACHE
		.points_length_cache = points_length_cache,
#endif
	};

	uint knots_len_remaining = knots_len;

	/* 'curve_incremental_simplify_refit' can be called here, but its very slow
	 * just remove all within the threshold first. */
	knots_len_remaining = curve_incremental_simplify(
	        &pd, knots, knots_len, knots_len_remaining,
	        SQUARE(error_threshold), dims);

#ifdef USE_CORNER_DETECT
	if (use_corner) {
		for (uint i = 0; i < knots_len; i++) {
			assert(knots[i].heap_node == NULL);
		}

		/* Corner pass uses a relaxed (3x) threshold for splitting. */
		knots_len_remaining = curve_incremental_simplify_corners(
		        &pd, knots, knots_len, knots_len_remaining,
		        SQUARE(error_threshold), SQUARE(error_threshold * 3),
		        corner_angle, dims,
		        r_corner_index_len);
	}
#endif  /* USE_CORNER_DETECT */

#ifdef USE_KNOT_REFIT
	/* Slower, higher-quality pass over whatever knots remain. */
	knots_len_remaining = curve_incremental_simplify_refit(
	        &pd, knots, knots_len, knots_len_remaining,
	        SQUARE(error_threshold), dims);
#endif  /* USE_KNOT_REFIT */

#ifdef USE_CORNER_DETECT
	if (use_corner) {
		/* Open curves always report their two end-points as corners. */
		if (is_cyclic == false) {
			*r_corner_index_len += 2;
		}

		uint *corner_index_array = malloc(sizeof(uint) * (*r_corner_index_len));
		/* k_index counts surviving knots, c_index fills the corner array. */
		uint k_index = 0, c_index = 0;
		uint i = 0;

		if (is_cyclic == false) {
			corner_index_array[c_index++] = k_index;
			k_index++;
			i++;
		}

		for (; i < knots_len; i++) {
			if (knots[i].is_removed == false) {
				if (knots[i].is_corner == true) {
					corner_index_array[c_index++] = k_index;
				}
				k_index++;
			}
		}

		if (is_cyclic == false) {
			corner_index_array[c_index++] = k_index;
			k_index++;
		}

		assert(c_index == *r_corner_index_len);
		*r_corner_index_array = corner_index_array;
	}
#endif  /* USE_CORNER_DETECT */

#ifdef USE_LENGTH_CACHE
	free(points_length_cache);
#endif

	/* Optional map: output knot position -> original point index. */
	uint *cubic_orig_index = NULL;
	if (r_cubic_orig_index) {
		cubic_orig_index = malloc(sizeof(uint) * knots_len_remaining);
	}

	/* First surviving knot; list traversal below starts here. */
	struct Knot *knots_first = NULL;
	{
		struct Knot *k;
		for (uint i = 0; i < knots_len; i++) {
			if (knots[i].is_removed == false) {
				knots_first = &knots[i];
				break;
			}
		}

		if (cubic_orig_index) {
			k = knots_first;
			for (uint i = 0; i < knots_len_remaining; i++, k = k->next) {
				cubic_orig_index[i] = k->index;
			}
		}
	}

	/* Correct unused handle endpoints - not essential, but nice behavior */
	if (is_cyclic == false) {
		struct Knot *knots_last = knots_first;
		while (knots_last->next) {
			knots_last = knots_last->next;
		}
		/* Mirror the inner handle so the unused outer handle is symmetric. */
		knots_first->handles[0] = -knots_first->handles[1];
		knots_last->handles[1] = -knots_last->handles[0];
	}

	/* 3x for one knot and two handles */
	double *cubic_array = malloc(sizeof(double) * knots_len_remaining * 3 * dims);

	{
		/* Emit [handle_l, knot, handle_r] per surviving knot; handles are the
		 * knot point offset along its tangents by the handle lengths. */
		double *c_step = cubic_array;
		struct Knot *k = knots_first;
		for (uint i = 0; i < knots_len_remaining; i++, k = k->next) {
			const double *p = &points[k->index * dims];
			madd_vn_vnvn_fl(c_step, p, k->tan[0], k->handles[0], dims);
			c_step += dims;
			copy_vnvn(c_step, p, dims);
			c_step += dims;
			madd_vn_vnvn_fl(c_step, p, k->tan[1], k->handles[1], dims);
			c_step += dims;
		}
		assert(c_step == &cubic_array[knots_len_remaining * 3 * dims]);
	}

	/* Free working storage; only the r_* outputs survive (caller frees). */
	if (points_alloc) {
		free(points_alloc);
		points_alloc = NULL;
	}
	free(knots);
	free(tangents);

	if (r_cubic_orig_index) {
		*r_cubic_orig_index = cubic_orig_index;
	}

	*r_cubic_array = cubic_array;
	*r_cubic_array_len = knots_len_remaining;

	return 0;
}