static float do_clump_level(float result[3], const float co[3], const float par_co[3],
                            float time, float clumpfac, float clumppow, float pa_clump,
                            CurveMapping *clumpcurve)
{
    float clump = 0.0f;

    if (clumpcurve) {
        clump = pa_clump * (1.0f - clamp_f(curvemapping_evaluateF(clumpcurve, 0, time), 0.0f, 1.0f));

        interp_v3_v3v3(result, co, par_co, clump);
    }
    else if (clumpfac != 0.0f) {
        float cpow;

        if (clumppow < 0.0f)
            cpow = 1.0f + clumppow;
        else
            cpow = 1.0f + 9.0f * clumppow;

        if (clumpfac < 0.0f) /* clump roots instead of tips */
            clump = -clumpfac * pa_clump * (float)pow(1.0 - (double)time, (double)cpow);
        else
            clump = clumpfac * pa_clump * (float)pow((double)time, (double)cpow);

        interp_v3_v3v3(result, co, par_co, clump);
    }

    return clump;
}
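/* A minimal sketch of the blend performed above, assuming interp_v3_v3v3() is a
 * standard linear interpolation: the child point `co` is pulled toward the parent
 * point `par_co` by the clump weight, so clump = 0 leaves the point untouched and
 * clump = 1 moves it onto the parent strand. The helper name lerp_v3_sketch is
 * illustrative only and not part of the original API. */
static void lerp_v3_sketch(float r[3], const float a[3], const float b[3], float t)
{
    /* r = a + t * (b - a), component-wise. */
    r[0] = a[0] + t * (b[0] - a[0]);
    r[1] = a[1] + t * (b[1] - a[1]);
    r[2] = a[2] + t * (b[2] - a[2]);
}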
void TimeNode::convertToOperations(NodeConverter &converter, const CompositorContext &context) const
{
    SetValueOperation *operation = new SetValueOperation();
    bNode *node = this->getbNode();

    /* stack order output: fac */
    float fac = 0.0f;
    const int framenumber = context.getFramenumber();

    if (framenumber < node->custom1) {
        fac = 0.0f;
    }
    else if (framenumber > node->custom2) {
        fac = 1.0f;
    }
    else if (node->custom1 < node->custom2) {
        fac = (context.getFramenumber() - node->custom1) / (float)(node->custom2 - node->custom1);
    }

    curvemapping_initialize((CurveMapping *)node->storage);
    fac = curvemapping_evaluateF((CurveMapping *)node->storage, 0, fac);

    operation->setValue(clamp_f(fac, 0.0f, 1.0f));
    converter.addOperation(operation);

    converter.mapOutputSocket(getOutputSocket(0), operation->getOutputSocket());
}
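/* A minimal standalone sketch of the frame-to-factor ramp used above: frames before
 * the start frame map to 0.0, frames after the end frame map to 1.0, and frames in
 * between are interpolated linearly before the curve mapping is applied. The name
 * frame_to_factor and its parameters are illustrative only, not compositor API. */
static float frame_to_factor(int framenumber, int start_frame, int end_frame)
{
    if (framenumber <= start_frame) {
        return 0.0f;
    }
    if (framenumber >= end_frame) {
        return 1.0f;
    }
    /* Linear ramp across the [start_frame, end_frame] range. */
    return (framenumber - start_frame) / (float)(end_frame - start_frame);
}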
void scrollbar_start_scrolling(ScrollBar *sb, int yco)
{
    int thumb_h_2 = scrollbar_get_thumbH(sb) / 2;
    int thumbable_h = scrollbar_get_thumbableH(sb);
    float npos = scrollbar_co_to_pos(sb, yco);

    sb->scrolloffs = sb->thumbpos - npos;
    if (fabs(sb->scrolloffs) >= (float)thumb_h_2 / thumbable_h) {
        sb->scrolloffs = 0.0;
    }

    sb->scrolling = 1;
    sb->thumbpos = clamp_f(npos + sb->scrolloffs, 0.0, 1.0);
}
static int bpy_bmdeformvert_ass_subscript(BPy_BMDeformVert *self, PyObject *key, PyObject *value)
{
    if (PyIndex_Check(key)) {
        int i;
        i = PyNumber_AsSsize_t(key, PyExc_IndexError);
        if (i == -1 && PyErr_Occurred()) {
            return -1;
        }

        if (value) {
            /* dvert[group_index] = 0.5 */
            if (i < 0) {
                PyErr_SetString(PyExc_KeyError, "BMDeformVert[key] = x: "
                                "weight keys can't be negative");
                return -1;
            }
            else {
                MDeformWeight *dw = defvert_verify_index(self->data, i);
                const float f = PyFloat_AsDouble(value);
                if (f == -1 && PyErr_Occurred()) {
                    /* the assigned value could not be converted to a number */
                    PyErr_SetString(PyExc_TypeError, "BMDeformVert[key] = x: "
                                    "assigned value not a number");
                    return -1;
                }

                dw->weight = clamp_f(f, 0.0f, 1.0f);
            }
        }
        else {
            /* del dvert[group_index] */
            MDeformWeight *dw = defvert_find_index(self->data, i);

            if (dw == NULL) {
                PyErr_SetString(PyExc_KeyError, "del BMDeformVert[key]: "
                                "key not found");
                return -1;
            }
            defvert_remove_group(self->data, dw);
        }

        return 0;
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "BMDeformVert keys must be integers, not %.200s",
                     Py_TYPE(key)->tp_name);
        return -1;
    }
}
static PyObject *bpy_bm_utils_vert_collapse_faces(PyObject *UNUSED(self), PyObject *args)
{
    BPy_BMEdge *py_edge;
    BPy_BMVert *py_vert;

    float fac;
    int do_join_faces;

    BMesh *bm;
    BMEdge *e_new = NULL;

    if (!PyArg_ParseTuple(args, "O!O!fi:vert_collapse_faces",
                          &BPy_BMVert_Type, &py_vert,
                          &BPy_BMEdge_Type, &py_edge,
                          &fac, &do_join_faces))
    {
        return NULL;
    }

    BPY_BM_CHECK_OBJ(py_edge);
    BPY_BM_CHECK_OBJ(py_vert);

    /* this doubles for checking that the verts are in the same mesh */
    if (!(py_edge->e->v1 == py_vert->v ||
          py_edge->e->v2 == py_vert->v))
    {
        PyErr_SetString(PyExc_ValueError,
                        "vert_collapse_faces(vert, edge): the vertex is not found in the edge");
        return NULL;
    }

    if (BM_vert_edge_count_is_over(py_vert->v, 2)) {
        PyErr_SetString(PyExc_ValueError,
                        "vert_collapse_faces(vert, edge): vert has more than 2 connected edges");
        return NULL;
    }

    bm = py_edge->bm;

    e_new = BM_vert_collapse_faces(bm, py_edge->e, py_vert->v, clamp_f(fac, 0.0f, 1.0f),
                                   true, do_join_faces, true);

    if (e_new) {
        return BPy_BMEdge_CreatePyObject(bm, e_new);
    }
    else {
        PyErr_SetString(PyExc_ValueError,
                        "vert_collapse_faces(vert, edge): no new edge created, internal error");
        return NULL;
    }
}
static float curvemapping_integrate_clamped(CurveMapping *curve, float start, float end, float step)
{
    float integral = 0.0f;
    float x = start;
    while (x < end) {
        float y = curvemapping_evaluateF(curve, 0, x);
        y = clamp_f(y, 0.0f, 1.0f);
        /* TODO(sergey): Clamp last step to end. */
        integral += y * step;
        x += step;
    }
    return integral;
}
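/* A minimal sketch of the "clamp last step to end" TODO above: when the remaining
 * interval is shorter than `step`, only that remainder contributes to the integral,
 * so the sum stops exactly at `end`. The generic `eval` callback stands in for
 * curvemapping_evaluateF and is an assumption for illustration only. */
static float integrate_clamped_sketch(float (*eval)(float x), float start, float end, float step)
{
    float integral = 0.0f;
    float x = start;
    while (x < end) {
        /* Shrink the final step so the rectangle does not extend past `end`. */
        const float w = (x + step > end) ? (end - x) : step;
        float y = eval(x);
        y = clamp_f(y, 0.0f, 1.0f);
        integral += y * w;
        x += step;
    }
    return integral;
}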
static PyObject *bpy_bm_utils_edge_split(PyObject *UNUSED(self), PyObject *args)
{
    BPy_BMEdge *py_edge;
    BPy_BMVert *py_vert;
    float fac;

    BMesh *bm;
    BMVert *v_new = NULL;
    BMEdge *e_new = NULL;

    if (!PyArg_ParseTuple(args, "O!O!f:edge_split",
                          &BPy_BMEdge_Type, &py_edge,
                          &BPy_BMVert_Type, &py_vert,
                          &fac))
    {
        return NULL;
    }

    BPY_BM_CHECK_OBJ(py_edge);
    BPY_BM_CHECK_OBJ(py_vert);

    /* this doubles for checking that the verts are in the same mesh */
    if (!(py_edge->e->v1 == py_vert->v ||
          py_edge->e->v2 == py_vert->v))
    {
        PyErr_SetString(PyExc_ValueError,
                        "edge_split(edge, vert): the vertex is not found in the edge");
        return NULL;
    }

    bm = py_edge->bm;

    v_new = BM_edge_split(bm, py_edge->e, py_vert->v, &e_new, clamp_f(fac, 0.0f, 1.0f));

    if (v_new && e_new) {
        PyObject *ret = PyTuple_New(2);
        PyTuple_SET_ITEMS(ret,
                          BPy_BMEdge_CreatePyObject(bm, e_new),
                          BPy_BMVert_CreatePyObject(bm, v_new));
        return ret;
    }
    else {
        PyErr_SetString(PyExc_ValueError,
                        "edge_split(edge, vert): couldn't split the edge, internal error");
        return NULL;
    }
}
static void do_rough_curve(const float loc[3], float mat[4][4], float time, float fac, float size,
                           CurveMapping *roughcurve, ParticleKey *state)
{
    float rough[3];
    float rco[3];

    if (!roughcurve)
        return;

    fac *= clamp_f(curvemapping_evaluateF(roughcurve, 0, time), 0.0f, 1.0f);

    copy_v3_v3(rco, loc);
    mul_v3_fl(rco, time);
    rough[0] = -1.0f + 2.0f * BLI_gTurbulence(size, rco[0], rco[1], rco[2], 2, 0, 2);
    rough[1] = -1.0f + 2.0f * BLI_gTurbulence(size, rco[1], rco[2], rco[0], 2, 0, 2);
    rough[2] = -1.0f + 2.0f * BLI_gTurbulence(size, rco[2], rco[0], rco[1], 2, 0, 2);

    madd_v3_v3fl(state->co, mat[0], fac * rough[0]);
    madd_v3_v3fl(state->co, mat[1], fac * rough[1]);
    madd_v3_v3fl(state->co, mat[2], fac * rough[2]);
}
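/* A minimal sketch of the coordinate-cycling trick used above: sampling the same
 * noise function with the input coordinates rotated (xyz, yzx, zxy) produces three
 * decorrelated channels, each remapped from a nominal [0, 1] range to [-1, 1].
 * The `noise3` callback is a stand-in for BLI_gTurbulence and is an assumption,
 * not part of the original API. */
static void rough_vector_sketch(float (*noise3)(float x, float y, float z),
                                const float p[3], float rough[3])
{
    /* Same point, cycled coordinate order, so the three channels differ. */
    rough[0] = -1.0f + 2.0f * noise3(p[0], p[1], p[2]);
    rough[1] = -1.0f + 2.0f * noise3(p[1], p[2], p[0]);
    rough[2] = -1.0f + 2.0f * noise3(p[2], p[0], p[1]);
}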
void scrollbar_set_thumbpos(ScrollBar *sb, float pos)
{
    sb->thumbpos = clamp_f(pos, 0.0, 1.0);
}
void scrollbar_keep_scrolling(ScrollBar *sb, int yco)
{
    float npos = scrollbar_co_to_pos(sb, yco);
    sb->thumbpos = clamp_f(npos + sb->scrolloffs, 0.0, 1.0);
}
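/* A minimal sketch of the drag bookkeeping shared by the scrollbar functions above,
 * assuming scrollbar_co_to_pos() maps a pixel coordinate into the normalized [0, 1]
 * track range. The grab offset recorded when scrolling starts keeps the thumb from
 * jumping under the cursor, and every update is clamped back into [0, 1]. The helper
 * name and parameters are illustrative only. */
static float scrollbar_drag_thumbpos(float grabbed_thumbpos, float grab_pos, float drag_pos)
{
    /* Offset between the thumb position and where the user grabbed it. */
    const float offset = grabbed_thumbpos - grab_pos;
    /* The thumb follows the cursor while preserving the grab offset. */
    return clamp_f(drag_pos + offset, 0.0f, 1.0f);
}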
// Process plugin - called when plugin is asked by ShaderMap to apply a filter to Map Pixels.
BOOL on_process(const process_data_s& data, BOOL* is_sRGB_out)
{
    // Local structs
    struct pixel_64_s  { float c, a; };
    struct pixel_128_s { float r, g, b, a; };

    // Local data
    float r, g, b, a;
    unsigned int i, count_i, thread_limit, mask_width, mask_height;
    BOOL is_use_mask, is_invert_mask;
    unsigned short* mask_pixel_array, *local_mask_pixel_array;
    pixel_64_s* pixel_array_64;
    pixel_128_s* pixel_array_128;

    // Set filter progress.
    fp_set_filter_progress(data.map_id, data.filter_position, 0);

    // -----------------
    // Exit early if the map is a normal map - only want to work on color and grayscale images here.
    if(data.is_normal_map && data.filter_position > 0) {
        *is_sRGB_out = data.is_sRGB;
        fp_set_filter_progress(data.map_id, data.filter_position, 100);
        return TRUE;
    }

    // -----------------
    // Get Map thread limit from ShaderMap. Would use this if processing map in multiple threads.
    // This filter does not use multithreading so this line is for demonstration only.
    thread_limit = fp_get_map_thread_limit();

    // -----------------
    // Get property values - pay special attention to the property index requested.
    // Converting red, green, blue, and alpha to floating points with range -1.0f to 1.0f.
    r = fp_get_property_slider(data.map_id, data.filter_position, 0) / 100.0f;
    g = fp_get_property_slider(data.map_id, data.filter_position, 1) / 100.0f;
    b = fp_get_property_slider(data.map_id, data.filter_position, 2) / 100.0f;
    a = fp_get_property_slider(data.map_id, data.filter_position, 3) / 100.0f;

    // Don't forget to get the values from the auto added mask properties.
    is_use_mask = fp_get_property_checkbox(data.map_id, data.filter_position, 4);
    is_invert_mask = fp_get_property_checkbox(data.map_id, data.filter_position, 5);

    // Exit early if nothing to do
    if(r == 0 && g == 0 && b == 0 && a == 0) {
        *is_sRGB_out = data.is_sRGB;
        fp_set_filter_progress(data.map_id, data.filter_position, 100);
        return TRUE;
    }

    // -----------------
    // The local dynamic pixel array we use to store mask pixels in.
    local_mask_pixel_array = 0;

    // Get mask data if enabled
    if(is_use_mask) {
        // Get mask size and pixels from ShaderMap.
        fp_get_map_mask(data.map_id, mask_width, mask_height, &mask_pixel_array);

        // If no mask is set then disable mask usage.
        if(!mask_pixel_array) {
            is_use_mask = FALSE;
        } else {
            // Create local copy of mask pixels.
            local_mask_pixel_array = new (std::nothrow) unsigned short[mask_width * mask_height];
            if(!local_mask_pixel_array) {
                LOG_ERROR_MSG(data.map_id, data.filter_position, _T("Memory Allocation Error: Failed to allocate local_mask_pixel_array."));
                return FALSE;
            }
            memcpy(local_mask_pixel_array, mask_pixel_array, sizeof(unsigned short) * mask_width * mask_height);

            // Resize the mask to the map size if not already the same size.
            // Using a simple nearest neighbor scale.
            if(mask_width != data.map_width || mask_height != data.map_height) {
                // Resize mask - local_mask_pixel_array is released by the function and a new array is allocated and returned with scaled pixels.
                local_mask_pixel_array = resize_mask_pixels(local_mask_pixel_array, mask_width, mask_height, data.map_width, data.map_height);
                if(!local_mask_pixel_array) {
                    LOG_ERROR_MSG(data.map_id, data.filter_position, _T("Resize mask pixels failed. Most likely caused by a memory allocation error."));
                    return FALSE;
                }
            }

            // Invert local (resized) mask if required.
            if(is_invert_mask) {
                count_i = data.map_width * data.map_height;
                for(i = 0; i < count_i; i++) {
                    local_mask_pixel_array[i] = USHRT_MAX - local_mask_pixel_array[i];
                }
            }
        }
    }

    // -----------------
    // Check for cancel.
    if(fp_is_cancel_process()) {
        if(local_mask_pixel_array) {
            delete [] local_mask_pixel_array;
        }
        return FALSE;
    }

    // -----------------
    // If map is grayscale
    if(data.is_grayscale) {
        // Cast the map pixel array to pixel_64_s.
        pixel_array_64 = (pixel_64_s*)data.map_pixel_data;

        // If using mask
        if(is_use_mask && local_mask_pixel_array) {
            // For every map pixel
            count_i = data.map_width * data.map_height;
            for(i = 0; i < count_i; i++) {
                // Add the channel modifiers to each channel - use red for grayscale color.
                // Multiply the channel modifier by the mask pixel converted to scalar.
                // Clamp to range 0.0f to 1.0f.
                pixel_array_64[i].c = clamp_f(pixel_array_64[i].c + (r * (local_mask_pixel_array[i] / (float)USHRT_MAX)));
                pixel_array_64[i].a = clamp_f(pixel_array_64[i].a + (a * (local_mask_pixel_array[i] / (float)USHRT_MAX)));
            }
        }
        // Else no mask
        else {
            // For every map pixel
            count_i = data.map_width * data.map_height;
            for(i = 0; i < count_i; i++) {
                // Add the channel modifiers to each channel - use red for grayscale color.
                // Clamp to range 0.0f to 1.0f.
                pixel_array_64[i].c = clamp_f(pixel_array_64[i].c + r);
                pixel_array_64[i].a = clamp_f(pixel_array_64[i].a + a);
            }
        }
    }
    // Else map is RGBA
    else {
        // Cast the map pixel array to pixel_128_s.
        pixel_array_128 = (pixel_128_s*)data.map_pixel_data;

        // If using mask
        if(is_use_mask && local_mask_pixel_array) {
            // For every map pixel
            count_i = data.map_width * data.map_height;
            for(i = 0; i < count_i; i++) {
                // Add the channel modifiers to each channel.
                // Multiply the channel modifier by the mask pixel converted to scalar.
                // Clamp to range 0.0f to 1.0f.
                pixel_array_128[i].r = clamp_f(pixel_array_128[i].r + (r * (local_mask_pixel_array[i] / (float)USHRT_MAX)));
                pixel_array_128[i].g = clamp_f(pixel_array_128[i].g + (g * (local_mask_pixel_array[i] / (float)USHRT_MAX)));
                pixel_array_128[i].b = clamp_f(pixel_array_128[i].b + (b * (local_mask_pixel_array[i] / (float)USHRT_MAX)));
                pixel_array_128[i].a = clamp_f(pixel_array_128[i].a + (a * (local_mask_pixel_array[i] / (float)USHRT_MAX)));
            }
        }
        // Else no mask
        else {
            // For every map pixel
            count_i = data.map_width * data.map_height;
            for(i = 0; i < count_i; i++) {
                // Add the channel modifiers to each channel.
                // Clamp to range 0.0f to 1.0f.
                pixel_array_128[i].r = clamp_f(pixel_array_128[i].r + r);
                pixel_array_128[i].g = clamp_f(pixel_array_128[i].g + g);
                pixel_array_128[i].b = clamp_f(pixel_array_128[i].b + b);
                pixel_array_128[i].a = clamp_f(pixel_array_128[i].a + a);
            }
        }
    }

    // -----------------
    // Check for cancel.
    if(fp_is_cancel_process()) {
        if(local_mask_pixel_array) {
            delete [] local_mask_pixel_array;
        }
        return FALSE;
    }

    // -----------------
    // Free the local copy of the mask pixels, if one was allocated.
    if(local_mask_pixel_array) {
        delete [] local_mask_pixel_array;
    }

    // -----------------
    // Set the output parameter of the color space the pixels are in. It was not changed.
    *is_sRGB_out = data.is_sRGB;

    // -----------------
    // Set progress
    fp_set_filter_progress(data.map_id, data.filter_position, 100);

    return TRUE;
}
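// A minimal sketch of what the resize_mask_pixels() call above is described as doing:
// a nearest-neighbor rescale of a 16-bit mask to the map size. The function body is an
// assumption for illustration only; per the comments above, the real helper releases
// the input array and returns a newly allocated one, which this sketch mirrors.
unsigned short* resize_mask_nearest_sketch(unsigned short* src, unsigned int src_w, unsigned int src_h,
                                           unsigned int dst_w, unsigned int dst_h)
{
    unsigned short* dst = new (std::nothrow) unsigned short[dst_w * dst_h];
    if(!dst) {
        delete [] src;
        return 0;
    }

    for(unsigned int y = 0; y < dst_h; y++) {
        // Map the destination row/column back to the nearest source row/column.
        const unsigned int sy = (unsigned int)(((unsigned long long)y * src_h) / dst_h);
        for(unsigned int x = 0; x < dst_w; x++) {
            const unsigned int sx = (unsigned int)(((unsigned long long)x * src_w) / dst_w);
            dst[y * dst_w + x] = src[sy * src_w + sx];
        }
    }

    // Release the source array, matching the ownership convention described above.
    delete [] src;
    return dst;
}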
int BPy_BMLayerItem_SetItem(BPy_BMElem *py_ele, BPy_BMLayerItem *py_layer, PyObject *py_value)
{
    int ret = 0;
    void *value = bpy_bmlayeritem_ptr_get(py_ele, py_layer);

    if (UNLIKELY(value == NULL)) {
        return -1;
    }

    switch (py_layer->type) {
        case CD_MDEFORMVERT:
        {
            ret = BPy_BMDeformVert_AssignPyObject(value, py_value);
            break;
        }
        case CD_PROP_FLT:
        case CD_PAINT_MASK:
        {
            float tmp_val = PyFloat_AsDouble(py_value);
            if (UNLIKELY(tmp_val == -1 && PyErr_Occurred())) {
                PyErr_Format(PyExc_TypeError, "expected a float, not a %.200s", Py_TYPE(py_value)->tp_name);
                ret = -1;
            }
            else {
                *(float *)value = tmp_val;
            }
            break;
        }
        case CD_PROP_INT:
        {
            int tmp_val = PyC_Long_AsI32(py_value);
            if (UNLIKELY(tmp_val == -1 && PyErr_Occurred())) {
                /* error is set */
                ret = -1;
            }
            else {
                *(int *)value = tmp_val;
            }
            break;
        }
        case CD_PROP_STR:
        {
            MStringProperty *mstring = value;
            char *tmp_val;
            Py_ssize_t tmp_val_len;
            if (UNLIKELY(PyBytes_AsStringAndSize(py_value, &tmp_val, &tmp_val_len) == -1)) {
                PyErr_Format(PyExc_TypeError, "expected bytes, not a %.200s", Py_TYPE(py_value)->tp_name);
                ret = -1;
            }
            else {
                if (tmp_val_len > sizeof(mstring->s))
                    tmp_val_len = sizeof(mstring->s);
                memcpy(mstring->s, tmp_val, tmp_val_len);
                mstring->s_len = tmp_val_len;
            }
            break;
        }
        case CD_MTEXPOLY:
        {
            ret = BPy_BMTexPoly_AssignPyObject(value, py_value);
            break;
        }
        case CD_MLOOPUV:
        {
            ret = BPy_BMLoopUV_AssignPyObject(value, py_value);
            break;
        }
        case CD_MLOOPCOL:
        {
            ret = BPy_BMLoopColor_AssignPyObject(value, py_value);
            break;
        }
        case CD_SHAPEKEY:
        {
            float tmp_val[3];
            if (UNLIKELY(mathutils_array_parse(tmp_val, 3, 3, py_value, "BMVert[shape] = value") == -1)) {
                ret = -1;
            }
            else {
                copy_v3_v3((float *)value, tmp_val);
            }
            break;
        }
        case CD_BWEIGHT:
        {
            float tmp_val = PyFloat_AsDouble(py_value);
            if (UNLIKELY(tmp_val == -1 && PyErr_Occurred())) {
                PyErr_Format(PyExc_TypeError, "expected a float, not a %.200s", Py_TYPE(py_value)->tp_name);
                ret = -1;
            }
            else {
                *(float *)value = clamp_f(tmp_val, 0.0f, 1.0f);
            }
            break;
        }
        case CD_CREASE:
        {
            float tmp_val = PyFloat_AsDouble(py_value);
            if (UNLIKELY(tmp_val == -1 && PyErr_Occurred())) {
                PyErr_Format(PyExc_TypeError, "expected a float, not a %.200s", Py_TYPE(py_value)->tp_name);
                ret = -1;
            }
            else {
                *(float *)value = clamp_f(tmp_val, 0.0f, 1.0f);
            }
            break;
        }
        case CD_MVERT_SKIN:
        {
            ret = BPy_BMVertSkin_AssignPyObject(value, py_value);
            break;
        }
        default:
        {
            PyErr_SetString(PyExc_AttributeError, "readonly / unsupported type");
            ret = -1;
            break;
        }
    }

    return ret;
}