Example #1
static void um_arraystore_expand(UndoMesh *um)
{
	Mesh *me = &um->me;

	um_arraystore_cd_expand(um->store.vdata, &me->vdata, me->totvert);
	um_arraystore_cd_expand(um->store.edata, &me->edata, me->totedge);
	um_arraystore_cd_expand(um->store.ldata, &me->ldata, me->totloop);
	um_arraystore_cd_expand(um->store.pdata, &me->pdata, me->totpoly);

	if (um->store.keyblocks) {
		const size_t stride = me->key->elemsize;
		KeyBlock *keyblock = me->key->block.first;
		for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
			BArrayState *state = um->store.keyblocks[i];
			size_t state_len;
			keyblock->data = BLI_array_store_state_data_get_alloc(state, &state_len);
			BLI_assert(keyblock->totelem == (state_len / stride));
			UNUSED_VARS_NDEBUG(stride);
		}
	}

	if (um->store.mselect) {
		const size_t stride = sizeof(*me->mselect);
		BArrayState *state = um->store.mselect;
		size_t state_len;
		me->mselect = BLI_array_store_state_data_get_alloc(state, &state_len);
		BLI_assert(me->totselect == (state_len / stride));
		UNUSED_VARS_NDEBUG(stride);
	}

	/* not essential, but prevents accidental dangling pointer access */
	BKE_mesh_update_customdata_pointers(me, false);
}
Example #2
/* Assign 'brush' to the tool slot for its current tool, adjusting user counts of the new and previous slot brush. */
void BKE_paint_toolslots_brush_update_ex(Paint *paint, Brush *brush)
{
  const uint tool_offset = paint->runtime.tool_offset;
  UNUSED_VARS_NDEBUG(tool_offset);
  BLI_assert(tool_offset != 0);
  const int slot_index = BKE_brush_tool_get(brush, paint);
  BKE_paint_toolslots_len_ensure(paint, slot_index + 1);
  PaintToolSlot *tslot = &paint->tool_slots[slot_index];
  id_us_plus(&brush->id);
  id_us_min(&tslot->brush->id);
  tslot->brush = brush;
}
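
All of these examples share one pattern: a local variable exists only so a BLI_assert() can check it, and because asserts compile away in release builds, UNUSED_VARS_NDEBUG() keeps the compiler from warning that the variable is unused there. Below is a minimal self-contained sketch of the idea, using plain assert() and a hypothetical single-argument stand-in macro rather than Blender's real variadic UNUSED_VARS_NDEBUG:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical single-argument stand-in for Blender's variadic UNUSED_VARS_NDEBUG:
 * when NDEBUG is defined the assert below compiles away, so cast the variable to
 * void to silence the "unused variable" warning; in debug builds do nothing. */
#ifdef NDEBUG
#  define UNUSED_VARS_NDEBUG(var) ((void)(var))
#else
#  define UNUSED_VARS_NDEBUG(var) ((void)0)
#endif

static bool table_insert(int *table, size_t table_len, size_t index, int value)
{
  if (index >= table_len) {
    return false;
  }
  table[index] = value;
  return true;
}

int main(void)
{
  int table[8] = {0};

  /* 'ok' exists only to feed the assert; without the macro, a release build
   * (NDEBUG defined) would warn that 'ok' is set but never used. */
  const bool ok = table_insert(table, 8, 3, 42);
  assert(ok == true);
  UNUSED_VARS_NDEBUG(ok);

  printf("table[3] = %d\n", table[3]);
  return 0;
}

The same shape appears in every snippet here: compute a value, assert on it, then pass it to UNUSED_VARS_NDEBUG so release builds stay warning-free.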
Example #3
/* translate callbacks */
static void gizmo_value_operator_redo_value_get(const wmGizmo *gz,
                                                wmGizmoProperty *gz_prop,
                                                void *value_p)
{
  float *value = value_p;
  BLI_assert(gz_prop->type->array_length == 1);
  UNUSED_VARS_NDEBUG(gz_prop);

  struct ValueOpRedoGroup *igzgroup = gz->parent_gzgroup->customdata;
  wmOperator *op = igzgroup->state.op;
  *value = RNA_property_float_get(op->ptr, op->type->prop);
}
Example #4
static PyObject *bpy_bm_utils_vert_splice(PyObject *UNUSED(self), PyObject *args)
{
	BPy_BMVert *py_vert;
	BPy_BMVert *py_vert_target;

	BMesh *bm;

	bool ok;

	if (!PyArg_ParseTuple(args, "O!O!:vert_splice",
	                      &BPy_BMVert_Type, &py_vert,
	                      &BPy_BMVert_Type, &py_vert_target))
	{
		return NULL;
	}

	BPY_BM_CHECK_OBJ(py_vert);
	BPY_BM_CHECK_OBJ(py_vert_target);

	bm = py_vert->bm;
	BPY_BM_CHECK_SOURCE_OBJ(bm, "vert_splice", py_vert_target);

	if (py_vert->v == py_vert_target->v) {
		PyErr_SetString(PyExc_ValueError,
		                "vert_splice(...): vert arguments match");
		return NULL;
	}

	if (BM_edge_exists(py_vert->v, py_vert_target->v)) {
		PyErr_SetString(PyExc_ValueError,
		                "vert_splice(...): verts can't share an edge");
		return NULL;
	}

	if (BM_vert_pair_share_face_check(py_vert->v, py_vert_target->v)) {
		PyErr_SetString(PyExc_ValueError,
		                "vert_splice(...): verts can't share a face");
		return NULL;
	}

	/* should always succeed */
	ok = BM_vert_splice(bm, py_vert_target->v, py_vert->v);
	BLI_assert(ok == true);
	UNUSED_VARS_NDEBUG(ok);

	Py_RETURN_NONE;
}
Example #5
/**
 * \note There is no room for data going out of sync here.
 * The layers and the states are stored together so this can be kept working.
 */
static void um_arraystore_cd_expand(
        const BArrayCustomData *bcd, struct CustomData *cdata, const size_t data_len)
{
	CustomDataLayer *layer = cdata->layers;
	while (bcd) {
		const int stride = CustomData_sizeof(bcd->type);
		for (int i = 0; i < bcd->states_len; i++) {
			BLI_assert(bcd->type == layer->type);
			if (bcd->states[i]) {
				size_t state_len;
				layer->data = BLI_array_store_state_data_get_alloc(bcd->states[i], &state_len);
				BLI_assert(stride * data_len == state_len);
				UNUSED_VARS_NDEBUG(stride, data_len);
			}
			else {
				layer->data = NULL;
			}
			layer++;
		}
		bcd = bcd->next;
	}
}
Example #6
/**
 * Run this to ensure brush types are set for each slot when entering modes
 * (for new scenes, for example).
 */
void BKE_paint_toolslots_brush_validate(Main *bmain, Paint *paint)
{
  /* Clear slots whose brush has a mismatched tool or mode (unlikely but possible). */
  const uint tool_offset = paint->runtime.tool_offset;
  UNUSED_VARS_NDEBUG(tool_offset);
  const eObjectMode ob_mode = paint->runtime.ob_mode;
  BLI_assert(tool_offset && ob_mode);
  for (int i = 0; i < paint->tool_slots_len; i++) {
    PaintToolSlot *tslot = &paint->tool_slots[i];
    if (tslot->brush) {
      if ((i != BKE_brush_tool_get(tslot->brush, paint)) ||
          (tslot->brush->ob_mode & ob_mode) == 0) {
        id_us_min(&tslot->brush->id);
        tslot->brush = NULL;
      }
    }
  }

  /* Unlikely but possible the active brush is not currently using a slot. */
  BKE_paint_toolslots_brush_update(paint);

  /* Fill slots from brushes. */
  paint_toolslots_init(bmain, paint);
}
Example #7
/**
 * Replace all references in given Main to \a old_id by \a new_id
 * (if \a new_id is NULL, it unlinks \a old_id).
 */
void BKE_libblock_remap_locked(
        Main *bmain, void *old_idv, void *new_idv,
        const short remap_flags)
{
	IDRemap id_remap_data;
	ID *old_id = old_idv;
	ID *new_id = new_idv;
	int skipped_direct, skipped_refcounted;

	BLI_assert(old_id != NULL);
	BLI_assert((new_id == NULL) || GS(old_id->name) == GS(new_id->name));
	BLI_assert(old_id != new_id);

	libblock_remap_data(bmain, NULL, old_id, new_id, remap_flags, &id_remap_data);

	if (free_notifier_reference_cb) {
		free_notifier_reference_cb(old_id);
	}

	/* We assume editors do not hold references to their IDs... This is false in some cases
	 * (Image is especially tricky here); such editors' code is expected to handle the refcount (id->us) itself. */
	if (remap_editor_id_reference_cb) {
		remap_editor_id_reference_cb(old_id, new_id);
	}

	skipped_direct = id_remap_data.skipped_direct;
	skipped_refcounted = id_remap_data.skipped_refcounted;

	/* If old_id was used by some ugly 'user_one' stuff (like Image or Clip editors...) and its user count was
	 * actually incremented for that, we have to decrease its user count once more... unless we had to skip
	 * some 'user_one' cases. */
	if ((old_id->tag & LIB_TAG_EXTRAUSER_SET) && !(id_remap_data.status & ID_REMAP_IS_USER_ONE_SKIPPED)) {
		id_us_min(old_id);
		old_id->tag &= ~LIB_TAG_EXTRAUSER_SET;
	}

	BLI_assert(old_id->us - skipped_refcounted >= 0);
	UNUSED_VARS_NDEBUG(skipped_refcounted);

	if (skipped_direct == 0) {
		/* old_id is assumed to not be used directly anymore... */
		if (old_id->lib && (old_id->tag & LIB_TAG_EXTERN)) {
			old_id->tag &= ~LIB_TAG_EXTERN;
			old_id->tag |= LIB_TAG_INDIRECT;
		}
	}

	/* Some after-process updates.
	 * This is a bit ugly, but we cannot see a way to avoid it. Maybe we should do a per-ID callback for this instead?
	 */
	switch (GS(old_id->name)) {
		case ID_OB:
			libblock_remap_data_postprocess_object_fromgroup_update(bmain, (Object *)old_id, (Object *)new_id);
			break;
		case ID_GR:
			if (!new_id) {  /* Only affects us in case group was unlinked. */
				for (Scene *sce = bmain->scene.first; sce; sce = sce->id.next) {
					libblock_remap_data_postprocess_group_scene_unlink(bmain, sce, old_id);
				}
			}
			break;
		case ID_ME:
		case ID_CU:
		case ID_MB:
			if (new_id) {  /* Only affects us in case obdata was relinked (changed). */
				for (Object *ob = bmain->object.first; ob; ob = ob->id.next) {
					libblock_remap_data_postprocess_obdata_relink(bmain, ob, new_id);
				}
			}
			break;
		default:
			break;
	}

	/* Full rebuild of DAG! */
	DAG_relations_tag_update(bmain);
}
Example #8
/**
 * \return a face index in \a faces and set \a r_is_flip
 * if the face is flipped away from the center.
 */
static int recalc_face_normals_find_index(BMesh *bm,
                                          BMFace **faces,
                                          const int faces_len,
                                          bool *r_is_flip)
{
  const float eps = FLT_EPSILON;
  float cent_area_accum = 0.0f;
  float cent[3];
  const float cent_fac = 1.0f / (float)faces_len;

  bool is_flip = false;
  int f_start_index;
  int i;

  /** Search for the best loop. Members are compared in the order defined here. */
  struct {
    /**
     * Squared distance from the center to the loop's vertex 'l->v'.
     * The normalized direction between the center and this vertex
     * is also used for the dot-products below.
     */
    float dist_sq;
    /**
     * Signed dot product using the normalized edge vector,
     * (best of 'l->prev->v' or 'l->next->v').
     */
    float edge_dot;
    /**
     * Unsigned dot product using the loop-normal
     * (sign is used to check if we need to flip).
     */
    float loop_dot;
  } best, test;

  UNUSED_VARS_NDEBUG(bm);

  zero_v3(cent);

  /* first calculate the center */
  for (i = 0; i < faces_len; i++) {
    float f_cent[3];
    const float f_area = BM_face_calc_area(faces[i]);
    BM_face_calc_center_median_weighted(faces[i], f_cent);
    madd_v3_v3fl(cent, f_cent, cent_fac * f_area);
    cent_area_accum += f_area;

    BLI_assert(BMO_face_flag_test(bm, faces[i], FACE_TEMP) == 0);
    BLI_assert(BM_face_is_normal_valid(faces[i]));
  }

  if (cent_area_accum != 0.0f) {
    mul_v3_fl(cent, 1.0f / cent_area_accum);
  }

  /* Distances must start above zero,
   * or we can't do meaningful calculations based on the direction to the center */
  best.dist_sq = eps;
  best.edge_dot = best.loop_dot = -FLT_MAX;

  /* used in degenerate cases only */
  f_start_index = 0;

  /**
   * Find the outer-most vertex, comparing distance to the center,
   * then the outer-most loop attached to that vertex.
   *
   * It's important this is detected correctly,
   * so that casting a ray from the center won't hit any loops past this one.
   * Otherwise the result may be incorrect.
   */
  for (i = 0; i < faces_len; i++) {
    BMLoop *l_iter, *l_first;

    l_iter = l_first = BM_FACE_FIRST_LOOP(faces[i]);
    do {
      bool is_best_dist_sq;
      float dir[3];
      sub_v3_v3v3(dir, l_iter->v->co, cent);
      test.dist_sq = len_squared_v3(dir);
      is_best_dist_sq = (test.dist_sq > best.dist_sq);
      if (is_best_dist_sq || (test.dist_sq == best.dist_sq)) {
        float edge_dir_pair[2][3];
        mul_v3_fl(dir, 1.0f / sqrtf(test.dist_sq));

        sub_v3_v3v3(edge_dir_pair[0], l_iter->next->v->co, l_iter->v->co);
        sub_v3_v3v3(edge_dir_pair[1], l_iter->prev->v->co, l_iter->v->co);

        if ((normalize_v3(edge_dir_pair[0]) > eps) && (normalize_v3(edge_dir_pair[1]) > eps)) {
          bool is_best_edge_dot;
          test.edge_dot = max_ff(dot_v3v3(dir, edge_dir_pair[0]), dot_v3v3(dir, edge_dir_pair[1]));
          is_best_edge_dot = (test.edge_dot > best.edge_dot);
          if (is_best_dist_sq || is_best_edge_dot || (test.edge_dot == best.edge_dot)) {
            float loop_dir[3];
            cross_v3_v3v3(loop_dir, edge_dir_pair[0], edge_dir_pair[1]);
            if (normalize_v3(loop_dir) > eps) {
              float loop_dir_dot;
              /* Highly unlikely the furthest loop is also the concave part of an ngon,
               * but it can be contrived with _very_ non-planar faces - so better check. */
              if (UNLIKELY(dot_v3v3(loop_dir, l_iter->f->no) < 0.0f)) {
                negate_v3(loop_dir);
              }
              loop_dir_dot = dot_v3v3(dir, loop_dir);
              test.loop_dot = fabsf(loop_dir_dot);
              if (is_best_dist_sq || is_best_edge_dot || (test.loop_dot > best.loop_dot)) {
                best = test;
                f_start_index = i;
                is_flip = (loop_dir_dot < 0.0f);
              }
            }
          }
        }
      }
    } while ((l_iter = l_iter->next) != l_first);
  }

  *r_is_flip = is_flip;
  return f_start_index;
}