Example #1
void CamerasExporter::operator()(Object *ob, Scene *sce)
{
    // TODO: shiftx, shifty, YF_dofdist
    Camera *cam = (Camera*)ob->data;
    std::string cam_id(get_camera_id(ob));
    std::string cam_name(id_name(cam));

    switch (cam->type) {
    case CAM_PANO:
    case CAM_PERSP: {
        COLLADASW::PerspectiveOptic persp(mSW);
        persp.setXFov(RAD2DEGF(focallength_to_fov(cam->lens, cam->sensor_x)), "xfov");
        persp.setAspectRatio((float)(sce->r.xsch)/(float)(sce->r.ysch), false, "aspect_ratio");
        persp.setZFar(cam->clipend, false, "zfar");
        persp.setZNear(cam->clipsta, false, "znear");
        COLLADASW::Camera ccam(mSW, &persp, cam_id, cam_name);
        addCamera(ccam);
        break;
    }
    case CAM_ORTHO:
    default:
    {
        COLLADASW::OrthographicOptic ortho(mSW);
        ortho.setXMag(cam->ortho_scale, "xmag");
        ortho.setAspectRatio((float)(sce->r.xsch)/(float)(sce->r.ysch), false, "aspect_ratio");
        ortho.setZFar(cam->clipend, false, "zfar");
        ortho.setZNear(cam->clipsta, false, "znear");
        COLLADASW::Camera ccam(mSW, &ortho, cam_id, cam_name);
        addCamera(ccam);
        break;
    }
    }
}
Example #2
/*
 * Similar to create_source_from_fcurve, but adds conversion of lens
 * animation data from focal length to FOV.
 */
std::string AnimationExporter::create_lens_source_from_fcurve(Camera *cam, COLLADASW::InputSemantic::Semantics semantic, FCurve *fcu, const std::string& anim_id)
{
	std::string source_id = anim_id + get_semantic_suffix(semantic);

	COLLADASW::FloatSourceF source(mSW);
	source.setId(source_id);
	source.setArrayId(source_id + ARRAY_ID_SUFFIX);
	source.setAccessorCount(fcu->totvert);

	source.setAccessorStride(1);

	COLLADASW::SourceBase::ParameterNameList &param = source.getParameterNameList();
	add_source_parameters(param, semantic, false, "", false);

	source.prepareToAppendValues();

	for (unsigned int i = 0; i < fcu->totvert; i++) {
		float values[3]; // be careful!
		int length = 0;
		get_source_values(&fcu->bezt[i], semantic, false, values, &length);
		for (int j = 0; j < length; j++) {
			float val = RAD2DEGF(focallength_to_fov(values[j], cam->sensor_x));
			source.appendValues(val);
		}
	}

	source.finish();

	return source_id;
}
Example #3
/* 'rotmat' can be obedit->obmat when uv project is used.
 * 'winx' and 'winy' can be from scene->r.xsch/ysch */
ProjCameraInfo *BLI_uvproject_camera_info(Object *ob, float(*rotmat)[4], float winx, float winy)
{
	ProjCameraInfo uci;
	Camera *camera = ob->data;

	uci.do_pano = (camera->type == CAM_PANO);
	uci.do_persp = (camera->type == CAM_PERSP);

	uci.camangle = focallength_to_fov(camera->lens, camera->sensor_x) / 2.0f;
	uci.camsize = uci.do_persp ? tanf(uci.camangle) : camera->ortho_scale;

	/* account for scaled cameras */
	copy_m4_m4(uci.caminv, ob->obmat);
	normalize_m4(uci.caminv);

	if (invert_m4(uci.caminv)) {
		ProjCameraInfo *uci_pt;

		/* normal projection */
		if (rotmat) {
			copy_m4_m4(uci.rotmat, rotmat);
			uci.do_rotmat = TRUE;
		}
		else {
			uci.do_rotmat = FALSE;
		}

		/* also make aspect ratio adjustment factors */
		if (winx > winy) {
			uci.xasp = 1.0f;
			uci.yasp = winx / winy;
		}
		else {
			uci.xasp = winy / winx;
			uci.yasp = 1.0f;
		}

		/* include 0.5f here to move the UVs into the center */
		uci.shiftx = 0.5f - (camera->shiftx * uci.xasp);
		uci.shifty = 0.5f - (camera->shifty * uci.yasp);

		uci_pt = MEM_mallocN(sizeof(ProjCameraInfo), "ProjCameraInfo");
		*uci_pt = uci;
		return uci_pt;
	}

	return NULL;
}
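The header comment above already hints at typical arguments (a NULL rotmat for a plain camera projection, winx/winy taken from scene->r.xsch/ysch). Below is a hypothetical usage sketch under those assumptions; the wrapper function and variable names are placeholders, not existing Blender API, and it assumes the usual headers (BLI_uvproject.h, MEM_guardedalloc.h, DNA types) are included.

/* Hypothetical usage sketch (wrapper and variable names are placeholders):
 * build projection info for a camera object with no extra rotation matrix,
 * using the scene render resolution, then free the result. */
static void project_uvs_from_camera_sketch(Scene *scene, Object *camera_ob)
{
	ProjCameraInfo *uci = BLI_uvproject_camera_info(
	        camera_ob, NULL, (float)scene->r.xsch, (float)scene->r.ysch);

	if (uci) {
		/* ... run each UV through the projection described by 'uci' ... */
		MEM_freeN(uci);
	}
}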
Example #4
static float rna_Camera_angle_y_get(PointerRNA *ptr)
{
	Camera *cam = ptr->id.data;
	return focallength_to_fov(cam->lens, cam->sensor_y);
}
Example #5
static float rna_Camera_angle_get(PointerRNA *ptr)
{
	Camera *cam = ptr->id.data;
	float sensor = BKE_camera_sensor_size(cam->sensor_fit, cam->sensor_x, cam->sensor_y);
	return focallength_to_fov(cam->lens, sensor);
}
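Both RNA getters above are thin wrappers around focallength_to_fov(). For reading these examples it helps to keep the underlying pinhole relationship in mind, fov = 2 * atan(sensor / (2 * focal_length)); the sketch below only illustrates that assumed relationship and is not Blender's implementation.

#include <math.h>

/* Illustrative sketch of the pinhole focal-length-to-FOV relationship the
 * getters above rely on (an assumption for illustration; Blender's real
 * focallength_to_fov() is the authoritative version). The result is in
 * radians, which is why examples #1 and #2 wrap it in RAD2DEGF. */
static float focallength_to_fov_sketch(float focal_length, float sensor)
{
	return 2.0f * atanf((sensor * 0.5f) / focal_length);
}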
Example #6
static bool view3d_localview_init(
        wmWindowManager *wm, wmWindow *win,
        Main *bmain, Scene *scene, ScrArea *sa, const int smooth_viewtx,
        ReportList *reports)
{
	View3D *v3d = sa->spacedata.first;
	Base *base;
	float min[3], max[3], box[3], mid[3];
	float size = 0.0f, size_persp = 0.0f, size_ortho = 0.0f;
	unsigned int locallay;
	bool ok = false;

	if (v3d->localvd) {
		return ok;
	}

	INIT_MINMAX(min, max);

	locallay = free_localbit(bmain);

	if (locallay == 0) {
		BKE_report(reports, RPT_ERROR, "No more than 8 local views");
		ok = false;
	}
	else {
		if (scene->obedit) {
			BKE_object_minmax(scene->obedit, min, max, false);
			
			ok = true;
		
			BASACT->lay |= locallay;
			scene->obedit->lay = BASACT->lay;
		}
		else {
			for (base = FIRSTBASE; base; base = base->next) {
				if (TESTBASE(v3d, base)) {
					BKE_object_minmax(base->object, min, max, false);
					base->lay |= locallay;
					base->object->lay = base->lay;
					ok = true;
				}
			}
		}

		sub_v3_v3v3(box, max, min);
		size = max_fff(box[0], box[1], box[2]);

		/* do not zoom closer than the near clipping plane */
		size = max_ff(size, v3d->near * 1.5f);

		/* perspective size (we always switch out of camera view so no need to use its lens size) */
		size_persp = ED_view3d_radius_to_persp_dist(focallength_to_fov(v3d->lens, DEFAULT_SENSOR_WIDTH), size / 2.0f) * VIEW3D_MARGIN;
		size_ortho = ED_view3d_radius_to_ortho_dist(v3d->lens, size / 2.0f) * VIEW3D_MARGIN;
	}
	
	if (ok == true) {
		ARegion *ar;
		
		v3d->localvd = MEM_mallocN(sizeof(View3D), "localview");
		
		memcpy(v3d->localvd, v3d, sizeof(View3D));

		mid_v3_v3v3(mid, min, max);

		copy_v3_v3(v3d->cursor, mid);

		for (ar = sa->regionbase.first; ar; ar = ar->next) {
			if (ar->regiontype == RGN_TYPE_WINDOW) {
				RegionView3D *rv3d = ar->regiondata;

				/* new view values */
				Object *camera_old = NULL;
				float dist_new, ofs_new[3];

				rv3d->localvd = MEM_mallocN(sizeof(RegionView3D), "localview region");
				memcpy(rv3d->localvd, rv3d, sizeof(RegionView3D));

				negate_v3_v3(ofs_new, mid);

				if (rv3d->persp == RV3D_CAMOB) {
					rv3d->persp = RV3D_PERSP;
					camera_old = v3d->camera;
				}

				/* perspective should be a bit farther away to look nice */
				if (rv3d->persp != RV3D_ORTHO) {
					dist_new = size_persp;
				}
				else {
					dist_new = size_ortho;
				}

				/* correction for window aspect ratio */
				if (ar->winy > 2 && ar->winx > 2) {
					float asp = (float)ar->winx / (float)ar->winy;
					if (asp < 1.0f) asp = 1.0f / asp;
					dist_new *= asp;
				}

				ED_view3d_smooth_view_ex(
				        wm, win, sa,
				        v3d, ar, camera_old, NULL,
				        ofs_new, NULL, &dist_new, NULL,
				        smooth_viewtx);
			}
		}
		
		v3d->lay = locallay;
	}
	else {
		/* clear flags */ 
		for (base = FIRSTBASE; base; base = base->next) {
			if (base->lay & locallay) {
				base->lay -= locallay;
				if (base->lay == 0) base->lay = v3d->layact;
				if (base->object != scene->obedit) base->flag |= SELECT;
				base->object->lay = base->lay;
			}
		}
	}

	return ok;
}
Example #7
/**
 * Return a new RegionView3D.dist value to fit the \a radius.
 *
 * \note Depth isn't taken into account: this fits a flat plane exactly,
 * but a point towards the view (with a perspective projection)
 * may be within the radius yet outside the view, e.g.:
 *
 * <pre>
 *           +
 * pt --> + /^ radius
 *         / |
 *        /  |
 * view  +   +
 *        \  |
 *         \ |
 *          \|
 *           +
 * </pre>
 *
 * \param ar  Can be NULL if \a use_aspect is false.
 * \param persp  Allow the caller to tell what kind of perspective to use (ortho/view/camera)
 * \param use_aspect  Increase the distance to account for non 1:1 view aspect.
 * \param radius  The radius will be fitted exactly, typically pre-scaled by a margin (#VIEW3D_MARGIN).
 */
float ED_view3d_radius_to_dist(
        const View3D *v3d, const ARegion *ar,
        const char persp, const bool use_aspect,
        const float radius)
{
	float dist;

	BLI_assert(ELEM(persp, RV3D_ORTHO, RV3D_PERSP, RV3D_CAMOB));
	BLI_assert((persp != RV3D_CAMOB) || v3d->camera);

	if (persp == RV3D_ORTHO) {
		dist = ED_view3d_radius_to_dist_ortho(v3d->lens, radius);
	}
	else {
		float lens, sensor_size, zoom;
		float angle;

		if (persp == RV3D_CAMOB) {
			CameraParams params;
			BKE_camera_params_init(&params);
			params.clipsta = v3d->near;
			params.clipend = v3d->far;
			BKE_camera_params_from_object(&params, v3d->camera);

			lens = params.lens;
			sensor_size = BKE_camera_sensor_size(params.sensor_fit, params.sensor_x, params.sensor_y);

			/* ignore 'rv3d->camzoom' because we want to fit to the camera's frame */
			zoom = CAMERA_PARAM_ZOOM_INIT_CAMOB;
		}
		else {
			lens = v3d->lens;
			sensor_size = DEFAULT_SENSOR_WIDTH;
			zoom = CAMERA_PARAM_ZOOM_INIT_PERSP;
		}

		angle = focallength_to_fov(lens, sensor_size);

		/* zoom influences lens, correct this by scaling the angle as a distance (by the zoom-level) */
		angle = atanf(tanf(angle / 2.0f) * zoom) * 2.0f;

		dist = ED_view3d_radius_to_dist_persp(angle, radius);
	}

	if (use_aspect) {
		const RegionView3D *rv3d = ar->regiondata;

		float winx, winy;

		if (persp == RV3D_CAMOB) {
			/* camera frame x/y in pixels */
			winx = ar->winx / rv3d->viewcamtexcofac[0];
			winy = ar->winy / rv3d->viewcamtexcofac[1];
		}
		else {
			winx = ar->winx;
			winy = ar->winy;
		}

		if (winx && winy) {
			float aspect = winx / winy;
			if (aspect < 1.0f) {
				aspect = 1.0f / aspect;
			}
			dist *= aspect;
		}
	}

	return dist;
}
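The perspective branch above ultimately reduces to the flat-plane fit described in the doc comment: a plane of half-width radius should exactly fill a view whose field of view is angle. A minimal sketch of that relationship follows; the function name is illustrative and Blender's own ED_view3d_radius_to_dist_persp() may differ in detail.

#include <math.h>

/* Illustrative sketch (assumption, not the actual Blender helper): the
 * distance at which a flat plane of half-width 'radius' exactly spans a
 * view whose field of view is 'angle' (radians), i.e.
 *   tan(angle / 2) = radius / dist  =>  dist = radius / tan(angle / 2). */
static float radius_to_dist_persp_sketch(float angle, float radius)
{
	return radius / tanf(angle * 0.5f);
}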