void SetTransformation(const Vec3& pos, const Quat& rot) {
    if (mOverridingCamera) {
        mOverridingCamera->SetTransformation(pos, rot);
        return;
    }
    mTransformation.SetTranslation(pos);
    mTransformation.SetRotation(rot);
    mViewPropertyChanged = true;
}
void MultiResLayer::SetSubsplatStencil(CameraPtr const & vp_camera) {
    *subsplat_depth_deriv_tex_param_ = depth_deriative_tex_;
    *subsplat_normal_cone_tex_param_ = normal_cone_tex_;
    *subsplat_depth_normal_threshold_param_ = float2(0.001f * vp_camera->FarPlane(), 0.77f);
    *subsplat_far_plane_param_ = float2(vp_camera->FarPlane(), 1.0f / vp_camera->FarPlane());

    RenderEngine& re = Context::Instance().RenderFactoryInstance().RenderEngineInstance();
    for (size_t i = 0; i < multi_res_fbs_.size(); ++i) {
        re.BindFrameBuffer(multi_res_fbs_[i]);
        multi_res_fbs_[i]->Clear(FrameBuffer::CBM_Color | FrameBuffer::CBM_Depth | FrameBuffer::CBM_Stencil,
            Color(0, 0, 0, 0), 0.0f, 128);

        *subsplat_cur_lower_level_param_ = int2(static_cast<int>(i), static_cast<int>(i + 1));
        *subsplat_is_not_first_last_level_param_ = int2(i > 0, i < multi_res_fbs_.size() - 1);

        re.Render(*subsplat_stencil_tech_, *rl_quad_);
    }
}
void _setDefaultCamera() {
    ScenePtr scene = _engine->getScene();
    CameraPtr camera = _engine->getCamera();
    FrameBufferPtr frameBuffer = _engine->getFrameBuffer();
    const Vector2i& frameSize = frameBuffer->getSize();

    const Boxf& worldBounds = scene->getWorldBounds();
    const Vector3f& target = worldBounds.getCenter();
    const Vector3f& diag = worldBounds.getSize();
    Vector3f position = target;
    position.z() -= diag.z();

    const Vector3f up = Vector3f( 0.f, 1.f, 0.f );
    camera->setInitialState( position, target, up );
    camera->setAspectRatio(
        static_cast< float >( frameSize.x() ) /
        static_cast< float >( frameSize.y() ));
}
AVOID DeferredRenderer::PrepareForLightPass(CameraPtr pCamera) {
    //set vertex buffer with positions
    m_pVertices->Set(0, 0);
    //set vertex buffer with texture data
    m_pTexCoords->Set(1, 0);

    //bind matrix constant buffer to the pipeline
    Mat4x4 trans;
    //trans.CreateTranslation(pCamera->GetLookAt() + 10000 * pCamera->GetDir());
    trans.CreateTranslation(pCamera->GetPosition() + 500 * pCamera->GetDir());

    Mat4x4 rot;
    rot = rot.CreateRollPitchYaw(pCamera->GetRoll(), pCamera->GetPitch(), pCamera->GetYaw());

    Mat4x4 WVP = rot * trans * pCamera->GetView() *
        CreateOrthoProjectionLH(SCREEN_WIDTH, SCREEN_HEIGHT, 1.0f, 1000.0f);
    WVP.Transpose();

    m_pMatrixBuffer->UpdateSubresource(0, NULL, &WVP, 0, 0);
    m_pMatrixBuffer->Set(0, ST_Vertex);

    struct CameraBuffer {
        Mat4x4 inverseViewProjection;
        Vec pos;
    };
    CameraBuffer cameraBuffer;

    Mat4x4 inverseViewProjection = pCamera->GetViewProjection();
    inverseViewProjection.Inverse();

    cameraBuffer.pos = pCamera->GetPosition();
    cameraBuffer.inverseViewProjection = inverseViewProjection;
    cameraBuffer.inverseViewProjection.Transpose();

    m_pcbCameraPos->UpdateSubresource(0, nullptr, &pCamera->GetPosition(), 0, 0);
    //m_pcbCameraPos->UpdateSubresource(0, NULL, &cameraBuffer, 0, 0);
    m_pcbCameraPos->Set(0, ST_Pixel);

    //pCamera->SetViewport();
    SetGlobalViewport();

    //set shader resources
    m_pSSAOBlurredSRV->Set(6, ST_Pixel);
    m_pDepthSRV->Set(8, ST_Pixel);

    //set blending functionality
    //this->BlendLightPass()->Set(nullptr);
}
//---------------------------------------------------------------------
void SceneResource::processCamera(const Util::XmlNode * cameraNode) const {
    CameraPtr camera;

    /// params
    {
        Util::String nearClipStr(mXmlReader->getAttribute(cameraNode, "near_clip"));
        Util::real nearClip = boost::lexical_cast<Util::real>(nearClipStr);

        Util::String farClipStr(mXmlReader->getAttribute(cameraNode, "far_clip"));
        Util::real farClip = boost::lexical_cast<Util::real>(farClipStr);

        Util::String viewportRectStr(mXmlReader->getAttribute(cameraNode, "viewport_rect"));
        XMVECTOR viewportRect = Util::StringToVector(viewportRectStr, 4);

        camera = boost::make_shared<Camera>(nearClip, farClip,
            Util::UintPair(XMVectorGetIntX(viewportRect), XMVectorGetIntY(viewportRect)),
            Util::UintPair(XMVectorGetIntZ(viewportRect), XMVectorGetIntW(viewportRect)));
        IF_NULL_EXCEPTION(camera, "Camera create failed!");
    }

    /// position
    {
        Util::String posStr(mXmlReader->getAttribute(cameraNode, "position"));
        camera->setPosition(Util::StringToVector(posStr, 3));
    }

    /// lookat
    {
        Util::String str(mXmlReader->getAttribute(cameraNode, "lookat"));
        camera->lookAt(Util::StringToVector(str, 3));
    }

    /// movespeed
    {
        Util::String str(mXmlReader->getAttribute(cameraNode, "move_speed"));
        camera->setMoveSpeed(boost::lexical_cast<Util::real>(str));
    }

    EngineManager::getSingleton().setCamera(camera);
}
void Character::draw(ALLEGRO_DISPLAY *display, CameraPtr camera) {
    Rect rect = OffsetRect(getRect(), camera->getOffset());
    al_draw_filled_rectangle(
        rect.getLeft(), rect.getTop(),
        rect.getRight(), rect.getBottom(),
        al_map_rgb(255, 0, 0));
}
void SetOrthogonalData(float left, float top, float right, float bottom) {
    if (mOverridingCamera) {
        mOverridingCamera->SetOrthogonalData(left, top, right, bottom);
        return;
    }
    mOrthogonalData.left = left;
    mOrthogonalData.top = top;
    mOrthogonalData.right = right;
    mOrthogonalData.bottom = bottom;
    mProjPropertyChanged = true;
}
void SetPosition(const Vec3& pos) {
    if (mOverridingCamera) {
        mOverridingCamera->SetPosition(pos);
        return;
    }
    if (mTransformation.GetTranslation() == pos)
        return;
    mTransformation.SetTranslation(pos);
    mViewPropertyChanged = true;
}
void mouse_movement_callback(GLFWwindow *, double xpos, double ypos) {
    if (Director::getScene()->hasMainCamera() && !(Director::getScene()->getCamera()->fixedMouse())) {
        CameraPtr cam = Director::getScene()->getCamera();
        double xoffset = (xpos - lastX) * mouse_sensitivity;
        double yoffset = (ypos - lastY) * mouse_sensitivity;
        lastX = xpos;
        lastY = ypos;

        float phi = cam->getPhi(), theta = cam->getTheta();
        phi -= (float)yoffset * 360.0f / Global::ScreenHeight;
        theta += (float)xoffset * 360.0f / Global::ScreenWidth;
        // clamp the pitch so the camera cannot flip over the poles
        if (phi > 80.0f) phi = 80.0f;
        if (phi < -80.0f) phi = -80.0f;

        cam->setAngles(theta, phi);
    }
}
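For context, a hedged sketch of how theta/phi angles like those set above are commonly turned into a view direction. Vec3, the axis convention, and the name anglesToDirection are assumptions for illustration; the actual mapping inside this engine's setAngles() may differ.

#include <cmath>

// Hypothetical conversion from the spherical angles used by the callback
// (theta = yaw, phi = pitch, both in degrees) to a unit view direction.
Vec3 anglesToDirection(float thetaDeg, float phiDeg)
{
    const float d2r = 3.14159265f / 180.0f;
    const float theta = thetaDeg * d2r;
    const float phi = phiDeg * d2r;
    return Vec3(cosf(phi) * cosf(theta),
                sinf(phi),
                cosf(phi) * sinf(theta));
}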
void SetDirection(const Vec3& dir) {
    if (mOverridingCamera) {
        mOverridingCamera->SetDirection(dir);
        return;
    }
    if (mTransformation.GetForward() == dir)
        return;
    mTransformation.SetDirection(dir);
    mViewPropertyChanged = true;
}
Point diff(const PointPair &pp, CameraPtr view1, CameraPtr view2, const Model &) {
    const Point &pp1 = pp.first;
    const Point &pp2 = pp.second;

    // Cast rays through the two image points
    Ray3d R1 = view1->unproject(pp1[0], pp1[1]);
    Ray3d R2 = view2->unproject(pp2[0], pp2[1]);

    Point out(1);

    // Distance between the two cast rays is used as the error metric
    Ray3d::Point p1, p2;
    R1.closestPoints(R2, p1, p2);
    out[0] = (p1 - p2).norm();

    // Scale by distance to image plane to approximate image-space distance
    Vector3d mid = (p1 + p2) * 0.5;
    Vector3d mid1 = view1->fromGlobalToLocal(mid);
    Vector3d mid2 = view2->fromGlobalToLocal(mid);
    const double v1 = (0.5 * view1->K()(0, 0) * out[0]) / mid1.z();
    const double v2 = (0.5 * view2->K()(0, 0) * out[0]) / mid2.z();
    out[0] = v1 + v2;

    return out;
}
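A hedged usage sketch for the metric above: accumulating diff() over a set of correspondences yields a mean image-space ray-gap error, as a calibration or matching residual. The std::vector container and the meanError name are assumptions for illustration, not part of the original source.

#include <vector>

// Hypothetical helper: average the image-space-scaled ray gap over all
// point correspondences between two views, reusing diff() exactly as above.
double meanError(const std::vector<PointPair>& pairs,
                 CameraPtr view1, CameraPtr view2, const Model& model)
{
    double sum = 0.0;
    for (const PointPair& pp : pairs)
        sum += diff(pp, view1, view2, model)[0];
    return pairs.empty() ? 0.0 : sum / pairs.size();
}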
void CameraImageInput::read() {
    if(port.isNew()){
        // drain the port so that only the most recent image is processed
        do {
            port.read();
        } while(port.isNew());

        std::shared_ptr<Image> image;
        auto srcImage = timedCameraImage.data.image;
        switch(srcImage.format){
        case Img::CF_RGB:
            image = createImageFromRawData(srcImage, 3);
            break;
        case Img::CF_GRAY:
            image = createImageFromRawData(srcImage, 1);
            break;
#ifndef USE_BUILTIN_CAMERA_IMAGE_IDL
        case Img::CF_JPEG:
        case Img::CF_PNG:
#else
        case Img::CF_GRAY_JPEG:
        case Img::CF_RGB_JPEG:
#endif
            image = createImageFromImageFormat(srcImage);
            break;
        default:
            break;
        }

        if(image){
            CameraPtr tmpCamera = camera;
            callLater([tmpCamera, image]() mutable {
                tmpCamera->setImage(image);
                tmpCamera->notifyStateChange();
            });
        }
    }
}
void IECoreArnold::RendererImplementation::camera( const std::string &name, const IECore::CompoundDataMap &parameters )
{
    CameraPtr cortexCamera = new Camera( name, 0, new CompoundData( parameters ) );
    cortexCamera->addStandardParameters();

    AtNode *arnoldCamera = CameraAlgo::convert( cortexCamera.get() );

    string nodeName = boost::str( boost::format( "ieCoreArnold:camera:%s" ) % name );
    AiNodeSetStr( arnoldCamera, "name", nodeName.c_str() );

    AtNode *options = AiUniverseGetOptions();
    AiNodeSetPtr( options, "camera", arnoldCamera );

    applyTransformToNode( arnoldCamera );

    const V2iData *resolution = cortexCamera->parametersData()->member<V2iData>( "resolution" );
    AiNodeSetInt( options, "xres", resolution->readable().x );
    AiNodeSetInt( options, "yres", resolution->readable().y );

    const FloatData *pixelAspectRatio = cortexCamera->parametersData()->member<FloatData>( "pixelAspectRatio" );
    AiNodeSetFlt( options, "aspect_ratio", 1.0f / pixelAspectRatio->readable() ); // arnold is y/x, we're x/y
}
//----------------------------------------------------------------------------
void ConvexRegionManager::Draw (Renderer& rkRenderer)
{
    CameraPtr spCamera = rkRenderer.GetCamera();
    Vector3f kEye = spCamera->GetLocation();
    if ( m_bUseEyePlusNear )
        kEye += spCamera->GetFrustumNear()*spCamera->GetDirection();

    ConvexRegion* pkRegion = GetContainingRegion(kEye);
    if ( pkRegion )
    {
        // inside the set of regions, start drawing with region of camera
        pkRegion->Draw(rkRenderer);
    }
    else
    {
        // outside the set of regions, draw the outside scene (if it exists)
        if ( GetOutside() )
            GetOutside()->Draw(rkRenderer);
    }
}
IECore::CameraPtr GafferScene::camera( const ScenePlug *scene, const IECore::CompoundObject *globals )
{
    ConstCompoundObjectPtr computedGlobals;
    if( !globals )
    {
        computedGlobals = scene->globalsPlug()->getValue();
        globals = computedGlobals.get();
    }

    if( const StringData *cameraPathData = globals->member<StringData>( "option:render:camera" ) )
    {
        ScenePlug::ScenePath cameraPath;
        ScenePlug::stringToPath( cameraPathData->readable(), cameraPath );
        return camera( scene, cameraPath, globals );
    }
    else
    {
        CameraPtr defaultCamera = new IECore::Camera();
        applyCameraGlobals( defaultCamera.get(), globals );
        return defaultCamera;
    }
}
void SceneObject::attachCamera( CameraPtr cam )
{
    AssertMsg( cam->getFunctionality() == Camera::kFunc_FPS, "Cannot attach non-FPS cameras" );
    AssertMsg( mFPSCamera, "missing information about how to attach the camera" );

    mCamera = cam;
    mCamera->setTarget(mFPSCamera->target + getPosition());

    if (mCamera && mFPSCamera)
    {
        mFPSCamera->UpdatePosition(this, mCamera, getPosition());
        mFPSCamera->UpdateRotation(this, mCamera, getRotation());
    }
}
void OGRLIBKMLLayer::SetCamera( const char* pszCameraLongitude,
                                const char* pszCameraLatitude,
                                const char* pszCameraAltitude,
                                const char* pszCameraHeading,
                                const char* pszCameraTilt,
                                const char* pszCameraRoll,
                                const char* pszCameraAltitudeMode )
{
    int isGX = FALSE;
    int iAltitudeMode = kmlAltitudeModeFromString(pszCameraAltitudeMode, isGX);
    if( isGX == FALSE && iAltitudeMode == kmldom::ALTITUDEMODE_CLAMPTOGROUND )
    {
        CPLError(CE_Warning, CPLE_AppDefined,
                 "Camera altitudeMode should be different from %s",
                 pszCameraAltitudeMode);
        return;
    }

    KmlFactory *poKmlFactory = m_poOgrDS->GetKmlFactory();
    CameraPtr camera = poKmlFactory->CreateCamera();
    camera->set_latitude(CPLAtof(pszCameraLatitude));
    camera->set_longitude(CPLAtof(pszCameraLongitude));
    camera->set_altitude(CPLAtof(pszCameraAltitude));
    if( pszCameraHeading != NULL )
        camera->set_heading(CPLAtof(pszCameraHeading));

    if( pszCameraTilt != NULL )
    {
        double dfTilt = CPLAtof(pszCameraTilt);
        if( dfTilt >= 0 && dfTilt <= 90 )
            camera->set_tilt(dfTilt);
        else
            CPLError(CE_Warning, CPLE_AppDefined,
                     "Invalid value for tilt: %s", pszCameraTilt);
    }

    if( pszCameraRoll != NULL )
        camera->set_roll(CPLAtof(pszCameraRoll));
    if( isGX )
        camera->set_gx_altitudemode(iAltitudeMode);
    else
        camera->set_altitudemode(iAltitudeMode);

    m_poKmlLayer->set_abstractview(camera);
}
void RenderPipelineLayer::assignLight(RenderScenePtr scene, CameraPtr camera) noexcept
{
    assert(camera && scene);

    _renderQueue[RenderQueue::RQ_LIGHTING][RenderPass::RP_LIGHTS].clear();

    scene->computVisiableLight(camera->getViewProject(), [&](LightPtr it) {
        this->addRenderData(RenderQueue::RQ_LIGHTING, RenderPass::RP_LIGHTS, it);
    });
}
void uvctest::testExposure() {
    debug(LOG_DEBUG, DEBUG_LOG, 0, "get the first camera device");
    CameraPtr camera = locator->getCamera(0);
    int ccdindex = default_ccdid;
    debug(LOG_DEBUG, DEBUG_LOG, 0, "get the CCD no %d", ccdindex);
    CcdPtr ccd = camera->getCcd(ccdindex);

    Exposure exposure(ccd->getInfo().getFrame(), default_exposuretime);
    debug(LOG_DEBUG, DEBUG_LOG, 0, "start an exposure: %s",
        exposure.toString().c_str());
    ccd->startExposure(exposure);
    ccd->exposureStatus();

    debug(LOG_DEBUG, DEBUG_LOG, 0, "retrieve an image");
    ImageSequence imgseq = ccd->getImageSequence(2);
    ImagePtr image = imgseq[imgseq.size() - 1];
    debug(LOG_DEBUG, DEBUG_LOG, 0, "image retrieved");

    // write the image to a file
    unlink("test.fits");
    FITSout file("test.fits");
    file.write(image);

    if (ccdindex == 2) {
        DemosaicBilinear<unsigned char> demosaicer;
        Image<unsigned char> *mosaicimg
            = dynamic_cast<Image<unsigned char> *>(&*image);
        if (NULL != mosaicimg) {
            Image<RGB<unsigned char> > *demosaiced = demosaicer(*mosaicimg);
            ImagePtr demosaicedptr(demosaiced);
            unlink("test-demosaiced.fits");
            FITSout demosaicedfile("test-demosaiced.fits");
            demosaicedfile.write(demosaicedptr);
        } else {
            debug(LOG_ERR, DEBUG_LOG, 0, "not a mosaic image");
        }
    }
}
void onItemAdded(Item* item) {
    MessageView* mv = MessageView::instance();

    if(BodyItem* bodyItem = dynamic_cast<BodyItem*>(item)){
        Body* body = bodyItem->body();
        for(size_t i=0; i < body->numDevices(); ++i){
            Device* device = body->device(i);
            if(!camera){
                camera = dynamic_pointer_cast<Camera>(device);
                if(camera){
                    mv->putln(format("RTCVisionSensorSamplePlugin: Detected Camera \"%1%\" of %2%.")
                              % camera->name() % body->name());
                    visionSensorSampleRTC->setCameraLocalT(camera->T_local());
                }
            }
            if(!rangeSensor){
                rangeSensor = dynamic_pointer_cast<RangeSensor>(device);
                if(rangeSensor){
                    mv->putln(format("RTCVisionSensorSamplePlugin: Detected RangeSensor \"%1%\" of %2%.")
                              % rangeSensor->name() % body->name());
                    visionSensorSampleRTC->setRangeSensorLocalT(rangeSensor->T_local());
                }
            }
        }
    } else if(PointSetItem* pointSetItem = dynamic_cast<PointSetItem*>(item)){
        if(pointSetItem->name() == "RangeCameraPoints"){
            pointSetFromRangeCamera = pointSetItem->pointSet();
            mv->putln("RTCVisionSensorSamplePlugin: Detected PointSetItem \"RangeCameraPoints\"");
            visionSensorSampleRTC->setPointSetFromRangeCamera(pointSetFromRangeCamera);
        } else if(pointSetItem->name() == "RangeSensorPoints"){
            pointSetFromRangeSensor = pointSetItem->pointSet();
            mv->putln("RTCVisionSensorSamplePlugin: Detected PointSetItem \"RangeSensorPoints\"");
            visionSensorSampleRTC->setPointSetFromRangeSensor(pointSetFromRangeSensor);
        }
    }
}
void render() {
    ScenePtr scene = _engine->getScene();
    CameraPtr camera = _engine->getCamera();
    FrameBufferPtr frameBuffer = _engine->getFrameBuffer();
    const Vector2i& frameSize = frameBuffer->getSize();

    _engine->preRender();

#if(BRAYNS_USE_DEFLECT || BRAYNS_USE_REST)
    if( !_extensionPluginFactory )
        _intializeExtensionPluginFactory( );
    _extensionPluginFactory->execute( );
#endif

    if( _parametersManager->getRenderingParameters().getSunOnCamera() )
    {
        LightPtr sunLight = scene->getLight( 0 );
        DirectionalLight* sun =
            dynamic_cast< DirectionalLight* >( sunLight.get() );
        if( sun )
        {
            sun->setDirection( camera->getTarget() - camera->getPosition() );
            scene->commitLights();
        }
    }

    camera->commit();
    _render( );
    _engine->postRender();

    const Vector2ui windowSize = _parametersManager
        ->getApplicationParameters()
        .getWindowSize();
    if( windowSize != frameSize )
        reshape(windowSize);
}
/*! Get/calculate the decoration matrix for this camera. The default is
    identity.
*/
void OffsetCameraDecorator::getDecoration(Matrix &result, UInt32 width, UInt32 height)
{
    if(width == 0 || height == 0)
    {
        result.setIdentity();
        return;
    }

    CameraPtr camera = getDecoratee();
    if(camera == NullFC)
    {
        FWARNING(("OffsetCameraDecorator::getDecoration: no decoratee!\n"));
        result.setIdentity();
        return;
    }

    if(getFullWidth() != 0)
        width = getFullWidth();

    if(getFullHeight() != 0)
        height = getFullHeight();

    // this is the only difference to getProjection()
    camera->getDecoration(result, width, height);

    // shift the result by the offset, expressed as a fraction of the full size
    Real32 x = getOffsetX() / Real32(width);
    Real32 y = getOffsetY() / Real32(height);

    Matrix sm(1, 0, 0, x,
              0, 1, 0, y,
              0, 0, 1, 0,
              0, 0, 0, 1);

    result.multLeft(sm);
}
/**
 * \brief Get a cooler from the camera
 *
 * \param name	devicename for a cooler
 */
CoolerPtr QhyCameraLocator::getCooler0(const DeviceName& name) {
    debug(LOG_DEBUG, DEBUG_LOG, 0, "get QHY cooler named: %s",
        name.toString().c_str());
    QhyName qhyname(name);
    if (!qhyname.isCooler(name)) {
        std::string msg = stringprintf("%s is not a Cooler name",
            name.toString().c_str());
        debug(LOG_ERR, DEBUG_LOG, 0, "%s", msg.c_str());
        throw std::runtime_error(msg);
    }
    DeviceName cameraname = qhyname.cameraname();
    debug(LOG_DEBUG, DEBUG_LOG, 0, "looking for cooler of camera %s",
        cameraname.toString().c_str());
    CameraPtr camera = this->getCamera(cameraname);
    CcdPtr ccd = camera->getCcd(0);
    if (!ccd->hasCooler()) {
        debug(LOG_ERR, DEBUG_LOG, 0, "camera has no cooler");
        throw NotFound("camera does not have a cooler");
    }
    CoolerPtr result = ccd->getCooler();
    debug(LOG_DEBUG, DEBUG_LOG, 0, "got cooler named '%s'",
        result->name().toString().c_str());
    return result;
}
AtNode *ToArnoldCameraConverter::doConversion( IECore::ConstObjectPtr from, IECore::ConstCompoundObjectPtr operands ) const
{
    CameraPtr camera = boost::static_pointer_cast<const Camera>( from )->copy();
    camera->addStandardParameters();

    // use projection to decide what sort of camera node to create
    const std::string &projection = camera->parametersData()->member<StringData>( "projection", true )->readable();

    AtNode *result = 0;
    if( projection=="perspective" )
    {
        result = AiNode( "persp_camera" );
        AiNodeSetFlt( result, "fov", camera->parametersData()->member<FloatData>( "projection:fov", true )->readable() );
    }
    else if( projection=="orthographic" )
    {
        result = AiNode( "ortho_camera" );
    }
    else
    {
        result = AiNode( projection.c_str() );
    }

    // set clipping planes
    const Imath::V2f &clippingPlanes = camera->parametersData()->member<V2fData>( "clippingPlanes", true )->readable();
    AiNodeSetFlt( result, "near_clip", clippingPlanes[0] );
    AiNodeSetFlt( result, "far_clip", clippingPlanes[1] );

    // set shutter
    const Imath::V2f &shutter = camera->parametersData()->member<V2fData>( "shutter", true )->readable();
    AiNodeSetFlt( result, "shutter_start", shutter[0] );
    AiNodeSetFlt( result, "shutter_end", shutter[1] );

    // set screen window
    const Imath::Box2f &screenWindow = camera->parametersData()->member<Box2fData>( "screenWindow", true )->readable();
    const Imath::V2i &resolution = camera->parametersData()->member<V2iData>( "resolution", true )->readable();
    float aspect = (float)resolution.x / (float)resolution.y;
    AiNodeSetPnt2( result, "screen_window_min", screenWindow.min.x, screenWindow.min.y * aspect );
    AiNodeSetPnt2( result, "screen_window_max", screenWindow.max.x, screenWindow.max.y * aspect );

    return result;
}
//----------------------------------------------------------------------------
void UpdateFrustum() {
    if (mOverridingCamera) {
        mOverridingCamera->UpdateFrustum();
    }

    // Extract the six frustum planes from the combined view-projection matrix
    // (Gribb/Hartmann style: sums and differences of the w row with the
    // x, y and z rows).
    const auto& viewProjMat = mMatrices[ViewProj];

    mPlanes[FRUSTUM_PLANE_LEFT].mNormal.x = viewProjMat[3][0] + viewProjMat[0][0];
    mPlanes[FRUSTUM_PLANE_LEFT].mNormal.y = viewProjMat[3][1] + viewProjMat[0][1];
    mPlanes[FRUSTUM_PLANE_LEFT].mNormal.z = viewProjMat[3][2] + viewProjMat[0][2];
    mPlanes[FRUSTUM_PLANE_LEFT].mConstant = -(viewProjMat[3][3] + viewProjMat[0][3]);

    mPlanes[FRUSTUM_PLANE_RIGHT].mNormal.x = viewProjMat[3][0] - viewProjMat[0][0];
    mPlanes[FRUSTUM_PLANE_RIGHT].mNormal.y = viewProjMat[3][1] - viewProjMat[0][1];
    mPlanes[FRUSTUM_PLANE_RIGHT].mNormal.z = viewProjMat[3][2] - viewProjMat[0][2];
    mPlanes[FRUSTUM_PLANE_RIGHT].mConstant = -(viewProjMat[3][3] - viewProjMat[0][3]);

    mPlanes[FRUSTUM_PLANE_TOP].mNormal.x = viewProjMat[3][0] - viewProjMat[1][0];
    mPlanes[FRUSTUM_PLANE_TOP].mNormal.y = viewProjMat[3][1] - viewProjMat[1][1];
    mPlanes[FRUSTUM_PLANE_TOP].mNormal.z = viewProjMat[3][2] - viewProjMat[1][2];
    mPlanes[FRUSTUM_PLANE_TOP].mConstant = -(viewProjMat[3][3] - viewProjMat[1][3]);

    mPlanes[FRUSTUM_PLANE_BOTTOM].mNormal.x = viewProjMat[3][0] + viewProjMat[1][0];
    mPlanes[FRUSTUM_PLANE_BOTTOM].mNormal.y = viewProjMat[3][1] + viewProjMat[1][1];
    mPlanes[FRUSTUM_PLANE_BOTTOM].mNormal.z = viewProjMat[3][2] + viewProjMat[1][2];
    mPlanes[FRUSTUM_PLANE_BOTTOM].mConstant = -(viewProjMat[3][3] + viewProjMat[1][3]);

    mPlanes[FRUSTUM_PLANE_NEAR].mNormal.x = viewProjMat[3][0] + viewProjMat[2][0];
    mPlanes[FRUSTUM_PLANE_NEAR].mNormal.y = viewProjMat[3][1] + viewProjMat[2][1];
    mPlanes[FRUSTUM_PLANE_NEAR].mNormal.z = viewProjMat[3][2] + viewProjMat[2][2];
    mPlanes[FRUSTUM_PLANE_NEAR].mConstant = -(viewProjMat[3][3] + viewProjMat[2][3]);

    mPlanes[FRUSTUM_PLANE_FAR].mNormal.x = viewProjMat[3][0] - viewProjMat[2][0];
    mPlanes[FRUSTUM_PLANE_FAR].mNormal.y = viewProjMat[3][1] - viewProjMat[2][1];
    mPlanes[FRUSTUM_PLANE_FAR].mNormal.z = viewProjMat[3][2] - viewProjMat[2][2];
    mPlanes[FRUSTUM_PLANE_FAR].mConstant = -(viewProjMat[3][3] - viewProjMat[2][3]);

    // Renormalise any normals which were not unit length
    for (int i = 0; i < 6; i++) {
        Real length = mPlanes[i].mNormal.Normalize();
        mPlanes[i].mConstant /= length;
    }
}
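A minimal sketch of how the planes extracted above would typically be consumed, assuming the same mNormal/mConstant layout. Because mConstant stores the negated w-row term, a point lies on the inside half-space when Dot(mNormal, p) >= mConstant; the IsSphereVisible name and the Dot helper are assumptions for illustration, not part of the original source.

// Hypothetical sphere-vs-frustum test against the six planes filled in by
// UpdateFrustum(). The signed distance to a normalised plane here is
// Dot(mNormal, p) - mConstant; the sphere is culled only when it lies
// entirely on the outside of some plane.
bool IsSphereVisible(const Plane (&planes)[6], const Vec3& center, Real radius)
{
    for (int i = 0; i < 6; ++i) {
        if (Dot(planes[i].mNormal, center) - planes[i].mConstant < -radius)
            return false; // entirely outside this plane
    }
    return true; // inside or intersecting the frustum
}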
AVOID DeferredRenderer::VRenderSky(CameraPtr pCamera, const Mat4x4 & viewproj) {
    m_gbuffer.BindRenderTarget(3);

    //m_pSkyShaders->VBind();
    m_pAtmoShaders->VBind();

    Mat4x4 trans;
    trans.CreateTranslation(pCamera->GetPosition());
    Mat4x4 scale;
    scale.CreateScaling(1, 1, 1);
    //Mat4x4 worldViewProj = trans * viewproj;
    Mat4x4 worldViewProj = scale * viewproj;
    worldViewProj.Transpose();
    m_pcbWVP->UpdateSubresource(0, nullptr, &worldViewProj, 0, 0);
    m_pcbWVP->Set(1, ST_Vertex);

    struct WorldData {
        Mat4x4 wvp;
        //Vec cameraPos;
    };
    WorldData worldData;
    worldData.wvp = worldViewProj;
    //worldData.cameraPos = pCamera->GetPosition();

    //m_pcbAtmoWorld->UpdateSubresource(0, nullptr, &worldData, 0, 0);
    //m_pcbAtmoWorld->Set(1, ST_Vertex);

    m_pSphereMesh->SetPositionBuffer();
    m_pSphereMesh->SetIndexBuffer();

    this->NoCullingStandardRasterizer()->Set();
    this->m_pDepthDisableStencilDisable->Set(0xFF);

    //m_pSkySRV->Set(0, ST_Pixel);

    m_pSphereMesh->VRender(this);

    UnbindRenderTargetViews(1);
    UnbindShaderResourceViews(0, 1, ST_Pixel);
}
CameraPtr VRMLBodyLoaderImpl::createCamera(VRMLProtoInstance* node)
{
    CameraPtr camera;
    RangeCamera* range = 0;

    const SFString& type = get<SFString>(node->fields["type"]);
    if(type == "DEPTH"){
        range = new RangeCamera;
        range->setOrganized(true);
        range->setImageType(Camera::NO_IMAGE);
    } else if(type == "COLOR_DEPTH"){
        range = new RangeCamera;
        range->setOrganized(true);
        range->setImageType(Camera::COLOR_IMAGE);
    } else if(type == "POINT_CLOUD"){
        range = new RangeCamera;
        range->setOrganized(false);
        range->setImageType(Camera::NO_IMAGE);
    } else if(type == "COLOR_POINT_CLOUD"){
        range = new RangeCamera;
        range->setOrganized(false);
        range->setImageType(Camera::COLOR_IMAGE);
    } else {
        camera = new Camera;
    }
    if(range){
        camera = range;
    }

    readDeviceCommonParameters(*camera, node);

    bool on = true;
    if(checkAndReadVRMLfield(node, "on", on)){
        camera->on(on);
    }
    camera->setResolution(getValue<SFInt32>(node, "width"), getValue<SFInt32>(node, "height"));
    camera->setFieldOfView(getValue<SFFloat>(node, "fieldOfView"));
    camera->setNearDistance(getValue<SFFloat>(node, "frontClipDistance"));
    camera->setFarDistance(getValue<SFFloat>(node, "backClipDistance"));
    camera->setFrameRate(getValue<SFFloat>(node, "frameRate"));

    return camera;
}
void RenderPipelineLayer::setCamera(CameraPtr camera) noexcept
{
    auto semantic = Material::getMaterialSemantic();

    if (camera->getCameraOrder() != CameraOrder::CO_SHADOW)
    {
        if (camera->getCameraType() == CameraType::CT_PERSPECTIVE)
        {
            float ratio;
            float aperture;
            float znear;
            float zfar;
            camera->getPerspective(aperture, ratio, znear, zfar);

            std::size_t width, height;
            this->getWindowResolution(width, height);

            // keep the projection in sync with the current window aspect ratio
            float windowRatio = (float)width / (float)height;
            if (ratio != windowRatio)
            {
                ratio = windowRatio;
                camera->makePerspective(aperture, znear, zfar, ratio);
            }

            semantic->setFloatParam(GlobalFloatSemantic::CameraAperture, aperture);
            semantic->setFloatParam(GlobalFloatSemantic::CameraNear, znear);
            semantic->setFloatParam(GlobalFloatSemantic::CameraFar, zfar);
        }
        else
        {
            float left;
            float right;
            float top;
            float bottom;
            float ratio;
            float znear;
            float zfar;
            camera->getOrtho(left, right, top, bottom, ratio, znear, zfar);

            std::size_t width, height;
            this->getWindowResolution(width, height);

            float windowRatio = (float)width / (float)height;
            if (ratio != windowRatio)
            {
                ratio = windowRatio;
                camera->makeOrtho(left, right, top, bottom, znear, zfar, ratio);
            }

            semantic->setFloatParam(GlobalFloatSemantic::CameraNear, znear);
            semantic->setFloatParam(GlobalFloatSemantic::CameraFar, zfar);
        }
    }

    semantic->setFloat3Param(GlobalFloat3Semantic::CameraView, camera->getLookAt());
    semantic->setFloat3Param(GlobalFloat3Semantic::CameraPosition, camera->getTranslate());
    semantic->setFloat3Param(GlobalFloat3Semantic::CameraDirection, camera->getLookAt() - camera->getTranslate());

    semantic->setMatrixParam(GlobalMatrixSemantic::matView, camera->getView());
    semantic->setMatrixParam(GlobalMatrixSemantic::matViewInverse, camera->getViewInverse());
    semantic->setMatrixParam(GlobalMatrixSemantic::matViewInverseTranspose, camera->getViewInverseTranspose());
    semantic->setMatrixParam(GlobalMatrixSemantic::matProject, camera->getProject());
    semantic->setMatrixParam(GlobalMatrixSemantic::matProjectInverse, camera->getProjectInverse());
    semantic->setMatrixParam(GlobalMatrixSemantic::matViewProject, camera->getViewProject());
    semantic->setMatrixParam(GlobalMatrixSemantic::matViewProjectInverse, camera->getViewProjectInverse());

    _camera = camera;
}
void uvctest::testCcd() {
    debug(LOG_DEBUG, DEBUG_LOG, 0, "testCcd");
    CameraPtr camera = locator->getCamera(0);
    CcdPtr ccd = camera->getCcd(default_ccdid);
    std::cout << ccd->getInfo() << std::endl;
}
void ProjectItem::update(CameraPtr oldCam, CameraPtr newCam) {
    if(oldCam)
        oldCam->disconnect(this);
    if(newCam)
        connect(newCam.get(), SIGNAL(nameChanged(QString)), SLOT(update()));
    update();
}