// Return the center of this sizer's window rectangle, with the vertical
// axis flipped from window coordinates (y-down) to GL-style (y-up).
FPoint2 vtSpriteSizer::GetWindowCenter()
{
	FPoint2 middle = m_window_rect.Center();
	const IPoint2 winsize = vtGetScene()->GetWindowSize();
	// The window origin is top-left, so invert the y coordinate.
	middle.y = winsize.y - middle.y;
	return middle;
}
void TerrainPicker::FindGroundPoint() { if (!m_pHeightField) return; FPoint3 pos, dir, result; vtGetScene()->CameraRay(m_pos, pos, dir); // test whether we hit the heightfield m_bOnTerrain = m_pHeightField->CastRayToSurface(pos, dir, result); if (!m_bOnTerrain) return; for (uint i = 0; i < NumTargets(); i++) { vtTransform *pTarget = (vtTransform *) GetTarget(i); pTarget->SetTrans(result); } // save result m_GroundPoint = result; // Find corresponding earth coordinates g_Conv.ConvertToEarth(m_GroundPoint, m_EarthPos); }
/////////////////////////////////////////////////////////////////////// // vtSpriteSizer // An engine to put sprites in the right place. // vtSpriteSizer::vtSpriteSizer(vtImageSprite *pSprite, float l, float t, float r, float b) { m_pSprite = pSprite; m_fRotation = 0.0f; m_rect.SetRect(l, t, r, b); IPoint2 size = vtGetScene()->GetWindowSize(); OnWindowSize(size.x, size.y); }
// Record the selected animpath to disk as a sequence of numbered image
// files, one per animation time step.  Asks the user for an output
// directory and a time step, then installs a PlayToDiskEngine as the
// scene's post-draw engine so frames are captured after drawing.
void LocationDlg::OnPlayToDisk( wxCommandEvent &event )
{
	if (m_iAnim == -1)
	{
		wxMessageBox(_("Please select an animpath to record."));
		return;
	}

	// Ask for directory to place the images
	wxDirDialog getDir(NULL, _("Output directory for the images"));
	bool bResult = (getDir.ShowModal() == wxID_OK);
	if (!bResult)
		return;
	wxString dir = getDir.GetPath();

	// Make sure there is a trailing slash
	if (dir.Length() > 1)
	{
		char ch = dir.GetChar(dir.Length()-1);
		if (ch != '/' && ch != '\\')
			dir += _T("/");
	}

	// Ask for unit of (animation) time for each frame
	wxString value;
	value.Printf(_T("%.1f"), 0.1f);
	wxString step = wxGetTextFromUser(_("Animation time step for each frame, in seconds:"),
		_("Animation"), value, this);
	if (step == _T(""))
		return;
	float fStep = atof(step.mb_str(wxConvUTF8));

	// BUGFIX: atof returns 0 for non-numeric input (and the user may type
	// "0" or a negative value).  A non-positive step would divide by zero
	// below and produce an engine whose termination test (fStep*step >
	// fTotal) can never become true.
	if (fStep <= 0.0f)
	{
		wxMessageBox(_("Please enter a positive time step."));
		return;
	}

	vtAnimPathEngine *engine = GetEngine(m_iAnim);
	vtAnimPath *path = engine->GetAnimationPath();

	wxString msg;
	msg.Printf(_("The animation of %.2f seconds will be recorded as %d frames (%.2f/sec)"),
		path->GetLastTime(), (int) (path->GetLastTime()/fStep), 1.0f/fStep);
	if (wxMessageBox(msg, _("Animation"), wxOK+wxCANCEL) == wxCANCEL)
		return;

	OpenProgressDialog(_("Output"), _T(""), true, this);

	PlayToDiskEngine *eng = new PlayToDiskEngine;
	eng->bReady = false;
	eng->directory = dir.mb_str(wxConvUTF8);
	eng->step = 0;
	eng->fStep = fStep;
	eng->fTotal = path->GetLastTime();
	eng->engine = engine;

	// If we use a regular engine, it is possible (due to timing) that it
	// will capture the window contents at the wrong time (a blank window).
	// So, we add it to the list of engines that are called _after_ draw.
	vtGetScene()->SetPostDrawEngine(eng);
}
void App::process_mouse_motion(const SDL_Event &sdle) { // turn SDL mouse move event into a VT mouse event vtMouseEvent event; event.type = VT_MOVE; event.button = VT_NONE; event.flags = process_modifiers(); event.pos.Set(sdle.motion.x, sdle.motion.y); vtGetScene()->OnMouse(event); }
// Python binding: setCanvasSize(width, height) -> None
// Tells vtlib the new dimensions of the rendering canvas.
static PyObject *vtpSimple_setCanvasSize(PyObject* self, PyObject* args)
{
	int w, h;
	if (!PyArg_ParseTuple(args, "ii", &w, &h))
		return NULL;

	vtGetScene()->SetWindowSize(w, h);

	Py_INCREF(Py_None);
	return Py_None;
}
// Python binding: updateScene() -> None
// Renders one frame, guarded against re-entrant calls via m_bPainting.
static PyObject *vtpSimple_updateScene(PyObject* self, PyObject* args)
{
	if (m_bPainting)
	{
		// A paint is already in progress; silently skip this request.
		Py_INCREF(Py_None);
		return Py_None;
	}

	m_bPainting = true;
	vtGetScene()->DoUpdate();
	m_iConsecutiveMousemoves = 0;
	m_bPainting = false;

	Py_INCREF(Py_None);
	return Py_None;
}
// Pick the globe: cast a ray from the cursor through the camera, intersect
// it with a sphere of radius m_fRadius centered at the origin, and derive
// both the world-space hit point and its geographic equivalent.
void GlobePicker::Eval()
{
	FPoint3 pos, dir;
	vtGetScene()->CameraRay(m_pos, pos, dir);

	// test whether we hit the globe
	FSphere sphere(FPoint3(0.0f, 0.0f, 0.0f), (float)m_fRadius);
	FPoint3 akPoint[2];
	int riQuantity;

	m_bOnTerrain = RaySphereIntersection(pos, dir, sphere, riQuantity, akPoint);
	if (m_bOnTerrain)
	{
		// save result
		m_GroundPoint = akPoint[0];

		// apply global position to target (which is not a child of the globe)
		vtTransform *pTarget = (vtTransform *) GetTarget();
		if (pTarget)
		{
			pTarget->Identity();
			pTarget->SetTrans(m_GroundPoint);
			// Point outward, away from the globe center, by aiming at a
			// point twice as far along the same radial direction.
			pTarget->PointTowards(m_GroundPoint * 2.0f);
			pTarget->Scale(m_fTargetScale);
		}
		if (m_pGlobe)
		{
			// rotate to find position relative to globe's rotation
			vtTransform *xform = m_pGlobe->GetTop();
			FMatrix4 rot;
			xform->GetTransform1(rot);
			FMatrix4 inverse;
			inverse.Invert(rot);
			FPoint3 newpoint;

			// work around SML bug: matrices flagged as identity but
			// will still transform by their components
			if (! inverse.IsIdentity())
			{
				inverse.Transform(m_GroundPoint, newpoint);
				m_GroundPoint = newpoint;
			}
		}

		// Find corresponding geographic coordinates
		xyz_to_geo(m_fRadius, m_GroundPoint, m_EarthPos);
	}
}
// Final stage of scene setup: build the 3D scene, then optionally attach
// a joystick-driven flyer engine to the normal camera.
void EnviroGUI::SetupScene3()
{
	GetFrame()->Setup3DScene();

#if wxUSE_JOYSTICK || WIN32
	if (g_Options.m_bUseJoystick)
	{
		m_pJFlyer = new vtJoystickEngine;
		m_pJFlyer->setName("Joystick");
		m_pJFlyer->AddTarget(m_pNormalCamera);
		vtGetScene()->AddEngine(m_pJFlyer);
	}
#endif
}
// Final-draw callback, invoked after the one-shot capture camera has
// rendered.  Writes the captured image to disk, then detaches the slave
// camera and schedules its deletion for the next frame.
void CSaveImageOSG::operator () (osg::RenderInfo& renderInfo) const
{
	osgViewer::Viewer *pViewer = vtGetScene()->getViewer();

	// Save the file
	osgDB::writeImageFile(*m_pImage, m_FileName);

	// Remove the camera from the viewer
	pViewer->removeSlave(pViewer->findSlaveIndexForCamera(m_pCamera));

	// Take the camera out of the rendering context
	m_pCamera->setGraphicsContext(NULL);

	// Delay deleting the camera until the next frame
	pViewer->getCamera()->setPreDrawCallback(new MyDeleteCallback(m_pCamera.get()));
}
void EnviroFrame::DeleteCanvas() { // Tell our graphics context that there is no canvas. GraphicsWindowWX *pGW = (GraphicsWindowWX*) vtGetScene()->GetGraphicsWindow(); if (pGW) { pGW->CloseOsgContext(); pGW->SetCanvas(NULL); } if (m_canvas) { m_canvas->m_bRunning = false; delete m_canvas; m_canvas = NULL; } }
// Refresh the LOD (level-of-detail) dialog with the current terrain's
// resolution values, vertex/triangle counts and paging state.  Handles
// the three terrain implementations present here: tiled geometry,
// dynamic terrain (SR/SM variants), and the paged structure LOD grid.
void EnviroFrame::UpdateLODInfo()
{
	if (!m_pLODDlg)
		return;
	vtTerrain *terr = g_App.GetCurrentTerrain();
	if (!terr)
		return;

	vtTiledGeom *geom = terr->GetTiledGeom();
	if (geom)
	{
		// Map the three resolution values logarithmically onto a 0..300
		// range between the min and max tiled-geom resolutions.
		float fmin = log(TILEDGEOM_RESOLUTION_MIN);
		float fmax = log(TILEDGEOM_RESOLUTION_MAX);
		float scale = 300 / (fmax -fmin);
		float log0 = log(geom->m_fLResolution);
		float log1 = log(geom->m_fResolution);
		float log2 = log(geom->m_fHResolution);
		m_pLODDlg->Refresh((log0-fmin) * scale,
			(log1-fmin) * scale,
			(log2-fmin) * scale,
			geom->m_iVertexTarget, geom->m_iVertexCount,
			geom->GetPagingRange());

		m_pLODDlg->DrawTilesetState(geom, vtGetScene()->GetCamera());
	}

	vtDynTerrainGeom *dyn = terr->GetDynTerrain();
	if (dyn)
	{
		SRTerrain *sr = dynamic_cast<SRTerrain*>(dyn);
		if (sr)
		{
			// NOTE(review): the *17, *40 and -0.002f/*10000 factors below
			// appear to be empirical display scalings — confirm before
			// changing them.
			m_pLODDlg->Refresh(log(sr->m_fLResolution)*17,
				log(sr->m_fResolution)*17,
				log(sr->m_fHResolution)*17,
				sr->GetPolygonTarget(), sr->NumDrawnTriangles(), -1);
		}
		SMTerrain *sm = dynamic_cast<SMTerrain*>(dyn);
		if (sm)
		{
			m_pLODDlg->Refresh(-1,
				log((sm->GetQualityConstant()-0.002f)*10000)*40,
				-1, sm->GetPolygonTarget(), sm->NumDrawnTriangles(), -1);
		}
	}

	vtPagedStructureLodGrid *pPSLG = terr->GetStructureLodGrid();
	if (pPSLG)
		m_pLODDlg->DrawStructureState(pPSLG, terr->GetStructurePageOutDistance());
}
void App::process_mouse_button(const SDL_Event &sdle) { // turn SDL mouse button event into a VT mouse event vtMouseEvent event; event.type = (sdle.button.type == SDL_MOUSEBUTTONDOWN) ? VT_DOWN : VT_UP; if (sdle.button.button == 1) event.button = VT_LEFT; else if (sdle.button.button == 2) event.button = VT_MIDDLE; else if (sdle.button.button == 3) event.button = VT_RIGHT; event.flags = process_modifiers(); event.pos.Set(sdle.button.x, sdle.button.y); vtGetScene()->OnMouse(event); }
// Python binding: mouseButtonEvent(x, y, button, down, alt=0, shift=0, control=0)
// Translates a Python-side mouse button event into a vtlib mouse event.
// Buttons are numbered 1 = left, 2 = right, 3 = middle; out-of-range
// numbers are silently ignored.
static PyObject *vtpSimple_mouseButtonEvent(PyObject* self, PyObject* args, PyObject* kw)
{
	// 1 = left, 2 = right, 3 = middle
	int buttonNums[] = {VT_LEFT, VT_RIGHT, VT_MIDDLE};
	int button;
	long xpos, ypos;
	bool down;
	vtMouseEvent event;
	bool alt = false, shift = false, control = false;
	static char* keywords[] = {"x", "y", "button", "down", "alt","shift","control", NULL};
	if (!PyArg_ParseTupleAndKeywords(args, kw, "llib|bbb", keywords,
			&xpos, &ypos, &button, &down, &alt, &shift, &control))
		return NULL;

	// Convert the 1-based button number to an array index.
	button--;
	if (button < 0 || button >= 3)
	{
		Py_INCREF(Py_None);
		return Py_None;
	}
	event.button = buttonNums[button];
	event.type = down ? VT_DOWN : VT_UP;
	event.pos.Set(xpos, ypos);

	event.flags = 0;
	// BUGFIX: 'alt' previously ORed in VT_CONTROL, making Alt
	// indistinguishable from Ctrl; it now sets VT_ALT.
	if (alt)
		event.flags |= VT_ALT;
	if (shift)
		event.flags |= VT_SHIFT;
	if (control)
		event.flags |= VT_CONTROL;

	vtGetScene()->OnMouse(event);

	Py_INCREF(Py_None);
	return Py_None;
}
// Python binding: keyEvent(code, alt=0, shift=0, control=0)
// Forwards a key press to the vtlib scene with the given modifier flags.
static PyObject *vtpSimple_keyEvent(PyObject* self, PyObject* args, PyObject* kw)
{
	int key;
	long flag = 0;
	bool alt = false, shift = false, control = false;
	static char* keywords[] = {"code", "alt","shift","control", NULL};
	if (!PyArg_ParseTupleAndKeywords(args, kw, "i|bbb", keywords,
			&key, &alt, &shift, &control))
		return NULL;

	// BUGFIX: 'alt' previously ORed in VT_CONTROL, making Alt
	// indistinguishable from Ctrl; it now sets VT_ALT.
	if (alt)
		flag |= VT_ALT;
	if (shift)
		flag |= VT_SHIFT;
	if (control)
		flag |= VT_CONTROL;

	// pass the char to the vtlib Scene
	vtGetScene()->OnKey(key, flag);

	Py_INCREF(Py_None);
	return Py_None;
}
// Move the camera so the currently selected layer item's bounding sphere
// fits entirely within the view.
void LayerDlg::OnZoomTo( wxCommandEvent &event )
{
	VTLOG1("LayerDlg::OnZoomTo\n");

	osg::Node *pThing = GetNodeFromItem(m_item, true);	// get container
	if (!pThing)
		return;

	FSphere sphere = GetGlobalBoundSphere(pThing);	// get global bounds
	vtCamera *pCam = vtGetScene()->GetCamera();

	// Put the camera a bit back from the sphere; sufficiently so that
	// the whole volume of the bounding sphere is visible.
	float smallest = min(pCam->GetFOV(), pCam->GetVertFOV());
	float alpha = smallest / 2.0f;
	float distance = sphere.radius / tanf(alpha);

	pCam->Identity();
	pCam->Rotate(FPoint3(1,0,0), -PID2f/2);	// tilt down a little
	pCam->Translate(sphere.center);
	pCam->TranslateLocal(FPoint3(0.0f, 0.0f, distance));
}
void App::display() { static int frame = 0; if (frame < 10) { frame++; VTLOG("Frame %d: ", frame); const GLubyte *ver = glGetString(GL_VERSION); if (ver != NULL) VTLOG1("Has context\n"); else { VTLOG1("No context\n"); return; } } vtGetScene()->DoUpdate(); // calls viewer->frame, etc. SDL_GL_SwapBuffers(); }
// Called once per frame as the scene's post-draw engine.  Each call
// schedules a screen capture of the frame that was just drawn (hence the
// step-1 in the filename), advances the animation to the next frame's
// time, and detaches itself from the scene when the total animation time
// is exceeded or the user cancels.
void Eval()
{
	vtScene *scene = vtGetScene();
	if (step > 0)
	{
		wxString msg;
		msg.Printf(_T("Output %.2f/%.2f"), fStep * step, fTotal);
		if (UpdateProgressDialog((int) (99 * fStep * step / fTotal), msg) == true)
		{
			// user pressed cancel
			scene->SetPostDrawEngine(NULL);
			CloseProgressDialog();
			return;
		}

		// We can't grab the screen directly, we must use an OSG callback
		// to capture after the next draw.
		vtString fname;
		fname.Format("image_%04d.png", step-1);
		std::string Filename = (const char *)(directory+fname);
		CScreenCaptureHandler::SetupScreenCapture(Filename);
	}

	// Show the next frame time
	engine->SetTime(fStep * step);
	engine->UpdateTargets();

	// Advance to next frame
	step++;
	if (fStep * step > fTotal)
	{
		// We're finished
		scene->SetPostDrawEngine(NULL);
		CloseProgressDialog();
		return;
	}
}
/* The works: initialize logging, SDL and vtlib/OSG, create the terrain,
   run the main loop, then clean up.  Returns 0. */
int App::main(int argc, char **argv)
{
	// Log messages to make troubleshooting easier
	VTSTARTLOG("debug.txt");
	VTLOG("sdlSimple\n");

#ifdef __FreeBSD__
	/*  FreeBSD is more stringent with FP ops by default, and OSG is  */
	/*  doing silly things sqrt(Inf) (computing lengths of MAXFLOAT   */
	/*  and NaN Vec3's).  This turns off FP bug core dumps, ignoring  */
	/*  the error like most platforms do by default.                  */
	fpsetmask(0);
#endif

	// init SDL
	VTLOG("Initializing SDL..\n");
	if ( SDL_Init(SDL_INIT_VIDEO) < 0 )
	{
		VTLOG("Unable to init SDL: %s\n", SDL_GetError());
		exit(1);
	}
	atexit(SDL_Quit);

	// Starting with SDL 1.2.10, passing in 0 will use the system's current resolution.
	unsigned int windowWidth = 0;
	unsigned int windowHeight = 0;

	// Passing in 0 for bitdepth also uses the system's current bitdepth. This works before 1.2.10 too.
	unsigned int bitDepth = 0;

	//videosettings(true, true);
	SDL_GL_SetAttribute( SDL_GL_RED_SIZE, 5 );
	SDL_GL_SetAttribute( SDL_GL_GREEN_SIZE, 5 );
	SDL_GL_SetAttribute( SDL_GL_BLUE_SIZE, 5 );
	SDL_GL_SetAttribute( SDL_GL_DEPTH_SIZE, 16 );
	SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );

	// set up the surface to render to
	SDL_Surface* screen = SDL_SetVideoMode(windowWidth, windowHeight, bitDepth,
		SDL_OPENGL | SDL_FULLSCREEN | SDL_RESIZABLE);
	if ( screen == NULL )
	{
		std::cerr<<"Unable to set "<<windowWidth<<"x"<<windowHeight<<" video: %s\n"<< SDL_GetError()<<std::endl;
		exit(1);
	}
	SDL_EnableUNICODE(1);

	// If we used 0 to set the fields, query the values so we can pass it to osgViewer
	windowWidth = screen->w;
	windowHeight = screen->h;

	VTLOG("Initializing vtlib/OSG..\n");
	vtGetScene()->Init(argc, argv);
	// SDL owns the window and GL context, so run OSG single-threaded and
	// embedded in the existing window.
	vtGetScene()->getViewer()->setThreadingModel(osgViewer::Viewer::SingleThreaded);
	vtGetScene()->getViewer()->setUpViewerAsEmbeddedInWindow(0,0,windowWidth,windowHeight);
//	vtGetScene()->SetGraphicsContext(new osgViewer::GraphicsWindowEmbedded(0, 0, width, height));

	// Tell window size to vtlib
	vtGetScene()->SetWindowSize(windowWidth, windowHeight);

	VTLOG("Creating the terrain..\n");
	if (!CreateScene())
		return 0;

	VTLOG("Running..\n");
	run();

	VTLOG("Cleaning up..\n");
	// Release the scene graph before tearing down the terrain scene and
	// shutting vtlib down.
	vtGetScene()->SetRoot(NULL);
	if (m_ts)
		m_ts->CleanupScene();
	delete m_ts;
	vtGetScene()->Shutdown();

	return 0;
}
// Render the current scene into an off-screen FBO of (at most) Width x
// Height pixels and save it to FilePath.  The actual file write happens
// later, in the final-draw callback, after the slave camera has rendered.
// Returns false if FBOs are unsupported or any resource cannot be created.
bool CSaveImageOSG::SaveImage(std::string& FilePath, int Width, int Height)
{
	osg::FBOExtensions* fbo_ext = osg::FBOExtensions::instance(
		vtGetScene()->GetGraphicsContext()->getState()->getContextID(), true);
	if (!(fbo_ext && fbo_ext->isSupported()))
		return false;

	GLint MaxRenderBufferSize = 3000;

	// NOTE(review): TempContext presumably makes a GL context current so
	// the glGetIntegerv below is valid — confirm.
	vtTemporaryGraphicsContext TempContext;
	glGetIntegerv(MAX_RENDERBUFFER_SIZE_EXT, &MaxRenderBufferSize);

	osgViewer::Viewer *pViewer = vtGetScene()->getViewer();

	osg::ref_ptr<osg::Camera> pCamera = new osg::Camera;
	if (!pCamera.valid())
		return false;

	ImagePtr pImage = new osg::Image;
	if (!pImage.valid())
	{
		pCamera = NULL;
		return false;
	}

	// Clone any global state
	// I am only going to mess about with viewport so SHALLOW_COPY should be OK.
	pCamera->setStateSet((osg::StateSet*)pViewer->getCamera()->getOrCreateStateSet()->clone(osg::CopyOp::SHALLOW_COPY));

	// Clamp the requested size to the maximum render buffer size while
	// preserving the aspect ratio.
	float AspectRatio = (float)Width / (float)Height;
	if (AspectRatio > 1.0)
	{
		if (Width > MaxRenderBufferSize)
		{
			Width = MaxRenderBufferSize;
			Height = MaxRenderBufferSize / AspectRatio;
		}
	}
	else
	{
		if (Height > MaxRenderBufferSize)
		{
			Height = MaxRenderBufferSize;
			Width = MaxRenderBufferSize * AspectRatio;
		}
	}

	VTLOG("CSaveImageOSG::SaveImage - Width %d Height %d Total %d\n",
		Width, Height, Width* Height);

	pImage->allocateImage(Width, Height, 1, GL_RGB, GL_UNSIGNED_BYTE);

	pCamera->setClearColor(pViewer->getCamera()->getClearColor());
	pCamera->setViewport(0, 0, Width, Height);
	pCamera->setRenderOrder(osg::Camera::POST_RENDER); // Have to set this to PRE_RENDER to stop
	                                                   // annoying white semicircle flashing up
	pCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT,
		osg::Camera::FRAME_BUFFER);
	pCamera->setComputeNearFarMode(osg::Camera::DO_NOT_COMPUTE_NEAR_FAR);
	pCamera->attach(osg::Camera::COLOR_BUFFER, pImage.get());
	pCamera->setGraphicsContext(pViewer->getCamera()->getGraphicsContext());

	// Set draw callback - we cannot assume the camera will be rendered on this thread.
	pCamera->setFinalDrawCallback(new CSaveImageOSG(FilePath, pImage.get(), pCamera.get()));

	// Ensure that all threads have finished rendering. We probably do not need to do this
	// but it seems cleaner to me.
	osg::Object::DataVariance dv = pViewer->getSceneData()->getDataVariance();
	pViewer->getSceneData()->setDataVariance(osg::Object::DYNAMIC);
	pViewer->frame();
	// Restore the data variance ready for next frame
	pViewer->getSceneData()->setDataVariance(dv);

	pViewer->addSlave(pCamera.get());
	// The call to addSlave creates a default renderer with _compileOnNextDraw set to true
	// Because I am using the master cameras scene graph and graphics context I can assume
	// that the display lists have already been compiled. This is another way of avoiding
	// the annoying white flash which is caused by the display lists being recompiled into
	// the main frame buffer with the projection and view matrices set to identity, rather
	// than into this cameras frame buffer object with the projection and view matrices set
	// to something reasonable.
	((osgViewer::Renderer*)pCamera->getRenderer())->setCompileOnNextDraw(false);

	return true;
}
// Methods static PyObject* vtpSimple_init(PyObject* self, PyObject* args) { // Would be usefule to include a parameter for the type of flyer const char *sourcefile, *datadir, *debugfile; if (!PyArg_ParseTuple(args, "sss", &sourcefile, &datadir, &debugfile)) return NULL; m_pTerrainScene = NULL; vtGetScene()->Init(); VTSTARTLOG(debugfile); // Get a handle to the vtScene - one is already created for you vtScene *pScene = vtGetScene(); // Look up the camera vtCamera *pCamera = pScene->GetCamera(); pCamera->SetHither(10); pCamera->SetYon(100000); // The terrain scene will contain all the terrains that are created. m_pTerrainScene = new vtTerrainScene; // Set the global data path vtStringArray paths; paths.push_back(vtString(datadir)); pScene->SetDataPath(paths); // Begin creating the scene, including the sun and sky vtGroup *pTopGroup = m_pTerrainScene->BeginTerrainScene(); // Tell the scene graph to point to this terrain scene pScene->SetRoot(pTopGroup); // Create a new vtTerrain, read its paramters from a file vtTerrain *pTerr = new vtTerrain; pTerr->SetParamFile(sourcefile); pTerr->LoadParams(); // Add the terrain to the scene, and contruct it m_pTerrainScene->AppendTerrain(pTerr); if (!m_pTerrainScene->BuildTerrain(pTerr)) { printf("Couldn't create the terrain. 
Perhaps the elevation data file isn't in the expected location?\n"); Py_INCREF(Py_False); return Py_False; } m_pTerrainScene->SetCurrentTerrain(pTerr); // Create a navigation engine to move around on the terrain // Get flight speed from terrain parameters float fSpeed = pTerr->GetParams().GetValueFloat(STR_NAVSPEED); vtTerrainFlyer *pFlyer = new vtTerrainFlyer(fSpeed); pFlyer->SetTarget(pCamera); pFlyer->SetHeightField(pTerr->GetHeightField()); pScene->AddEngine(pFlyer); // Minimum height over terrain is 100 m vtHeightConstrain *pConstrain = new vtHeightConstrain(100); pConstrain->SetTarget(pCamera); pConstrain->SetHeightField(pTerr->GetHeightField()); pScene->AddEngine(pConstrain); for (int i = 0; i < 512; i++) m_pbKeyState[i] = false; vtGetScene()->SetKeyStates(m_pbKeyState); printf("Done creating scene.\n"); Py_INCREF(Py_True); return Py_True; }
float CVisualImpactCalculatorOSG::Calculate() { m_ViewMatrix = dynamic_cast<osgViewer::Renderer*>(vtGetScene()->getViewer()->getCamera()->getRenderer())->getSceneView(0)->getViewMatrix(); return Implementation(true); }
// Core of the visual impact calculation.
// In one-off mode (bOneOffMode == true) the scene is rendered once from
// m_ViewMatrix and the impact factor from InnerImplementation is returned.
// Otherwise the terrain is scanned on a grid of camera positions spaced
// (dXSampleInterval, dYSampleInterval) apart, writing one impact factor
// per sample into pRasterBand; returns 0.0 on success, or -1.0 if the
// user cancels via progress_callback.  In both modes every other active
// camera is masked off for the duration so only the visual-impact camera
// renders, and the contributors' render bins are restored on every exit
// path.
// NOTE(review): fScaleFactor is not referenced in this function —
// presumably consumed by InnerImplementation; confirm.
float CVisualImpactCalculatorOSG::Implementation(bool bOneOffMode,
	GDALRasterBand *pRasterBand, float fScaleFactor,
	double dXSampleInterval, double dYSampleInterval,
	bool progress_callback(int))
{
	if (!m_bInitialised)
		return 0.0;

	osgViewer::Viewer *pViewer = vtGetScene()->getViewer();
	osgViewer::Viewer::Cameras ActiveCameras;
	std::vector<CameraMask> NodeMasks;

	pViewer->getCameras(ActiveCameras, true);

	// Stop any other cameras rendering the scene, remembering their node
	// masks so they can be restored afterwards.
	for (osgViewer::Viewer::Cameras::iterator itr = ActiveCameras.begin();
			itr != ActiveCameras.end(); ++itr)
	{
		if (*itr != m_pVisualImpactCamera.get())
		{
			NodeMasks.push_back(CameraMask(*itr, (*itr)->getNodeMask()));
			(*itr)->setNodeMask(0);
		}
	}

	// Set up the render bins: move every contributor into the dedicated
	// visual-impact bin so it can be identified during rendering.
	for (VisualImpactContributors::iterator itr = m_VisualImpactContributors.begin();
			itr != m_VisualImpactContributors.end(); itr++)
		(*itr)->getOrCreateStateSet()->setRenderBinDetails(VISUAL_IMPACT_BIN_NUMBER,
			VISUAL_IMPACT_BIN_NAME);

	// Pick up the current main scene camera state
	osg::StateSet* pStateSet = new osg::StateSet(*ActiveCameras[0]->getOrCreateStateSet(),
		osg::CopyOp::DEEP_COPY_ALL);
	pStateSet->setAttribute(m_pVisualImpactCamera->getViewport());
	m_pVisualImpactCamera->setStateSet(pStateSet);
	m_pVisualImpactCamera->setClearColor(ActiveCameras[0]->getClearColor());

	// Enable the visual impact camera
	m_pVisualImpactCamera->setNodeMask(0xffffffff);

	if (bOneOffMode)
	{
		m_pVisualImpactCamera->setViewMatrix(m_ViewMatrix);

		pViewer->frame();

		// Disable the visual impact camera
		m_pVisualImpactCamera->setNodeMask(0);
		// Restore the node masks of the other cameras
		for(std::vector<CameraMask>::iterator itr = NodeMasks.begin();
				itr != NodeMasks.end(); ++itr)
			itr->m_pCamera->setNodeMask(itr->m_NodeMask);
		// Put the contributors back into their inherited render bins
		for (VisualImpactContributors::iterator itr = m_VisualImpactContributors.begin();
				itr != m_VisualImpactContributors.end(); itr++)
		{
			osg::StateSet *pStateSet = (*itr)->getOrCreateStateSet();
			pStateSet->setRenderBinMode(osg::StateSet::INHERIT_RENDERBIN_DETAILS);
			pStateSet->setRenderingHint(osg::StateSet::DEFAULT_BIN);
		}
		return InnerImplementation();
	}
	else
	{
		DPoint2 CameraOrigin;
		DPoint2 CurrentCamera;
		vtHeightField3d *pHeightField = vtGetTS()->GetCurrentTerrain()->GetHeightField();
		DRECT EarthExtents = pHeightField->GetEarthExtents();

		// Scan starts from the bottom-left corner of the terrain extents.
		CameraOrigin.x = EarthExtents.left;
		CameraOrigin.y = EarthExtents.bottom;

		int iCurrentY = 0;
		int iXsize = (int)((EarthExtents.right - EarthExtents.left)/dXSampleInterval);
		int iYsize = (int)((EarthExtents.top - EarthExtents.bottom)/dYSampleInterval);
		int iTotalProgress = iXsize * iYsize;

#ifdef _DEBUG
		int iBlockSizeX, iBlockSizeY;
		pRasterBand->GetBlockSize(&iBlockSizeX, &iBlockSizeY);
#endif

		for (CurrentCamera.y = CameraOrigin.y; CurrentCamera.y < EarthExtents.top;
				CurrentCamera.y += dYSampleInterval)
		{
			// Process horizontal scanline
			int iCurrentX = 0;
			for (CurrentCamera.x = CameraOrigin.x; CurrentCamera.x < EarthExtents.right;
					CurrentCamera.x += dXSampleInterval)
			{
				// Place the camera on the terrain surface at this sample,
				// looking at the target.
				FPoint3 CameraTranslate;
				pHeightField->ConvertEarthToSurfacePoint(CurrentCamera, CameraTranslate);
				m_pVisualImpactCamera->setViewMatrixAsLookAt(v2s(CameraTranslate),
					v2s(m_Target), osg::Vec3(0.0, 1.0, 0.0));

				pViewer->frame();

				float fFactor = InnerImplementation();

				// Write this sample into the raster band (row 0 is at the top,
				// hence the flipped y index).
				pRasterBand->RasterIO(GF_Write, iCurrentX, iYsize - iCurrentY - 1,
					1, 1, &fFactor, 1, 1, GDT_Float32, 0, 0);

				iCurrentX++;
				if ((*progress_callback)(100 * (iCurrentY * iXsize + iCurrentX) / iTotalProgress))
				{
					// User cancelled: restore all camera and render-bin
					// state before bailing out.
					m_pVisualImpactCamera->setNodeMask(0);
					for(std::vector<CameraMask>::iterator itr = NodeMasks.begin();
							itr != NodeMasks.end(); ++itr)
						itr->m_pCamera->setNodeMask(itr->m_NodeMask);
					for (VisualImpactContributors::iterator itr = m_VisualImpactContributors.begin();
							itr != m_VisualImpactContributors.end(); itr++)
					{
						osg::StateSet *pStateSet = (*itr)->getOrCreateStateSet();
						pStateSet->setRenderBinMode(osg::StateSet::INHERIT_RENDERBIN_DETAILS);
						pStateSet->setRenderingHint(osg::StateSet::DEFAULT_BIN);
					}
					VTLOG("CVisualImpactCalculatorOSG::Implementation - Cancelled by user\n");
					return -1.0f;
				}
			}
			iCurrentY++;
		}

		// Disable the visual impact camera and restore the other cameras'
		// node masks and the contributors' render bins.
		m_pVisualImpactCamera->setNodeMask(0);
		for(std::vector<CameraMask>::iterator itr = NodeMasks.begin();
				itr != NodeMasks.end(); ++itr)
			itr->m_pCamera->setNodeMask(itr->m_NodeMask);
		for (VisualImpactContributors::iterator itr = m_VisualImpactContributors.begin();
				itr != m_VisualImpactContributors.end(); itr++)
		{
			osg::StateSet *pStateSet = (*itr)->getOrCreateStateSet();
			pStateSet->setRenderBinMode(osg::StateSet::INHERIT_RENDERBIN_DETAILS);
			pStateSet->setRenderingHint(osg::StateSet::DEFAULT_BIN);
		}
		return 0.0f;
	}
}
// One-time setup for the visual impact calculator: registers the custom
// render bin, probes for FBO support, allocates the intermediate and
// final depth images, and creates the (initially disabled) slave camera
// used to render depth for the impact computation.
// Returns false if any resource cannot be created.
bool CVisualImpactCalculatorOSG::Initialise()
{
	osgViewer::Viewer *pViewer = vtGetScene()->getViewer();

	osgUtil::RenderBin::addRenderBinPrototype(VISUAL_IMPACT_BIN_NAME, new VisualImpactBin(this));

	m_ProjectionMatrix.makePerspective(DEFAULT_HUMAN_FOV_DEGREES,
		DEFAULT_HUMAN_FOV_ASPECT_RATIO, 10.0, 40000.0);

	osg::FBOExtensions* fbo_ext = osg::FBOExtensions::instance(DEFAULT_GRAPHICS_CONTEXT, true);
//	if ((fbo_ext && fbo_ext->isSupported()) || osg::isGLExtensionSupported(DEFAULT_GRAPHICS_CONTEXT, "ARB_render_texture"))
	if (fbo_ext && fbo_ext->isSupported())
		m_bUsingLiveFrameBuffer = false;
	else
		m_bUsingLiveFrameBuffer = true;

	m_pIntermediateImage = new osg::Image;
	if (!m_pIntermediateImage.valid())
	{
		VTLOG("CVisualImpactCalculatorOSG::Implementation - Cannot create intermediate image\n");
		return false;
	}
	m_pIntermediateImage->allocateImage(DEFAULT_VISUAL_IMPACT_RESOLUTION,
		DEFAULT_VISUAL_IMPACT_RESOLUTION, 1, GL_DEPTH_COMPONENT, GL_FLOAT);
	if (!m_pIntermediateImage->valid())
	{
		VTLOG("CVisualImpactCalculatorOSG::Implementation - Cannot allocate intermediate image\n");
		return false;
	}

	// Even though the camera node knows that you have attached the image
	// to the depth buffer it does not set this up correctly for you.
	// There is no way to set the dataType, so preallocation of the data
	// is easiest.
	m_pFinalImage= new osg::Image;
	if (!m_pFinalImage.valid())
	{
		VTLOG("CVisualImpactCalculatorOSG::Implementation - Cannot create final image\n");
		return false;
	}
	m_pFinalImage->allocateImage(DEFAULT_VISUAL_IMPACT_RESOLUTION,
		DEFAULT_VISUAL_IMPACT_RESOLUTION, 1, GL_DEPTH_COMPONENT, GL_FLOAT);
	if (!m_pFinalImage->valid())
	{
		VTLOG("CVisualImpactCalculatorOSG::Implementation - Cannot allocate final image\n");
		return false;
	}

	m_pVisualImpactCamera = new osg::Camera;
	if (!m_pVisualImpactCamera.valid())
	{
		VTLOG("CVisualImpactCalculatorOSG::Implementation - Cannot create visual impact camera\n");
		return false;
	}
	m_pVisualImpactCamera->setName("Visual impact calculator camera");
	m_pVisualImpactCamera->setViewport(0, 0, DEFAULT_VISUAL_IMPACT_RESOLUTION,
		DEFAULT_VISUAL_IMPACT_RESOLUTION);
	m_pVisualImpactCamera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT,
		osg::Camera::FRAME_BUFFER);
	m_pVisualImpactCamera->setRenderOrder(osg::Camera::PRE_RENDER);
	m_pVisualImpactCamera->attach(osg::Camera::DEPTH_BUFFER, GL_DEPTH_COMPONENT); // Force a renderBuffer
//	m_pVisualImpactCamera->attach(osg::Camera::COLOR_BUFFER, GL_UNSIGNED_BYTE); // Force a renderBuffer
	m_pVisualImpactCamera->setComputeNearFarMode(osg::Camera::DO_NOT_COMPUTE_NEAR_FAR);
	m_pVisualImpactCamera->setReferenceFrame(osg::Camera::ABSOLUTE_RF);
	m_pVisualImpactCamera->setProjectionMatrix(m_ProjectionMatrix);
	m_pVisualImpactCamera->setGraphicsContext(pViewer->getCamera()->getGraphicsContext());
	m_pVisualImpactCamera->setNodeMask(0);	// Initially disabled

	pViewer->addSlave(m_pVisualImpactCamera.get());

	m_bInitialised = true;

	return true;
}