void AtomicCounterBufferBinding::readData(osg::State& state, osg::UIntArray& uintArray) const {
    if (!_bufferObject) return;

    GLBufferObject* bo = _bufferObject->getOrCreateGLBufferObject(state.getContextID());
    if (!bo) return;

    GLint previousID = 0;
    glGetIntegerv(GL_ATOMIC_COUNTER_BUFFER_BINDING, &previousID);

    if (static_cast<GLuint>(previousID) != bo->getGLObjectID())
        bo->_extensions->glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, bo->getGLObjectID());

    GLubyte* src = (GLubyte*)bo->_extensions->glMapBuffer(GL_ATOMIC_COUNTER_BUFFER, GL_READ_ONLY_ARB);
    if (src) {
        size_t size = osg::minimum<int>(_size, uintArray.getTotalDataSize());
        memcpy((void*)&(uintArray.front()), src + _offset, size);
        bo->_extensions->glUnmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
    }

    if (static_cast<GLuint>(previousID) != bo->getGLObjectID())
        bo->_extensions->glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, static_cast<GLuint>(previousID));
}
void Validator::apply(osg::State& state) const {
    if (!_effect) return;

    if (_effect->_tech_selected[state.getContextID()] == 0) {
        Effect::Technique_list::iterator i;
        int j = 0;
        for (i = _effect->_techs.begin(); i != _effect->_techs.end(); ++i, ++j) {
            if ((*i)->validate(state)) {
                _effect->_sel_tech[state.getContextID()] = j;
                _effect->_tech_selected[state.getContextID()] = 1;
                return;
            }
        }
        OSG_WARN << "Warning: osgFX::Validator: could not find any techniques compatible with the current OpenGL context" << std::endl;
    }
}
bool Technique::validate(osg::State& state) const {
    typedef std::vector<std::string> String_list;
    String_list extensions;
    getRequiredExtensions(extensions);

    for (String_list::const_iterator i = extensions.begin(); i != extensions.end(); ++i) {
        if (!osg::isGLExtensionSupported(state.getContextID(), i->c_str())) return false;
    }

    return true;
}
std::string DepthPeelBin::createFileName( osg::State& state, int pass, bool depth ) {
    unsigned int contextID = state.getContextID();
    int frameNumber = state.getFrameStamp()->getFrameNumber();

    std::ostringstream ostr;
    ostr << std::setfill( '0' );
    ostr << "f" << std::setw( 6 ) << frameNumber << "_c" << std::setw( 2 ) << contextID << "_";
    ostr << "peel_part" << _partitionNumber;
    if( pass == -1 )
        ostr << "_a";
    else
        ostr << "_b" << std::setw( 2 ) << pass;
    if( depth )
        ostr << "_z";
    ostr << ".png";
    return( ostr.str() );
}
void DepthPeelBin::PerContextInfo::cleanup( const osg::State& state ) {
    TRACEDUMP(" PerContextInfo::cleanup");

    glDeleteTextures( 3, _depthTex );
    _depthTex[ 0 ] = _depthTex[ 1 ] = _depthTex[ 2 ] = 0;
    glDeleteTextures( 1, &_colorTex );
    _colorTex = 0;

    osg::FBOExtensions* fboExt( osg::FBOExtensions::instance( state.getContextID(), true ) );
    osgwTools::glBindFramebuffer( fboExt, GL_FRAMEBUFFER_EXT, 0 );
    osgwTools::glDeleteFramebuffers( fboExt, 1, &_fbo );
    _fbo = 0;

    _glDeleteQueries( 1, &_queryID );
    _queryID = 0;

    _init = false;
}
void Font::Glyph::draw(osg::State& state) const {
    GLuint& globj = _globjList[state.getContextID()];

    // call the display list if it is already set, otherwise compile and execute it.
    if( globj != 0 ) {
        glCallList( globj );
    } else {
        globj = glGenLists( 1 );
        glNewList( globj, GL_COMPILE_AND_EXECUTE );

        glPixelStorei(GL_UNPACK_ALIGNMENT, getPacking());
        glDrawPixels(s(), t(), (GLenum)getPixelFormat(), (GLenum)getDataType(), data());

        glEndList();
    }
}
void GlyphTexture::apply(osg::State& state) const {
    // get the contextID (user defined ID of 0 upwards) for the
    // current OpenGL context.
    const unsigned int contextID = state.getContextID();

    if (contextID >= _glyphsToSubload.size()) {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);

        // The graphics context is beyond the number of glyphsToSubload lists, so
        // we must copy the glyph list across. This is a potential threading issue
        // if multiple applies are happening at the same time on this object - to
        // avoid this condition the number of graphics contexts should be set
        // before creating text.
        for(unsigned int i = _glyphsToSubload.size(); i <= contextID; ++i) {
            GlyphPtrList& glyphPtrs = _glyphsToSubload[i];
            for(GlyphRefList::const_iterator itr = _glyphs.begin(); itr != _glyphs.end(); ++itr) {
                glyphPtrs.push_back(itr->get());
            }
        }
    }

    const Extensions* extensions = getExtensions(contextID, true);
    bool generateMipMapSupported = extensions->isGenerateMipMapSupported();

    // get the texture object for the current contextID.
    TextureObject* textureObject = getTextureObject(contextID);

    bool newTextureObject = (textureObject == 0);
    if (newTextureObject) {
        GLint maxTextureSize = 256;
        glGetIntegerv(GL_MAX_TEXTURE_SIZE, &maxTextureSize);
        if (maxTextureSize < getTextureWidth() || maxTextureSize < getTextureHeight()) {
            OSG_WARN<<"Warning: osgText::Font texture size of ("<<getTextureWidth()<<", "<<getTextureHeight()<<") too large, unable to create font texture."<<std::endl;
            OSG_WARN<<" Maximum supported by hardware by the native OpenGL implementation is ("<<maxTextureSize<<","<<maxTextureSize<<")."<<std::endl;
            OSG_WARN<<" Please set the OSG_MAX_TEXTURE_SIZE environment variable to "<<maxTextureSize<<" and re-run the application."<<std::endl;
            return;
        }

        // being bound for the first time, need to allocate the texture
        _textureObjectBuffer[contextID] = textureObject = osg::Texture::generateTextureObject(
            this, contextID, GL_TEXTURE_2D, 1, GL_ALPHA, getTextureWidth(), getTextureHeight(), 1, 0);

        textureObject->bind();

        applyTexParameters(GL_TEXTURE_2D, state);

        // need to look at the generate mipmap extension if mipmapping is required.
        switch(_min_filter) {
            case NEAREST_MIPMAP_NEAREST:
            case NEAREST_MIPMAP_LINEAR:
            case LINEAR_MIPMAP_NEAREST:
            case LINEAR_MIPMAP_LINEAR:
                if (generateMipMapSupported) {
                    glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
                }
                else glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, LINEAR);
                break;
            default:
                // not mipmapping so no problems.
                break;
        }

        unsigned int imageDataSize = getTextureHeight()*getTextureWidth();
        unsigned char* imageData = new unsigned char[imageDataSize];
        for(unsigned int i = 0; i < imageDataSize; ++i) {
            imageData[i] = 0;
        }

        // allocate the texture memory.
        glTexImage2D( GL_TEXTURE_2D, 0, GL_ALPHA,
                      getTextureWidth(), getTextureHeight(), 0,
                      GL_ALPHA, GL_UNSIGNED_BYTE, imageData );

        delete [] imageData;
    }
    else {
        // reuse texture by binding.
        textureObject->bind();

        if (getTextureParameterDirty(contextID)) {
            applyTexParameters(GL_TEXTURE_2D, state);
        }
    }

    static const GLubyte* s_renderer = 0;
    static bool s_subloadAllGlyphsTogether = false;
    if (!s_renderer) {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);

        s_renderer = glGetString(GL_RENDERER);
        OSG_INFO<<"glGetString(GL_RENDERER)=="<<s_renderer<<std::endl;

        if (s_renderer && strstr((const char*)s_renderer,"IMPACT")!=0) {
            // we're running on an Octane, so need to work around its
            // subloading bugs by loading all at once.
            s_subloadAllGlyphsTogether = true;
        }

        if (s_renderer &&
            ((strstr((const char*)s_renderer,"Radeon")!=0) ||
             (strstr((const char*)s_renderer,"RADEON")!=0) ||
             (strstr((const char*)s_renderer,"ALL-IN-WONDER")!=0))) {
            // we're running on an ATI, so need to work around its
            // subloading bugs by loading all at once.
            s_subloadAllGlyphsTogether = true;
        }

        if (s_renderer && strstr((const char*)s_renderer,"Sun")!=0) {
            // we're running on a Solaris X server, so need to work around its
            // subloading bugs by loading all at once.
            s_subloadAllGlyphsTogether = true;
        }

        const char* str = getenv("OSG_TEXT_INCREMENTAL_SUBLOADING");
        if (str) {
            s_subloadAllGlyphsTogether = strcmp(str,"OFF")==0 || strcmp(str,"Off")==0 || strcmp(str,"off")==0;
        }
    }

    // now subload the glyphs that are outstanding for this graphics context.
    GlyphPtrList& glyphsWereSubloading = _glyphsToSubload[contextID];

    if (!glyphsWereSubloading.empty() || newTextureObject) {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);

        if (!s_subloadAllGlyphsTogether) {
            if (newTextureObject) {
                for(GlyphRefList::const_iterator itr = _glyphs.begin(); itr != _glyphs.end(); ++itr) {
                    (*itr)->subload();
                }
            }
            else { // just subload the new entries.
                // default way of subloading as required.
                //std::cout<<"subloading"<<std::endl;
                for(GlyphPtrList::iterator itr = glyphsWereSubloading.begin(); itr != glyphsWereSubloading.end(); ++itr) {
                    (*itr)->subload();
                }
            }

            // clear the list since we have now subloaded them.
            glyphsWereSubloading.clear();
        }
        else {
            OSG_INFO<<"osgText::Font loading all glyphs as a single subload."<<std::endl;

            // The Octane has bugs in its OpenGL driver which mean that subloads smaller
            // than 32x32 produce errors, and it also cannot handle general alignment,
            // so to get round this copy all glyphs into a temporary image and
            // then subload the whole lot in one go.
            int tsize = getTextureHeight() * getTextureWidth();
            unsigned char *local_data = new unsigned char[tsize];
            memset( local_data, 0L, tsize);

            for(GlyphRefList::const_iterator itr = _glyphs.begin(); itr != _glyphs.end(); ++itr) {
                //(*itr)->subload();

                // Rather than subloading to graphics, we'll write the values
                // of the glyphs into some intermediate data and subload the
                // whole thing at the end
                for( int t = 0; t < (*itr)->t(); t++ ) {
                    for( int s = 0; s < (*itr)->s(); s++ ) {
                        int sindex = (t*(*itr)->s()+s);
                        int dindex = ((((*itr)->getTexturePositionY()+t) * getTextureWidth()) +
                                      ((*itr)->getTexturePositionX()+s));

                        const unsigned char *sptr = &(*itr)->data()[sindex];
                        unsigned char *dptr = &local_data[dindex];

                        (*dptr) = (*sptr);
                    }
                }
            }

            // clear the list since we have now subloaded them.
            glyphsWereSubloading.clear();

            // Subload the image once
            glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0,
                             getTextureWidth(), getTextureHeight(),
                             GL_ALPHA, GL_UNSIGNED_BYTE, local_data );

            delete [] local_data;
        }
    }
    else {
        // OSG_INFO << "no need to subload "<<std::endl;
    }

    // if (generateMipMapTurnedOn)
    // {
    //     glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_FALSE);
    // }
}
void VertexArrayState::setArray(ArrayDispatch* vad, osg::State& state, const osg::Array* new_array) {
    if (new_array) {
        if (!vad->active) {
            vad->active = true;
            _activeDispatchers.push_back(vad);
        }

        if (vad->array==0) {
            GLBufferObject* vbo = isVertexBufferObjectSupported() ? new_array->getOrCreateGLBufferObject(state.getContextID()) : 0;
            if (vbo) {
                bindVertexBufferObject(vbo);
                vad->enable_and_dispatch(state, new_array, vbo);
            } else {
                unbindVertexBufferObject();
                vad->enable_and_dispatch(state, new_array);
            }
        }
        else if (new_array!=vad->array || new_array->getModifiedCount()!=vad->modifiedCount) {
            GLBufferObject* vbo = isVertexBufferObjectSupported() ? new_array->getOrCreateGLBufferObject(state.getContextID()) : 0;
            if (vbo) {
                bindVertexBufferObject(vbo);
                vad->dispatch(state, new_array, vbo);
            } else {
                unbindVertexBufferObject();
                vad->dispatch(state, new_array);
            }
        }

        vad->array = new_array;
        vad->modifiedCount = new_array->getModifiedCount();
    }
    else if (vad->array) {
        disable(vad, state);
    }
}
void Shader::compileShader( osg::State& state ) const {
    PerContextShader* pcs = getPCS( state.getContextID() );
    if( pcs ) pcs->compileShader( state );
}
void SparseTexture2DArray::apply( osg::State& state ) const {
    // get the contextID (user defined ID of 0 upwards) for the
    // current OpenGL context.
    const unsigned int contextID = state.getContextID();

    Texture::TextureObjectManager* tom = Texture::getTextureObjectManager(contextID).get();
    //ElapsedTime elapsedTime(&(tom->getApplyTime()));
    tom->getNumberApplied()++;

    const Extensions* extensions = getExtensions(contextID, true);

    // if not supported, then return
    if (!extensions->isTexture2DArraySupported() || !extensions->isTexture3DSupported()) {
        OSG_WARN<<"Warning: Texture2DArray::apply(..) failed, 2D texture arrays are not supported by the OpenGL driver."<<std::endl;
        return;
    }

    // get the texture object for the current contextID.
    TextureObject* textureObject = getTextureObject(contextID);

    if (textureObject && _textureDepth > 0) {
        const osg::Image* image = firstValidImage();
        if (image && getModifiedCount(0, contextID) != image->getModifiedCount()) {
            // compute the internal texture format, this sets the _internalFormat to an appropriate value.
            computeInternalFormat();

            GLsizei new_width, new_height, new_numMipmapLevels;

            // compute the dimensions of the texture.
            computeRequiredTextureDimensions(state, *image, new_width, new_height, new_numMipmapLevels);

            if (!textureObject->match(GL_TEXTURE_2D_ARRAY_EXT, new_numMipmapLevels, _internalFormat, new_width, new_height, 1, _borderWidth)) {
                Texture::releaseTextureObject(contextID, _textureObjectBuffer[contextID].get());
                _textureObjectBuffer[contextID] = 0;
                textureObject = 0;
            }
        }
    }

    // if we already have a texture object, then
    if (textureObject) {
        // bind texture object
        textureObject->bind();

        // if texture parameters changed, then reset them
        if (getTextureParameterDirty(state.getContextID())) applyTexParameters(GL_TEXTURE_2D_ARRAY_EXT, state);

        // if subload is specified, then use it to subload the images to GPU memory
        //if (_subloadCallback.valid())
        //{
        //    _subloadCallback->subload(*this,state);
        //}
        //else
        {
            // for each image of the texture array do
            for (GLsizei n = 0; n < _textureDepth; n++) {
                osg::Image* image = _images[n].get();

                // if image content is modified, then upload it to the GPU memory
                // GW: this means we have to "dirty" an image before setting it!
                if (image && getModifiedCount(n, contextID) != image->getModifiedCount()) {
                    applyTexImage2DArray_subload(state, image, _textureWidth, _textureHeight, n, _internalFormat, _numMipmapLevels);
                    getModifiedCount(n, contextID) = image->getModifiedCount();
                }
            }
        }
    }

    // nothing before, but we have valid images, so do manual upload and create the texture object manually
    else if ( firstValidImage() != 0L ) { // if (imagesValid())
        // compute the internal texture format, this sets the _internalFormat to an appropriate value.
        computeInternalFormat();

        // compute the dimensions of the texture.
        osg::Image* firstImage = firstValidImage();
        computeRequiredTextureDimensions(state, *firstImage, _textureWidth, _textureHeight, _numMipmapLevels);

        // create texture object
        textureObject = generateTextureObject(
            this, contextID, GL_TEXTURE_2D_ARRAY_EXT, _numMipmapLevels, _internalFormat, _textureWidth, _textureHeight, _textureDepth, 0);

        // bind texture
        textureObject->bind();
        applyTexParameters(GL_TEXTURE_2D_ARRAY_EXT, state);

        _textureObjectBuffer[contextID] = textureObject;

        // First we need to allocate the texture memory
        int sourceFormat = _sourceFormat ? _sourceFormat : _internalFormat;

        if( isCompressedInternalFormat( sourceFormat ) &&
            sourceFormat == _internalFormat &&
            extensions->isCompressedTexImage3DSupported() ) {
            extensions->glCompressedTexImage3D( GL_TEXTURE_2D_ARRAY_EXT, 0, _internalFormat,
                                                _textureWidth, _textureHeight, _textureDepth, _borderWidth,
                                                firstImage->getImageSizeInBytes() * _textureDepth,
                                                0);
        }
        else {
            // Override a compressed source format with the safe GL_RGBA value, which does not generate an error.
            // We can safely do this as the source format is not important when the source data is NULL.
            if( isCompressedInternalFormat( sourceFormat ) )
                sourceFormat = GL_RGBA;

            extensions->glTexImage3D( GL_TEXTURE_2D_ARRAY_EXT, 0, _internalFormat,
                                      _textureWidth, _textureHeight, _textureDepth, _borderWidth,
                                      sourceFormat, _sourceType ? _sourceType : GL_UNSIGNED_BYTE,
                                      0);
        }

        // We certainly have to manually allocate memory for mipmaps if the images are compressed;
        // if not allocated, OpenGL will produce errors on mipmap upload.
        // I have not tested if this is necessary for plain texture formats, but
        // common sense suggests it's required as well.
        if( _min_filter != LINEAR && _min_filter != NEAREST && firstImage->isMipmap() )
            allocateMipmap( state );

        // now upload each layer into texture memory
        for (GLsizei n = 0; n < _textureDepth; n++) {
            // if the image is valid then upload it to the texture memory
            osg::Image* image = _images[n].get();
            if (image) {
                // load the image data into memory; this will also check that the image has valid properties
                applyTexImage2DArray_subload(state, image, _textureWidth, _textureHeight, n, _internalFormat, _numMipmapLevels);
                getModifiedCount(n, contextID) = image->getModifiedCount();
            }
        }

        const Texture::Extensions* texExtensions = Texture::getExtensions(contextID, true);

        // source images have no mipmaps but we could generate them...
        if( _min_filter != LINEAR && _min_filter != NEAREST && !firstImage->isMipmap() &&
            _useHardwareMipMapGeneration && texExtensions->isGenerateMipMapSupported() ) {
            _numMipmapLevels = osg::Image::computeNumberOfMipmapLevels( _textureWidth, _textureHeight );
            generateMipmap( state );
        }

        textureObject->setAllocated(_numMipmapLevels, _internalFormat, _textureWidth, _textureHeight, _textureDepth, 0);

        // unref image data?
        if (isSafeToUnrefImageData(state)) {
            SparseTexture2DArray* non_const_this = const_cast<SparseTexture2DArray*>(this);
            for (int n = 0; n < _textureDepth; n++) {
                if (_images[n].valid() && _images[n]->getDataVariance() == STATIC) {
                    non_const_this->_images[n] = NULL;
                }
            }
        }
    }

    // No images present, but dimensions are set. So create an empty texture
    else if ( (_textureWidth > 0) && (_textureHeight > 0) && (_textureDepth > 0) && (_internalFormat != 0) ) {
        // generate texture
        _textureObjectBuffer[contextID] = textureObject = generateTextureObject(
            this, contextID, GL_TEXTURE_2D_ARRAY_EXT, _numMipmapLevels, _internalFormat, _textureWidth, _textureHeight, _textureDepth, 0);

        textureObject->bind();
        applyTexParameters(GL_TEXTURE_2D_ARRAY_EXT, state);

        extensions->glTexImage3D( GL_TEXTURE_2D_ARRAY_EXT, 0, _internalFormat,
                                  _textureWidth, _textureHeight, _textureDepth, _borderWidth,
                                  _sourceFormat ? _sourceFormat : _internalFormat,
                                  _sourceType ? _sourceType : GL_UNSIGNED_BYTE,
                                  0);
    }

    // nothing before, so just unbind the texture target
    else {
        glBindTexture( GL_TEXTURE_2D_ARRAY_EXT, 0 );
    }

    // if the texture object is now valid and we have to allocate mipmap levels, then
    if (textureObject != 0 && _texMipmapGenerationDirtyList[contextID]) {
        generateMipmap(state);
    }
}
// replaces the same func in the superclass
void SparseTexture2DArray::applyTexImage2DArray_subload(osg::State& state, osg::Image* image,
                                                        GLsizei inwidth, GLsizei inheight, GLsizei indepth,
                                                        GLint inInternalFormat, GLsizei& numMipmapLevels) const {
    //// if we don't have a valid image we can't create a texture!
    //if (!imagesValid())
    //    return;

    // get the contextID (user defined ID of 0 upwards) for the
    // current OpenGL context.
    const unsigned int contextID = state.getContextID();
    const Extensions* extensions = getExtensions(contextID, true);
    const Texture::Extensions* texExtensions = Texture::getExtensions(contextID, true);
    GLenum target = GL_TEXTURE_2D_ARRAY_EXT;

    // compute the internal texture format, this sets the _internalFormat to an appropriate value.
    computeInternalFormat();

    // select the internalFormat required for the texture.
    // bool compressed = isCompressedInternalFormat(_internalFormat);
    bool compressed_image = isCompressedInternalFormat((GLenum)image->getPixelFormat());

    // if the required layer exceeds the maximum allowed layer count
    if (indepth > extensions->maxLayerCount()) {
        // we give a warning and do nothing
        OSG_WARN<<"Warning: Texture2DArray::applyTexImage2DArray_subload(..) the given layer number exceeds the maximum number of supported layers."<<std::endl;
        return;
    }

    // Rescale if the resize hint is set, NPOT is not supported, or the dimensions exceed the max size
    if( _resizeNonPowerOfTwoHint || !texExtensions->isNonPowerOfTwoTextureSupported(_min_filter) ||
        inwidth > extensions->max2DSize() || inheight > extensions->max2DSize())
        image->ensureValidSizeForTexturing(extensions->max2DSize());

    // image size or format has changed, this is not allowed, hence return
    if (image->s()!=inwidth || image->t()!=inheight || image->getInternalTextureFormat()!=inInternalFormat ) {
        OSG_WARN<<"Warning: Texture2DArray::applyTexImage2DArray_subload(..) given image has the wrong dimensions or internal format."<<std::endl;
        return;
    }

    glPixelStorei(GL_UNPACK_ALIGNMENT, image->getPacking());

    bool useHardwareMipmapGeneration = !image->isMipmap() && _useHardwareMipMapGeneration && texExtensions->isGenerateMipMapSupported();

    // if no special mipmapping is required, then
    if( _min_filter == LINEAR || _min_filter == NEAREST || useHardwareMipmapGeneration ) {
        if( _min_filter == LINEAR || _min_filter == NEAREST )
            numMipmapLevels = 1;
        else // Hardware Mipmap Generation
            numMipmapLevels = image->getNumMipmapLevels();

        // upload non-compressed image
        if ( !compressed_image ) {
            extensions->glTexSubImage3D( target, 0,
                                         0, 0, indepth,
                                         inwidth, inheight, 1,
                                         (GLenum)image->getPixelFormat(),
                                         (GLenum)image->getDataType(),
                                         image->data() );
        }
        // if we support compression and the image is compressed, then
        else if (extensions->isCompressedTexImage3DSupported()) {
            // OSG_WARN<<"glCompressedTexImage3D "<<inwidth<<", "<<inheight<<", "<<indepth<<std::endl;
            GLint blockSize, size;
            getCompressedSize(_internalFormat, inwidth, inheight, 1, blockSize, size);

            extensions->glCompressedTexSubImage3D(target, 0,
                                                  0, 0, indepth,
                                                  inwidth, inheight, 1,
                                                  (GLenum)image->getPixelFormat(),
                                                  size,
                                                  image->data());
        }
    // we want to use mipmapping, so enable it
    }
    else {
        // the image does not provide mipmaps, so we have to create them
        if( !image->isMipmap() ) {
            numMipmapLevels = 1;
            OSG_WARN<<"Warning: Texture2DArray::applyTexImage2DArray_subload(..) mipmap layer not passed, and auto mipmap generation turned off or not available. Check the texture's min/mag filters & hardware mipmap generation."<<std::endl;

        // the image object does provide mipmaps, so upload them into the corresponding levels of the layer
        }
        else {
            numMipmapLevels = image->getNumMipmapLevels();

            int width = image->s();
            int height = image->t();

            if( !compressed_image ) {
                for( GLsizei k = 0; k < numMipmapLevels && (width || height); k++) {
                    if (width == 0) width = 1;
                    if (height == 0) height = 1;

                    extensions->glTexSubImage3D( target, k,
                                                 0, 0, indepth,
                                                 width, height, 1,
                                                 (GLenum)image->getPixelFormat(),
                                                 (GLenum)image->getDataType(),
                                                 image->getMipmapData(k));

                    width >>= 1;
                    height >>= 1;
                }
            }
            else if (extensions->isCompressedTexImage3DSupported()) {
                GLint blockSize, size;
                for( GLsizei k = 0; k < numMipmapLevels && (width || height); k++) {
                    if (width == 0) width = 1;
                    if (height == 0) height = 1;

                    getCompressedSize(image->getInternalTextureFormat(), width, height, 1, blockSize, size);

                    // state.checkGLErrors("before extensions->glCompressedTexSubImage3D(");
                    extensions->glCompressedTexSubImage3D(target, k,
                                                          0, 0, indepth,
                                                          width, height, 1,
                                                          (GLenum)image->getPixelFormat(),
                                                          size,
                                                          image->getMipmapData(k));
                    // state.checkGLErrors("after extensions->glCompressedTexSubImage3D(");

                    width >>= 1;
                    height >>= 1;
                }
            }
        }
    }
}
void MPGeometry::renderPrimitiveSets(osg::State& state, bool usingVBOs) const {
    // check the map frame to see if it's up to date
    if ( _frame.needsSync() ) {
        // this lock protects a MapFrame sync when we have multiple DRAW threads.
        Threading::ScopedMutexLock exclusive( _frameSyncMutex );

        if ( _frame.needsSync() && _frame.sync() ) { // always double check
            // This should only happen if the layer ordering changes;
            // If layers are added or removed, the Tile gets rebuilt and
            // the point is moot.
            std::vector<Layer> reordered;
            const ImageLayerVector& layers = _frame.imageLayers();
            reordered.reserve( layers.size() );
            for( ImageLayerVector::const_iterator i = layers.begin(); i != layers.end(); ++i ) {
                std::vector<Layer>::iterator j = std::find( _layers.begin(), _layers.end(), i->get()->getUID() );
                if ( j != _layers.end() )
                    reordered.push_back( *j );
            }
            _layers.swap( reordered );
        }
    }

    unsigned layersDrawn = 0;

    // access the GL extensions interface for the current GC:
    osg::ref_ptr<osg::GL2Extensions> ext = osg::GL2Extensions::Get( state.getContextID(), true );
    const osg::Program::PerContextProgram* pcp = state.getLastAppliedProgramObject();

    // cannot store these in the object since there could be multiple GCs (and multiple
    // PerContextPrograms) at large
    GLint tileKeyLocation;
    GLint opacityLocation;
    GLint uidLocation;
    GLint orderLocation;
    GLint texMatParentLocation;

    // The PCP can change (especially in a VirtualProgram environment). So we do need to
    // re-query the uniform locations each time, unfortunately. TODO: explore optimizations.
    if ( pcp ) {
        tileKeyLocation      = pcp->getUniformLocation( _tileKeyUniformNameID );
        opacityLocation      = pcp->getUniformLocation( _opacityUniformNameID );
        uidLocation          = pcp->getUniformLocation( _uidUniformNameID );
        orderLocation        = pcp->getUniformLocation( _orderUniformNameID );
        texMatParentLocation = pcp->getUniformLocation( _texMatParentUniformNameID );
    }

    // apply the tilekey uniform once.
    ext->glUniform4fv( tileKeyLocation, 1, _tileKeyValue.ptr() );

    // activate the tile coordinate set - same for all layers
    state.setTexCoordPointer( _imageUnit+1, _tileCoords.get() );

    if ( _layers.size() > 0 ) {
        float prev_opacity        = -1.0f;
        float prev_alphaThreshold = -1.0f;

        // first bind any shared layers
        // TODO: optimize by pre-storing shared indexes
        for(unsigned i=0; i<_layers.size(); ++i) {
            const Layer& layer = _layers[i];

            // a "shared" layer binds to a secondary texture unit so that other layers
            // can see it and use it.
            if ( layer._imageLayer->isShared() ) {
                int sharedUnit = layer._imageLayer->shareImageUnit().get();
                {
                    state.setActiveTextureUnit( sharedUnit );
                    state.setTexCoordPointer( sharedUnit, layer._texCoords.get() );
                    // bind the texture for this layer to the active share unit.
                    layer._tex->apply( state );

                    // no texture LOD blending for shared layers for now. maybe later.
                }
            }
        }

        // track the active image unit.
        int activeImageUnit = -1;

        // iterate over all the image layers
        //glDepthMask(GL_TRUE);
        for(unsigned i=0; i<_layers.size(); ++i) {
            // if ( i > 0 )
            //     glDepthMask(GL_FALSE);

            const Layer& layer = _layers[i];

            if ( layer._imageLayer->getVisible() ) {
                // activate the visible unit if necessary:
                if ( activeImageUnit != _imageUnit ) {
                    state.setActiveTextureUnit( _imageUnit );
                    activeImageUnit = _imageUnit;
                }

                // bind the texture for this layer:
                layer._tex->apply( state );

                // if we're using a parent texture for blending, activate that now
                if ( layer._texParent.valid() ) {
                    state.setActiveTextureUnit( _imageUnitParent );
                    activeImageUnit = _imageUnitParent;
                    layer._texParent->apply( state );
                }

                // bind the texture coordinates for this layer.
                // TODO: can probably optimize this by sharing or using texture matrices.
                // State::setTexCoordPointer does some redundant work under the hood.
                state.setTexCoordPointer( _imageUnit, layer._texCoords.get() );

                // apply uniform values:
                if ( pcp ) {
                    // apply opacity:
                    float opacity = layer._imageLayer->getOpacity();
                    if ( opacity != prev_opacity ) {
                        ext->glUniform1f( opacityLocation, (GLfloat)opacity );
                        prev_opacity = opacity;
                    }

                    // assign the layer UID:
                    ext->glUniform1i( uidLocation, (GLint)layer._layerID );

                    // assign the layer order:
                    ext->glUniform1i( orderLocation, (GLint)layersDrawn );

                    // assign the parent texture matrix
                    if ( layer._texParent.valid() ) {
                        ext->glUniformMatrix4fv( texMatParentLocation, 1, GL_FALSE, layer._texMatParent.ptr() );
                    }
                }

                // draw the primitive sets.
                for(unsigned int primitiveSetNum=0; primitiveSetNum!=_primitives.size(); ++primitiveSetNum) {
                    const osg::PrimitiveSet* primitiveset = _primitives[primitiveSetNum].get();
                    primitiveset->draw(state, usingVBOs);
                }

                ++layersDrawn;
            }
        }

        // prevent texture leakage
        // TODO: find a way to remove this to speed things up
        glBindTexture( GL_TEXTURE_2D, 0 );
    }

    // if we didn't draw anything, draw the raw tiles anyway with no texture.
    if ( layersDrawn == 0 ) {
        ext->glUniform1f( opacityLocation, (GLfloat)1.0f );
        ext->glUniform1i( uidLocation, (GLint)-1 );
        ext->glUniform1i( orderLocation, (GLint)0 );

        // draw the primitives themselves.
        for(unsigned int primitiveSetNum=0; primitiveSetNum!=_primitives.size(); ++primitiveSetNum) {
            const osg::PrimitiveSet* primitiveset = _primitives[primitiveSetNum].get();
            primitiveset->draw(state, usingVBOs);
        }
    }
}
void VirtualProgram::apply( osg::State& state ) const {
    if (_shaderMap.empty() && !_inheritSet) {
        // If there's no data in the VP, and never has been, unload any existing program.
        // NOTE: OSG's State processor creates a "global default attribute" for each type.
        // Since we have no way of knowing whether the user created the VP or OSG created it
        // as the default fallback, we use the "_inheritSet" flag to differentiate. This
        // prevents any shader leakage from a VP-enabled node.
        const unsigned int contextID = state.getContextID();
        const osg::GL2Extensions* extensions = osg::GL2Extensions::Get(contextID, true);
        if( ! extensions->isGlslSupported() ) return;

        extensions->glUseProgram( 0 );
        state.setLastAppliedProgramObject(0);
        return;
    }

    // first, find and collect all the VirtualProgram attributes:
    ShaderMap         accumShaderMap;
    AttribBindingList accumAttribBindings;
    AttribAliasMap    accumAttribAliases;

    // Build the active shader map up to this point:
    if ( _inherit ) {
        accumulateShaders(state, _mask, accumShaderMap, accumAttribBindings, accumAttribAliases);
    }

    // next add the local shader components to the map, respecting the override values:
    {
        Threading::ScopedReadLock readonly(_dataModelMutex);

        for( ShaderMap::const_iterator i = _shaderMap.begin(); i != _shaderMap.end(); ++i ) {
            if ( i->second.accept(state) ) {
                addToAccumulatedMap( accumShaderMap, i->first, i->second );
            }
        }

        const AttribBindingList& abl = this->getAttribBindingList();
        accumAttribBindings.insert( abl.begin(), abl.end() );

#ifdef USE_ATTRIB_ALIASES
        const AttribAliasMap& aliases = this->getAttribAliases();
        accumAttribAliases.insert( aliases.begin(), aliases.end() );
#endif
    }

    // next, assemble a list of the shaders in the map so we can use it as our
    // program cache key.
    // (Note: at present, the "cache key" does not include any information on the vertex
    // attribute bindings. Technically it should, but in practice this might not be an
    // issue; it is unlikely one would have two identical shader programs with different
    // bindings.)
    ShaderVector vec;
    vec.reserve( accumShaderMap.size() );
    for( ShaderMap::iterator i = accumShaderMap.begin(); i != accumShaderMap.end(); ++i ) {
        ShaderEntry& entry = i->second;
        if ( i->second.accept(state) ) {
            vec.push_back( entry._shader.get() );
        }
    }

    // see if there's already a program associated with this list:
    osg::ref_ptr<osg::Program> program;

    // look up the program:
    {
        Threading::ScopedReadLock shared( _programCacheMutex );

        ProgramMap::const_iterator p = _programCache.find( vec );
        if ( p != _programCache.end() ) {
            program = p->second.get();
        }
    }

    // if not found, lock and build it:
    if ( !program.valid() ) {
        // build a new set of accumulated functions, to support the creation of main()
        ShaderComp::FunctionLocationMap accumFunctions;
        accumulateFunctions( state, accumFunctions );

        // now double-check the program cache, and failing that, build the
        // new shader Program.
        {
            Threading::ScopedWriteLock exclusive( _programCacheMutex );

            // double-check: look again to negate race conditions
            ProgramMap::const_iterator p = _programCache.find( vec );
            if ( p != _programCache.end() ) {
                program = p->second.get();
            }
            else {
                ShaderVector keyVector;

                //OE_NOTICE << LC << "Building new Program for VP " << getName() << std::endl;

                program = buildProgram(
                    getName(),
                    state,
                    accumFunctions,
                    accumShaderMap,
                    accumAttribBindings,
                    accumAttribAliases,
                    _template.get(),
                    keyVector);

                // global sharing.
                s_programRepo.share(program);

                // finally, put our new program in the cache.
                _programCache[ keyVector ] = program;
            }
        }
    }

    // finally, apply the program attribute.
    if ( program.valid() ) {
        const unsigned int contextID = state.getContextID();
        const osg::GL2Extensions* extensions = osg::GL2Extensions::Get(contextID, true);

        osg::Program::PerContextProgram* pcp = program->getPCP( contextID );
        bool useProgram = state.getLastAppliedProgramObject() != pcp;

#ifdef DEBUG_APPLY_COUNTS
        {
            // debugging
            static int s_framenum = 0;
            static Threading::Mutex s_mutex;
            static std::map< const VirtualProgram*, std::pair<int,int> > s_counts;

            Threading::ScopedMutexLock lock(s_mutex);

            int framenum = state.getFrameStamp()->getFrameNumber();
            if ( framenum > s_framenum ) {
                OE_NOTICE << LC << "Applies in last frame: " << std::endl;
                for(std::map<const VirtualProgram*,std::pair<int,int> >::iterator i = s_counts.begin(); i != s_counts.end(); ++i) {
                    std::pair<int,int>& counts = i->second;
                    OE_NOTICE << LC << "  " << i->first->getName() << " : " << counts.second << "/" << counts.first << std::endl;
                }
                s_framenum = framenum;
                s_counts.clear();
            }
            s_counts[this].first++;
            if ( useProgram )
                s_counts[this].second++;
        }
#endif

        if ( useProgram ) {
            if( pcp->needsLink() )
                program->compileGLObjects( state );

            if( pcp->isLinked() ) {
                if( osg::isNotifyEnabled(osg::INFO) )
                    pcp->validateProgram();

                pcp->useProgram();
                state.setLastAppliedProgramObject( pcp );
            }
            else {
                // program not usable, fall back to fixed function.
                extensions->glUseProgram( 0 );
                state.setLastAppliedProgramObject(0);
                OE_WARN << LC << "Program link failure!" << std::endl;
            }
        }

        //program->apply( state );

#if 0 // test code for detecting race conditions
        for(int i=0; i<10000; ++i) {
            state.setLastAppliedProgramObject(0L);
            program->apply( state );
        }
#endif
    }
}
void VirtualProgram::apply( osg::State& state ) const {
    if (_shaderMap.empty() && !_inheritSet) {
        // If there's no data in the VP, and never has been, unload any existing program.
        // NOTE: OSG's State processor creates a "global default attribute" for each type.
        // Since we have no way of knowing whether the user created the VP or OSG created it
        // as the default fallback, we use the "_inheritSet" flag to differentiate. This
        // prevents any shader leakage from a VP-enabled node.
        const unsigned int contextID = state.getContextID();
        const osg::GL2Extensions* extensions = osg::GL2Extensions::Get(contextID, true);
        if( ! extensions->isGlslSupported() ) return;

        extensions->glUseProgram( 0 );
        state.setLastAppliedProgramObject(0);
        return;
    }

    // first, find and collect all the VirtualProgram attributes:
    ShaderMap         accumShaderMap;
    AttribBindingList accumAttribBindings;
    AttribAliasMap    accumAttribAliases;

    if ( _inherit ) {
        const StateHack::AttributeVec* av = StateHack::GetAttributeVec( state, this );
        if ( av && av->size() > 0 ) {
            // find the deepest VP that doesn't inherit:
            unsigned start = 0;
            for( start = (int)av->size()-1; start > 0; --start ) {
                const VirtualProgram* vp = dynamic_cast<const VirtualProgram*>( (*av)[start].first );
                if ( vp && (vp->_mask & _mask) && vp->_inherit == false )
                    break;
            }

            // collect shaders from there to here:
            for( unsigned i=start; i<av->size(); ++i ) {
                const VirtualProgram* vp = dynamic_cast<const VirtualProgram*>( (*av)[i].first );
                if ( vp && (vp->_mask & _mask) ) {
                    ShaderMap vpShaderMap;
                    vp->getShaderMap( vpShaderMap );

                    for( ShaderMap::const_iterator i = vpShaderMap.begin(); i != vpShaderMap.end(); ++i ) {
                        addToAccumulatedMap( accumShaderMap, i->first, i->second );
                    }

                    const AttribBindingList& abl = vp->getAttribBindingList();
                    accumAttribBindings.insert( abl.begin(), abl.end() );

#ifdef USE_ATTRIB_ALIASES
                    const AttribAliasMap& aliases = vp->getAttribAliases();
                    accumAttribAliases.insert( aliases.begin(), aliases.end() );
#endif
                }
            }
        }
    }

    // next add the local shader components to the map, respecting the override values:
    {
        Threading::ScopedReadLock readonly(_dataModelMutex);

        for( ShaderMap::const_iterator i = _shaderMap.begin(); i != _shaderMap.end(); ++i ) {
            addToAccumulatedMap( accumShaderMap, i->first, i->second );
        }

        const AttribBindingList& abl = this->getAttribBindingList();
        accumAttribBindings.insert( abl.begin(), abl.end() );

#ifdef USE_ATTRIB_ALIASES
        const AttribAliasMap& aliases = this->getAttribAliases();
        accumAttribAliases.insert( aliases.begin(), aliases.end() );
#endif
    }

    if ( true ) //even with nothing in the map, we still want mains! -gw
        //accumShaderMap.size() )
    {
        // next, assemble a list of the shaders in the map so we can use it as our
        // program cache key.
        // (Note: at present, the "cache key" does not include any information on the vertex
        // attribute bindings. Technically it should, but in practice this might not be an
        // issue; it is unlikely one would have two identical shader programs with different
        // bindings.)
        ShaderVector vec;
        vec.reserve( accumShaderMap.size() );
        for( ShaderMap::iterator i = accumShaderMap.begin(); i != accumShaderMap.end(); ++i ) {
            ShaderEntry& entry = i->second;
            vec.push_back( entry.first.get() );
        }

        // see if there's already a program associated with this list:
        osg::ref_ptr<osg::Program> program;

        // look up the program:
        {
            Threading::ScopedReadLock shared( _programCacheMutex );

            ProgramMap::const_iterator p = _programCache.find( vec );
            if ( p != _programCache.end() ) {
                program = p->second.get();
            }
        }

        // if not found, lock and build it:
        if ( !program.valid() ) {
            // build a new set of accumulated functions, to support the creation of main()
            ShaderComp::FunctionLocationMap accumFunctions;
            accumulateFunctions( state, accumFunctions );

            // now double-check the program cache, and failing that, build the
            // new shader Program.
            {
                Threading::ScopedWriteLock exclusive( _programCacheMutex );

                // double-check: look again to negate race conditions
                ProgramMap::const_iterator p = _programCache.find( vec );
                if ( p != _programCache.end() ) {
                    program = p->second.get();
                }
                else {
                    ShaderVector keyVector;

                    //OE_NOTICE << LC << "Building new Program for VP " << getName() << std::endl;

                    program = buildProgram(
                        getName(),
                        state,
                        accumFunctions,
                        accumShaderMap,
                        accumAttribBindings,
                        accumAttribAliases,
                        _template.get(),
                        keyVector);

                    // finally, put our new program in the cache.
                    _programCache[ keyVector ] = program;
                }
            }
        }

        // finally, apply the program attribute.
        if ( program.valid() ) {
            program->apply( state );

#if 0 // test code for detecting race conditions
            for(int i=0; i<10000; ++i) {
                state.setLastAppliedProgramObject(0L);
                program->apply( state );
            }
#endif
        }
    }
}
void DepthPeelBin::PerContextInfo::init( const osg::State& state, const GLsizei width, const GLsizei height ) {
    TRACEDUMP("PerContextInfo::init");

    _width = width;
    _height = height;

    // Create three depth textures: the first two are ping-pong buffers for each pass,
    // the third is the persistent depth buffer from the opaque pass.
    glGenTextures( 3, _depthTex );
    UTIL_GL_ERROR_CHECK( "DepthPeelBin PerContext Depth Tex" );
    int idx;
    for( idx=0; idx<3; idx++ ) {
        glBindTexture( GL_TEXTURE_2D, _depthTex[ idx ] );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
        glTexParameteri( GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE_ARB, GL_ALPHA );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE_ARB, GL_COMPARE_R_TO_TEXTURE_ARB );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC_ARB, GL_LEQUAL );
        // Alpha == 1.0 if R [func] texel.
        glTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, _width, _height, 0,
                      GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL );
    }

    glGenTextures( 1, &_colorTex );
    UTIL_GL_ERROR_CHECK( "DepthPeelBin PerContextInfo Color Tex" );
    glBindTexture( GL_TEXTURE_2D, _colorTex );
    glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, _width, _height, 0,
                  GL_RGBA, GL_UNSIGNED_BYTE, NULL );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
    glBindTexture( GL_TEXTURE_2D, 0 );

    osg::FBOExtensions* fboExt( osg::FBOExtensions::instance( state.getContextID(), true ) );
    osgwTools::glGenFramebuffers( fboExt, 1, &_fbo );
    osgwTools::glBindFramebuffer( fboExt, GL_FRAMEBUFFER_EXT, _fbo );
    osgwTools::glFramebufferTexture2D( fboExt, GL_DRAW_FRAMEBUFFER_EXT,
                                       GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, _colorTex, 0 );
    UTIL_GL_ERROR_CHECK( "DepthPeelBin PerContextInfo" );

    osg::setGLExtensionFuncPtr( _glGenQueries, "glGenQueries", "glGenQueriesARB");
    osg::setGLExtensionFuncPtr( _glDeleteQueries, "glDeleteQueries", "glDeleteQueriesARB");
    osg::setGLExtensionFuncPtr( _glBeginQuery, "glBeginQuery", "glBeginQueryARB");
    osg::setGLExtensionFuncPtr( _glEndQuery, "glEndQuery", "glEndQueryARB");
    osg::setGLExtensionFuncPtr( _glGetQueryObjectiv, "glGetQueryObjectiv", "glGetQueryObjectivARB");
    osg::setGLExtensionFuncPtr( _glGetFramebufferAttachmentParameteriv,
                                "glGetFramebufferAttachmentParameteriv", "glGetFramebufferAttachmentParameterivEXT");

    _glGenQueries( 1, &_queryID );

    UTIL_GL_ERROR_CHECK( "DepthPeelBin PerContextInfo end" );
    UTIL_GL_FBO_ERROR_CHECK( "DepthPeelBin PerContextInfo end", fboExt );
    _init = true;
}
void MPGeometry::renderPrimitiveSets(osg::State& state, bool usingVBOs) const {
    // check the map frame to see if it's up to date
    if ( _frame.needsSync() ) {
        // this lock protects a MapFrame sync when we have multiple DRAW threads.
        Threading::ScopedMutexLock exclusive( _frameSyncMutex );

        if ( _frame.needsSync() && _frame.sync() ) { // always double check
            // This should only happen if the layer ordering changes;
            // If layers are added or removed, the Tile gets rebuilt and
            // the point is moot.
            std::vector<Layer> reordered;
            const ImageLayerVector& layers = _frame.imageLayers();
            reordered.reserve( layers.size() );
            for( ImageLayerVector::const_iterator i = layers.begin(); i != layers.end(); ++i ) {
                std::vector<Layer>::iterator j = std::find( _layers.begin(), _layers.end(), i->get()->getUID() );
                if ( j != _layers.end() )
                    reordered.push_back( *j );
            }
            _layers.swap( reordered );
        }
    }

    unsigned layersDrawn = 0;

    osg::ref_ptr<osg::GL2Extensions> ext = osg::GL2Extensions::Get( state.getContextID(), true );
    const osg::Program::PerContextProgram* pcp = state.getLastAppliedProgramObject();

    GLint opacityLocation;
    GLint uidLocation;
    GLint orderLocation;
    GLint texMatParentLocation;

    // yes, it's possible that the PCP is not set up yet.
    // TODO: can we optimize this so we don't need to get uniform locations every time?
    if ( pcp ) {
        opacityLocation      = pcp->getUniformLocation( _opacityUniform->getNameID() );
        uidLocation          = pcp->getUniformLocation( _layerUIDUniform->getNameID() );
        orderLocation        = pcp->getUniformLocation( _layerOrderUniform->getNameID() );
        texMatParentLocation = pcp->getUniformLocation( _texMatParentUniform->getNameID() );
    }

    // activate the tile coordinate set - same for all layers
    state.setTexCoordPointer( _imageUnit+1, _tileCoords.get() );

    if ( _layers.size() > 0 ) {
        float prev_opacity        = -1.0f;
        float prev_alphaThreshold = -1.0f;

        // first bind any shared layers
        // TODO: optimize by pre-storing shared indexes
        for(unsigned i=0; i<_layers.size(); ++i) {
            const Layer& layer = _layers[i];

            // a "shared" layer binds to a secondary texture unit so that other layers
            // can see it and use it.
            if ( layer._imageLayer->isShared() ) {
                int sharedUnit = layer._imageLayer->shareImageUnit().get();
                {
                    state.setActiveTextureUnit( sharedUnit );
                    state.setTexCoordPointer( sharedUnit, layer._texCoords.get() );
                    // bind the texture for this layer to the active share unit.
                    layer._tex->apply( state );

                    // no texture LOD blending for shared layers for now. maybe later.
                }
            }
        }

        // track the active image unit.
        int activeImageUnit = -1;

        // iterate over all the image layers
        for(unsigned i=0; i<_layers.size(); ++i) {
            const Layer& layer = _layers[i];

            if ( layer._imageLayer->getVisible() ) {
                // activate the visible unit if necessary:
                if ( activeImageUnit != _imageUnit ) {
                    state.setActiveTextureUnit( _imageUnit );
                    activeImageUnit = _imageUnit;
                }

                // bind the texture for this layer:
                layer._tex->apply( state );

                // if we're using a parent texture for blending, activate that now
                if ( layer._texParent.valid() ) {
                    state.setActiveTextureUnit( _imageUnitParent );
                    activeImageUnit = _imageUnitParent;
                    layer._texParent->apply( state );
                }

                // bind the texture coordinates for this layer.
                // TODO: can probably optimize this by sharing or using texture matrices.
                // State::setTexCoordPointer does some redundant work under the hood.
                state.setTexCoordPointer( _imageUnit, layer._texCoords.get() );

                // apply uniform values:
                if ( pcp ) {
                    // apply opacity:
                    float opacity = layer._imageLayer->getOpacity();
                    if ( opacity != prev_opacity ) {
                        _opacityUniform->set( opacity );
                        _opacityUniform->apply( ext, opacityLocation );
                        prev_opacity = opacity;
                    }

                    // assign the layer UID:
                    _layerUIDUniform->set( layer._layerID );
                    _layerUIDUniform->apply( ext, uidLocation );

                    // assign the layer order:
                    _layerOrderUniform->set( (int)layersDrawn );
                    _layerOrderUniform->apply( ext, orderLocation );

                    // assign the parent texture matrix
                    if ( layer._texParent.valid() ) {
                        _texMatParentUniform->set( layer._texMatParent );
                        _texMatParentUniform->apply( ext, texMatParentLocation );
                    }
                }

                // draw the primitive sets.
                for(unsigned int primitiveSetNum=0; primitiveSetNum!=_primitives.size(); ++primitiveSetNum) {
                    const osg::PrimitiveSet* primitiveset = _primitives[primitiveSetNum].get();
                    primitiveset->draw(state, usingVBOs);
                }

                ++layersDrawn;
            }
        }

        // prevent texture leakage
        glBindTexture( GL_TEXTURE_2D, 0 );
    }

    // if we didn't draw anything, draw the raw tiles anyway with no texture.
    if ( layersDrawn == 0 ) {
        _opacityUniform->set( 1.0f );
        _opacityUniform->apply( ext, opacityLocation );

        _layerUIDUniform->set( (int)-1 ); // indicates a non-textured layer
        _layerUIDUniform->apply( ext, uidLocation );

        _layerOrderUniform->set( (int)0 );
        _layerOrderUniform->apply( ext, orderLocation );

        // draw the primitives themselves.
        for(unsigned int primitiveSetNum=0; primitiveSetNum!=_primitives.size(); ++primitiveSetNum) {
            const osg::PrimitiveSet* primitiveset = _primitives[primitiveSetNum].get();
            primitiveset->draw(state, usingVBOs);
        }
    }
}
void MPGeometry::renderPrimitiveSets(osg::State& state, bool renderColor, bool usingVBOs) const {
    // check the map frame to see if it's up to date
    if ( _frame.needsSync() ) {
        // this lock protects a MapFrame sync when we have multiple DRAW threads.
        Threading::ScopedMutexLock exclusive( _frameSyncMutex );

        if ( _frame.needsSync() && _frame.sync() ) { // always double check
            // This should only happen if the layer ordering changes;
            // If layers are added or removed, the Tile gets rebuilt and
            // the point is moot.
            std::vector<Layer> reordered;
            const ImageLayerVector& layers = _frame.imageLayers();
            reordered.reserve( layers.size() );
            for( ImageLayerVector::const_iterator i = layers.begin(); i != layers.end(); ++i ) {
                std::vector<Layer>::iterator j = std::find( _layers.begin(), _layers.end(), i->get()->getUID() );
                if ( j != _layers.end() )
                    reordered.push_back( *j );
            }
            _layers.swap( reordered );
        }
    }

    unsigned layersDrawn = 0;

    // access the GL extensions interface for the current GC:
    const osg::Program::PerContextProgram* pcp = 0L;

#if OSG_MIN_VERSION_REQUIRED(3,3,3)
    osg::ref_ptr<osg::GLExtensions> ext;
#else
    osg::ref_ptr<osg::GL2Extensions> ext;
#endif

    unsigned contextID;

    if (_supportsGLSL) {
        contextID = state.getContextID();
#if OSG_MIN_VERSION_REQUIRED(3,3,3)
        ext = osg::GLExtensions::Get(contextID, true);
#else
        ext = osg::GL2Extensions::Get( contextID, true );
#endif
        pcp = state.getLastAppliedProgramObject();
    }

    // cannot store these in the object since there could be multiple GCs (and multiple
    // PerContextPrograms) at large
    GLint tileKeyLocation       = -1;
    GLint birthTimeLocation     = -1;
    GLint opacityLocation       = -1;
    GLint uidLocation           = -1;
    GLint orderLocation         = -1;
    GLint texMatParentLocation  = -1;
    GLint minRangeLocation      = -1;
    GLint maxRangeLocation      = -1;

    // The PCP can change (especially in a VirtualProgram environment). So we do need to
    // re-query the uniform locations each time, unfortunately. TODO: explore optimizations.
    if ( pcp ) {
        tileKeyLocation      = pcp->getUniformLocation( _tileKeyUniformNameID );
        birthTimeLocation    = pcp->getUniformLocation( _birthTimeUniformNameID );
        opacityLocation      = pcp->getUniformLocation( _opacityUniformNameID );
        uidLocation          = pcp->getUniformLocation( _uidUniformNameID );
        orderLocation        = pcp->getUniformLocation( _orderUniformNameID );
        texMatParentLocation = pcp->getUniformLocation( _texMatParentUniformNameID );
        minRangeLocation     = pcp->getUniformLocation( _minRangeUniformNameID );
        maxRangeLocation     = pcp->getUniformLocation( _maxRangeUniformNameID );
    }

    // apply the tilekey uniform once.
    if ( tileKeyLocation >= 0 ) {
        ext->glUniform4fv( tileKeyLocation, 1, _tileKeyValue.ptr() );
    }

    // set the "birth time" - i.e. the time this tile last entered the scene in the current GC.
    if ( birthTimeLocation >= 0 ) {
        PerContextData& pcd = _pcd[contextID];
        if ( pcd.birthTime < 0.0f ) {
            const osg::FrameStamp* stamp = state.getFrameStamp();
            if ( stamp ) {
                pcd.birthTime = stamp->getReferenceTime();
            }
        }
        ext->glUniform1f( birthTimeLocation, pcd.birthTime );
    }

    // activate the tile coordinate set - same for all layers
    if ( renderColor ) {
        state.setTexCoordPointer( _imageUnit+1, _tileCoords.get() );
    }

#ifndef OSG_GLES2_AVAILABLE
    if ( renderColor ) {
        // emit a default terrain color since we're not binding a color array:
        glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
    }
#endif

    // activate the elevation texture if there is one. Same for all layers.
    //if ( _elevTex.valid() )
    //{
    //    state.setActiveTextureUnit( 2 );
    //    state.setTexCoordPointer( 1, _tileCoords.get() ); // necessary?? since we do it above
    //    _elevTex->apply( state );
    //    // todo: probably need an elev texture matrix as well. -gw
    //}

    // track the active image unit.
    int activeImageUnit = -1;

    // remember whether we applied a parent texture.
    bool usedTexParent = false;

    if ( _layers.size() > 0 ) {
        float prev_opacity = -1.0f;

        // first bind any shared layers. We still have to do this even if we are
        // in !renderColor mode b/c these textures could be used by vertex shaders
        // to alter the geometry.
        int sharedLayers = 0;
        if ( pcp ) {
            for(unsigned i=0; i<_layers.size(); ++i) {
                const Layer& layer = _layers[i];

                // a "shared" layer binds to a secondary texture unit so that other layers
                // can see it and use it.
                if ( layer._imageLayer->isShared() ) {
                    ++sharedLayers;
                    int sharedUnit = layer._imageLayer->shareImageUnit().get();
                    {
                        state.setActiveTextureUnit( sharedUnit );
                        state.setTexCoordPointer( sharedUnit, layer._texCoords.get() );
                        // bind the texture for this layer to the active share unit.
                        layer._tex->apply( state );

                        // Shared layers need a texture matrix since the terrain engine doesn't
                        // provide a "current texture coordinate set" uniform (i.e. oe_layer_texc)
                        GLint texMatLocation = 0;
                        texMatLocation = pcp->getUniformLocation( layer._texMatUniformID );
                        if ( texMatLocation >= 0 ) {
                            ext->glUniformMatrix4fv( texMatLocation, 1, GL_FALSE, layer._texMat.ptr() );
                        }
                    }
                }
            }
        }

        if (renderColor) {
            // find the first opaque layer, top-down, and start there:
            unsigned first = 0;
            for(first = _layers.size()-1; first > 0; --first) {
                const Layer& layer = _layers[first];
                if (layer._opaque && //Color filters can modify the opacity
                    layer._imageLayer->getColorFilters().empty() &&
                    layer._imageLayer->getVisible() &&
                    layer._imageLayer->getOpacity() >= 1.0f) {
                    break;
                }
            }

            // iterate over all the image layers
            for(unsigned i=first; i<_layers.size(); ++i) {
                const Layer& layer = _layers[i];

                if ( layer._imageLayer->getVisible() && layer._imageLayer->getOpacity() > 0.0f ) {
                    // activate the visible unit if necessary:
                    if ( activeImageUnit != _imageUnit ) {
                        state.setActiveTextureUnit( _imageUnit );
                        activeImageUnit = _imageUnit;
                    }

                    // bind the texture for this layer:
                    layer._tex->apply( state );

                    // in FFP mode, we need to enable the GL mode for texturing:
                    if ( !pcp ) { //!_supportsGLSL)
                        state.applyMode(GL_TEXTURE_2D, true);
                    }

                    // if we're using a parent texture for blending, activate that now
                    if ( texMatParentLocation >= 0 && layer._texParent.valid() ) {
                        state.setActiveTextureUnit( _imageUnitParent );
                        activeImageUnit = _imageUnitParent;
                        layer._texParent->apply( state );
                        usedTexParent = true;
                    }

                    // bind the texture coordinates for this layer.
                    // TODO: can probably optimize this by sharing or using texture matrices.
                    // State::setTexCoordPointer does some redundant work under the hood.
                    state.setTexCoordPointer( _imageUnit, layer._texCoords.get() );

                    // apply uniform values:
                    if ( pcp ) {
                        // apply opacity:
                        if ( opacityLocation >= 0 ) {
                            float opacity = layer._imageLayer->getOpacity();
                            if ( opacity != prev_opacity ) {
                                ext->glUniform1f( opacityLocation, (GLfloat)opacity );
                                prev_opacity = opacity;
                            }
                        }

                        // assign the layer UID:
                        if ( uidLocation >= 0 ) {
                            ext->glUniform1i( uidLocation, (GLint)layer._layerID );
                        }

                        // assign the layer order:
                        if ( orderLocation >= 0 ) {
                            ext->glUniform1i( orderLocation, (GLint)layersDrawn );
                        }

                        // assign the parent texture matrix
                        if ( texMatParentLocation >= 0 && layer._texParent.valid() ) {
                            ext->glUniformMatrix4fv( texMatParentLocation, 1, GL_FALSE, layer._texMatParent.ptr() );
                        }

                        // assign the min range
                        if ( minRangeLocation >= 0 ) {
                            ext->glUniform1f( minRangeLocation, layer._imageLayer->getImageLayerOptions().minVisibleRange().get() );
                        }

                        // assign the max range
                        if ( maxRangeLocation >= 0 ) {
                            ext->glUniform1f( maxRangeLocation, layer._imageLayer->getImageLayerOptions().maxVisibleRange().get() );
                        }
                    }

                    // draw the primitive sets.
                    for(unsigned int primitiveSetNum=0; primitiveSetNum!=_primitives.size(); ++primitiveSetNum) {
                        const osg::PrimitiveSet* primitiveset = _primitives[primitiveSetNum].get();
                        if ( primitiveset ) {
                            primitiveset->draw(state, usingVBOs);
                        } else {
                            OE_WARN << LC << "Strange, MPGeometry had a 0L primset" << std::endl;
                        }
                    }

                    ++layersDrawn;
                }
            }
        }
    }

    // if we didn't draw anything, draw the raw tiles anyway with no texture.
    if ( layersDrawn == 0 ) {
        if ( pcp ) {
            if ( opacityLocation >= 0 )
                ext->glUniform1f( opacityLocation, (GLfloat)1.0f );
            if ( uidLocation >= 0 )
                ext->glUniform1i( uidLocation, (GLint)-1 );
            if ( orderLocation >= 0 )
                ext->glUniform1i( orderLocation, (GLint)0 );
        }

        // draw the primitives themselves.
        for(unsigned int primitiveSetNum=0; primitiveSetNum!=_primitives.size(); ++primitiveSetNum) {
            const osg::PrimitiveSet* primitiveset = _primitives[primitiveSetNum].get();
            primitiveset->draw(state, usingVBOs);
        }
    }
    else { // at least one textured layer was drawn:
        // prevent texture leakage
        // TODO: find a way to remove this to speed things up
        if ( renderColor ) {
            glBindTexture( GL_TEXTURE_2D, 0 );

            // if a parent texture was applied, need to disable both.
            if ( usedTexParent ) {
                state.setActiveTextureUnit( activeImageUnit != _imageUnitParent ? _imageUnitParent : _imageUnit );
                glBindTexture( GL_TEXTURE_2D, 0);
            }
        }
    }
}
void VirtualProgram::apply( osg::State& state ) const {
    if (_shaderMap.empty() && !_inheritSet) {
        // If there's no data in the VP, and never has been, unload any existing program.
        // NOTE: OSG's State processor creates a "global default attribute" for each type.
        // Since we have no way of knowing whether the user created the VP or OSG created it
        // as the default fallback, we use the "_inheritSet" flag to differentiate. This
        // prevents any shader leakage from a VP-enabled node.
        const unsigned int contextID = state.getContextID();
        const osg::GL2Extensions* extensions = osg::GL2Extensions::Get(contextID, true);
        if( ! extensions->isGlslSupported() ) return;

        extensions->glUseProgram( 0 );
        state.setLastAppliedProgramObject(0);
        return;
    }

    // first, find and collect all the VirtualProgram attributes:
    ShaderMap         accumShaderMap;
    AttribBindingList accumAttribBindings;
    AttribAliasMap    accumAttribAliases;

    if ( _inherit ) {
        const StateHack::AttributeVec* av = StateHack::GetAttributeVec( state, this );
        if ( av && av->size() > 0 ) {
            // find the deepest VP that doesn't inherit:
            unsigned start = 0;
            for( start = (int)av->size()-1; start > 0; --start ) {
                const VirtualProgram* vp = dynamic_cast<const VirtualProgram*>( (*av)[start].first );
                if ( vp && (vp->_mask & _mask) && vp->_inherit == false )
                    break;
            }

            // collect shaders from there to here:
            for( unsigned i=start; i<av->size(); ++i ) {
                const VirtualProgram* vp = dynamic_cast<const VirtualProgram*>( (*av)[i].first );
                if ( vp && (vp->_mask & _mask) ) {
                    for( ShaderMap::const_iterator i = vp->_shaderMap.begin(); i != vp->_shaderMap.end(); ++i ) {
                        addToAccumulatedMap( accumShaderMap, i->first, i->second );
                    }

                    const AttribBindingList& abl = vp->getAttribBindingList();
                    accumAttribBindings.insert( abl.begin(), abl.end() );

                    const AttribAliasMap& aliases = vp->getAttribAliases();
                    accumAttribAliases.insert( aliases.begin(), aliases.end() );
                }
            }
        }
    }

    // next add the local shader components to the map, respecting the override values:
    for( ShaderMap::const_iterator i = _shaderMap.begin(); i != _shaderMap.end(); ++i ) {
        addToAccumulatedMap( accumShaderMap, i->first, i->second );
    }

    const AttribBindingList& abl = this->getAttribBindingList();
    accumAttribBindings.insert( abl.begin(), abl.end() );

    const AttribAliasMap& aliases = this->getAttribAliases();
    accumAttribAliases.insert( aliases.begin(), aliases.end() );

    if ( true ) //even with nothing in the map, we still want mains! -gw
        //accumShaderMap.size() )
    {
        // next, assemble a list of the shaders in the map so we can use it as our
        // program cache key.
        // (Note: at present, the "cache key" does not include any information on the vertex
        // attribute bindings. Technically it should, but in practice this might not be an
        // issue; it is unlikely one would have two identical shader programs with different
        // bindings.)
        ShaderVector vec;
        vec.reserve( accumShaderMap.size() );
        for( ShaderMap::iterator i = accumShaderMap.begin(); i != accumShaderMap.end(); ++i ) {
            ShaderEntry& entry = i->second;
            vec.push_back( entry.first.get() );
        }

        // see if there's already a program associated with this list:
        osg::Program* program = 0L;

        // look up the program:
        {
            Threading::ScopedReadLock shared( _programCacheMutex );

            ProgramMap::const_iterator p = _programCache.find( vec );
            if ( p != _programCache.end() ) {
                program = p->second.get();
            }
        }

        // if not found, lock and build it:
        if ( !program ) {
            Threading::ScopedWriteLock exclusive( _programCacheMutex );

            // look again in case of contention:
            ProgramMap::const_iterator p = _programCache.find( vec );
            if ( p != _programCache.end() ) {
                program = p->second.get();
            }
            else {
                VirtualProgram* nc = const_cast<VirtualProgram*>(this);
                program = nc->buildProgram( state, accumShaderMap, accumAttribBindings, accumAttribAliases);
            }
        }

        // finally, apply the program attribute.
        program->apply( state );
    }
}