void LLVertexBuffer::createGLIndices()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_CREATE_INDICES);
	U32 size = getIndicesSize();

	if (mGLIndices)
	{
		destroyGLIndices();
	}

	if (size == 0)
	{
		return;
	}

	mEmpty = TRUE;

	if (useVBOs())
	{
		mMappedIndexData = NULL;
		genIndices();
		mResized = TRUE;
	}
	else
	{
		mMappedIndexData = new U8[size];
		memset(mMappedIndexData, 0, size);
		static int gl_buffer_idx = 0;
		mGLIndices = ++gl_buffer_idx;
	}
}
void LLVertexBuffer::destroyGLIndices()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTROY_INDICES);
	if (mGLIndices)
	{
		if (useVBOs())
		{
			freeClientBuffer();

			if (mMappedData || mMappedIndexData)
			{
				llerrs << "Vertex buffer destroyed while mapped." << llendl;
			}
			releaseIndices();
		}
		else
		{
			delete [] mMappedIndexData;
			mMappedIndexData = NULL;
			mEmpty = TRUE;
		}

		sAllocatedBytes -= getIndicesSize();
	}

	mGLIndices = 0;
	unbind();
}
void LLVertexBuffer::updateNumVerts(S32 nverts)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_UPDATE_VERTS);

	llassert(nverts >= 0);

	if (nverts >= 65535)
	{
		llwarns << "Vertex buffer overflow!" << llendl;
		nverts = 65535;
	}

	mRequestedNumVerts = nverts;

	if (!mDynamicSize)
	{
		mNumVerts = nverts;
	}
	else if (mUsage == GL_STATIC_DRAW_ARB ||
			 nverts > mNumVerts ||
			 nverts < mNumVerts/2)
	{
		if (mUsage != GL_STATIC_DRAW_ARB && nverts + nverts/4 <= 65535)
		{
			nverts += nverts/4;
		}
		mNumVerts = nverts;
	}
}
void LLVertexBuffer::destroyGLBuffer()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTROY_BUFFER);
	if (mGLBuffer)
	{
		if (useVBOs())
		{
			if (mMappedData || mMappedIndexData)
			{
				llerrs << "Vertex buffer destroyed while mapped!" << llendl;
			}
			releaseBuffer();
		}
		else
		{
			delete [] mMappedData;
			mMappedData = NULL;
			mEmpty = TRUE;
		}

		sAllocatedBytes -= getSize();
	}

	mGLBuffer = 0;
	unbind();
}
// protected, use unref()
//virtual
LLVertexBuffer::~LLVertexBuffer()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTRUCTOR);
	destroyGLBuffer();
	destroyGLIndices();
	sCount--;
}
//------------------------------------------------------------------------------
// GforceSite(Matrix& force, int *x, int mu):
// It calculates the gauge force at site x and direction mu.
//------------------------------------------------------------------------------
void GimprRect::GforceSite(Matrix& force, int *x, int mu)
{
  const char *fname = "GforceSite(M&,i*,i)";

  setCbufCntrlReg(4, CBUF_MODE4);

  Matrix *u_off = GaugeField()+GsiteOffset(x)+mu;

  Matrix mt1;

  //----------------------------------------------------------------------------
  //  get staple
  //     mt1 = staple
  //----------------------------------------------------------------------------
  Staple(mt1, x, mu);
  ForceFlops += 198*3*3+12+216*3;

  //----------------------------------------------------------------------------
  //  mt2 = U_mu(x)
  //----------------------------------------------------------------------------
  Matrix mt2(*u_off);
  // moveMem((IFloat *)mp2, (IFloat *)u_off, MATRIX_SIZE * sizeof(IFloat));

  //----------------------------------------------------------------------------
  //  force = -(beta*(1-8*c_1)/3)*U_mu(x)*stap
  //----------------------------------------------------------------------------
  force.DotMEqual(mt2, mt1);
  force *= plaq_coeff;
  // mDotMEqual((IFloat *)&force, (const IFloat *)mp2, (const IFloat *)mp1);
  // tmp = plaq_coeff;
  // vecTimesEquFloat((IFloat *)&force, tmp, MATRIX_SIZE);

  //----------------------------------------------------------------------------
  //  get rectangle staple
  //     mt1 = rect_stap
  //----------------------------------------------------------------------------
  RectStaple(mt1, x, mu);
  ForceFlops += 198*3*18+216*3*6;

  //----------------------------------------------------------------------------
  //  mt2 = -(beta*c_1/3)*U_mu(x)
  //----------------------------------------------------------------------------
  // mt2 = *u_off;
  // moveMem((IFloat *)mp2, (IFloat *)u_off, MATRIX_SIZE*sizeof(IFloat));
  mt2 *= rect_coeff;
  // tmp = rect_coeff;
  // vecTimesEquFloat((IFloat *)mp2, tmp, MATRIX_SIZE);
  ForceFlops += 234;

  //----------------------------------------------------------------------------
  //  force += -(beta*c_1/3)*U_mu(x)*rect_stap
  //----------------------------------------------------------------------------
  force.DotMPlus(mt2, mt1);
  // mDotMPlus((IFloat *)&force, (const IFloat *)mp2, (const IFloat *)mp1);

  mt1.Dagger(force);
  force.TrLessAntiHermMatrix(mt1);
  ForceFlops += 198+24;
}
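//------------------------------------------------------------------------------
// Hedged sketch (not part of the source above): plaq_coeff and rect_coeff are
// used but not defined in this excerpt. From the in-line comments of
// GforceSite(),
//   force  = -(beta*(1-8*c_1)/3) * U_mu(x) * staple
//   force += -(beta*c_1/3)       * U_mu(x) * rect_staple
// so the two coefficients presumably relate to the gauge coupling beta and the
// rectangle weight c_1 roughly as below. The helper name and its use of IFloat
// are illustrative assumptions only.
//------------------------------------------------------------------------------
static inline void set_gimpr_rect_coeffs(IFloat beta, IFloat c_1,
                                         IFloat &plaq_coeff, IFloat &rect_coeff)
{
  plaq_coeff = -beta * (1.0 - 8.0 * c_1) / 3.0;  // weight of the plaquette staple term
  rect_coeff = -beta * c_1 / 3.0;                // weight of the rectangle staple term
}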
U8* LLVertexBuffer::mapIndexBuffer(S32 access)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
	if (mFinal)
	{
		llerrs << "LLVertexBuffer::mapIndexBuffer() called on a finalized buffer." << llendl;
	}
	if (!useVBOs() && !mMappedData && !mMappedIndexData)
	{
		llerrs << "LLVertexBuffer::mapIndexBuffer() called on unallocated buffer." << llendl;
	}

	if (!mIndexLocked && useVBOs())
	{
		{
			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
			setBuffer(0, TYPE_INDEX);
			mIndexLocked = TRUE;
			stop_glerror();

			if (sDisableVBOMapping)
			{
				allocateClientIndexBuffer();
			}
			else
			{
				mMappedIndexData = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
			}
			stop_glerror();
		}

		if (!mMappedIndexData)
		{
			log_glerror();

			if (!sDisableVBOMapping)
			{
				GLint buff;
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLIndices)
				{
					llerrs << "Invalid GL index buffer bound: " << buff << llendl;
				}

				llerrs << "glMapBuffer returned NULL (no index data)" << llendl;
			}
			else
			{
				llerrs << "Memory allocation for index data failed." << llendl;
			}
		}

		sMappedCount++;
	}

	return mMappedIndexData;
}
// protected, use unref()
//virtual
LLVertexBuffer::~LLVertexBuffer()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_DESTRUCTOR);
	destroyGLBuffer();
	destroyGLIndices();
	sCount--;

	llassert_always(!mMappedData && !mMappedIndexData);
}
#include <iostream>
#include <random>

// Roll two six-sided dice, print them, and return their sum.
inline int shuffle()
{
	std::random_device rnd;
	int v1, v2;

	// First die: its own Mersenne Twister engine, seeded from the random device.
	std::mt19937 mt1;
	mt1.seed(rnd());
	v1 = (mt1() % 6) + 1;

	// Second die: a second engine, seeded independently.
	std::mt19937 mt2;
	mt2.seed(rnd());
	v2 = (mt2() % 6) + 1;

	std::cout << "dice " << v1 << " : " << v2 << std::endl;
	return v1 + v2;
}
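// A minimal alternative sketch (not in the original): a single engine seeded
// once plus std::uniform_int_distribution avoids constructing two mt19937
// instances per call and the slight modulo bias of (mt() % 6) + 1. The function
// name roll_dice is made up here for illustration.
#include <iostream>
#include <random>

inline int roll_dice()
{
	static std::mt19937 mt{std::random_device{}()};  // one engine, seeded once
	std::uniform_int_distribution<int> die(1, 6);     // unbiased values in 1..6

	int v1 = die(mt);
	int v2 = die(mt);
	std::cout << "dice " << v1 << " : " << v2 << std::endl;
	return v1 + v2;
}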
void LLVertexBuffer::setStride(S32 type, S32 new_stride)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_SET_STRIDE);
	if (mNumVerts)
	{
		llerrs << "LLVertexBuffer::setStride called with mNumVerts = " << mNumVerts << llendl;
	}
	// This code assumes that setStride() will only be called once per VBO per type.
	S32 delta = new_stride - sTypeOffsets[type];
	for (S32 i = type+1; i < TYPE_MAX; i++)
	{
		if (mTypeMask & (1<<i))
		{
			mOffsets[i] += delta;
		}
	}
	mStride += delta;
}
void LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_ALLOCATE_BUFFER);

	updateNumVerts(nverts);
	updateNumIndices(nindices);

	if (mMappedData)
	{
		llerrs << "LLVertexBuffer::allocateBuffer() called redundantly." << llendl;
	}

	if (create && (nverts || nindices))
	{
		createGLBuffer();
		createGLIndices();
	}

	sAllocatedBytes += getSize() + getIndicesSize();
}
LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
	LLRefCount(),
	mNumVerts(0), mNumIndices(0),
	mRequestedNumVerts(-1), mRequestedNumIndices(-1),
	mUsage(usage),
	mGLBuffer(0), mGLIndices(0),
	mMappedData(NULL), mMappedIndexData(NULL),
	mVertexLocked(FALSE), mIndexLocked(FALSE),
	mFinal(FALSE), mFilthy(FALSE),
	mEmpty(TRUE), mResized(FALSE),
	mDynamicSize(FALSE)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_CONSTRUCTOR);
	if (!sEnableVBOs)
	{
		mUsage = 0;
	}

	if (mUsage == GL_STREAM_DRAW_ARB && !sUseStreamDraw)
	{
		mUsage = 0;
	}

	S32 stride = calcStride(typemask, mOffsets);

	mTypeMask = typemask;
	mStride = stride;
	sCount++;
}
void LLVertexBuffer::unmapBuffer()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
	if (mMappedData || mMappedIndexData)
	{
		if (useVBOs() && mLocked)
		{
			stop_glerror();
			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
			stop_glerror();
			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
			stop_glerror();

			/*if (!sMapped)
			{
				llerrs << "Redundantly unmapped VBO!" << llendl;
			}
			sMapped = FALSE;*/
			sMappedCount--;

			if (mUsage == GL_STATIC_DRAW_ARB)
			{ //static draw buffers can only be mapped a single time
				//throw out client data (we won't be using it again)
				mEmpty = TRUE;
				mFinal = TRUE;
			}
			else
			{
				mEmpty = FALSE;
			}

			mMappedIndexData = NULL;
			mMappedData = NULL;

			mLocked = FALSE;
		}
	}
}
void LLVertexBuffer::updateNumIndices(S32 nindices)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_UPDATE_INDICES);

	llassert(nindices >= 0);

	mRequestedNumIndices = nindices;

	if (!mDynamicSize)
	{
		mNumIndices = nindices;
	}
	else if (mUsage == GL_STATIC_DRAW_ARB ||
			 nindices > mNumIndices ||
			 nindices < mNumIndices/2)
	{
		if (mUsage != GL_STATIC_DRAW_ARB)
		{
			nindices += nindices/4;
		}
		mNumIndices = nindices;
	}
}
//static
void LLVertexBuffer::cleanupClass()
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_CLEANUP_CLASS);
	unbind();
	clientCopy(); // deletes GL buffers
}
// Map for data access
U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 access)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
	if (mFinal)
	{
		llerrs << "LLVertexBuffer::mapVertexBuffer() called on a finalized buffer." << llendl;
	}
	if (!useVBOs() && !mMappedData && !mMappedIndexData)
	{
		llerrs << "LLVertexBuffer::mapVertexBuffer() called on unallocated buffer." << llendl;
	}

	if (!mVertexLocked && useVBOs())
	{
		{
			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
			setBuffer(0, type);
			mVertexLocked = TRUE;
			stop_glerror();

			if (sDisableVBOMapping)
			{
				allocateClientVertexBuffer();
			}
			else
			{
				mMappedData = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
			}
			stop_glerror();
		}

		if (!mMappedData)
		{
			log_glerror();

			//check the availability of memory
			U32 avail_phy_mem, avail_vir_mem;
			LLMemoryInfo::getAvailableMemoryKB(avail_phy_mem, avail_vir_mem);
			llinfos << "Available physical memory(KB): " << avail_phy_mem << llendl;
			llinfos << "Available virtual memory(KB): " << avail_vir_mem << llendl;

			if (!sDisableVBOMapping)
			{
				//--------------------
				//print out more debug info before crash
				llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl;
				GLint size;
				glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size);
				llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl;
				//--------------------

				GLint buff;
				glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLBuffer)
				{
					llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
				}

				llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl;
			}
			else
			{
				llerrs << "Memory allocation for vertex data failed." << llendl;
			}
		}

		sMappedCount++;
	}

	return mMappedData;
}
void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
{
	llassert(newnverts >= 0);
	llassert(newnindices >= 0);

	mRequestedNumVerts = newnverts;
	mRequestedNumIndices = newnindices;

	LLMemType mt2(LLMemType::MTYPE_VERTEX_RESIZE_BUFFER);
	mDynamicSize = TRUE;
	if (mUsage == GL_STATIC_DRAW_ARB)
	{ //always delete/allocate static buffers on resize
		destroyGLBuffer();
		destroyGLIndices();
		allocateBuffer(newnverts, newnindices, TRUE);
		mFinal = FALSE;
	}
	else if (newnverts > mNumVerts || newnindices > mNumIndices ||
			 newnverts < mNumVerts/2 || newnindices < mNumIndices/2)
	{
		sAllocatedBytes -= getSize() + getIndicesSize();

		S32 oldsize = getSize();
		S32 old_index_size = getIndicesSize();

		updateNumVerts(newnverts);
		updateNumIndices(newnindices);

		S32 newsize = getSize();
		S32 new_index_size = getIndicesSize();

		sAllocatedBytes += newsize + new_index_size;

		if (newsize)
		{
			if (!mGLBuffer)
			{ //no buffer exists, create a new one
				createGLBuffer();
			}
			else
			{
				if (!useVBOs())
				{ //delete old buffer, keep GL buffer for now
					U8* old = mMappedData;
					mMappedData = new U8[newsize];

					if (old)
					{
						memcpy(mMappedData, old, llmin(newsize, oldsize));
						if (newsize > oldsize)
						{
							memset(mMappedData+oldsize, 0, newsize-oldsize);
						}

						delete [] old;
					}
					else
					{
						memset(mMappedData, 0, newsize);
						mEmpty = TRUE;
					}
				}
				mResized = TRUE;
			}
		}
		else if (mGLBuffer)
		{
			destroyGLBuffer();
		}

		if (new_index_size)
		{
			if (!mGLIndices)
			{
				createGLIndices();
			}
			else
			{
				if (!useVBOs())
				{ //delete old buffer, keep GL buffer for now
					U8* old = mMappedIndexData;
					mMappedIndexData = new U8[new_index_size];

					if (old)
					{
						memcpy(mMappedIndexData, old, llmin(new_index_size, old_index_size));
						if (new_index_size > old_index_size)
						{
							memset(mMappedIndexData+old_index_size, 0, new_index_size - old_index_size);
						}

						delete [] old;
					}
					else
					{
						memset(mMappedIndexData, 0, new_index_size);
						mEmpty = TRUE;
					}
				}
				mResized = TRUE;
			}
		}
		else if (mGLIndices)
		{
			destroyGLIndices();
		}
	}

	if (mResized && useVBOs())
	{
		freeClientBuffer();
		setBuffer(0);
	}
}
// Map for data access
U8* LLVertexBuffer::mapBuffer(S32 access)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
	if (mFinal)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called on a finalized buffer." << llendl;
	}
	if (!useVBOs() && !mMappedData && !mMappedIndexData)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called on unallocated buffer." << llendl;
	}

	if (!mLocked && useVBOs())
	{
		{
			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
			setBuffer(0);
			mLocked = TRUE;
			stop_glerror();
			mMappedData = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
			stop_glerror();
		}
		{
			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
			mMappedIndexData = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
			stop_glerror();
		}

		if (!mMappedData)
		{
			//--------------------
			//print out more debug info before crash
			llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl;
			GLint size;
			glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size);
			llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl;
			//--------------------

			GLint buff;
			glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
			if ((GLuint)buff != mGLBuffer)
			{
				llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
			}

			llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl;
		}

		if (!mMappedIndexData)
		{
			GLint buff;
			glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
			if ((GLuint)buff != mGLIndices)
			{
				llerrs << "Invalid GL index buffer bound: " << buff << llendl;
			}

			llerrs << "glMapBuffer returned NULL (no index data)" << llendl;
		}

		sMappedCount++;
	}

	return mMappedData;
}
void LLVertexBuffer::unmapBuffer(S32 type)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
	if (!useVBOs())
	{
		return; //nothing to unmap
	}

	bool updated_all = false;

	if (mMappedData && mVertexLocked && type != TYPE_INDEX)
	{
		updated_all = (mIndexLocked && type < 0); //both vertex and index buffers done updating

		if (sDisableVBOMapping)
		{
			stop_glerror();
			glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
			stop_glerror();
		}
		else
		{
			stop_glerror();
			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
			stop_glerror();

			mMappedData = NULL;
		}

		mVertexLocked = FALSE;
		sMappedCount--;
	}

	if (mMappedIndexData && mIndexLocked && (type < 0 || type == TYPE_INDEX))
	{
		if (sDisableVBOMapping)
		{
			stop_glerror();
			glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
			stop_glerror();
		}
		else
		{
			stop_glerror();
			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
			stop_glerror();

			mMappedIndexData = NULL;
		}

		mIndexLocked = FALSE;
		sMappedCount--;
	}

	if (updated_all)
	{
		if (mUsage == GL_STATIC_DRAW_ARB)
		{
			//static draw buffers can only be mapped a single time
			//throw out client data (we won't be using it again)
			mEmpty = TRUE;
			mFinal = TRUE;

			if (sDisableVBOMapping)
			{
				freeClientBuffer();
			}
		}
		else
		{
			mEmpty = FALSE;
		}
	}
}
// virtual (default)
void LLVertexBuffer::setupVertexBuffer(U32 data_mask) const
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_SETUP_VERTEX_BUFFER);
	stop_glerror();
	U8* base = useVBOs() ? NULL : mMappedData;
	S32 stride = mStride;

	if ((data_mask & mTypeMask) != data_mask)
	{
		llerrs << "LLVertexBuffer::setupVertexBuffer missing required components for supplied data mask." << llendl;
	}

	if (data_mask & MAP_NORMAL)
	{
		glNormalPointer(GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_NORMAL]));
	}
	if (data_mask & MAP_TEXCOORD3)
	{
		glClientActiveTextureARB(GL_TEXTURE3_ARB);
		glTexCoordPointer(2, GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_TEXCOORD3]));
		glClientActiveTextureARB(GL_TEXTURE0_ARB);
	}
	if (data_mask & MAP_TEXCOORD2)
	{
		glClientActiveTextureARB(GL_TEXTURE2_ARB);
		glTexCoordPointer(2, GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_TEXCOORD2]));
		glClientActiveTextureARB(GL_TEXTURE0_ARB);
	}
	if (data_mask & MAP_TEXCOORD1)
	{
		glClientActiveTextureARB(GL_TEXTURE1_ARB);
		glTexCoordPointer(2, GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_TEXCOORD1]));
		glClientActiveTextureARB(GL_TEXTURE0_ARB);
	}
	if (data_mask & MAP_BINORMAL)
	{
		glClientActiveTextureARB(GL_TEXTURE2_ARB);
		glTexCoordPointer(3, GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_BINORMAL]));
		glClientActiveTextureARB(GL_TEXTURE0_ARB);
	}
	if (data_mask & MAP_TEXCOORD0)
	{
		glTexCoordPointer(2, GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_TEXCOORD0]));
	}
	if (data_mask & MAP_COLOR)
	{
		glColorPointer(4, GL_UNSIGNED_BYTE, stride, (void*)(base + mOffsets[TYPE_COLOR]));
	}
	if (data_mask & MAP_WEIGHT)
	{
		glVertexAttribPointerARB(1, 1, GL_FLOAT, FALSE, stride, (void*)(base + mOffsets[TYPE_WEIGHT]));
	}
	if (data_mask & MAP_CLOTHWEIGHT)
	{
		glVertexAttribPointerARB(4, 4, GL_FLOAT, TRUE, stride, (void*)(base + mOffsets[TYPE_CLOTHWEIGHT]));
	}
	if (data_mask & MAP_VERTEX)
	{
		glVertexPointer(3, GL_FLOAT, stride, (void*)(base + 0));
	}

	llglassertok();
}
// Set for rendering
void LLVertexBuffer::setBuffer(U32 data_mask, S32 type)
{
	LLMemType mt2(LLMemType::MTYPE_VERTEX_SET_BUFFER);
	//set up pointers if the data mask is different ...
	BOOL setup = (sLastMask != data_mask);

	if (useVBOs())
	{
		if (mGLBuffer && (mGLBuffer != sGLRenderBuffer || !sVBOActive))
		{
			/*if (sMapped)
			{
				llerrs << "VBO bound while another VBO mapped!" << llendl;
			}*/
			stop_glerror();
			glBindBufferARB(GL_ARRAY_BUFFER_ARB, mGLBuffer);
			stop_glerror();
			sBindCount++;
			sVBOActive = TRUE;
			setup = TRUE; // ... or the bound buffer changed
		}
		if (mGLIndices && (mGLIndices != sGLRenderIndices || !sIBOActive))
		{
			/*if (sMapped)
			{
				llerrs << "VBO bound while another VBO mapped!" << llendl;
			}*/
			stop_glerror();
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, mGLIndices);
			stop_glerror();
			sBindCount++;
			sIBOActive = TRUE;
		}

		BOOL error = FALSE;
		if (gDebugGL)
		{
			GLint buff;
			glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
			if ((GLuint)buff != mGLBuffer)
			{
				if (gDebugSession)
				{
					error = TRUE;
					gFailLog << "Invalid GL vertex buffer bound: " << buff << std::endl;
				}
				else
				{
					llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
				}
			}

			if (mGLIndices)
			{
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLIndices)
				{
					if (gDebugSession)
					{
						error = TRUE;
						gFailLog << "Invalid GL index buffer bound: " << buff << std::endl;
					}
					else
					{
						llerrs << "Invalid GL index buffer bound: " << buff << llendl;
					}
				}
			}
		}

		if (mResized)
		{
			if (gDebugGL)
			{
				GLint buff;
				glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLBuffer)
				{
					if (gDebugSession)
					{
						error = TRUE;
						gFailLog << "Invalid GL vertex buffer bound: " << std::endl;
					}
					else
					{
						llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
					}
				}

				if (mGLIndices != 0)
				{
					glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
					if ((GLuint)buff != mGLIndices)
					{
						if (gDebugSession)
						{
							error = TRUE;
							gFailLog << "Invalid GL index buffer bound: " << std::endl;
						}
						else
						{
							llerrs << "Invalid GL index buffer bound: " << buff << llendl;
						}
					}
				}
			}

			if (mGLBuffer)
			{
				stop_glerror();
				glBufferDataARB(GL_ARRAY_BUFFER_ARB, getSize(), NULL, mUsage);
				stop_glerror();
			}
			if (mGLIndices)
			{
				stop_glerror();
				glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, getIndicesSize(), NULL, mUsage);
				stop_glerror();
			}

			mEmpty = TRUE;
			mResized = FALSE;

			if (data_mask != 0)
			{
				if (gDebugSession)
				{
					error = TRUE;
					gFailLog << "Buffer set for rendering before being filled after resize." << std::endl;
				}
				else
				{
					llerrs << "Buffer set for rendering before being filled after resize." << llendl;
				}
			}
		}

		if (error)
		{
			ll_fail("LLVertexBuffer::mapBuffer failed");
		}

		unmapBuffer(type);
	}
	else
	{
		if (mGLBuffer)
		{
			if (sVBOActive)
			{
				glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
				sBindCount++;
				sVBOActive = FALSE;
				setup = TRUE; // ... or a VBO is deactivated
			}
			if (sGLRenderBuffer != mGLBuffer)
			{
				setup = TRUE; // ... or a client memory pointer changed
			}
		}
		if (mGLIndices && sIBOActive)
		{
			/*if (sMapped)
			{
				llerrs << "VBO unbound while potentially mapped!" << llendl;
			}*/
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
			sBindCount++;
			sIBOActive = FALSE;
		}
	}

	setupClientArrays(data_mask);

	if (mGLIndices)
	{
		sGLRenderIndices = mGLIndices;
	}
	if (mGLBuffer)
	{
		sGLRenderBuffer = mGLBuffer;
		if (data_mask && setup)
		{
			setupVertexBuffer(data_mask); // subclass specific setup (virtual function)
			sSetCount++;
		}
	}
}
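// A hypothetical usage sketch (not from the source) tying together the member
// functions shown in this excerpt: allocate, map, fill, unmap, then bind for
// rendering. It uses only calls that appear above; the attribute constants,
// the fill helpers, and the calling context are assumptions, and a real caller
// would normally follow setBuffer() with an actual draw call.
void example_fill_and_bind(S32 num_verts, S32 num_indices)
{
	LLPointer<LLVertexBuffer> vb =
		new LLVertexBuffer(LLVertexBuffer::MAP_VERTEX, GL_STATIC_DRAW_ARB);

	vb->allocateBuffer(num_verts, num_indices, TRUE);     // creates GL objects when create == TRUE

	U8* verts = vb->mapVertexBuffer(LLVertexBuffer::TYPE_VERTEX, 0); // map vertex storage for writing
	U8* inds  = vb->mapIndexBuffer(0);                               // map index storage for writing

	// ... write vertex and index data through verts / inds here ...
	(void)verts; (void)inds;

	vb->unmapBuffer(-1);                 // type < 0: flush both vertex and index data
	vb->setBuffer(LLVertexBuffer::MAP_VERTEX); // bind and set up pointers for rendering
}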