GLES2DefaultHardwareUniformBuffer::GLES2DefaultHardwareUniformBuffer(size_t bufferSize,
                                                                     HardwareBuffer::Usage usage,
                                                                     bool useShadowBuffer,
                                                                     const String& name)
    : HardwareUniformBuffer(0, bufferSize, usage, useShadowBuffer, name)
{
    mData = static_cast<unsigned char*>(OGRE_MALLOC_SIMD(mSizeInBytes, MEMCATEGORY_GEOMETRY));
}
//-----------------------------------------------------------------------
DefaultHardwareVertexBuffer::DefaultHardwareVertexBuffer(HardwareBufferManagerBase* mgr,
                                                         size_t vertexSize,
                                                         size_t numVertices,
                                                         HardwareBuffer::Usage usage)
    : HardwareVertexBuffer(mgr, vertexSize, numVertices, usage, true, false) // always software, never shadowed
{
    // Allocate aligned memory so the buffer is SIMD-friendly.
    mData = static_cast<unsigned char*>(OGRE_MALLOC_SIMD(mSizeInBytes, MEMCATEGORY_GEOMETRY));
}
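All of these system-memory buffers pair the constructor's OGRE_MALLOC_SIMD with an OGRE_FREE_SIMD
in the destructor, using the same memory category. A minimal sketch of the matching destructor
(the same one-liner recurs, with the class name changed, across the default/GL/GLES variants):

//-----------------------------------------------------------------------
DefaultHardwareVertexBuffer::~DefaultHardwareVertexBuffer()
{
    // Release the SIMD-aligned block with the matching category. A plain delete[] (or a free
    // with a different category) would bypass the aligned allocator's bookkeeping.
    OGRE_FREE_SIMD(mData, MEMCATEGORY_GEOMETRY);
}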
GLESDefaultHardwareVertexBuffer::GLESDefaultHardwareVertexBuffer(size_t vertexSize,
                                                                 size_t numVertices,
                                                                 HardwareBuffer::Usage usage)
    : HardwareVertexBuffer(0, vertexSize, numVertices, usage, true, false)
{
    mData = static_cast<unsigned char*>(OGRE_MALLOC_SIMD(mSizeInBytes, MEMCATEGORY_GEOMETRY));
}
NULLStagingBuffer::NULLStagingBuffer( size_t internalBufferStart, size_t sizeBytes,
                                      VaoManager *vaoManager, bool uploadOnly ) :
    StagingBuffer( internalBufferStart, sizeBytes, vaoManager, uploadOnly ),
    mMappedPtr( 0 ),
    mNullDataPtr( 0 )
{
    mNullDataPtr = reinterpret_cast<uint8*>( OGRE_MALLOC_SIMD( sizeBytes,
                                                               MEMCATEGORY_RENDERSYS ) );
}
ObjectMemoryManager::ObjectMemoryManager() :
    mTotalObjects( 0 ),
    mDummyNode( 0 ),
    mDummyObject( 0 ),
    mMemoryManagerType( SCENE_DYNAMIC ),
    mTwinMemoryManager( 0 )
{
    //Manually allocate the memory for the dummy scene nodes (since we can't pass ourselves
    //or yet another object). We only allocate what's needed to prevent access violations.
    /*mDummyTransformPtrs.mPosition = reinterpret_cast<ArrayVector3*>( OGRE_MALLOC_SIMD(
                                        sizeof( ArrayVector3 ), MEMCATEGORY_SCENE_OBJECTS ) );
    mDummyTransformPtrs.mOrientation = reinterpret_cast<ArrayQuaternion*>( OGRE_MALLOC_SIMD(
                                        sizeof( ArrayQuaternion ), MEMCATEGORY_SCENE_OBJECTS ) );
    mDummyTransformPtrs.mScale = reinterpret_cast<ArrayVector3*>( OGRE_MALLOC_SIMD(
                                        sizeof( ArrayVector3 ), MEMCATEGORY_SCENE_OBJECTS ) );*/

    mDummyTransformPtrs.mDerivedPosition    = reinterpret_cast<ArrayVector3*>( OGRE_MALLOC_SIMD(
                                                sizeof( ArrayVector3 ), MEMCATEGORY_SCENE_OBJECTS ) );
    mDummyTransformPtrs.mDerivedOrientation = reinterpret_cast<ArrayQuaternion*>( OGRE_MALLOC_SIMD(
                                                sizeof( ArrayQuaternion ), MEMCATEGORY_SCENE_OBJECTS ) );
    mDummyTransformPtrs.mDerivedScale       = reinterpret_cast<ArrayVector3*>( OGRE_MALLOC_SIMD(
                                                sizeof( ArrayVector3 ), MEMCATEGORY_SCENE_OBJECTS ) );
    mDummyTransformPtrs.mDerivedTransform   = reinterpret_cast<Matrix4*>( OGRE_MALLOC_SIMD(
                                                sizeof( Matrix4 ) * ARRAY_PACKED_REALS,
                                                MEMCATEGORY_SCENE_OBJECTS ) );
    /*mDummyTransformPtrs.mInheritOrientation= OGRE_MALLOC_SIMD( sizeof( bool ) * ARRAY_PACKED_REALS,
                                                                 MEMCATEGORY_SCENE_OBJECTS );
    mDummyTransformPtrs.mInheritScale       = OGRE_MALLOC_SIMD( sizeof( bool ) * ARRAY_PACKED_REALS,
                                                                 MEMCATEGORY_SCENE_OBJECTS );*/

    *mDummyTransformPtrs.mDerivedPosition       = ArrayVector3::ZERO;
    *mDummyTransformPtrs.mDerivedOrientation    = ArrayQuaternion::IDENTITY;
    *mDummyTransformPtrs.mDerivedScale          = ArrayVector3::UNIT_SCALE;
    for( size_t i=0; i<ARRAY_PACKED_REALS; ++i )
        mDummyTransformPtrs.mDerivedTransform[i] = Matrix4::IDENTITY;

    mDummyNode = new SceneNode( mDummyTransformPtrs );
    mDummyObject = new NullEntity();
}
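The matching teardown releases the dummy objects first and then the transform blocks. This is a
sketch of how the destructor pairs those allocations with OGRE_FREE_SIMD; the real destructor
also cleans up its per-render-queue ArrayMemoryManagers, which is omitted here:

ObjectMemoryManager::~ObjectMemoryManager()
{
    // Destroy the dummy node/object before freeing the transform memory they point into.
    delete mDummyNode;
    delete mDummyObject;
    mDummyNode = 0;
    mDummyObject = 0;

    OGRE_FREE_SIMD( mDummyTransformPtrs.mDerivedPosition,    MEMCATEGORY_SCENE_OBJECTS );
    OGRE_FREE_SIMD( mDummyTransformPtrs.mDerivedOrientation, MEMCATEGORY_SCENE_OBJECTS );
    OGRE_FREE_SIMD( mDummyTransformPtrs.mDerivedScale,       MEMCATEGORY_SCENE_OBJECTS );
    OGRE_FREE_SIMD( mDummyTransformPtrs.mDerivedTransform,   MEMCATEGORY_SCENE_OBJECTS );
}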
void GfxBody::reinitialise (void)
{
    APP_ASSERT(mesh->isLoaded());

    destroyGraphics();

    for (unsigned short i = 0; i < mesh->getNumSubMeshes(); ++i) {
        Ogre::SubMesh *sm = mesh->getSubMesh(i);
        Sub* sub = new Sub(this, sm);
        subList.push_back(sub);
        GFX_MAT_SYNC;
        std::string matname = apply_map(initialMaterialMap, sm->getMaterialName());
        if (!gfx_material_has(matname)) {
            CERR << "Mesh \"/"<<mesh->getName()<<"\" references non-existing material "
                 << "\""<<matname<<"\""<<std::endl;
            matname = "/system/FallbackMaterial";
        }
        sub->material = gfx_material_get(matname);
    }

    if (!mesh->getSkeleton().isNull()) {
        skeleton = OGRE_NEW Ogre::SkeletonInstance(mesh->getSkeleton());
        skeleton->load();
        numBoneMatrixes = skeleton->getNumBones();
        boneMatrixes      = static_cast<Ogre::Matrix4*>(OGRE_MALLOC_SIMD(sizeof(Ogre::Matrix4) * numBoneMatrixes,
                                                                         Ogre::MEMCATEGORY_ANIMATION));
        boneWorldMatrixes = static_cast<Ogre::Matrix4*>(OGRE_MALLOC_SIMD(sizeof(Ogre::Matrix4) * numBoneMatrixes,
                                                                         Ogre::MEMCATEGORY_ANIMATION));
        mesh->_initAnimationState(&animationState);
    } else {
        skeleton = NULL;
        numBoneMatrixes = 0;
        boneMatrixes = NULL;
        boneWorldMatrixes = NULL;
    }

    updateBones();
}
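destroyGraphics() is called above but its body is not shown. A hedged sketch of the part that
would release these bone-matrix allocations (the structure below is assumed for illustration,
not taken from the original source):

// Hypothetical sketch: the skeleton-related cleanup inside GfxBody::destroyGraphics(),
// assuming boneMatrixes/boneWorldMatrixes were allocated with OGRE_MALLOC_SIMD as above.
if (skeleton != NULL) {
    OGRE_DELETE skeleton;
    skeleton = NULL;
    OGRE_FREE_SIMD(boneMatrixes,      Ogre::MEMCATEGORY_ANIMATION);
    OGRE_FREE_SIMD(boneWorldMatrixes, Ogre::MEMCATEGORY_ANIMATION);
    boneMatrixes = NULL;
    boneWorldMatrixes = NULL;
    numBoneMatrixes = 0;
}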
//void _getBoneMatrices(Matrix4* pMatrices);
void skeleton__get_bone_matrices(SkeletonHandle handle, coiMatrix4* matrices[])
{
    Ogre::Skeleton* skeleton = static_cast<Ogre::Skeleton*>(handle);

    // Borrowed from OgreEntity.cpp
    unsigned short num = skeleton->getNumBones();
    Ogre::Matrix4* BoneMatrices = static_cast<Ogre::Matrix4*>(
        OGRE_MALLOC_SIMD(sizeof(Ogre::Matrix4) * num, Ogre::MEMCATEGORY_ANIMATION));
    skeleton->_getBoneMatrices(BoneMatrices);

    for (unsigned short current = 0; current != num; ++current)
    {
        // Convert this bone's matrix to the C-API representation.
        Ogre::Matrix4 o_matrix = BoneMatrices[current];
        ogre_matrix4_to_llcoi_matrix4(o_matrix, *matrices[current]);
    }

    // The temporary buffer was allocated above and is not kept, so free it here.
    OGRE_FREE_SIMD(BoneMatrices, Ogre::MEMCATEGORY_ANIMATION);
}
//-----------------------------------------------------------------------
void InstancedEntity::createSkeletonInstance()
{
    //Is mesh skeletally animated?
    if( mBatchOwner->_getMeshRef()->hasSkeleton() &&
        !mBatchOwner->_getMeshRef()->getSkeleton().isNull() &&
        mBatchOwner->_supportsSkeletalAnimation() )
    {
        mSkeletonInstance = OGRE_NEW SkeletonInstance( mBatchOwner->_getMeshRef()->getSkeleton() );
        mSkeletonInstance->load();

        mBoneMatrices = static_cast<Matrix4*>(OGRE_MALLOC_SIMD( sizeof(Matrix4) *
                                                                mSkeletonInstance->getNumBones(),
                                                                MEMCATEGORY_ANIMATION));
        if (mBatchOwner->useBoneWorldMatrices())
        {
            mBoneWorldMatrices = static_cast<Matrix4*>(OGRE_MALLOC_SIMD( sizeof(Matrix4) *
                                                                         mSkeletonInstance->getNumBones(),
                                                                         MEMCATEGORY_ANIMATION));
        }

        mAnimationState = OGRE_NEW AnimationStateSet();
        mBatchOwner->_getMeshRef()->_initAnimationState( mAnimationState );
    }
}
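Ogre pairs this with InstancedEntity::destroySkeletonInstance(). A minimal sketch of that release
path (the real method also unlinks instances that share this skeleton, which is omitted here):

//-----------------------------------------------------------------------
void InstancedEntity::destroySkeletonInstance()
{
    if (mSkeletonInstance)
    {
        OGRE_DELETE mSkeletonInstance;
        OGRE_DELETE mAnimationState;
        OGRE_FREE_SIMD( mBoneMatrices, MEMCATEGORY_ANIMATION );
        OGRE_FREE_SIMD( mBoneWorldMatrices, MEMCATEGORY_ANIMATION );

        mSkeletonInstance   = 0;
        mAnimationState     = 0;
        mBoneMatrices       = 0;
        mBoneWorldMatrices  = 0;
    }
}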
NULLVaoManager::NULLVaoManager() :
    mDrawId( 0 )
{
    mConstBufferAlignment   = 256;
    mTexBufferAlignment     = 256;

    mConstBufferMaxSize = 64 * 1024;        //64kb
    mTexBufferMaxSize   = 128 * 1024 * 1024;//128MB

    mSupportsPersistentMapping  = true;
    mSupportsIndirectBuffers    = false;

    mDynamicBufferMultiplier = 1;

    VertexElement2Vec vertexElements;
    vertexElements.push_back( VertexElement2( VET_UINT1, VES_COUNT ) );
    uint32 *drawIdPtr = static_cast<uint32*>( OGRE_MALLOC_SIMD( 4096 * sizeof(uint32),
                                                                MEMCATEGORY_GEOMETRY ) );
    for( uint32 i=0; i<4096; ++i )
        drawIdPtr[i] = i;
    mDrawId = createVertexBuffer( vertexElements, 4096, BT_IMMUTABLE, drawIdPtr, true );
}
//-----------------------------------------------------------------------
GLDefaultHardwareVertexBuffer::GLDefaultHardwareVertexBuffer(HardwareBufferManagerBase* mgr,
                                                             size_t vertexSize,
                                                             size_t numVertices,
                                                             HardwareBuffer::Usage usage)
    : HardwareVertexBuffer(mgr, vertexSize, numVertices, usage, true, false) // always software, never shadowed
{
    mData = static_cast<unsigned char*>(OGRE_MALLOC_SIMD(mSizeInBytes, MEMCATEGORY_GEOMETRY));
}
//-----------------------------------------------------------------------
DefaultHardwareCounterBuffer::DefaultHardwareCounterBuffer(HardwareBufferManagerBase* mgr,
                                                           size_t sizeBytes,
                                                           HardwareBuffer::Usage usage,
                                                           bool useShadowBuffer,
                                                           const String& name)
    : HardwareCounterBuffer(mgr, sizeBytes, usage, useShadowBuffer, name)
{
    // Allocate aligned memory so the buffer is SIMD-friendly.
    mData = static_cast<unsigned char*>(OGRE_MALLOC_SIMD(mSizeInBytes, MEMCATEGORY_GEOMETRY));
}
GL3PlusDefaultHardwareCounterBuffer::GL3PlusDefaultHardwareCounterBuffer(HardwareBufferManagerBase* mgr,
                                                                         const String& name)
    : HardwareCounterBuffer(mgr, sizeof(GLuint), HardwareBuffer::HBU_DYNAMIC, false, name)
{
    mData = static_cast<unsigned char*>(OGRE_MALLOC_SIMD(mSizeInBytes, MEMCATEGORY_GEOMETRY));
}
//-----------------------------------------------------------------------------------
void WireAabb::createBuffers(void)
{
    const float c_vertexData[8*3] =
    {
        -1, -1,  1,
         1, -1,  1,
         1,  1,  1,
        -1,  1,  1,
        -1, -1, -1,
         1, -1, -1,
         1,  1, -1,
        -1,  1, -1
    };

    //Create the indices.
    const Ogre::uint16 c_indexData[2 * 4 * 3] =
    {
        0, 1,   1, 2,   2, 3,   3, 0, //Front
        4, 5,   5, 6,   6, 7,   7, 4, //Back
        0, 4,   1, 5,   2, 6,   3, 7
    };

    Ogre::uint16 *cubeIndices = reinterpret_cast<Ogre::uint16*>( OGRE_MALLOC_SIMD(
                                                                     sizeof(Ogre::uint16) * 2 * 4 * 3,
                                                                     Ogre::MEMCATEGORY_GEOMETRY ) );
    memcpy( cubeIndices, c_indexData, sizeof( c_indexData ) );

    VaoManager *vaoManager = mManager->getDestinationRenderSystem()->getVaoManager();
    Ogre::IndexBufferPacked *indexBuffer = 0;

    try
    {
        indexBuffer = vaoManager->createIndexBuffer( Ogre::IndexBufferPacked::IT_16BIT,
                                                     2 * 4 * 3,
                                                     Ogre::BT_IMMUTABLE,
                                                     cubeIndices, true );
    }
    catch( Ogre::Exception &e )
    {
        // When keepAsShadow = true, the memory will be freed when the index buffer is destroyed.
        // However if for some weird reason there is an exception raised, the memory will
        // not be freed, so it is up to us to do so.
        // The reasons for exceptions are very rare. But we're doing this for correctness.
        OGRE_FREE_SIMD( cubeIndices, Ogre::MEMCATEGORY_GEOMETRY );
        cubeIndices = 0;
        throw e;
    }

    //Create the vertex buffer

    //Vertex declaration
    VertexElement2Vec vertexElements;
    vertexElements.push_back( VertexElement2( VET_FLOAT3, VES_POSITION ) );

    //For immutable buffers, it is mandatory that cubeVertices is not a null pointer.
    float *cubeVertices = reinterpret_cast<float*>( OGRE_MALLOC_SIMD( sizeof(float) * 8 * 3,
                                                                      Ogre::MEMCATEGORY_GEOMETRY ) );
    //Fill the data.
    memcpy( cubeVertices, c_vertexData, sizeof(float) * 8 * 3 );

    Ogre::VertexBufferPacked *vertexBuffer = 0;
    try
    {
        //Create the actual vertex buffer.
        vertexBuffer = vaoManager->createVertexBuffer( vertexElements, 8,
                                                       BT_IMMUTABLE,
                                                       cubeVertices, true );
    }
    catch( Ogre::Exception &e )
    {
        OGRE_FREE_SIMD( cubeVertices, Ogre::MEMCATEGORY_GEOMETRY );
        cubeVertices = 0;
        throw e;
    }

    //Now the Vao. We'll just use one vertex buffer source
    VertexBufferPackedVec vertexBuffers;
    vertexBuffers.push_back( vertexBuffer );
    Ogre::VertexArrayObject *vao = vaoManager->createVertexArrayObject(
                                       vertexBuffers, indexBuffer, OT_LINE_LIST );

    mVaoPerLod[0].push_back( vao );
    mVaoPerLod[1].push_back( vao );
}
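A hedged sketch of the matching teardown. The VaoManager calls below are standard Ogre 2.x API,
but the surrounding method name and structure are assumed here; with keepAsShadow = true,
destroying the buffers through the VaoManager also frees the OGRE_MALLOC_SIMD shadow copies:

//-----------------------------------------------------------------------------------
void WireAabb::destroyBuffers(void)
{
    VaoManager *vaoManager = mManager->getDestinationRenderSystem()->getVaoManager();

    if( !mVaoPerLod[0].empty() )
    {
        VertexArrayObject *vao = mVaoPerLod[0].back();

        // Grab the packed buffers before the vao itself goes away.
        VertexBufferPackedVec vertexBuffers = vao->getVertexBuffers();
        IndexBufferPacked *indexBuffer = vao->getIndexBuffer();

        vaoManager->destroyVertexArrayObject( vao );

        // keepAsShadow was true, so destroying the buffers also releases the
        // cubeVertices / cubeIndices blocks allocated with OGRE_MALLOC_SIMD.
        VertexBufferPackedVec::const_iterator itor = vertexBuffers.begin();
        VertexBufferPackedVec::const_iterator end  = vertexBuffers.end();
        while( itor != end )
            vaoManager->destroyVertexBuffer( *itor++ );

        if( indexBuffer )
            vaoManager->destroyIndexBuffer( indexBuffer );

        mVaoPerLod[0].clear();
        mVaoPerLod[1].clear();
    }
}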