Example #1
void Outlines::GetFaceNormals(ID3DXMesh* mesh,
			ID3DXBuffer* adj,
			D3DXVECTOR3* currentfacenormal,
			D3DXVECTOR3 adjfacenormals[3],
			DWORD faceindex)
{
	MeshVertex* v = 0;
	mesh->LockVertexBuffer(0, (void**)&v);

	WORD* in = 0;
	mesh->LockIndexBuffer(0, (void**)&in);

	DWORD* a = (DWORD*)adj->GetBufferPointer();

	//
	// Get the face normal.
	GetFaceNormal(mesh, faceindex, currentfacenormal);

	//
	// Get adjacent face indices
	WORD faceindexA = WORD(a[faceindex * 3]);
	WORD faceindexB = WORD(a[faceindex * 3 + 1]);
	WORD faceindexC = WORD(a[faceindex * 3 + 2]);
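	// Note: adjacency entries are DWORDs; an entry of -1 (0xFFFFFFFF) truncates
	// to 0xFFFF when cast to WORD, so the USHRT_MAX comparisons below still
	// detect "no adjacent face" (assuming the mesh has fewer than 0xFFFF faces).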

	//
	// Get adjacent face normals.  If there is no adjacent face,
	// then set the adjacent face normal to the opposite of
	// "currentfacenormal".  Recall we do this because edges that
	// don't have an adjacent triangle are automatically considered
	// silhouette edges, and in order to make that happen, we need
	// the current face normal and adjacent face normal to point
	// in opposite directions.  Also, recall that an entry
	// in the adjacency buffer equal to -1 denotes that the edge
	// doesn't have an adjacent triangle.

	D3DXVECTOR3 facenormalA, facenormalB, facenormalC;
    
	if( faceindexA != USHRT_MAX ) // is there an adjacent triangle?
	{
		WORD iA = in[faceindexA * 3];
		WORD iB = in[faceindexA * 3 + 1];
		WORD iC = in[faceindexA * 3 + 2];

		D3DXVECTOR3 vA = v[iA].pos;
		D3DXVECTOR3 vB = v[iB].pos;
		D3DXVECTOR3 vC = v[iC].pos;

		D3DXVECTOR3 edgeA = vB - vA;
		D3DXVECTOR3 edgeB = vC - vA;
		D3DXVec3Cross(&facenormalA, &edgeA, &edgeB);
		D3DXVec3Normalize(&facenormalA, &facenormalA);
	}
	else
	{
		facenormalA = -(*currentfacenormal);
	}

	if( faceindexB != USHRT_MAX ) // is there an adjacent triangle?
	{
		WORD iA = in[faceindexB * 3];
		WORD iB = in[faceindexB * 3 + 1];
		WORD iC = in[faceindexB * 3 + 2];

		D3DXVECTOR3 vA = v[iA].pos;
		D3DXVECTOR3 vB = v[iB].pos;
		D3DXVECTOR3 vC = v[iC].pos;

		D3DXVECTOR3 edgeA = vB - vA;
		D3DXVECTOR3 edgeB = vC - vA;
		D3DXVec3Cross(&facenormalB, &edgeA, &edgeB);
		D3DXVec3Normalize(&facenormalB, &facenormalB);
	}
	else
	{
		facenormalB = -(*currentfacenormal);
	}

	if( faceindexC != USHRT_MAX ) // is there an adjacent triangle?
	{
		WORD iA = in[faceindexC * 3];
		WORD iB = in[faceindexC * 3 + 1];
		WORD iC = in[faceindexC * 3 + 2];

		D3DXVECTOR3 vA = v[iA].pos;
		D3DXVECTOR3 vB = v[iB].pos;
		D3DXVECTOR3 vC = v[iC].pos;

		D3DXVECTOR3 edgeA = vB - vA;
		D3DXVECTOR3 edgeB = vC - vA;
		D3DXVec3Cross(&facenormalC, &edgeA, &edgeB);
		D3DXVec3Normalize(&facenormalC, &facenormalC);
	}
	else
	{
		facenormalC = -(*currentfacenormal);
	}

	// save adjacent face normals
	adjfacenormals[0] = facenormalA;
	adjfacenormals[1] = facenormalB;
	adjfacenormals[2] = facenormalC;

	mesh->UnlockVertexBuffer();
	mesh->UnlockIndexBuffer();
}
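A minimal sketch of how the normals produced by GetFaceNormals above are typically consumed for silhouette detection (this helper is not part of the sample; its name and the toEye parameter are illustrative): an edge lies on the silhouette when its two faces point to opposite sides of the view vector.

bool IsSilhouetteEdge(const D3DXVECTOR3& currentFaceNormal,
                      const D3DXVECTOR3& adjFaceNormal,
                      const D3DXVECTOR3& toEye) // vector from a point on the edge toward the eye
{
	float d0 = D3DXVec3Dot(&currentFaceNormal, &toEye);
	float d1 = D3DXVec3Dot(&adjFaceNormal, &toEye);

	// Opposite signs mean one face is front-facing and the other back-facing.
	// For open edges GetFaceNormals stored -currentFaceNormal, so d1 == -d0 and
	// the test always succeeds, which is exactly the intended behavior.
	return (d0 * d1) < 0.0f;
}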
Example #2
//*****************************************************************************
// Screen-space metric.  The trick is getting a good delta.
IFXAdaptiveMetric::Action IFXScreenSpaceMetric::ScreenSpace(IFXTQTTriangle *pTriangle,
															IFXTQTVertex **ppVertex)
{

	//---------------------------------------------------------
	// Find face normal for screen space and back-facing stages
	IFXVector3 faceNormal;
	if (!GetFaceNormal(pTriangle,ppVertex,faceNormal))
		return IFXAdaptiveMetric::Sustain;
	//---------------------------------------------------------

	//=========================================================
	// Second Metric Stage: consolidate back-facing triangles

	// Note that a sustain is needed for triangles that are
	// nearly visible, to prevent flicker.  This is especially
	// important because these will be silhouette triangles.
	// Note 2: with access to the parent triangle, the sustain
	// isn't as necessary.

	F32 backface = -m_zdir.DotProduct(faceNormal);
	IFXASSERT( (backface > -1.001) && (backface < 1.001));
	IFXAdaptiveMetric::Action returnvalue = IFXAdaptiveMetric::Subdivide;

	if (backface > SLIGHT_BACKFACE) {
		if (backface > VERY_BACKFACE) {
			IFXTQTTriangle *pParent = pTriangle->GetParentTriangle();
			if (pParent) {
				// Geometric error doesn't work for a truly back-facing triangle,
				// so only consolidate this back-facing triangle if the parent was also
				// back-facing; otherwise we will thrash.
				pParent->GetVertices (&ppVertex[IFXTQTAddress::Left],
					&ppVertex[IFXTQTAddress::Base],
					&ppVertex[IFXTQTAddress::Right]);
				if (!GetFaceNormal(pParent,ppVertex,faceNormal))
					return IFXAdaptiveMetric::Sustain;

				backface = -m_zdir.DotProduct(faceNormal);
				// if the parent was back-facing too, then consolidate it.
				if (backface > SLIGHT_BACKFACE)
					return IFXAdaptiveMetric::Consolidate;
				// fail the above check and fall through to "sustain" below.
			} // end if parent
		} // end if very backfacing triangle
		// Instead of returning sustain here (thereby sustaining every slightly backfacing triangle)
		// allow the geometric error evaluation an opportunity to consolidate it.
		returnvalue = IFXAdaptiveMetric::Sustain;
	}
	//---------------------------------------------------------

	//=========================================================
	// Third Metric Stage:  evaluate Hoppe's metric.

	//---------------------------------------------------------
	// calculate "delta," the deviation of this triangle from its potential
	F32 delsquared = pTriangle->GetErrorMeasure();

	// see if the triangle already has a defined error term.
	// the error can't be negative, so if it is, it hasn't been initialized yet
	if (delsquared < 0) {
		delsquared = 0;

		/// @todo: these should already be normalized?
		ppVertex[IFXTQTAddress::Left]->m_normal.Normalize();
		ppVertex[IFXTQTAddress::Right]->m_normal.Normalize();
		ppVertex[IFXTQTAddress::Base]->m_normal.Normalize();

		IFXVector3 center;
		FindTriangleCenter(center,ppVertex);

		IFXVector3 crnrvect;
		F32 hypotenuse, cosangle;

		// use the difference of the vertex normals from the face normal,
		// and the distance of the vertex from the center, to predict error
		for (I32 i = 0; i < 3; i++) {
			IFXTQTVertex *pVertex = ppVertex[IFXTQTAddress::Direction(i)];

			cosangle = faceNormal.DotProduct(pVertex->m_normal);
			if (cosangle > 0) {
				crnrvect.Subtract(pVertex->m_position,center);
				hypotenuse = crnrvect.CalcMagnitude() / cosangle;
				crnrvect.CrossProduct(faceNormal,pVertex->m_normal);
				delsquared += hypotenuse * crnrvect.CalcMagnitude();
			}
		}
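		// Each accumulated term equals (|p_i - center| / cos(theta_i)) * sin(theta_i)
		// = |p_i - center| * tan(theta_i), where theta_i is the angle between the
		// face normal and the vertex normal (both unit length), i.e. an estimate of
		// how far the curved surface can bow away from this flat triangle at corner i.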

		// normalize the cumulative error
		delsquared *= 0.33333333333f;

		// remember this error term for future evaluation
		pTriangle->SetErrorMeasure(delsquared);
	}
	//---------------------------------------------------------

	//===================================================================
	// This is the heart of the metric: the geometric error calculation
	F32 leftmetric = 0, rightmetric = 0;
	EvaluateGeometricError(pTriangle,ppVertex,faceNormal,delsquared,&leftmetric,&rightmetric);
	//===================================================================

	//---------------------------------------------------------
	// Now, make thresholds for subdivide, sustain, or consolidate

	// if the left side exceeds the right, the triangle has
	// too much "geometric error" and should be subdivided.
	if (leftmetric >= rightmetric)

		// note that if the triangle failed the "slightly backfacing" test
		// above, then the best we can do is sustain.
		return returnvalue;

	// Sustain threshold.
	// As this parameter approaches 1, the metric becomes unstable;
	// however, if it is too small, too many subdivisions are preserved.
	else if (leftmetric >= (SCREENSPACE_THRESHOLD * rightmetric))
		return IFXAdaptiveMetric::Sustain;

	else { // the triangle is a consolidation candidate.
		// Only consolidate if the parent does not need to be subdivided,
		// so evaluate the metric with the parent triangle.
		IFXTQTTriangle *pParent = pTriangle->GetParentTriangle();
		if (pParent) {
			delsquared = pParent->GetErrorMeasure();

			pParent->GetVertices (&ppVertex[IFXTQTAddress::Left],
				&ppVertex[IFXTQTAddress::Base], &ppVertex[IFXTQTAddress::Right]);
			if (!GetFaceNormal(pParent,ppVertex,faceNormal))
				return IFXAdaptiveMetric::Sustain;

			EvaluateGeometricError(pParent,ppVertex,faceNormal,delsquared,&leftmetric,&rightmetric);

			if (leftmetric < rightmetric) {
				return IFXAdaptiveMetric::Consolidate;
			}
		}
		return IFXAdaptiveMetric::Sustain;
	}
}
Example #3
void SilhouetteEdges::GetFaceNormals(ID3DXMesh*   mesh,
								     ID3DXBuffer* adjBuffer,
									 D3DXVECTOR3* currentFaceNormal,
									 D3DXVECTOR3  adjFaceNormals[3],
									 DWORD        faceIndex)

{
	MeshVertex* vertices = 0;
	mesh->LockVertexBuffer(0, (void**)&vertices);

	WORD* indices = 0;
	mesh->LockIndexBuffer(0, (void**)&indices);

	DWORD* adj = (DWORD*)adjBuffer->GetBufferPointer();

	//
	// Get the face normal.
	GetFaceNormal(mesh, faceIndex, currentFaceNormal);

	//
	// Get adjacent face indices
	WORD faceIndex0 = (WORD)adj[faceIndex * 3];
	WORD faceIndex1 = (WORD)adj[faceIndex * 3 + 1];
	WORD faceIndex2 = (WORD)adj[faceIndex * 3 + 2];

	//
	// Get adjacent face normals.  If there is no adjacent face,
	// then set the adjacent face normal to the opposite of the
	// "currentFaceNormal".  Recall we do this because edges that
	// don't have an adjacent triangle are automatically considered
	// silhouette edges, and in order to make that happen, we need
	// the current face normal and adjacent face normal to point
	// in opposite directions.  Also, recall that an entry
	// in the adjacency buffer equal to -1 denotes that the edge
	// doesn't have an adjacent triangle.

	D3DXVECTOR3 faceNormal0, faceNormal1, faceNormal2;
    
	if( faceIndex0 != USHRT_MAX ) // is there an adjacent triangle?
	{
		WORD i0 = indices[faceIndex0 * 3];
		WORD i1 = indices[faceIndex0 * 3 + 1];
		WORD i2 = indices[faceIndex0 * 3 + 2];

		D3DXVECTOR3 v0 = vertices[i0].position;
		D3DXVECTOR3 v1 = vertices[i1].position;
		D3DXVECTOR3 v2 = vertices[i2].position;

		D3DXVECTOR3 edge0 = v1 - v0;
		D3DXVECTOR3 edge1 = v2 - v0;
		D3DXVec3Cross(&faceNormal0, &edge0, &edge1);
		D3DXVec3Normalize(&faceNormal0, &faceNormal0);
	}
	else
	{
		faceNormal0 = -(*currentFaceNormal);
	}

	if( faceIndex1 != USHRT_MAX ) // is there an adjacent triangle?
	{
		WORD i0 = indices[faceIndex1 * 3];
		WORD i1 = indices[faceIndex1 * 3 + 1];
		WORD i2 = indices[faceIndex1 * 3 + 2];

		D3DXVECTOR3 v0 = vertices[i0].position;
		D3DXVECTOR3 v1 = vertices[i1].position;
		D3DXVECTOR3 v2 = vertices[i2].position;

		D3DXVECTOR3 edge0 = v1 - v0;
		D3DXVECTOR3 edge1 = v2 - v0;
		D3DXVec3Cross(&faceNormal1, &edge0, &edge1);
		D3DXVec3Normalize(&faceNormal1, &faceNormal1);
	}
	else
	{
		faceNormal1 = -(*currentFaceNormal);
	}

	if( faceIndex2 != USHRT_MAX ) // is there an adjacent triangle?
	{
		WORD i0 = indices[faceIndex2 * 3];
		WORD i1 = indices[faceIndex2 * 3 + 1];
		WORD i2 = indices[faceIndex2 * 3 + 2];

		D3DXVECTOR3 v0 = vertices[i0].position;
		D3DXVECTOR3 v1 = vertices[i1].position;
		D3DXVECTOR3 v2 = vertices[i2].position;

		D3DXVECTOR3 edge0 = v1 - v0;
		D3DXVECTOR3 edge1 = v2 - v0;
		D3DXVec3Cross(&faceNormal2, &edge0, &edge1);
		D3DXVec3Normalize(&faceNormal2, &faceNormal2);
	}
	else
	{
		faceNormal2 = -(*currentFaceNormal);
	}

	// save adjacent face normals
	adjFaceNormals[0] = faceNormal0;
	adjFaceNormals[1] = faceNormal1;
	adjFaceNormals[2] = faceNormal2;

	mesh->UnlockVertexBuffer();
	mesh->UnlockIndexBuffer();
}
Example #4
void _stdcall COBJModel::RenderToDisplayList(const Face *pFaces, 
									const unsigned int iFaceCount,
									const Material *pMaterials)
{
	////////////////////////////////////////////////////////////////////////
	// Render a list of faces into a display list
	////////////////////////////////////////////////////////////////////////

	int i, j;
	float fNormal[3];
	int iPreviousMaterial = -1;
	
	// Generate & save display list index
	m_iDisplayList = glGenLists(1);

	// Render model into the display list
	glNewList(m_iDisplayList, GL_COMPILE);

		// Save texture bit to recover from the various texture state changes
		glPushAttrib(GL_TEXTURE_BIT);

			// Activate automatic texture coord generation if no coords loaded
			if (!pFaces[0].pTexCoords)
				GenTexCoords();
		
			// Use default material if no materials are loaded
			if (!pMaterials)
				UseMaterial(NULL);

			// Process all faces
			for (i=0; i<(int) iFaceCount; i++)
			{
				// If materials are loaded, set the material
				// (only when it differs from the previous one)
				if (pMaterials)
				{
					if (iPreviousMaterial != (int) pFaces[i].iMaterialIndex)
					{
						iPreviousMaterial = pFaces[i].iMaterialIndex;
						UseMaterial(&pMaterials[pFaces[i].iMaterialIndex]);
					}
				}

				// Set primitive of the current face
				switch (pFaces[i].iNumVertices)
				{
					case 3:
						glBegin(GL_TRIANGLES);
						break;
					case 4:
						glBegin(GL_QUADS);
						break;
					default:
						glBegin(GL_POLYGON);
				}
							
				// Calculate and set face normal if no vertex normals are specified
				if (!pFaces[i].pNormals)
				{
					GetFaceNormal(fNormal, &pFaces[i]);
					glNormal3fv(fNormal);
				}

				// Process all vertices
				for (j=0; j<(int) pFaces[i].iNumVertices; j++)
				{
					// Set vertex normal (if vertex normals are specified)
					if (pFaces[i].pNormals)
						glNormal3f(pFaces[i].pNormals[j].fX, 
							pFaces[i].pNormals[j].fY, pFaces[i].pNormals[j].fZ);

					// Set texture coordinates (if any specified)
					if (pFaces[i].pTexCoords)
						glTexCoord2f(pFaces[i].pTexCoords[j].fX, 
							pFaces[i].pTexCoords[j].fY);

					// Set vertex
					glVertex3f(pFaces[i].pVertices[j].fX, 
						pFaces[i].pVertices[j].fY, pFaces[i].pVertices[j].fZ);
				}

				glEnd();	
			}

		glPopAttrib();

	glEndList();
}
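A minimal sketch of what the GetFaceNormal helper used above might look like (the actual COBJModel implementation may differ; the Face layout with pVertices and fX/fY/fZ fields is inferred from the rendering loop): the face normal is the normalized cross product of two edge vectors built from the face's first three vertices.

#include <math.h>

void GetFaceNormalSketch(float fNormal[3], const Face *pFace)
{
	// Edge vectors from vertex 0 to vertices 1 and 2
	float fEdgeA[3], fEdgeB[3];
	fEdgeA[0] = pFace->pVertices[1].fX - pFace->pVertices[0].fX;
	fEdgeA[1] = pFace->pVertices[1].fY - pFace->pVertices[0].fY;
	fEdgeA[2] = pFace->pVertices[1].fZ - pFace->pVertices[0].fZ;
	fEdgeB[0] = pFace->pVertices[2].fX - pFace->pVertices[0].fX;
	fEdgeB[1] = pFace->pVertices[2].fY - pFace->pVertices[0].fY;
	fEdgeB[2] = pFace->pVertices[2].fZ - pFace->pVertices[0].fZ;

	// Cross product of the two edges
	fNormal[0] = fEdgeA[1] * fEdgeB[2] - fEdgeA[2] * fEdgeB[1];
	fNormal[1] = fEdgeA[2] * fEdgeB[0] - fEdgeA[0] * fEdgeB[2];
	fNormal[2] = fEdgeA[0] * fEdgeB[1] - fEdgeA[1] * fEdgeB[0];

	// Normalize, guarding against degenerate (zero-area) faces
	float fLength = sqrtf(fNormal[0] * fNormal[0] +
	                      fNormal[1] * fNormal[1] +
	                      fNormal[2] * fNormal[2]);
	if (fLength > 0.0f)
	{
		fNormal[0] /= fLength;
		fNormal[1] /= fLength;
		fNormal[2] /= fLength;
	}
}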
Example #5
//! creates/loads an animated mesh from the file.
//! \return Pointer to the created mesh. Returns 0 if loading failed.
//! If you no longer need the mesh, you should call IAnimatedMesh::drop().
//! See IReferenceCounted::drop() for more information.
IAnimatedMesh* COCTLoader::createMesh(io::IReadFile* file) 
{
	if (!file)
		return 0;

	octHeader header;
	file->read(&header, sizeof(octHeader));

	octVert * verts = new octVert[header.numVerts];
	octFace * faces = new octFace[header.numFaces];
	octTexture * textures = new octTexture[header.numTextures];
	octLightmap * lightmaps = new octLightmap[header.numLightmaps];
	octLight * lights = new octLight[header.numLights];

	file->read(verts, sizeof(octVert) * header.numVerts);
	file->read(faces, sizeof(octFace) * header.numFaces);
	//TODO: Make sure id is in the legal range for Textures and Lightmaps

	u32 i;
	for (i = 0; i < header.numTextures; i++) {
		octTexture t;
		file->read(&t, sizeof(octTexture));
		textures[t.id] = t;
	}	
	for (i = 0; i < header.numLightmaps; i++) {
		octLightmap t;
		file->read(&t, sizeof(octLightmap));
		lightmaps[t.id] = t;
	}
	file->read(lights, sizeof(octLight) * header.numLights);

	//TODO: Now read in my extended OCT header (flexible lightmaps and vertex normals)
	

	// This is the method Nikolaus Gebhardt used in the Q3 loader -- create a
	// meshbuffer for every possible combination of lightmap and texture including
	// a "null" texture and "null" lightmap.  Ones that end up with nothing in them
	// will be removed later.

	SMesh * Mesh = new SMesh();
	for (i=0; i<(header.numTextures+1) * (header.numLightmaps+1); ++i)
	{
		scene::SMeshBufferLightMap* buffer = new scene::SMeshBufferLightMap();

		buffer->Material.MaterialType = video::EMT_LIGHTMAP;
		buffer->Material.Lighting = false;
		Mesh->addMeshBuffer(buffer);
		buffer->drop();
	}

	
	// Build the mesh buffers
	for (i = 0; i < header.numFaces; i++)
	{
		if (faces[i].numVerts < 3)
			continue;

		const core::vector3df normal =
			GetFaceNormal(verts[faces[i].firstVert].pos,
					verts[faces[i].firstVert+1].pos,
					verts[faces[i].firstVert+2].pos);

		const u32 textureID = core::min_(s32(faces[i].textureID), s32(header.numTextures - 1)) + 1;
		const u32 lightmapID = core::min_(s32(faces[i].lightmapID),s32(header.numLightmaps - 1)) + 1;
		SMeshBufferLightMap * meshBuffer = (SMeshBufferLightMap*)Mesh->getMeshBuffer(lightmapID * (header.numTextures + 1) + textureID);
		const u32 base = meshBuffer->Vertices.size();
		
		// Add this face's verts
		u32 v;
		for (v = 0; v < faces[i].numVerts; ++v)
		{
			octVert * vv = &verts[faces[i].firstVert + v];
			video::S3DVertex2TCoords vert;
			vert.Pos.set(vv->pos[0], vv->pos[1], vv->pos[2]);
			vert.Color = video::SColor(0,255,255,255);
			vert.Normal.set(normal);

			if (textureID == 0)
			{
				// No texture -- just a lightmap.  Thus, use lightmap coords for texture 1.
				// (the actual texture will be swapped later)
				vert.TCoords.set(vv->lc[0], vv->lc[1]);
			}
			else
			{
				vert.TCoords.set(vv->tc[0], vv->tc[1]);
				vert.TCoords2.set(vv->lc[0], vv->lc[1]);
			}

			meshBuffer->Vertices.push_back(vert);
		}

		// Now add the indices
		// This weird loop turns convex polygons into triangle strips.
		// I do it this way instead of a simple fan because it usually looks a lot better in wireframe, for example.
		u32 h = faces[i].numVerts - 1, l = 0, c; // High, Low, Center
		for (v = 0; v < faces[i].numVerts - 2; ++v)
		{
			if (v & 1)
				c = h - 1;
			else
				c = l + 1;

			meshBuffer->Indices.push_back(base + h);
			meshBuffer->Indices.push_back(base + l);
			meshBuffer->Indices.push_back(base + c);

			if (v & 1)
				--h;
			else
				++l;
		}
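		// Example: for a 5-vertex face this loop emits the triangles
		// (base+4, base+0, base+1), (base+4, base+1, base+3), (base+3, base+1, base+2),
		// alternating between the "high" and "low" ends of the polygon instead of
		// fanning around a single vertex.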
	} 


	// load textures
	core::array<video::ITexture*> tex;
	tex.set_used(header.numTextures + 1);
	tex[0] = 0;
	
	for (i = 1; i < (header.numTextures + 1); i++)
	{
		tex[i] = Driver->getTexture(textures[i-1].fileName);
	}


	// prepare lightmaps
	core::array<video::ITexture*> lig;
	lig.set_used(header.numLightmaps + 1);

	u32 lightmapWidth = 128, lightmapHeight = 128;
	lig[0] = 0;
	core::dimension2d<s32> lmapsize(lightmapWidth, lightmapHeight);

	bool oldMipMapState = Driver->getTextureCreationFlag(video::ETCF_CREATE_MIP_MAPS);
	Driver->setTextureCreationFlag(video::ETCF_CREATE_MIP_MAPS, false);

	for (i = 1; i < (header.numLightmaps + 1); i++)
	{
		core::stringc lightmapname = file->getFileName();
		lightmapname += ".lightmap.";
		lightmapname += (int)i;
		lig[i] = Driver->addTexture(lmapsize, lightmapname.c_str());

		if (lig[i])
		{
			// Only query the size once we know the driver actually created a texture.
			if (lig[i]->getSize() != lmapsize)
				os::Printer::log("OCTLoader: Created lightmap is not of the requested size", ELL_ERROR);

			void* pp = lig[i]->lock();

			if (pp)
			{
				video::ECOLOR_FORMAT format = lig[i]->getColorFormat();
				if (format == video::ECF_A1R5G5B5)
				{
					s16* p = (s16*)pp;

					octLightmap * lm;					
					lm = &lightmaps[i-1];

					for (u32 x=0; x<lightmapWidth; ++x)
						for (u32 y=0; y<lightmapHeight; ++y)
						{
							p[x*128 + y] = video::RGB16(
								lm->data[x][y][2],
								lm->data[x][y][1],
								lm->data[x][y][0]);
						}
				}
				else
				if (format == video::ECF_A8R8G8B8)
				{
					s32* p = (s32*)pp;

					octLightmap* lm;
					lm = &lightmaps[i-1];

					for (u32 x=0; x<lightmapWidth; ++x)
						for (u32 y=0; y<lightmapHeight; ++y)
						{
							p[x*128 + y] = video::SColor(255,
								lm->data[x][y][2],
								lm->data[x][y][1],
								lm->data[x][y][0]).color;
						}
				}
				else
					os::Printer::log(
						"OCTLoader: Could not create lightmap, unsupported texture format.", ELL_ERROR);
			}

			lig[i]->unlock();
		}
		else
			os::Printer::log("OCTLoader: Could not create lightmap, driver created no texture.", ELL_ERROR);
	}
	Driver->setTextureCreationFlag(video::ETCF_CREATE_MIP_MAPS, oldMipMapState);


	// Free stuff
	delete [] verts;
	delete [] faces;
	delete [] textures;
	delete [] lightmaps;
	delete [] lights;


	// attach materials
	for (i = 0; i < header.numLightmaps + 1; i++)
	{
		for (u32 j = 0; j < header.numTextures + 1; j++)
		{
			u32 mb = i * (header.numTextures + 1) + j;
			SMeshBufferLightMap * meshBuffer = (SMeshBufferLightMap*)Mesh->getMeshBuffer(mb);
			meshBuffer->Material.setTexture(0, tex[j]);
			meshBuffer->Material.setTexture(1, lig[i]);

			if (meshBuffer->Material.getTexture(0) == 0)
			{
				// This material has no texture, so we'll just show the lightmap if there is one.
				// We swapped the texture coordinates earlier.
				meshBuffer->Material.setTexture(0, meshBuffer->Material.getTexture(1));
				meshBuffer->Material.setTexture(1, 0);
			}
			if (meshBuffer->Material.getTexture(1) == 0)
			{
				// If there is only one texture, it should be solid and lit.
				// Among other things, this way you can preview OCT lights.
				meshBuffer->Material.MaterialType = video::EMT_SOLID;
				meshBuffer->Material.Lighting = true;
			}
		}
	}


	// delete all buffers without geometry in it.
	i = 0;
	while(i < Mesh->MeshBuffers.size())
	{
		if (Mesh->MeshBuffers[i]->getVertexCount() == 0 ||
			Mesh->MeshBuffers[i]->getIndexCount() == 0 ||
			Mesh->MeshBuffers[i]->getMaterial().getTexture(0) == 0)
		{
			// Meshbuffer is empty -- drop it
			Mesh->MeshBuffers[i]->drop();
			Mesh->MeshBuffers.erase(i);		
		}
		else
		{
			++i;
		}
	}


	// create bounding box
	for (i = 0; i < Mesh->MeshBuffers.size(); ++i)
	{
		Mesh->MeshBuffers[i]->recalculateBoundingBox();
	}
	Mesh->recalculateBoundingBox();


	// Set up an animated mesh to hold the mesh
	SAnimatedMesh* AMesh = new SAnimatedMesh();
	AMesh->Type = EAMT_OCT;
	AMesh->addMesh(Mesh);
	AMesh->recalculateBoundingBox();
	Mesh->drop();

	return AMesh;
}
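A sketch of what the GetFaceNormal helper called in the face loop above presumably computes (the loader's real signature and the octVert::pos type are assumptions based on how pos is indexed elsewhere in the function): the normalized cross product of the two edges spanned by the face's first three vertex positions.

static core::vector3df GetFaceNormalSketch(const f32 a[3], const f32 b[3], const f32 c[3])
{
	// Two edge vectors of the triangle (a, b, c)
	const core::vector3df edge1(b[0] - a[0], b[1] - a[1], b[2] - a[2]);
	const core::vector3df edge2(c[0] - a[0], c[1] - a[1], c[2] - a[2]);

	// Face normal is the normalized cross product of the edges
	core::vector3df normal = edge1.crossProduct(edge2);
	normal.normalize();
	return normal;
}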