//---------------------------------------------------------------------
    void GLSLProgramManagerCommon::extractUniforms(GLuint programObject, 
        const GpuConstantDefinitionMap* vertexConstantDefs,
        const GpuConstantDefinitionMap* geometryConstantDefs,
        const GpuConstantDefinitionMap* fragmentConstantDefs,
        const GpuConstantDefinitionMap* hullConstantDefs,
        const GpuConstantDefinitionMap* domainConstantDefs,
        const GpuConstantDefinitionMap* computeConstantDefs,
        GLUniformReferenceList& list, GLUniformBufferList& sharedList)
    {
        // Scan through the active uniforms and add them to the reference list
        GLint uniformCount = 0;
        // Fixed-size buffer for uniform names; the exact length could instead be
        // queried via glGetProgramiv with GL_ACTIVE_UNIFORM_MAX_LENGTH.
#define uniformLength 200
        char uniformName[uniformLength];
        GLUniformReference newGLUniformReference;

        // Get the number of active uniforms
        OGRE_CHECK_GL_ERROR(glGetProgramiv(programObject, GL_ACTIVE_UNIFORMS, &uniformCount));

        // Loop over each of the active uniforms, and add them to the reference container
        // only do this for user defined uniforms, ignore built in gl state uniforms
        for (int index = 0; index < uniformCount; index++)
        {
            GLint arraySize;
            GLenum glType;
            OGRE_CHECK_GL_ERROR(glGetActiveUniform(programObject, index, uniformLength, NULL,
                                                   &arraySize, &glType, uniformName));

            // Don't add built in uniforms
            OGRE_CHECK_GL_ERROR(newGLUniformReference.mLocation = glGetUniformLocation(programObject, uniformName));
            if (newGLUniformReference.mLocation >= 0)
            {
                // User defined uniform found, add it to the reference list
                String paramName = String( uniformName );

                // Current ATI drivers (Catalyst 7.2 and earlier) and older NVidia drivers report every array element as a
                // separate uniform, but we only want the root array name and location.
                // Note also that ATI Catalyst 6.8 to 7.2 has a bug in glUniform that does not allow updating a uniform array
                // past the first element, i.e. you cannot start updating an array at element 1; it must always be element 0.

                // If the uniform name contains a "[" then it is an array element uniform.
                String::size_type arrayStart = paramName.find("[");
                if (arrayStart != String::npos)
                {
                    // if not the first array element then skip it and continue to the next uniform
                    if (paramName.compare(arrayStart, paramName.size() - 1, "[0]") != 0) continue;
                    paramName = paramName.substr(0, arrayStart);
                }

                // Find out which params object this comes from
                bool foundSource = completeParamSource(paramName,
                                                       vertexConstantDefs, geometryConstantDefs,
                                                       fragmentConstantDefs, hullConstantDefs,
                                                       domainConstantDefs, computeConstantDefs, newGLUniformReference);

                // Only add this parameter if we found the source
                if (foundSource)
                {
                    assert(size_t (arraySize) == newGLUniformReference.mConstantDef->arraySize
                           && "GL doesn't agree with our array size!");
                    list.push_back(newGLUniformReference);
                }

                // Don't bother adding individual array params; they will be
                // picked up in the 'parent' parameter and can be copied all at
                // once anyway. Individual indices are only needed for lookup
                // from user params.
            } // end if
        } // end for

        // Now deal with uniform blocks

        GLint blockCount = 0;

        OGRE_CHECK_GL_ERROR(glGetProgramiv(programObject, GL_ACTIVE_UNIFORM_BLOCKS, &blockCount));

        for (int index = 0; index < blockCount; index++)
        {
            OGRE_CHECK_GL_ERROR(glGetActiveUniformBlockName(programObject, index, uniformLength, NULL, uniformName));

            GpuSharedParametersPtr blockSharedParams = GpuProgramManager::getSingleton().getSharedParameters(uniformName);

            GLint blockSize, blockBinding;
            OGRE_CHECK_GL_ERROR(glGetActiveUniformBlockiv(programObject, index, GL_UNIFORM_BLOCK_DATA_SIZE, &blockSize));
            OGRE_CHECK_GL_ERROR(glGetActiveUniformBlockiv(programObject, index, GL_UNIFORM_BLOCK_BINDING, &blockBinding));
            HardwareUniformBufferSharedPtr newUniformBuffer = HardwareBufferManager::getSingleton().createUniformBuffer(blockSize, HardwareBuffer::HBU_DYNAMIC_WRITE_ONLY_DISCARDABLE, false, uniformName);

            GL3PlusHardwareUniformBuffer* hwGlBuffer = static_cast<GL3PlusHardwareUniformBuffer*>(newUniformBuffer.get());
            hwGlBuffer->setGLBufferBinding(blockBinding);
            sharedList.push_back(newUniformBuffer);
        }
    }
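
The array-element trimming above (keep "someArray[0]" as "someArray", skip the remaining elements) can be shown in isolation. A minimal sketch, assuming plain std::string in place of Ogre's String typedef:

#include <iostream>
#include <string>

// Return the root name for "[0]" entries, an empty string for other array
// elements (which the caller skips), and the name unchanged otherwise.
static std::string rootUniformName(const std::string& paramName)
{
    std::string::size_type arrayStart = paramName.find('[');
    if (arrayStart == std::string::npos)
        return paramName;                                   // not an array element
    if (paramName.compare(arrayStart, std::string::npos, "[0]") != 0)
        return std::string();                               // skip non-zero elements
    return paramName.substr(0, arrayStart);                 // root array name
}

int main()
{
    std::cout << rootUniformName("lightPositions[0]") << "\n"; // "lightPositions"
    std::cout << rootUniformName("lightPositions[1]") << "\n"; // "" (skipped)
    std::cout << rootUniformName("worldViewProj")     << "\n"; // unchanged
}
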
Example #2
bool	operator>=(const String& str1, const String& str2)
{
	return (str1.compare(str2) >= 0);
}
Example #3
bool	operator>=(const utf8* utf8_str, const String& str)
{
	return (str.compare(utf8_str) <= 0);
}
static S32 buildFileList(const char* pattern, bool recurse, bool multiMatch)
{
   static const String sSlash( "/" );

   sgFindFilesResults.clear();

   String sPattern(Torque::Path::CleanSeparators(pattern));
   if(sPattern.isEmpty())
   {
      Con::errorf("findFirstFile() requires a search pattern");
      return -1;
   }

   if(!Con::expandScriptFilename(sgScriptFilenameBuffer, sizeof(sgScriptFilenameBuffer), sPattern.c_str()))
   {
      Con::errorf("findFirstFile() given initial directory cannot be expanded: '%s'", pattern);
      return -1;
   }
   sPattern = String::ToString(sgScriptFilenameBuffer);

   String::SizeType slashPos = sPattern.find('/', 0, String::Right);
//    if(slashPos == String::NPos)
//    {
//       Con::errorf("findFirstFile() missing search directory or expression: '%s'", sPattern.c_str());
//       return -1;
//    }

   // Build the initial search path
   Torque::Path givenPath(Torque::Path::CompressPath(sPattern));
   givenPath.setFileName("*");
   givenPath.setExtension("*");

   if(givenPath.getPath().length() > 0 && givenPath.getPath().find('*', 0, String::Right) == givenPath.getPath().length()-1)
   {
      // Deal with legacy searches of the form '*/*.*'
      String suspectPath = givenPath.getPath();
      String::SizeType newLen = suspectPath.length()-1;
      if(newLen > 0 && suspectPath.find('/', 0, String::Right) == suspectPath.length()-2)
      {
         --newLen;
      }
      givenPath.setPath(suspectPath.substr(0, newLen));
   }

   Torque::FS::FileSystemRef fs = Torque::FS::GetFileSystem(givenPath);
   //Torque::Path path = fs->mapTo(givenPath);
   Torque::Path path = givenPath;
   
   // Make sure that we have a root so the correct file system can be determined when using zips
   if(givenPath.isRelative())
      path = Torque::Path::Join(Torque::FS::GetCwd(), '/', givenPath);
   
   path.setFileName(String::EmptyString);
   path.setExtension(String::EmptyString);
   if(!Torque::FS::IsDirectory(path))
   {
      Con::errorf("findFirstFile() invalid initial search directory: '%s'", path.getFullPath().c_str());
      return -1;
   }

   // Build the search expression
   const String expression(slashPos != String::NPos ? sPattern.substr(slashPos+1) : sPattern);
   if(expression.isEmpty())
   {
      Con::errorf("findFirstFile() requires a search expression: '%s'", sPattern.c_str());
      return -1;
   }

   S32 results = Torque::FS::FindByPattern(path, expression, recurse, sgFindFilesResults, multiMatch );
   if(givenPath.isRelative() && results > 0)
   {
      // Strip the CWD out of the returned paths
      // MakeRelativePath() returns incorrect results (it adds a leading ..) so doing this the dirty way
      const String cwd = Torque::FS::GetCwd().getFullPath();
      for(S32 i = 0;i < sgFindFilesResults.size();++i)
      {
         String str = sgFindFilesResults[i];
         if(str.compare(cwd, cwd.length(), String::NoCase) == 0)
            str = str.substr(cwd.length());
         sgFindFilesResults[i] = str;
      }
   }
   return results;
}
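
The loop above strips the current working directory from each returned path with a length-limited compare. A minimal standalone sketch of the same prefix-stripping idea, assuming std::string and a case-sensitive compare rather than Torque's String::NoCase overload:

#include <string>
#include <vector>

// Remove a leading directory prefix from each path (case-sensitive here;
// the Torque version above compares with String::NoCase).
static void stripPrefix(std::vector<std::string>& paths, const std::string& cwd)
{
    for (std::string& p : paths)
    {
        if (p.compare(0, cwd.length(), cwd) == 0)
            p = p.substr(cwd.length());
    }
}
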
Example #5
bool StringUtils::stringEquals( const String& string1, const String& string2 )
{
	return string1.compare(string2) == 0;
}
Example #6
 static int compareElements (String& s1, String& s2) noexcept    { return s1.compare (s2); }
//-----------------------------------------------------------------------------
///
void WindBatchPage::_updateShaders()
{
	if (!m_bShadersSupported)
		return;

	unsigned int i = 0;
	BatchedGeometry::TSubBatchIterator it = m_pBatchGeom->getSubBatchIterator();
	while (it.hasMoreElements())
   {
      BatchedGeometry::SubBatch *subBatch = it.getNext();
		const MaterialPtr &ptrMat = m_vecUnfadedMaterials[i++];

		//Check if lighting should be enabled
		bool lightingEnabled = false;
		for (unsigned short t = 0, techCnt = ptrMat->getNumTechniques(); t < techCnt; ++t)
      {
			Technique *tech = ptrMat->getTechnique(t);
			for (unsigned short p = 0, passCnt = tech->getNumPasses(); p < passCnt; ++p)
         {
            if (tech->getPass(p)->getLightingEnabled())
            {
					lightingEnabled = true;
					break;
				}
			}

			if (lightingEnabled)
            break;
		}

		//Compile the shader script based on various material / fade options
		StringUtil::StrStreamType tmpName;
		tmpName << "BatchPage_";
		if (m_bFadeEnabled)
			tmpName << "fade_";
		if (lightingEnabled)
			tmpName << "lit_";
		if (subBatch->m_pVertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
			tmpName << "clr_";

		for (unsigned short i = 0; i < subBatch->m_pVertexData->vertexDeclaration->getElementCount(); ++i)
      {
			const VertexElement *el = subBatch->m_pVertexData->vertexDeclaration->getElement(i);
			if (el->getSemantic() == VES_TEXTURE_COORDINATES)
         {
				String uvType;
            switch (el->getType())
            {
            case VET_FLOAT1: uvType = "1"; break;
            case VET_FLOAT2: uvType = "2"; break;
            case VET_FLOAT3: uvType = "3"; break;
            case VET_FLOAT4: uvType = "4"; break;
            }
            tmpName << uvType << '_';
			}
		}

		tmpName << "vp";

		const String vertexProgName = tmpName.str();

		String shaderLanguage;
		if (Root::getSingleton().getRenderSystem()->getName() == "Direct3D9 Rendering Subsystem")
			shaderLanguage = "hlsl";
		else if(Root::getSingleton().getRenderSystem()->getName() == "OpenGL Rendering Subsystem")
			shaderLanguage = "glsl";
		else
			shaderLanguage = "cg";

		//If the shader hasn't been created yet, create it
		if (HighLevelGpuProgramManager::getSingleton().getByName(vertexProgName).isNull())
		{
			Pass *pass = ptrMat->getTechnique(0)->getPass(0);
			String vertexProgSource;

			if(!shaderLanguage.compare("hlsl") || !shaderLanguage.compare("cg"))
			{

				vertexProgSource =
					"void main( \n"
					"	float4 iPosition : POSITION, \n"
					"	float3 normal	 : NORMAL, \n"
					"	out float4 oPosition : POSITION, \n";

				if (subBatch->m_pVertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
				{
					vertexProgSource += 
						"	float4 iColor	 : COLOR, \n";
				}

				int texNum = 0;

				unsigned short texCoordCount = 0;
				for (unsigned short j = 0; j < subBatch->m_pVertexData->vertexDeclaration->getElementCount(); ++j) 
				{
					const VertexElement *el = subBatch->m_pVertexData->vertexDeclaration->getElement(j);
					if (el->getSemantic() == VES_TEXTURE_COORDINATES) 
					{
						++ texCoordCount;
					}
				}

				for (unsigned short i = 0; i < subBatch->m_pVertexData->vertexDeclaration->getElementCount(); ++i)
				{
					const VertexElement *el = subBatch->m_pVertexData->vertexDeclaration->getElement(i);
					if (el->getSemantic() == VES_TEXTURE_COORDINATES)
					{
						if (el->getIndex() == texCoordCount - 2)
						{
							vertexProgSource += 
								"	float4 params 	: TEXCOORD" + StringConverter::toString(texCoordCount-2) + ", \n";
						}
						else
						{
							if (el->getIndex() == texCoordCount - 1)
							{
								vertexProgSource += 
									"	float4 originPos 	: TEXCOORD" + StringConverter::toString(texCoordCount-1) + ", \n";
							}
							else
							{
								String uvType = "";
								switch (el->getType())
								{
									case VET_FLOAT1: uvType = "float"; break;
									case VET_FLOAT2: uvType = "float2"; break;
									case VET_FLOAT3: uvType = "float3"; break;
									case VET_FLOAT4: uvType = "float4"; break;
								}

								vertexProgSource += 
									"	" + uvType + " iUV" + StringConverter::toString(texNum) + "			: TEXCOORD" + StringConverter::toString(texNum) + ", \n"
									"	out " + uvType + " oUV" + StringConverter::toString(texNum) + "		: TEXCOORD" + StringConverter::toString(texNum) + ", \n";
							}
							++texNum;
						}
					}
				}

				vertexProgSource +=
					"	out float oFog : FOG, \n"
					"	out float4 oColor : COLOR, \n";

				if (lightingEnabled)
				{
					 vertexProgSource +=
						"	uniform float4 objSpaceLight, \n"
						"	uniform float4 lightDiffuse, \n"
						"	uniform float4 lightAmbient, \n";
				}

				if (m_bFadeEnabled)
				{
					vertexProgSource +=
						"	uniform float3 camPos, \n"
						"	uniform float fadeGap, \n"
						"	uniform float invisibleDist, \n";
				}

				vertexProgSource +=
					"	uniform float4x4 worldViewProj,\n"
					"	uniform float time) \n "
					"{	\n";

				if (lightingEnabled)
				{
					//Perform lighting calculations (no specular)
					vertexProgSource +=
						"	float3 light = normalize(objSpaceLight.xyz - (iPosition.xyz * objSpaceLight.w)); \n"
						"	float diffuseFactor = max(dot(normal, light), 0); \n";

					if (subBatch->m_pVertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
					{
						vertexProgSource +=
							"	oColor = (lightAmbient + diffuseFactor * lightDiffuse) * iColor; \n";
					}
					else
					{
						vertexProgSource +=
							"	oColor = (lightAmbient + diffuseFactor * lightDiffuse); \n";
					}
				}
				else
				{
					if (subBatch->m_pVertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
					{
						vertexProgSource +=
							"	oColor = iColor; \n";
					}
					else
					{
						vertexProgSource +=
							"	oColor = float4(1, 1, 1, 1); \n";
					}
				}

				if (m_bFadeEnabled)
				{
					//Fade out in the distance
					 vertexProgSource +=
						"	float dist = distance(camPos.xz, iPosition.xz); \n"
						"	oColor.a *= (invisibleDist - dist) / fadeGap; \n";
				}

				for (unsigned short i = 0; i < texCoordCount - 2; ++i)
				{
					vertexProgSource += 
						"	oUV" + StringConverter::toString(i) + " = iUV" + StringConverter::toString(i) + "; \n";
				}

				vertexProgSource +=
					"	float radiusCoeff = params.x; \n"
					"	float heightCoeff = params.y; \n"
					"	float factorX = params.z; \n"
					"	float factorY = params.w; \n"
					"	float4 tmpPos = iPosition; \n"

					/*
					Two different methods are used for the sin calculation:
					- the first one gives a better effect but costs a few fps because of the two sines
					- the second one uses fewer resources but is a bit less realistic

					A sin approximation could be used to improve performance.
					*/
	#if 0
					"	tmpPos.y += sin(time + originPos.z + tmpPos.y + tmpPos.x) * radiusCoeff * radiusCoeff * factorY; \n"
					"	tmpPos.x += sin(time + originPos.z ) * heightCoeff * heightCoeff * factorX ; \n"
	#else
					"	float sinval = sin(time + originPos.z ); \n"
					"	tmpPos.y += sinval * radiusCoeff * radiusCoeff * factorY; \n"
					"	tmpPos.x += sinval * heightCoeff * heightCoeff * factorX ; \n"
	#endif
					"	oPosition = mul(worldViewProj, tmpPos); \n"
					"	oFog = oPosition.z; \n"
					"}";
			}

			if(!shaderLanguage.compare("glsl"))
			{
				unsigned short texCoordCount = 0;
				for (unsigned short j = 0; j < subBatch->m_pVertexData->vertexDeclaration->getElementCount(); ++j) 
				{
					const VertexElement *el = subBatch->m_pVertexData->vertexDeclaration->getElement(j);
					if (el->getSemantic() == VES_TEXTURE_COORDINATES) 
					{
						++ texCoordCount;
					}
				}

				if (lightingEnabled)
				{
					 vertexProgSource +=
						"uniform vec4 objSpaceLight; \n"
						"uniform vec4 lightDiffuse; \n"
						"uniform vec4 lightAmbient; \n";
				}

				if (m_bFadeEnabled)
				{
					 vertexProgSource +=
						"uniform vec3 camPos; \n"
						"uniform float fadeGap; \n"
						"uniform float invisibleDist; \n";
				}

				vertexProgSource +=
					"uniform float time; \n"
					"void main() \n"
					"{ \n";

				int texNum = 0;

				for (unsigned short i = 0; i < subBatch->m_pVertexData->vertexDeclaration->getElementCount(); ++i)
				{
					const VertexElement *el = subBatch->m_pVertexData->vertexDeclaration->getElement(i);
					if (el->getSemantic() == VES_TEXTURE_COORDINATES)
					{
						if (el->getIndex() == texCoordCount - 2)
						{
							vertexProgSource += 
								"	vec4 params = gl_MultiTexCoord" + StringConverter::toString(texCoordCount-2) + "; \n";
						}
						else
						{
							if (el->getIndex() == texCoordCount - 1)
							{
								vertexProgSource += 
									"	vec4 originPos = gl_MultiTexCoord" + StringConverter::toString(texCoordCount-1) + "; \n";
							}
							else
							{
								vertexProgSource += 
								"	gl_TexCoord[" + StringConverter::toString(texNum) + "]	= gl_MultiTexCoord" + StringConverter::toString(texNum) + "; \n";
							}
							++texNum;
						}
					}
				}

				if (lightingEnabled)
				{
					//Perform lighting calculations (no specular)
					vertexProgSource +=
						"	vec3 light = normalize(objSpaceLight.xyz - (gl_Vertex.xyz * objSpaceLight.w)); \n"
						"	float diffuseFactor = max(dot(gl_Normal.xyz, light), 0.0); \n";

					if (subBatch->m_pVertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
					{
						vertexProgSource +=
							"	gl_FrontColor = (lightAmbient + diffuseFactor * lightDiffuse) * gl_Color; \n";
					}
					else
					{
						vertexProgSource +=
							"	gl_FrontColor = (lightAmbient + diffuseFactor * lightDiffuse); \n";
					}
				}
				else
				{
					if (subBatch->m_pVertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
					{
						vertexProgSource += "	gl_FrontColor = gl_Color; \n";
					}
					else
					{
						vertexProgSource += "	gl_FrontColor = vec4(1.0, 1.0, 1.0, 1.0); \n";
					}
				}

				if (m_bFadeEnabled)
				{
					//Fade out in the distance
					vertexProgSource +=
						"	float dist = distance(camPos.xz, gl_Vertex.xz);	\n"
						"	gl_FrontColor.a *= (invisibleDist - dist) / fadeGap; \n";
				}

				vertexProgSource +=
					"	float radiusCoeff = params.x; \n"
					"	float heightCoeff = params.y; \n"
					"	float factorX = params.z; \n"
					"	float factorY = params.w; \n"
					"	vec4 tmpPos = gl_Vertex; \n"
					
					/*
					Two different methods are used for the sin calculation:
					- the first one gives a better effect but costs a few fps because of the two sines
					- the second one uses fewer resources but is a bit less realistic

					A sin approximation could be used to improve performance.
					*/
	#if 1
					"	tmpPos.y += sin(time + originPos.z + tmpPos.y + tmpPos.x) * radiusCoeff * radiusCoeff * factorY; \n"
					"	tmpPos.x += sin(time + originPos.z ) * heightCoeff * heightCoeff * factorX; \n"
	#else
	 				
					"	float sinval = sin(time + originPos.z ); \n"
					"	tmpPos.y += sinval * radiusCoeff * radiusCoeff * factorY; \n"
					"	tmpPos.x += sinval * heightCoeff * heightCoeff * factorX; \n"
	#endif
					"	gl_Position = gl_ModelViewProjectionMatrix * tmpPos; \n"
					"	gl_FogFragCoord = gl_Position.z; \n"
					"}";
			}

			// test for shader source
			//std::ofstream shaderOutput;
			//shaderOutput.open((vertexProgName+std::string(".cg")).c_str());
			//shaderOutput << vertexProgSource;
			//shaderOutput.close();

			// end test for shader source

			HighLevelGpuProgramPtr vertexShader = HighLevelGpuProgramManager::getSingleton().createProgram(
				vertexProgName,
				ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
				shaderLanguage, GPT_VERTEX_PROGRAM);

			vertexShader->setSource(vertexProgSource);

			if (shaderLanguage == "hlsl")
			{
				vertexShader->setParameter("target", "vs_1_1");
				vertexShader->setParameter("entry_point", "main");
			}
			else if(shaderLanguage == "cg")
			{
				vertexShader->setParameter("profiles", "vs_1_1 arbvp1");
				vertexShader->setParameter("entry_point", "main");
			}
			// GLSL can only have one entry point "main".

			vertexShader->load();
		}

		//Now that the shader is ready to be applied, apply it
		StringUtil::StrStreamType materialSignature;
		materialSignature << "BatchMat|";
		materialSignature << ptrMat->getName() << "|";
		if (m_bFadeEnabled)
      {
			materialSignature << m_fVisibleDist << "|";
			materialSignature << m_fInvisibleDist << "|";
		}

		//Search for the desired material
		MaterialPtr generatedMaterial = MaterialManager::getSingleton().getByName(materialSignature.str());
		if (generatedMaterial.isNull())
      {
			//Clone the material
			generatedMaterial = ptrMat->clone(materialSignature.str());

			//And apply the fade shader
			for (unsigned short t = 0; t < generatedMaterial->getNumTechniques(); ++t){
				Technique *tech = generatedMaterial->getTechnique(t);
				for (unsigned short p = 0; p < tech->getNumPasses(); ++p){
					Pass *pass = tech->getPass(p);

					//Setup vertex program
					if (pass->getVertexProgramName() == "")
						pass->setVertexProgram(vertexProgName);

					try{
						GpuProgramParametersSharedPtr params = pass->getVertexProgramParameters();

						if (lightingEnabled) {
							params->setNamedAutoConstant("objSpaceLight", GpuProgramParameters::ACT_LIGHT_POSITION_OBJECT_SPACE);
							params->setNamedAutoConstant("lightDiffuse", GpuProgramParameters::ACT_DERIVED_LIGHT_DIFFUSE_COLOUR);
							params->setNamedAutoConstant("lightAmbient", GpuProgramParameters::ACT_DERIVED_AMBIENT_LIGHT_COLOUR);
							//params->setNamedAutoConstant("matAmbient", GpuProgramParameters::ACT_SURFACE_AMBIENT_COLOUR);
						}

						params->setNamedConstantFromTime("time", 1);

						if(shaderLanguage.compare("glsl"))
						{
							//glsl can use the built in gl_ModelViewProjectionMatrix
							params->setNamedAutoConstant("worldViewProj", GpuProgramParameters::ACT_WORLDVIEWPROJ_MATRIX);
						}

						if (m_bFadeEnabled)
                  {
							params->setNamedAutoConstant("camPos", GpuProgramParameters::ACT_CAMERA_POSITION_OBJECT_SPACE);

							//Set fade ranges
							params->setNamedAutoConstant("invisibleDist", GpuProgramParameters::ACT_CUSTOM);
							params->setNamedConstant("invisibleDist", m_fInvisibleDist);

							params->setNamedAutoConstant("fadeGap", GpuProgramParameters::ACT_CUSTOM);
							params->setNamedConstant("fadeGap", m_fInvisibleDist - m_fVisibleDist);

							if (pass->getAlphaRejectFunction() == CMPF_ALWAYS_PASS)
								pass->setSceneBlending(SBT_TRANSPARENT_ALPHA);
						}
					}
					catch (const Ogre::Exception &e)
					{
						// test for shader source	
						std::ofstream shaderOutput;
						shaderOutput.open("exception.log");
						shaderOutput << e.getDescription();
						shaderOutput.close();
					}
					catch (...)
               {
						OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR,
							"Error configuring batched geometry transitions. If you're using materials with custom "
							"vertex shaders, they will need to implement fade transitions to be compatible with BatchPage.",
							"BatchPage::_updateShaders()");
					}
				}
			}

		}

		//Apply the material
		subBatch->setMaterial(generatedMaterial);
	}
}
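
The material handling above builds a signature string from the fade options, looks it up by name, and only clones when nothing with that name exists yet. A minimal sketch of that name-keyed caching pattern, using a std::map as a hypothetical stand-in for Ogre's MaterialManager:

#include <map>
#include <memory>
#include <sstream>
#include <string>

struct Material { std::string name; };

// Hypothetical cache keyed by a signature string, mirroring the
// "BatchMat|<name>|<visibleDist>|<invisibleDist>|" lookup above.
static std::map<std::string, std::shared_ptr<Material>> g_materialCache;

std::shared_ptr<Material> getOrCloneMaterial(const Material& base,
                                             bool fadeEnabled,
                                             float visibleDist,
                                             float invisibleDist)
{
    std::ostringstream signature;
    signature << "BatchMat|" << base.name << "|";
    if (fadeEnabled)
        signature << visibleDist << "|" << invisibleDist << "|";

    auto it = g_materialCache.find(signature.str());
    if (it != g_materialCache.end())
        return it->second;                 // reuse the existing clone

    auto cloned = std::make_shared<Material>();
    cloned->name = signature.str();        // stand-in for mat->clone(signature)
    g_materialCache.emplace(signature.str(), cloned);
    return cloned;
}
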
Example #8
int main( int argc, const char** argv )
{
    CvCapture* capture = 0;
    Mat frame, frameCopy, image;
    const String scaleOpt = "--scale=";
    size_t scaleOptLen = scaleOpt.length();
    const String cascadeOpt = "--cascade=";
    size_t cascadeOptLen = cascadeOpt.length();
    const String nestedCascadeOpt = "--nested-cascade";
    size_t nestedCascadeOptLen = nestedCascadeOpt.length();
    String inputName;
	
    CascadeClassifier cascade, nestedCascade;
    double scale = 1;
	
    for( int i = 1; i < argc; i++ )
    {
        if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
            cascadeName.assign( argv[i] + cascadeOptLen );
        else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
        {
            if( argv[i][nestedCascadeOpt.length()] == '=' )
                nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
            if( !nestedCascade.load( nestedCascadeName ) )
                cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
        }
        else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
        {
            if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
                scale = 1;
        }
        else if( argv[i][0] == '-' )
        {
            cerr << "WARNING: Unknown option %s" << argv[i] << endl;
        }
        else
            inputName.assign( argv[i] );
    }
	
	
    IplImage* motion = 0;
	
	
	// initialize the server connection
	
	if (init_server() == -1)
	{
		perror("init server failed");
		return (-1);
	}
	
    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        cerr << "Usage: facedetect [--cascade=\"<cascade_path>\"]\n"
		"   [--nested-cascade[=\"nested_cascade_path\"]]\n"
		"   [--scale[=<image scale>\n"
		"   [filename|camera_index]\n" ;
		
		close_connection();
		
		
		// send close // wait for answer before close or if -1 close
		
        return -1;
    }
	
    if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
        capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
    else if( inputName.size() )
    {
        image = imread( inputName, 1 );
        if( image.empty() )
            capture = cvCaptureFromAVI( inputName.c_str() );
    }
    else
        image = imread( "lena.jpg", 1 );
	
    cvNamedWindow( "result", 1 );
	cvNamedWindow( "Motion", 1 );
	
    if( capture )
    {
		
		// we should send via socket here :D
        for(;;)
        {
            IplImage* iplImg = cvQueryFrame( capture );
            frame = iplImg;
			
			
            if( !motion )
            {
                motion = cvCreateImage( cvSize(iplImg->width,iplImg->height), 8, 3 );
                cvZero( motion );
                motion->origin = iplImg->origin;
            }
			
            if( frame.empty() )
                break;
            if( iplImg->origin == IPL_ORIGIN_TL )
                frame.copyTo( frameCopy );
            else
                flip( frame, frameCopy, 0 );
			
			// send
			// recv inside the detectAndDraw
            detectAndDraw( frameCopy, iplImg, cascade, nestedCascade, scale, motion);
			
            if( waitKey( 10 ) >= 0 )
                goto _cleanup_;
        }
		close_connection();
        waitKey(0);
	_cleanup_:
		// send close // wait for answer before close or if -1 close
        cvReleaseCapture( &capture );
    }/*
	  else
	  {
	  if( !image.empty() )
	  {
	  detectAndDraw( image, cascade, nestedCascade, scale, motion );
	  waitKey(0);
	  }
	  else if( !inputName.empty() )
	  {
	  FILE* f = fopen( inputName.c_str(), "rt" );
	  if( f )
	  {
	  char buf[1000+1];
	  while( fgets( buf, 1000, f ) )
	  {
	  int len = (int)strlen(buf), c;
	  while( len > 0 && isspace(buf[len-1]) )
	  len--;
	  buf[len] = '\0';
	  cout << "file " << buf << endl;
	  image = imread( buf, 1 );
	  if( !image.empty() )
	  {
	  detectAndDraw( image, cascade, nestedCascade, scale, motion );
	  c = waitKey(0);
	  if( c == 27 || c == 'q' || c == 'Q' )
	  break;
	  }
	  }
	  fclose(f);
	  }
	  }
	  }*/
    cvDestroyWindow( "Motion" );
    cvDestroyWindow("result");
	
    return 0;
}
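
The argument loop above recognizes options by comparing only the leading characters of each argv entry ("--cascade=", "--scale=", ...). A minimal sketch of that prefix matching with std::string::compare, independent of the OpenCV sample:

#include <cstdio>
#include <string>

int main(int argc, const char** argv)
{
    const std::string scaleOpt = "--scale=";
    double scale = 1.0;
    std::string inputName;

    for (int i = 1; i < argc; ++i)
    {
        std::string arg = argv[i];
        // compare(0, n, other) == 0 means arg starts with scaleOpt
        if (arg.compare(0, scaleOpt.length(), scaleOpt) == 0)
        {
            if (std::sscanf(arg.c_str() + scaleOpt.length(), "%lf", &scale) != 1 || scale < 1)
                scale = 1.0;
        }
        else
            inputName = arg;
    }
    std::printf("scale=%g input=%s\n", scale, inputName.c_str());
    return 0;
}
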
Example #9
// return true if NativeView was created
String CMainWindow::processForNativeView(String _url) {

	String url = _url.c_str();
	String callback_prefix = "call_stay_native";

	// find protocol:navto pairs

	int last = -1;
	int cur = url.find_first_of(':', last+1);
	while (cur > 0) {
		String protocol = url.substr(last+1, cur - last - 1);
		String navto = url.substr(cur+1, url.size() - cur);
		
		if (callback_prefix.compare(protocol) == 0) {
			// navigate but still in native view
			String cleared_url = url.substr(callback_prefix.size()+1, url.size() - callback_prefix.size());
			return cleared_url;
		}
		// check protocol for nativeView
		NativeViewFactory* nvf = RhoNativeViewManagerWM::getFactoryByViewType(protocol.c_str());
		if (nvf != NULL) {
			// we should switch to NativeView
			if (mNativeView != NULL) {
				if (protocol.compare(mNativeViewType) == 0) {
					mNativeView->navigate(navto.c_str());
					return "";
				}
			}

			restoreWebView();
			NativeView* nv = nvf->getNativeView(protocol.c_str());
			if (nv != NULL) {

				openNativeView(nvf, nv, protocol);

				nv->navigate(navto.c_str());

				mIsOpenedByURL = true;

				return "";
			}
			restoreWebView();
			return url;
		}
		last = cur;
		int c1 = url.find_first_of(':', last+1);
		int c2 = url.find_first_of('/', last+1);
		if ((c1 < c2)) {
			if (c1 <= 0) {
				cur = c2;
			}
			else {
				cur = c1;
			}
		}
		else {
			if (c2 <= 0) {
				cur = c1;
			}
			else {
				cur = c2;
			}
		}
	}
	if (mIsOpenedByURL) {
		restoreWebView();
	}
	return url;
}
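
The scanner above repeatedly splits the URL at ':' into a protocol part and a navto part and then dispatches on the protocol with compare(). A minimal sketch of the single split step, assuming std::string:

#include <string>
#include <utility>

// Split "protocol:rest" at the first ':'; returns an empty protocol when none is found.
std::pair<std::string, std::string> splitProtocol(const std::string& url)
{
    std::string::size_type cur = url.find_first_of(':');
    if (cur == std::string::npos)
        return {std::string(), url};
    return {url.substr(0, cur), url.substr(cur + 1)};
}
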
Example #10
		static bool compare(String const& a, char* b) { return a.compare(b) == 0; }
Example #11
/* throws Exception */
bool BlobTest::checkBlob(const String & retrBytes)
{
  bool passed=true;

  std::fstream & bIn=testBlobFile->getStream();

  bIn.seekg(0, std::ios_base::beg);

  ASSERT_MESSAGE(!bIn.fail(), "seekg 0 position from the beginning - failed");
  ASSERT_MESSAGE(!bIn.eof(), "stream is at eof");

  unsigned int fileLength=testBlobFile->getSize();

  ASSERT_EQUALS((unsigned int) retrBytes.size(), fileLength);

  int substrIdx=0;

  while (!bIn.eof())
  {
    char fromFile[8192];
    bIn.read(fromFile, sizeof (fromFile));

    ASSERT_MESSAGE(!bIn.fail() || bIn.eof(), "read from file failed");
    if (bIn.gcount() == 0) {
      logMsg("We did not get any data from our input stream, we cannot do the compare input and output.");
      logMsg("Lets be gentle and consider the test as passed.");
      break;
    }

    if (retrBytes.compare(substrIdx, bIn.gcount(), fromFile, bIn.gcount()) != 0)
    {
      passed=false;
      int j=0;

      TestsListener::errorsLog() << "compare returned !=0 at " << substrIdx
              << ", read from file " << bIn.gcount() << std::endl;

      while (j < bIn.gcount() && fromFile[ j ] == retrBytes[substrIdx + j])
        ++j;


      if (j < bIn.gcount())
      {
        TestsListener::errorsLog() << "Byte pattern differed at position "
                << j << " , Retrieved: " << retrBytes[ substrIdx + j ]
                << "(" << StringUtils::toHexString(retrBytes[ substrIdx + j ])
                << ") != From File:" << fromFile[ j ] << "("
                << StringUtils::toHexString(fromFile[ j ]) << ")"
                << std::endl;

        if (j < bIn.gcount() - 1)
        {
          TestsListener::errorsLog() << "Following bytes (table:file):";

          while (j < bIn.gcount())
          {
            // current byte was already printed;
            ++j;
            TestsListener::errorsLog()
                    << " (" << StringUtils::toHexString(retrBytes[ substrIdx + j ])
                    << ":" << StringUtils::toHexString(fromFile[ j ]) << ")";
          }

          TestsListener::errorsLog() << std::endl;
        }
      }

      break;
    }

    substrIdx+=static_cast<int> (bIn.gcount());
  }

  return passed;
}
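
The verification loop above reads the file in fixed-size chunks and checks each chunk against the matching slice of the retrieved string with compare(pos, count, buffer, count). A minimal sketch of that chunked comparison, assuming std::string and std::istream:

#include <istream>
#include <string>

// Compare the contents of a stream against 'expected', chunk by chunk.
bool matchesStream(const std::string& expected, std::istream& in)
{
    char buffer[8192];
    std::string::size_type offset = 0;

    while (in.read(buffer, sizeof(buffer)) || in.gcount() > 0)
    {
        const std::size_t count = static_cast<std::size_t>(in.gcount());
        if (offset + count > expected.size() ||
            expected.compare(offset, count, buffer, count) != 0)
            return false;
        offset += count;
    }
    return offset == expected.size();
}
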
Example #12
inline bool operator<(const String &lhs, const String &rhs) {
  return lhs.compare(rhs) < 0;
}
 bool operator()(const BaseObjectComponent* const obj)
 {
     return idStr.compare(obj->getData()[Ids::identifier].toString()) == 0;
 }
int main( int argc, char* argv[] )
{
	CvCapture* capture = 0;
	Mat frame, frameCopy, image;
	const String scaleOpt = "--scale=";
	size_t scaleOptLen = scaleOpt.length();
	const String cascadeOpt = "--cascade=";
	size_t cascadeOptLen = cascadeOpt.length();
	const String nestedCascadeOpt = "--nested-cascade";
	size_t nestedCascadeOptLen = nestedCascadeOpt.length();
	String inputName;
	//	help();

	CascadeClassifier cascade, nestedCascade;
	double scale = 1;

	for( int i = 1; i < argc; i++ )
	{
		cout << "Processing " << i << " " <<  argv[i] << endl;
		if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
		{
			cascadeName.assign( argv[i] + cascadeOptLen );
			cout << "  from which we have cascadeName= " << cascadeName << endl;
		}
		else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
		{
			if( argv[i][nestedCascadeOpt.length()] == '=' )
				nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
			if( !nestedCascade.load( nestedCascadeName ) )
				cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
		}
		else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
		{
			if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
				scale = 1;
			cout << " from which we read scale = " << scale << endl;
		}
		else if( argv[i][0] == '-' )
		{
			cerr << "WARNING: Unknown option %s" << argv[i] << endl;
		}
		else
			inputName.assign( argv[i] );
	}

	if( !cascade.load( cascadeName ) )
	{
		cerr << "ERROR: Could not load classifier cascade" << endl;
		cerr << "Usage: facedetect [--cascade=<cascade_path>]\n"
			"   [--nested-cascade[=nested_cascade_path]]\n"
			"   [--scale[=<image scale>\n"
			"   [filename|camera_index]\n" << endl ;
		return -1;
	}

	capture = cvCaptureFromCAM( 0 );
	int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
	if(!capture) cout << "Capture from CAM " <<  c << " didn't work" << endl;

	cvNamedWindow( "result", 1 );

	if( capture )
	{
		cout << "In capture ..." << endl;
		for(;;)
		{
			IplImage* iplImg = cvQueryFrame( capture );
			frame = iplImg;
			if( frame.empty() )
				break;
			if( iplImg->origin == IPL_ORIGIN_TL )
				frame.copyTo( frameCopy );
			else
				flip( frame, frameCopy, 0 );

			detectAndDraw( frameCopy, cascade, nestedCascade, scale, argv );

			if( waitKey( 10 ) >= 0 )
				goto _cleanup_;
		}

		waitKey(0);

_cleanup_:
		cvReleaseCapture( &capture );
	}

	cvDestroyWindow("result");

	return 0;
}
Example #15
bool Enemy::init(Dictionary *properties) {
    
    if (!GameObject::init(properties))
        return false;
    
    _walkRoutine = routineNone;
    _playerDistanceToFollow = -1;
    _ignoreRoom = false;
    _walkTries = 0;
    
    String *facingProperty = (String*)properties->objectForKey("Facing");
    String *walkProperty = (String*)properties->objectForKey("Walk");
    String *speedProperty = (String*)properties->objectForKey("Speed");
    
    String *distance = (String*)properties->objectForKey("DistanceToFollow");
    if (distance)
        _playerDistanceToFollow = distance->intValue();
    
    String *ignoreRoomProperty = (String*)properties->objectForKey("IgnoreRoom");
    if (ignoreRoomProperty)
        _ignoreRoom = true;
    
    if (facingProperty) {
        if (facingProperty->compare("up") == 0)
            _facing = facingUp;
        else if (facingProperty->compare("left") == 0)
            _facing = facingLeft;
        else if (facingProperty->compare("right") == 0)
            _facing = facingRight;
    }
    
    if (speedProperty) {
        float speed = speedProperty->intValue();
        if (speed > 0) {
            _walkTime = speed / 1000.0f;
        }
    }
    
    const char *frameName = this->getDirectionName();
    
    this->setSpriteByFrameName(String::createWithFormat("enemy%s1.png", frameName)->getCString());
    
    int x = ((String*)properties->objectForKey("x"))->intValue();
    int y = ((String*)properties->objectForKey("y"))->intValue();
    
    this->getNode()->setAnchorPoint(ccp(0, 0));
    this->getNode()->setPosition(ccp(x, y));
    
    if (walkProperty) {
        if (walkProperty->compare("bf") == 0)
            _walkRoutine = routineBackAndForth;
        else if (walkProperty->compare("cw") == 0)
            _walkRoutine = routineClockwise;
        else if (walkProperty->compare("ccw") == 0)
            _walkRoutine = routineCounterClockwise;
        else if (walkProperty->compare("r") == 0)
            _walkRoutine = routineRandom;
        else if (walkProperty->compare("fp") == 0)
            _walkRoutine = routineFollowPlayer;
        else if (walkProperty->compare("fpr") == 0)
            _walkRoutine = routineFollowPlayerAndRandom;
        
        this->walk(false);
    }
    
    return true;
}
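
The chains of compare() calls above map short property strings ("bf", "cw", ...) onto walk routines. The same mapping can also be written as a lookup table; a minimal sketch, with a hypothetical WalkRoutine enum standing in for the game's own type:

#include <map>
#include <string>

// Hypothetical enum mirroring the routine names used above.
enum WalkRoutine { routineNone, routineBackAndForth, routineClockwise,
                   routineCounterClockwise, routineRandom,
                   routineFollowPlayer, routineFollowPlayerAndRandom };

WalkRoutine walkRoutineFromString(const std::string& value)
{
    static const std::map<std::string, WalkRoutine> table = {
        {"bf",  routineBackAndForth},
        {"cw",  routineClockwise},
        {"ccw", routineCounterClockwise},
        {"r",   routineRandom},
        {"fp",  routineFollowPlayer},
        {"fpr", routineFollowPlayerAndRandom},
    };
    auto it = table.find(value);
    return it != table.end() ? it->second : routineNone;
}
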
  double getScore_(String& engine, const PeptideHit& hit)
  {
    if (engine == "OMSSA")
    {
      return (-1) * log10(max(hit.getScore(), smallest_e_value_));
    }
    else if (engine == "MyriMatch")
    {
      //double e_val = exp(-hit.getScore());
      //double score_val = ((-1)* log10(max(e_val,smallest_e_value_)));
      //printf("myri score: %e ; e_val: %e ; score_val: %e\n",hit.getScore(),e_val,score_val);
      //return score_val;
      return hit.getScore();
    }
    else if (engine.compare("XTandem") == 0)
    {
      return (-1) * log10(max((double)hit.getMetaValue("E-Value"), smallest_e_value_));
    }
    else if (engine == "MASCOT")
    {
      // issue #740: unable to fit data with score 0
      if (hit.getScore() == 0.0) 
      {
        return numeric_limits<double>::quiet_NaN();
      }
      // end issue #740
      if (hit.metaValueExists("EValue"))
      {
        return (-1) * log10(max((double)hit.getMetaValue("EValue"), smallest_e_value_));
      }
      if (hit.metaValueExists("expect"))
      {
        return (-1) * log10(max((double)hit.getMetaValue("expect"), smallest_e_value_));
      }
    }
    else if (engine == "SpectraST")
    {
      return 100 * hit.getScore(); // SpectraST f-val
    }
    else if (engine == "SimTandem")
    {
      if (hit.metaValueExists("E-Value"))
      {
        return (-1) * log10(max((double)hit.getMetaValue("E-Value"), smallest_e_value_));
      }
    }
    else if ((engine == "MSGFPlus") || (engine == "MS-GF+"))
    {
      if (hit.metaValueExists("MS:1002053"))  // name: MS-GF:EValue
      {
        return (-1) * log10(max((double)hit.getMetaValue("MS:1002053"), smallest_e_value_));
      }
      else if (hit.metaValueExists("expect"))
      {
        return (-1) * log10(max((double)hit.getMetaValue("expect"), smallest_e_value_));
      }
    }
    else if (engine == "Comet")
    {
      if (hit.metaValueExists("MS:1002257")) // name: Comet:expectation value
      {
        return (-1) * log10(max((double)hit.getMetaValue("MS:1002257"), smallest_e_value_));
      }
      else if (hit.metaValueExists("expect"))
      {
        return (-1) * log10(max((double)hit.getMetaValue("expect"), smallest_e_value_));
      }
    }
    else
    {
      throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "No parameters for chosen search engine", "The chosen search engine is currently not supported");
    }

    // avoid compiler warning (every code path must return a value, even if there is a throw() somewhere)
    return std::numeric_limits<double>::max();
  }
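
Most branches of getScore_ put engine-specific E-values on a common scale with -log10, clamping tiny values to smallest_e_value_ so the logarithm stays finite. A minimal sketch of that transform, with 1e-20 as an assumed stand-in for the clamp:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Convert an E-value to a -log10 score; very small values are clamped so the
// result stays finite (1e-20 is an assumed stand-in for smallest_e_value_).
double eValueToScore(double eValue, double smallestEValue = 1e-20)
{
    return -std::log10(std::max(eValue, smallestEValue));
}

int main()
{
    std::printf("%g\n", eValueToScore(0.001)); // 3
    std::printf("%g\n", eValueToScore(0.0));   // clamped: 20
}
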
Example #17
Point Enemy::getTargetPosition() {
    
    Size tileSize = this->getCave()->getTileSize();
    Point currentPosition = this->getNode()->getPosition();
    Point currentTileCoord = this->getCave()->getCharacterTileCoordForPosition(currentPosition);
    
    if (_walkRoutine == routineFollowPlayer || _walkRoutine == routineFollowPlayerAndRandom) {
        
        Point playerPosition = this->getCave()->getPlayer()->getNode()->getPosition();
        Point playerTileCoord = this->getCave()->getCharacterTileCoordForPosition(playerPosition);
        
        bool playerInRoom = this->isPlayerInRoom();
        int xDifference = currentTileCoord.x - playerTileCoord.x;
        int yDifference = currentTileCoord.y - playerTileCoord.y;
        int xAbsoluteDifference = abs(xDifference);
        int yAbsoluteDifference = abs(yDifference);
        
        if ((playerInRoom || _ignoreRoom) && (_playerDistanceToFollow <= 0 || (xAbsoluteDifference <= _playerDistanceToFollow && yAbsoluteDifference <= _playerDistanceToFollow))) {
            
            // Follows the player
            
            bool moveHorizontal = xAbsoluteDifference == yAbsoluteDifference ? rand() % 2 : xAbsoluteDifference > yAbsoluteDifference;
            
            if (moveHorizontal) {
                _facing = xDifference > 0 ? facingLeft : facingRight;
            } else {
                _facing = yDifference > 0 ? facingUp : facingDown;
            }
            
        } else {
            
            if (_walkRoutine == routineFollowPlayerAndRandom)
                _facing = (CharacterFacing)(rand() % 4); // Walk randomically
            else
                return currentPosition; // Stop
            
        }
        
    } else if (_walkRoutine == routineRandom) {
        
        _facing = (CharacterFacing)(rand() % 4);
        
    } else {
        
        String *goProperty = this->getCave()->getTileProperty(this->getCave()->getMetaLayer(), currentTileCoord, "Go");
        
        if (goProperty) {
            if (goProperty->compare("up") == 0)
                _facing = facingUp;
            else if (goProperty->compare("down") == 0)
                _facing = facingDown;
            else if (goProperty->compare("left") == 0)
                _facing = facingLeft;
            else if (goProperty->compare("right") == 0)
                _facing = facingRight;
            else if (goProperty->compare("upleft") == 0)
                _facing = _facing == facingUp || _facing == facingRight ? facingUp : facingLeft;
            else if (goProperty->compare("upright") == 0)
                _facing = _facing == facingUp || _facing == facingLeft ? facingUp : facingRight;
            else if (goProperty->compare("downleft") == 0)
                _facing = _facing == facingDown || _facing == facingRight ? facingDown : facingLeft;
            else if (goProperty->compare("downright") == 0)
                _facing = _facing == facingDown || _facing == facingLeft ? facingDown : facingRight;
        }
    }
    
    Point targetPosition = currentPosition;
    
    switch (_facing) {
        case facingUp: targetPosition.y += tileSize.height; break;
        case facingDown: targetPosition.y -= tileSize.height; break;
        case facingRight: targetPosition.x += tileSize.width; break;
        case facingLeft: targetPosition.x -= tileSize.width; break;
        default: break;
    }
    
    return targetPosition;
}
Example #18
bool StringUtils::startsWith(const String& src, const String& substr)
{
    return src.compare(0, substr.length(), substr) == 0;
}
void BatchPage::_updateShaders()
{
	if (!shadersSupported)
		return;

	uint32 i = 0;
	BatchedGeometry::SubBatchIterator it = batch->getSubBatchIterator();
	while (it.hasMoreElements()){
		BatchedGeometry::SubBatch *subBatch = it.getNext();
		MaterialPtr mat = unfadedMaterials[i++];

		//Check if lighting should be enabled
		bool lightingEnabled = false;
		for (unsigned short t = 0; t < mat->getNumTechniques(); ++t){
			Technique *tech = mat->getTechnique(t);
			for (unsigned short p = 0; p < tech->getNumPasses(); ++p){
				Pass *pass = tech->getPass(p);
				if (pass->getLightingEnabled()) {
					lightingEnabled = true;
					break;
				}
			}
			if (lightingEnabled)
				break;
		}

		//Compile the CG shader script based on various material / fade options
		StringUtil::StrStreamType tmpName;
		tmpName << "BatchPage_";
		if (fadeEnabled)
			tmpName << "fade_";
		if (lightingEnabled)
			tmpName << "lit_";
		if (subBatch->vertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
			tmpName << "clr_";

		for (unsigned short i = 0; i < subBatch->vertexData->vertexDeclaration->getElementCount(); ++i)
		{
			const VertexElement *el = subBatch->vertexData->vertexDeclaration->getElement(i);
			if (el->getSemantic() == VES_TEXTURE_COORDINATES) {
				String uvType = "";
				switch (el->getType()) {
						case VET_FLOAT1: uvType = "1"; break;
						case VET_FLOAT2: uvType = "2"; break;
						case VET_FLOAT3: uvType = "3"; break;
						case VET_FLOAT4: uvType = "4"; break;
				}
				tmpName << uvType << '_';
			}
		}

		tmpName << "vp";

		const String vertexProgName = tmpName.str();

		String shaderLanguage;
		if (Root::getSingleton().getRenderSystem()->getName() == "Direct3D9 Rendering Subsystem")
			shaderLanguage = "hlsl";
		else if(Root::getSingleton().getRenderSystem()->getName() == "OpenGL Rendering Subsystem")
			shaderLanguage = "glsl";
		else
			shaderLanguage = "cg";

		//If the shader hasn't been created yet, create it
		if (HighLevelGpuProgramManager::getSingleton().getByName(vertexProgName).isNull())
		{
			Pass *pass = mat->getTechnique(0)->getPass(0);
			String vertexProgSource;

			if(!shaderLanguage.compare("hlsl") || !shaderLanguage.compare("cg"))
			{

				vertexProgSource =
					"void main( \n"
					"	float4 iPosition : POSITION, \n"
					"	float3 normal    : NORMAL,	\n"
					"	out float4 oPosition : POSITION, \n";

				if (subBatch->vertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL) vertexProgSource +=
					"	float4 iColor    : COLOR, \n";

				unsigned texNum = 0;
				for (unsigned short i = 0; i < subBatch->vertexData->vertexDeclaration->getElementCount(); ++i) {
					const VertexElement *el = subBatch->vertexData->vertexDeclaration->getElement(i);
					if (el->getSemantic() == VES_TEXTURE_COORDINATES) {
						String uvType = "";
						switch (el->getType()) {
							case VET_FLOAT1: uvType = "float"; break;
							case VET_FLOAT2: uvType = "float2"; break;
							case VET_FLOAT3: uvType = "float3"; break;
							case VET_FLOAT4: uvType = "float4"; break;
						}

						vertexProgSource +=
						"	" + uvType + " iUV" + StringConverter::toString(texNum) + "			: TEXCOORD" + StringConverter::toString(texNum) + ",	\n"
						"	out " + uvType + " oUV" + StringConverter::toString(texNum) + "		: TEXCOORD" + StringConverter::toString(texNum) + ",	\n";

						++texNum;
					}
				}

				vertexProgSource +=
					"	out float oFog : FOG,	\n"
					"	out float4 oColor : COLOR, \n";

				if (lightingEnabled) vertexProgSource +=
					"	uniform float4 objSpaceLight,	\n"
					"	uniform float4 lightDiffuse,	\n"
					"	uniform float4 lightAmbient,	\n";

				if (fadeEnabled) vertexProgSource +=
					"	uniform float3 camPos, \n";

				vertexProgSource +=
					"	uniform float4x4 worldViewProj,	\n"
					"	uniform float fadeGap, \n"
					"   uniform float invisibleDist )\n"
					"{	\n";

				if (lightingEnabled) {
					//Perform lighting calculations (no specular)
					vertexProgSource +=
					"	float3 light = normalize(objSpaceLight.xyz - (iPosition.xyz * objSpaceLight.w)); \n"
					"	float diffuseFactor = max(dot(normal, light), 0); \n";
					if (subBatch->vertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
						vertexProgSource += "oColor = (lightAmbient + diffuseFactor * lightDiffuse) * iColor; \n";
					else
						vertexProgSource += "oColor = (lightAmbient + diffuseFactor * lightDiffuse); \n";
				} else {
					if (subBatch->vertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
						vertexProgSource += "oColor = iColor; \n";
					else
						vertexProgSource += "oColor = float4(1, 1, 1, 1); \n";
				}

				if (fadeEnabled) vertexProgSource +=
					//Fade out in the distance
					"	float dist = distance(camPos.xz, iPosition.xz);	\n"
					"	oColor.a *= (invisibleDist - dist) / fadeGap;   \n";

				texNum = 0;
				for (unsigned short i = 0; i < subBatch->vertexData->vertexDeclaration->getElementCount(); ++i) {
					const VertexElement *el = subBatch->vertexData->vertexDeclaration->getElement(i);
					if (el->getSemantic() == VES_TEXTURE_COORDINATES) {
						vertexProgSource +=
						"	oUV" + StringConverter::toString(texNum) + " = iUV" + StringConverter::toString(texNum) + ";	\n";
						++texNum;
					}
				}

				vertexProgSource +=
					"	oPosition = mul(worldViewProj, iPosition);  \n"
					"	oFog = oPosition.z; \n"
					"}";
			}

			if(!shaderLanguage.compare("glsl"))
			{
				vertexProgSource =
					"uniform float fadeGap;        \n"
					"uniform float invisibleDist;   \n";

				if (lightingEnabled) vertexProgSource +=
					"uniform vec4 objSpaceLight;   \n"
					"uniform vec4 lightDiffuse;	   \n"
					"uniform vec4 lightAmbient;	   \n";

				if (fadeEnabled) vertexProgSource +=
					"uniform vec3 camPos;          \n";

				vertexProgSource +=
					"void main() \n"
					"{ \n";

				if (lightingEnabled)
				{
					//Perform lighting calculations (no specular)
					vertexProgSource +=
					"   vec3 light = normalize(objSpaceLight.xyz - (gl_Vertex.xyz * objSpaceLight.w)); \n"
					"   float diffuseFactor = max(dot(gl_Normal, light), 0.0); \n";
					if (subBatch->vertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
					{
						vertexProgSource += "   gl_FrontColor = (lightAmbient + diffuseFactor * lightDiffuse) * gl_Color; \n";
					}
					else
					{
						vertexProgSource += "   gl_FrontColor = (lightAmbient + diffuseFactor * lightDiffuse); \n";
					}
				}
				else
				{
					if (subBatch->vertexData->vertexDeclaration->findElementBySemantic(VES_DIFFUSE) != NULL)
					{
						vertexProgSource += "   gl_FrontColor = gl_Color; \n";
					}
					else
					{
						vertexProgSource += "   gl_FrontColor = vec4(1.0, 1.0, 1.0, 1.0); \n";
					}
				}

				if (fadeEnabled)
				{
					vertexProgSource +=
					//Fade out in the distance
					"   float dist = distance(camPos.xz, gl_Vertex.xz);	\n"
					"   gl_FrontColor.a *= (invisibleDist - dist) / fadeGap;   \n";
				}

				unsigned texNum = 0;
				for (unsigned short i = 0; i < subBatch->vertexData->vertexDeclaration->getElementCount(); ++i)
				{
					const VertexElement *el = subBatch->vertexData->vertexDeclaration->getElement(i);
					if (el->getSemantic() == VES_TEXTURE_COORDINATES)
					{
						vertexProgSource +=
						"   gl_TexCoord[" + StringConverter::toString(texNum) + "] = gl_MultiTexCoord" + StringConverter::toString(texNum) + ";	\n";
						++texNum;
					}
				}

				vertexProgSource +=
					"   gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;  \n"
					"   gl_FogFragCoord = gl_Position.z; \n"
					"}";
			}


			HighLevelGpuProgramPtr vertexShader = HighLevelGpuProgramManager::getSingleton().createProgram(
				vertexProgName,
				ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
				shaderLanguage, GPT_VERTEX_PROGRAM);

			vertexShader->setSource(vertexProgSource);

			if (shaderLanguage == "hlsl")
			{
				vertexShader->setParameter("target", "vs_1_1");
				vertexShader->setParameter("entry_point", "main");
			}
			else if(shaderLanguage == "cg")
			{
				vertexShader->setParameter("profiles", "vs_1_1 arbvp1");
				vertexShader->setParameter("entry_point", "main");
			}
			// GLSL can only have one entry point "main".

			vertexShader->load();
		}

		//Now that the shader is ready to be applied, apply it
		StringUtil::StrStreamType materialSignature;
		materialSignature << "BatchMat|";
		materialSignature << mat->getName() << "|";
		if (fadeEnabled){
			materialSignature << visibleDist << "|";
			materialSignature << invisibleDist << "|";
		}

		//Search for the desired material
		MaterialPtr generatedMaterial = MaterialManager::getSingleton().getByName(materialSignature.str());
		if (generatedMaterial.isNull()){
			//Clone the material
			generatedMaterial = mat->clone(materialSignature.str());

			//And apply the fade shader
			for (unsigned short t = 0; t < generatedMaterial->getNumTechniques(); ++t){
				Technique *tech = generatedMaterial->getTechnique(t);
				for (unsigned short p = 0; p < tech->getNumPasses(); ++p){
					Pass *pass = tech->getPass(p);

					//Setup vertex program
					if (pass->getVertexProgramName() == "")
						pass->setVertexProgram(vertexProgName);

					try{
						GpuProgramParametersSharedPtr params = pass->getVertexProgramParameters();

						if (lightingEnabled) {
							params->setNamedAutoConstant("objSpaceLight", GpuProgramParameters::ACT_LIGHT_POSITION_OBJECT_SPACE);
							params->setNamedAutoConstant("lightDiffuse", GpuProgramParameters::ACT_DERIVED_LIGHT_DIFFUSE_COLOUR);
							params->setNamedAutoConstant("lightAmbient", GpuProgramParameters::ACT_DERIVED_AMBIENT_LIGHT_COLOUR);
							//params->setNamedAutoConstant("matAmbient", GpuProgramParameters::ACT_SURFACE_AMBIENT_COLOUR);
						}

						if(shaderLanguage.compare("glsl"))
						{
							//glsl can use the built in gl_ModelViewProjectionMatrix
							params->setNamedAutoConstant("worldViewProj", GpuProgramParameters::ACT_WORLDVIEWPROJ_MATRIX);
						}

						if (fadeEnabled)
						{
							params->setNamedAutoConstant("camPos", GpuProgramParameters::ACT_CAMERA_POSITION_OBJECT_SPACE);

							//Set fade ranges
							params->setNamedAutoConstant("invisibleDist", GpuProgramParameters::ACT_CUSTOM);
							params->setNamedConstant("invisibleDist", invisibleDist);

							params->setNamedAutoConstant("fadeGap", GpuProgramParameters::ACT_CUSTOM);
							params->setNamedConstant("fadeGap", invisibleDist - visibleDist);

							if (pass->getAlphaRejectFunction() == CMPF_ALWAYS_PASS)
								pass->setSceneBlending(SBT_TRANSPARENT_ALPHA);
						}
					}
					catch (...) {
						OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, "Error configuring batched geometry transitions. If you're using materials with custom vertex shaders, they will need to implement fade transitions to be compatible with BatchPage.", "BatchPage::_updateShaders()");
					}
				}
			}

		}

		//Apply the material
		subBatch->setMaterial(generatedMaterial);
	}

}
Example #20
bool StringUtils::endsWith(const String& src, const String& substr)
{
    // Guard against substr being longer than src, which would otherwise make the start offset underflow.
    return src.length() >= substr.length()
        && src.compare(src.length()-substr.length(), substr.length(), substr) == 0;
}
    //---------------------------------------------------------------------
    void GLSLLinkProgramManager::extractUniforms(GLhandleARB programObject, 
        const GpuConstantDefinitionMap* vertexConstantDefs, 
        const GpuConstantDefinitionMap* geometryConstantDefs,
        const GpuConstantDefinitionMap* fragmentConstantDefs,
        GLUniformReferenceList& list)
    {
        // scan through the active uniforms and add them to the reference list
        GLint uniformCount = 0;

        #define BUFFERSIZE 200
        char   uniformName[BUFFERSIZE] = "";
        //GLint location;
        GLUniformReference newGLUniformReference;

        // get the number of active uniforms
        glGetObjectParameterivARB(programObject, GL_OBJECT_ACTIVE_UNIFORMS_ARB,
            &uniformCount);

        // Loop over each of the active uniforms, and add them to the reference container
        // only do this for user defined uniforms, ignore built in gl state uniforms
        for (int index = 0; index < uniformCount; index++)
        {
            GLint arraySize = 0;
            GLenum glType;
            glGetActiveUniformARB(programObject, index, BUFFERSIZE, NULL, 
                &arraySize, &glType, uniformName);
            // don't add built in uniforms
            newGLUniformReference.mLocation = glGetUniformLocationARB(programObject, uniformName);
            if (newGLUniformReference.mLocation >= 0)
            {
                // user defined uniform found, add it to the reference list
                String paramName = String( uniformName );

                // Current ATI drivers (Catalyst 7.2 and earlier) and older NVidia drivers report every array element as a
                // separate uniform, but we only want the root array name and location.
                // Note also that ATI Catalyst 6.8 to 7.2 has a bug in glUniform that does not allow updating a uniform array
                // past the first element, i.e. you cannot start updating an array at element 1; it must always be element 0.

                // If the uniform name contains a "[" then it is an array element uniform.
                String::size_type arrayStart = paramName.find("[");
                if (arrayStart != String::npos)
                {
                    // if not the first array element then skip it and continue to the next uniform
                    if (paramName.compare(arrayStart, paramName.size() - 1, "[0]") != 0) continue;
                    paramName = paramName.substr(0, arrayStart);
                }

                // find out which params object this comes from
                bool foundSource = completeParamSource(paramName,
                        vertexConstantDefs, geometryConstantDefs, fragmentConstantDefs, newGLUniformReference);

                // only add this parameter if we found the source
                if (foundSource)
                {
                    assert(size_t (arraySize) == newGLUniformReference.mConstantDef->arraySize
                            && "GL doesn't agree with our array size!");
                    list.push_back(newGLUniformReference);
                }

                // Don't bother adding individual array params, they will be
                // picked up in the 'parent' parameter and copied all at once
                // anyway, individual indexes are only needed for lookup from
                // user params
            } // end if
        } // end for

    }
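The array-element filtering inside the loop boils down to the following; a minimal std::string sketch with purely illustrative uniform names:

#include <string>

// Returns false for non-root array elements ("lights[1]", "lights[2]", ...), which the loop
// above skips; strips the "[0]" suffix from the root element so "lights[0]" becomes "lights".
static bool normaliseUniformName(std::string& paramName)
{
    const std::string::size_type arrayStart = paramName.find("[");
    if (arrayStart == std::string::npos)
        return true;                                                 // not an array element
    if (paramName.compare(arrayStart, paramName.size() - arrayStart, "[0]") != 0)
        return false;                                                // e.g. "lights[3]" -> skip
    paramName = paramName.substr(0, arrayStart);                     // "lights[0]" -> "lights"
    return true;
}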
Example #22
int StringUtils::compareIC(const String& src, const String& str)
{
    String srcLow = StringUtils::toLower(src);
    String strLow = StringUtils::toLower(str);
    return srcLow.compare(strLow);
}
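Assuming StringUtils::toLower does plain ASCII lowercasing, the same case-insensitive comparison can be sketched with the standard library:

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

static std::string toLowerCopy(std::string s)    // stand-in for StringUtils::toLower
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
    return s;
}

static int compareIC(const std::string& src, const std::string& str)
{
    return toLowerCopy(src).compare(toLowerCopy(str));
}

int main()
{
    std::cout << (compareIC("Shader", "shader") == 0) << "\n";   // prints 1
    std::cout << (compareIC("alpha", "Beta") < 0) << "\n";       // prints 1
    return 0;
}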
Example #23
 static int compareElements (String& first, String& second)      { return first.compare (second); }
Example #24
bool WddxPacket::recursiveAddVar(const String& varName,
                                 const Variant& varVariant,
                                 bool hasVarTag) {

  bool isArray = varVariant.isArray();
  bool isObject = varVariant.isObject();

  if (isArray || isObject) {
    if (hasVarTag) {
      m_packetString.append("<var name='");
      m_packetString.append(varName.data());
      m_packetString.append("'>");
    }

    Array varAsArray;
    Object varAsObject = varVariant.toObject();
    if (isArray) varAsArray = varVariant.toArray();
    if (isObject) varAsArray = varAsObject.toArray();

    int length = varAsArray.length();
    if (length > 0) {
      ArrayIter it = ArrayIter(varAsArray);
      if (it.first().isString()) isObject = true;
      if (isObject) {
        m_packetString.append("<struct>");
        if (!isArray) {
          m_packetString.append("<var name='php_class_name'><string>");
          m_packetString.append(varAsObject->getClassName());
          m_packetString.append("</string></var>");
        }
      } else {
        m_packetString.append("<array length='");
        m_packetString.append(std::to_string(length));
        m_packetString.append("'>");
      }
      for (ArrayIter it(varAsArray); it; ++it) {
        Variant key = it.first();
        Variant value = it.second();
        recursiveAddVar(key.toString(), value, isObject);
      }
      if (isObject) {
        m_packetString.append("</struct>");
      }
      else {
        m_packetString.append("</array>");
      }
    }
    else {
      //empty object
      if (isObject) {
        m_packetString.append("<struct>");
        if (!isArray) {
          m_packetString.append("<var name='php_class_name'><string>");
          m_packetString.append(varAsObject->getClassName());
          m_packetString.append("</string></var>");
        }
        m_packetString.append("</struct>");
      }
    }
    if (hasVarTag) {
      m_packetString.append("</var>");
    }
    return true;
  }

  String varType = getDataTypeString(varVariant.getType());
  if (!getWddxEncoded(varType, "", varName, false).empty()) {
    String varValue;
    if (varType.compare("boolean") == 0) {
      varValue = varVariant.toBoolean() ? "true" : "false";
    } else {
      varValue = StringUtil::HtmlEncode(varVariant.toString(),
                                        StringUtil::QuoteStyle::Double,
                                        "UTF-8", false, false).toCppString();
    }
    m_packetString.append(
      getWddxEncoded(varType, varValue, varName, hasVarTag));
    return true;
  }

  return false;
}
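For orientation, the two markup shapes the branches above emit look roughly like this; the wrapper tags come from the appends in recursiveAddVar, while the scalar encoding (done by getWddxEncoded, not shown) is assumed to follow standard WDDX:

#include <iostream>
#include <string>

int main()
{
    // Integer-keyed input (isObject == false): children are appended without <var> wrappers.
    const std::string arrayShape =
        "<var name='items'><array length='2'>"
        "<string>a</string><string>b</string>"
        "</array></var>";

    // String-keyed input (isObject == true): children get <var name='...'> wrappers.
    const std::string structShape =
        "<var name='conf'><struct>"
        "<var name='host'><string>localhost</string></var>"
        "</struct></var>";

    std::cout << arrayShape << "\n" << structShape << "\n";
    return 0;
}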
Example #25
bool	operator<(const String& str1, const String& str2)
{
	return (str1.compare(str2) < 0);
}
int main( int argc, const char** argv )
{
    CvCapture* capture = 0;
    Mat frame, frameCopy, image;
    const String scaleOpt = "--scale=";
    size_t scaleOptLen = scaleOpt.length();
    const String cascadeOpt = "--cascade=";
    size_t cascadeOptLen = cascadeOpt.length();
    const String nestedCascadeOpt = "--nested-cascade";
    size_t nestedCascadeOptLen = nestedCascadeOpt.length();
    String inputName;

    help();

    CascadeClassifier cascade, nestedCascade;
    double scale = 1;

    for( int i = 1; i < argc; i++ )
    {
        cout << "Processing " << i << " " <<  argv[i] << endl;
        if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
        {
            cascadeName.assign( argv[i] + cascadeOptLen );
            cout << "  from which we have cascadeName= " << cascadeName << endl;
        }
        else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
        {
            if( argv[i][nestedCascadeOpt.length()] == '=' )
                nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
            if( !nestedCascade.load( nestedCascadeName ) )
                cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
        }
        else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
        {
            if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
                scale = 1;
            cout << " from which we read scale = " << scale << endl;
        }
        else if( argv[i][0] == '-' )
        {
            cerr << "WARNING: Unknown option %s" << argv[i] << endl;
        }
        else
            inputName.assign( argv[i] );
    }

    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        cerr << "Usage: facedetect [--cascade=<cascade_path>]\n"
             "   [--nested-cascade[=nested_cascade_path]]\n"
             "   [--scale[=<image scale>\n"
             "   [filename|camera_index]\n" << endl ;
        return -1;
    }

    if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
    {
        capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
        int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
        if(!capture) cout << "Capture from CAM " <<  c << " didn't work" << endl;
    }
    else if( inputName.size() )
    {
        image = imread( inputName, 1 );
        if( image.empty() )
        {
            capture = cvCaptureFromAVI( inputName.c_str() );
            if(!capture) cout << "Capture from AVI didn't work" << endl;
        }
    }
    else
    {
        image = imread( "lena.jpg", 1 );
        if(image.empty()) cout << "Couldn't read lena.jpg" << endl;
    }

    cvNamedWindow( "result", 1 );

    if( capture )
    {
        cout << "In capture ..." << endl;
        for(;;)
        {
            IplImage* iplImg = cvQueryFrame( capture );
            frame = iplImg;
            if( frame.empty() )
                break;
            if( iplImg->origin == IPL_ORIGIN_TL )
                frame.copyTo( frameCopy );
            else
                flip( frame, frameCopy, 0 );

            detectAndDraw( frameCopy, cascade, nestedCascade, scale );

            if( waitKey( 10 ) >= 0 )
                goto _cleanup_;
        }

        waitKey(0);

_cleanup_:
        cvReleaseCapture( &capture );
    }
    else
    {
        cout << "In image read" << endl;
        if( !image.empty() )
        {
            detectAndDraw( image, cascade, nestedCascade, scale );
            waitKey(0);
        }
        else if( !inputName.empty() )
        {
            /* assume it is a text file containing the
            list of the image filenames to be processed - one per line */
            FILE* f = fopen( inputName.c_str(), "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    int len = (int)strlen(buf), c;
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    cout << "file " << buf << endl;
                    image = imread( buf, 1 );
                    if( !image.empty() )
                    {
                        detectAndDraw( image, cascade, nestedCascade, scale );
                        c = waitKey(0);
                        if( c == 27 || c == 'q' || c == 'Q' )
                            break;
                    }
                    else
                    {
                        cerr << "Aw snap, couldn't read image " << buf << endl;
                    }
                }
                fclose(f);
            }
        }
    }

    cvDestroyWindow("result");

    return 0;
}
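The option parsing above hinges on prefix matching with compare(pos, len, s, len); a minimal standalone sketch of that idiom (the option names are just examples):

#include <iostream>
#include <string>

// True when 'arg' starts with 'prefix'; mirrors
// scaleOpt.compare(0, scaleOptLen, argv[i], scaleOptLen) == 0 in the sample above.
// Same caveat as the sample: 'arg' must not be shorter than the prefix unless it
// already differs from it before the terminator.
static bool hasPrefix(const std::string& prefix, const char* arg)
{
    return prefix.compare(0, prefix.length(), arg, prefix.length()) == 0;
}

int main()
{
    std::cout << hasPrefix("--scale=", "--scale=2.0") << "\n";        // 1
    std::cout << hasPrefix("--scale=", "--cascade=face.xml") << "\n"; // 0
    return 0;
}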
Example #27
bool	operator>=(const std::string& std_str, const String& str)
{
	return (str.compare(std_str) <= 0);
}
Example #28
 static int compareElements (String const s, Device const* const rhs)
 {
   return s.compare (rhs->getName ());
 }
Example #29
bool operator<(const String& str, const char* c_str)
{
	return (str.compare(c_str) < 0);
}
Example #30
bool GFXD3D9Shader::_compileShader( const Torque::Path &filePath, 
                                    const String& target,                                  
                                    const D3DXMACRO *defines, 
                                    GenericConstBufferLayout* bufferLayoutF, 
                                    GenericConstBufferLayout* bufferLayoutI,
                                    Vector<GFXShaderConstDesc> &samplerDescriptions )
{
   PROFILE_SCOPE( GFXD3D9Shader_CompileShader );

   HRESULT res = D3DERR_INVALIDCALL;
   LPD3DXBUFFER code = NULL;
   LPD3DXBUFFER errorBuff = NULL;

#ifdef TORQUE_DEBUG
   U32 flags = D3DXSHADER_DEBUG;
#else
   U32 flags = 0;
#endif

#ifdef TORQUE_OS_XENON
   flags |= D3DXSHADER_PREFER_FLOW_CONTROL;
#endif

#ifdef D3DXSHADER_USE_LEGACY_D3DX9_31_DLL
   if( D3DX_SDK_VERSION >= 32 )
   {
      // will need to use old compiler for 1_1 shaders - check for pixel
      // or vertex shader with appropriate version.
      if ((target.compare("vs1", 3) == 0) || (target.compare("vs_1", 4) == 0))      
         flags |= D3DXSHADER_USE_LEGACY_D3DX9_31_DLL;

      if ((target.compare("ps1", 3) == 0) || (target.compare("ps_1", 4) == 0))
         flags |= D3DXSHADER_USE_LEGACY_D3DX9_31_DLL;
   }
#endif

#if !defined(TORQUE_OS_XENON) && (D3DX_SDK_VERSION <= 40)
#error This version of the DirectX SDK is too old. Please install a newer version of the DirectX SDK: http://msdn.microsoft.com/en-us/directx/default.aspx
#endif

   ID3DXConstantTable* table = NULL;

   static String sHLSLStr( "hlsl" );
   static String sOBJStr( "obj" );

   // Is it an HLSL shader?
   if ( filePath.getExtension().equal(sHLSLStr, String::NoCase) )   
   {
      FrameAllocatorMarker fam;
      char *buffer = NULL;

      // Set this so that the D3DXInclude::Open will have this 
      // information for relative paths.
      smD3DXInclude->setPath( filePath.getRootAndPath() );

      FileStream s;
      if ( !s.open( filePath, Torque::FS::File::Read ) )
      {
         AssertISV(false, avar("GFXD3D9Shader::initShader - failed to open shader '%s'.", filePath.getFullPath().c_str()));

         if ( smLogErrors )
            Con::errorf( "GFXD3D9Shader::_compileShader - Failed to open shader file '%s'.", 
               filePath.getFullPath().c_str() );

         return false;
      }

      // Convert the path which might have virtualized
      // mount paths to a real file system path.
      Torque::Path realPath;
      if ( !FS::GetFSPath( filePath, realPath ) )
         realPath = filePath;

      // Add a #line pragma so that error and warning messages
      // returned by the HLSL compiler report the right file.
      String linePragma = String::ToString( "#line 1 \"%s\"\r\n", realPath.getFullPath().c_str() );
      U32 linePragmaLen = linePragma.length();

      U32 bufSize = s.getStreamSize();
      buffer = (char *)fam.alloc( bufSize + linePragmaLen + 1 );
      dStrncpy( buffer, linePragma.c_str(), linePragmaLen );
      s.read( bufSize, buffer + linePragmaLen );
      buffer[bufSize+linePragmaLen] = 0;

      res = GFXD3DX.D3DXCompileShader( buffer, bufSize + linePragmaLen, defines, smD3DXInclude, "main", 
         target, flags, &code, &errorBuff, &table );
   }

   // Is it a precompiled obj shader?
   else if ( filePath.getExtension().equal( sOBJStr, String::NoCase ) )
   {     
      FileStream  s;
      if(!s.open(filePath, Torque::FS::File::Read))
      {
         AssertISV(false, avar("GFXD3D9Shader::initShader - failed to open shader '%s'.", filePath.getFullPath().c_str()));

         if ( smLogErrors )
            Con::errorf( "GFXD3D9Shader::_compileShader - Failed to open shader file '%s'.", 
               filePath.getFullPath().c_str() );

         return false;
      }

      res = GFXD3DX.D3DXCreateBuffer(s.getStreamSize(), &code);
      AssertISV(res == D3D_OK, "Unable to create buffer!");
      s.read(s.getStreamSize(), code->GetBufferPointer());
      
      if (res == D3D_OK)
      {
         DWORD* data = (DWORD*) code->GetBufferPointer();
         res = GFXD3DX.D3DXGetShaderConstantTable(data, &table);
      }
   }
   else
   {
      if ( smLogErrors )
         Con::errorf( "GFXD3D9Shader::_compileShader - Unsupported shader file type '%s'.", 
            filePath.getFullPath().c_str() );

      return false;
   }

   if ( res != D3D_OK && smLogErrors )
      Con::errorf( "GFXD3D9Shader::_compileShader - Error compiling shader: %s: %s (%x)", 
         DXGetErrorStringA(res), DXGetErrorDescriptionA(res), res );

   if ( errorBuff )
   {
      // remove \n at end of buffer
      U8 *buffPtr = (U8*) errorBuff->GetBufferPointer();
      U32 len = dStrlen( (const char*) buffPtr );
      buffPtr[len-1] = '\0';

      if( res != D3D_OK )
      {
         if ( smLogErrors )
            Con::errorf( "   %s", (const char*) errorBuff->GetBufferPointer() );
      }
      else
      {
         if ( smLogWarnings )
            Con::warnf( "%s", (const char*) errorBuff->GetBufferPointer() );
      }
   }
   else if ( code == NULL && smLogErrors )
      Con::errorf( "GFXD3D9Shader::_compileShader - no compiled code produced; possibly missing file '%s'.", 
         filePath.getFullPath().c_str() );

   // Create the proper shader if we have code
   if( code != NULL )
   {
      #ifndef TORQUE_SHIPPING

         LPD3DXBUFFER disassem = NULL;
         D3DXDisassembleShader( (DWORD*)code->GetBufferPointer(), false, NULL, &disassem );
         mDissasembly = (const char*)disassem->GetBufferPointer();         
         SAFE_RELEASE( disassem );

         if ( gDisassembleAllShaders )
         {
            String filename = filePath.getFullPath();
            filename.replace( ".hlsl", "_dis.txt" );

            FileStream *fstream = FileStream::createAndOpen( filename, Torque::FS::File::Write );
            if ( fstream )
            {            
               fstream->write( mDissasembly );
               fstream->close();
               delete fstream;   
            }
         }

      #endif

      if (target.compare("ps_", 3) == 0)      
         res = mD3D9Device->CreatePixelShader( (DWORD*)code->GetBufferPointer(), &mPixShader );
      else
         res = mD3D9Device->CreateVertexShader( (DWORD*)code->GetBufferPointer(), &mVertShader );

      if (res == S_OK)
         _getShaderConstants(table, bufferLayoutF, bufferLayoutI, samplerDescriptions);

#ifdef TORQUE_ENABLE_CSF_GENERATION

      // Ok, we've got a valid shader and constants, let's write them all out.
      if ( !_saveCompiledOutput(filePath, code, bufferLayoutF, bufferLayoutI) && smLogErrors )
         Con::errorf( "GFXD3D9Shader::_compileShader - Unable to save shader compile output for: %s", 
            filePath.getFullPath().c_str() );

#endif

      SAFE_RELEASE(table);

      if ( res != S_OK && smLogErrors )
         Con::errorf( "GFXD3D9Shader::_compileShader - Unable to create shader for '%s'.", 
            filePath.getFullPath().c_str() );
   }

   bool result = code != NULL && res == S_OK;

   SAFE_RELEASE( code );
   SAFE_RELEASE( errorBuff );

   return result;
}
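The pixel-vs-vertex dispatch near the end is a three-character prefix test (target.compare("ps_", 3) == 0 on Torque's String); with std::string the equivalent check would be:

#include <string>

// Illustrative equivalent of the target prefix test above: "ps_2_0", "ps_3_0", ... select
// CreatePixelShader, anything else falls through to CreateVertexShader.
static bool isPixelShaderTarget(const std::string& target)
{
    return target.compare(0, 3, "ps_") == 0;
}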