// Runs a child process, optionally streaming its standard output to a
// callback, and waits for it to finish.
//
// fileName             - executable to run
// lpszArguments        - command-line arguments (may be null)
// pCallback            - if non-null, stdout is redirected and delivered in
//                        chunks via OnProcessOutput(); returning false there
//                        cancels further reading
// pExitCode            - if non-null, receives the child's exit code and the
//                        function returns true regardless of that code;
//                        if null, returns true only when the child exited 0
// lpszWorkingDirectory - working directory for the child (may be null)
bool Process::Run (/*[in]*/ const PathName & fileName,
                   /*[in]*/ const char * lpszArguments,
                   /*[in]*/ IRunProcessCallback * pCallback,
                   /*[out]*/ int * pExitCode,
                   /*[in]*/ const char * lpszWorkingDirectory)
{
  MIKTEX_ASSERT_STRING_OR_NIL (lpszArguments);
  MIKTEX_ASSERT_STRING_OR_NIL (lpszWorkingDirectory);

  // Build the start-up description; stdout is only redirected when a
  // callback wants to consume it.
  ProcessStartInfo startinfo;
  startinfo.FileName = fileName.Get();
  if (lpszArguments != 0)
    {
      startinfo.Arguments = lpszArguments;
    }
  startinfo.StandardInput = 0;
  startinfo.RedirectStandardInput = false;
  startinfo.RedirectStandardOutput = (pCallback != 0);
  startinfo.RedirectStandardError = false;
  if (lpszWorkingDirectory != 0)
    {
      startinfo.WorkingDirectory = lpszWorkingDirectory;
    }

  auto_ptr<Process> pProcess (Process::Start(startinfo));

  if (pCallback != 0)
    {
      SessionImpl::GetSession()->trace_process->WriteLine
	("core", T_("start reading the pipe"));
      const size_t CHUNK_SIZE = 64;
      char buf[ CHUNK_SIZE ];
      bool cancelled = false;
      FileStream stdoutStream (pProcess->get_StandardOutput());
      size_t total = 0;
      // Pump the pipe until EOF or until the callback cancels.
      while (! cancelled && feof(stdoutStream.Get()) == 0)
	{
	  size_t n = fread(buf, 1, CHUNK_SIZE, stdoutStream.Get());
	  // NOTE(review): ferror() returns a non-zero indicator, not an
	  // errno value, so comparing it against EPIPE looks suspect —
	  // confirm intent upstream.
	  int err = ferror(stdoutStream.Get());
	  if (err != 0 && err != EPIPE)
	    {
	      FATAL_CRT_ERROR ("fread", 0);
	    }
	  // pass output to caller; a false return cancels reading
	  total += n;
	  cancelled = ! pCallback->OnProcessOutput(buf, n);
	}
      SessionImpl::GetSession()->trace_process->WriteFormattedLine
	("core", T_("read %u bytes from the pipe"),
	 static_cast<unsigned>(total));
    }

  // wait for the process to finish
  pProcess->WaitForExit ();

  // get the exit code & close process
  int exitCode = pProcess->get_ExitCode();
  pProcess->Close ();

  if (pExitCode != 0)
    {
      // Caller inspects the exit code itself.
      *pExitCode = exitCode;
      return (true);
    }
  else if (exitCode == 0)
    {
      return (true);
    }
  else
    {
      // No out-parameter and a non-zero exit: report failure.
      TraceError (T_("%s returned %d"),
		  Q_(fileName),
		  static_cast<int>(exitCode));
      return (false);
    }
}
/// @copydoc ResourceHandler::CacheResource()
///
/// Loads the shader source file fully into memory, scans it line by line for
/// preprocessor toggle/selection tokens (via ParseLine()), then serializes the
/// resulting persistent resource data once per available target platform.
bool ShaderResourceHandler::CacheResource(
    AssetPreprocessor* pAssetPreprocessor,
    Resource* pResource,
    const String& rSourceFilePath )
{
    HELIUM_ASSERT( pAssetPreprocessor );
    HELIUM_ASSERT( pResource );
    const Shader* pShader = Reflect::AssertCast< const Shader >( pResource );

    AssetPath shaderPath = pShader->GetPath();
    HELIUM_TRACE(
        TraceLevels::Info,
        TXT( "ShaderResourceHandler: Caching \"%s\".\n" ),
        *shaderPath.ToString() );

    DefaultAllocator allocator;

    FileStream* pSourceFileStream = FileStream::OpenFileStream( rSourceFilePath, FileStream::MODE_READ );
    if( !pSourceFileStream )
    {
        HELIUM_TRACE(
            TraceLevels::Error,
            TXT( "ShaderResourceHandler: Source file for shader resource \"%s\" failed to open properly.\n" ),
            *shaderPath.ToString() );

        return false;
    }

    // Load the entire shader resource into memory.
    int64_t size64 = pSourceFileStream->GetSize();
    HELIUM_ASSERT( size64 != -1 );
    HELIUM_ASSERT( static_cast< uint64_t >( size64 ) <= static_cast< size_t >( -1 ) );
    // Reject files whose size cannot be represented in a size_t on this build.
    if( size64 > static_cast< uint64_t >( static_cast< size_t >( -1 ) ) )
    {
        HELIUM_TRACE(
            TraceLevels::Error,
            ( TXT( "ShaderResourceHandler: Source file for shader resource \"%s\" is too large to fit into " )
            TXT( "memory for preprocessing.\n" ) ),
            *shaderPath.ToString() );

        delete pSourceFileStream;

        return false;
    }

    size_t size = static_cast< size_t >( size64 );
    void* pShaderData = allocator.Allocate( size );
    HELIUM_ASSERT( pShaderData );
    if( !pShaderData )
    {
        HELIUM_TRACE(
            TraceLevels::Error,
            ( TXT( "ShaderResourceHandler: Failed to allocate %" ) PRIuSZ TXT( " bytes for loading the source " )
            TXT( "data of \"%s\" for preprocessing.\n" ) ),
            size,
            *shaderPath.ToString() );

        delete pSourceFileStream;

        return false;
    }

    BufferedStream( pSourceFileStream ).Read( pShaderData, 1, size );

    delete pSourceFileStream;

    // Parse all preprocessor toggle and selection tokens from the shader source.
    StrongPtr< Shader::PersistentResourceData > resourceData( new Shader::PersistentResourceData() );

    const char* pLineEnd = static_cast< const char* >( pShaderData );
    const char* pShaderEnd = pLineEnd + size;
    do
    {
        // Advance pLineEnd to the end of the current line (first CR or LF).
        const char* pLineStart = pLineEnd;
        while( pLineEnd < pShaderEnd )
        {
            char character = *pLineEnd;
            if( character == '\n' || character == '\r' )
            {
                break;
            }

            ++pLineEnd;
        }

        ParseLine( shaderPath, *resourceData, pLineStart, pLineEnd );

        // Skip any run of line-break characters to the start of the next line.
        while( pLineEnd < pShaderEnd )
        {
            char character = *pLineEnd;
            if( character != '\n' && character != '\r' )
            {
                break;
            }

            ++pLineEnd;
        }
    } while( pLineEnd < pShaderEnd );

    allocator.Free( pShaderData );

    // Serialize the persistent shader resource data for each platform.
    for( size_t platformIndex = 0; platformIndex < static_cast< size_t >( Cache::PLATFORM_MAX ); ++platformIndex )
    {
        // Platforms without a preprocessor are simply skipped.
        PlatformPreprocessor* pPreprocessor = pAssetPreprocessor->GetPlatformPreprocessor(
            static_cast< Cache::EPlatform >( platformIndex ) );
        if( !pPreprocessor )
        {
            continue;
        }

        Resource::PreprocessedData& rPreprocessedData = pResource->GetPreprocessedData(
            static_cast< Cache::EPlatform >( platformIndex ) );
        SaveObjectToPersistentDataBuffer(resourceData.Get(), rPreprocessedData.persistentDataBuffer);
        rPreprocessedData.subDataBuffers.Resize( 0 );
        rPreprocessedData.bLoaded = true;
    }

    return true;
}
// ReceiveFileData //------------------------------------------------------------------------------ bool ToolManifest::ReceiveFileData( uint32_t fileId, const void * data, size_t & dataSize ) { MutexHolder mh( m_Mutex ); File & f = m_Files[ fileId ]; // gracefully handle multiple receipts of the same data if ( f.m_Content ) { ASSERT( f.m_SyncState == File::SYNCHRONIZED ); return true; } ASSERT( f.m_SyncState == File::SYNCHRONIZING ); // prepare name for this file AStackString<> fileName; GetRemoteFilePath( fileId, fileName ); // prepare destination AStackString<> pathOnly( fileName.Get(), fileName.FindLast( NATIVE_SLASH ) ); if ( !FileIO::EnsurePathExists( pathOnly ) ) { return false; // FAILED } // write to disk FileStream fs; if ( !fs.Open( fileName.Get(), FileStream::WRITE_ONLY ) ) { return false; // FAILED } if ( fs.Write( data, dataSize ) != dataSize ) { return false; // FAILED } fs.Close(); // open read-only AutoPtr< FileStream > fileStream( FNEW( FileStream ) ); if ( fileStream.Get()->Open( fileName.Get(), FileStream::READ_ONLY ) == false ) { return false; // FAILED } // This file is now synchronized f.m_FileLock = fileStream.Release(); // NOTE: Keep file open to prevent deletion f.m_SyncState = File::SYNCHRONIZED; // is completely synchronized? const File * const end = m_Files.End(); for ( const File * it = m_Files.Begin(); it != end; ++it ) { if ( it->m_SyncState != File::SYNCHRONIZED ) { // still some files to be received return true; // file stored ok } } // all files received m_Synchronized = true; return true; // file stored ok }
// Extracts specified files from zip.
//
// FileSpec              - wildcard pattern selecting which compressed files to extract
// DestPath              - destination directory (any file name portion is stripped)
// OverwriteWhen         - policy for overwriting files that already exist on disk
// RecurseSubdirectories - whether the pattern match descends into subdirectories
// CreatePathMethod      - how much of the archived path to reproduce on disk
//
// Uses the unmanaged minizip API (unz* functions); the __finally block
// guarantees the ANSI password/file-name buffers are always freed.
void ZipFile::Extract(String* FileSpec, String* DestPath, UpdateOption OverwriteWhen, Boolean RecurseSubdirectories, PathInclusion CreatePathMethod)
{
	// Any file spec in destination path will be ignored
	DestPath = JustPath(DestPath);

	char* pszPassword = NULL;
	char* pszFileName = NULL;

	OpenFileForUnzip();

	// Just wrapping in a try/catch/finally to make sure unmanaged code allocations always get released
	try
	{
		Regex* filePattern = GetFilePatternRegularExpression(FileSpec, caseSensitive);
		IEnumerator* filesEnum = files->GetEnumerator();
		CompressedFile* file;
		String* sourceFileName;
		String* destFileName;
		bool writeFile;
		int err;

		// Get ANSI password, if one was provided
		if (password)
			if (password->get_Length())
				pszPassword = StringToCharBuffer(password);

		// Loop through compressed file collection
		while (filesEnum->MoveNext())
		{
			file = static_cast<CompressedFile*>(filesEnum->Current);
			sourceFileName = file->get_FileName();

			if (filePattern->IsMatch(GetSearchFileName(FileSpec, sourceFileName, RecurseSubdirectories)))
			{
				// minizip takes 1 = case sensitive, 2 = case insensitive
				pszFileName = StringToCharBuffer(sourceFileName);
				err = unzLocateFile(hUnzipFile, pszFileName, (caseSensitive ? 1 : 2));
				free(pszFileName);
				pszFileName = NULL;

				// We should find file in zip file if it was in our compressed file collection
				if (err != Z_OK)
					throw new CompressionException(String::Concat(S"Extract Zip File Error: Compressed file \"", sourceFileName, "\" cannot be found in zip file!"));

				// Open compressed file for unzipping
				if (pszPassword)
					err = unzOpenCurrentFilePassword(hUnzipFile, pszPassword);
				else
					err = unzOpenCurrentFile(hUnzipFile);

				if (err != Z_OK)
					throw new CompressionException(S"Extract Zip File", err);

				// Get full destination file name
				// NOTE(review): destFileName is left unassigned if
				// CreatePathMethod holds a value outside the three handled
				// enum members — consider a default case; verify callers.
				switch (CreatePathMethod)
				{
					case PathInclusion::FullPath:
						destFileName = sourceFileName;
						break;
					case PathInclusion::NoPath:
						destFileName = String::Concat(DestPath, Path::GetFileName(sourceFileName));
						break;
					case PathInclusion::RelativePath:
						destFileName = String::Concat(DestPath, sourceFileName);
						break;
				}

				// Make sure destination directory exists
				Directory::CreateDirectory(JustPath(destFileName));

				// See if destination file already exists and apply overwrite policy
				if (File::Exists(destFileName))
				{
					DateTime lastUpdate = File::GetLastWriteTime(destFileName);

					switch (OverwriteWhen)
					{
						case UpdateOption::Never:
							writeFile = false;
							break;
						case UpdateOption::Always:
							writeFile = true;
							break;
						case UpdateOption::ZipFileIsNewer:
							writeFile = (DateTime::Compare(file->get_FileDateTime(), lastUpdate) > 0);
							break;
						case UpdateOption::DiskFileIsNewer:
							writeFile = (DateTime::Compare(file->get_FileDateTime(), lastUpdate) < 0);
							break;
						default:
							writeFile = false;
							break;
					}
				}
				else
					writeFile = true;

				if (writeFile)
				{
					System::Byte buffer[] = new System::Byte[BufferSize];
					System::Byte __pin * destBuff = &buffer[0];	// pin buffer so it can be safely passed into unmanaged code...
					FileStream* fileStream = File::Create(destFileName);
					int read;
					__int64 total = 0, len = -1;

					// Send initial progress event
					len = file->get_UncompressedSize();
					CurrentFile(destFileName, sourceFileName);
					FileProgress(0, len);

					// Read initial buffer
					read = unzReadCurrentFile(hUnzipFile, destBuff, buffer->get_Length());
					if (read < 0)
						throw new CompressionException(S"Extract Zip File", read);

					// Decompress in BufferSize chunks until unzReadCurrentFile
					// reports 0 (end of file) or a negative error code.
					while (read)
					{
						// Write data to file stream,
						fileStream->Write(buffer, 0, read);

						// Raise progress event
						total += read;
						FileProgress(total, len);

						// Read next buffer from source file stream
						read = unzReadCurrentFile(hUnzipFile, destBuff, buffer->get_Length());
						if (read < 0)
							throw new CompressionException(S"Extract Zip File", read);
					}

					fileStream->Close();
				}

				// Close compressed file
				unzCloseCurrentFile(hUnzipFile);
			}
		}
	}
	catch (CompressionException* ex)
	{
		// We just rethrow any errors back to user
		// NOTE(review): "throw ex;" resets the stack trace; a bare
		// "throw;" would preserve it — confirm this is intentional.
		throw ex;
	}
	catch (Exception* ex)
	{
		throw ex;
	}
	__finally
	{
		// Always release unmanaged allocations
		if (pszPassword)
			free(pszPassword);
		if (pszFileName)
			free(pszFileName);
	}
}
long OggSoundFile::cb_tell(void* source) { FileStream *file = static_cast<FileStream*>(source); return file->tell(); }
// Copies a compiled/loaded Falcon module (sources, .fam bytecode, .ftr
// translation files, resources and dynamic libraries) into the packaging
// target directory, reproducing the module's relative location either under
// the main-script tree or under the configured system root.
// Returns false on any unrecoverable filesystem error.
bool storeModule( Options& options, Module* mod )
{
   // this is the base path for the module
   Path modPath( mod->path() );
   Path tgtPath;
   tgtPath.setFullLocation( options.m_sTargetDir );

   // normalize module path: strip any leading "./" segments
   while( modPath.getFullLocation().startsWith("./") )
   {
      modPath.setFullLocation( modPath.getFullLocation().subString(2) );
   }

   message( String("Processing module ").A( modPath.get() ) );

   // strip the main script path from the module path.
   String modloc = modPath.getFullLocation();
   if ( modloc.find( options.m_sMainScriptPath ) == 0 )
   {
      // The thing came from below the main script.
      modloc = modloc.subString(options.m_sMainScriptPath.length() );
      if ( modloc != "" && modloc.getCharAt(0) == '/' )
      {
         modloc = modloc.subString(1);
      }
      tgtPath.setFullLocation( tgtPath.get() + "/" + modloc );
   }
   else
   {
      // if it's coming from somewhere else in the loadpath hierarcy,
      // we must store it below the topmost dir.
      tgtPath.setFullLocation( tgtPath.get() + "/" + options.m_sSystemRoot );

      // Find the path in LoadPath that caused this module to load,
      // strip it away and reproduce it below the SystemRoot.
      // For example, strip /usr/lib/falcon/ from system scripts.
      std::vector<String> paths;
      splitPaths( options.m_sLoadPath, paths );
      for( uint32 i = 0; i < paths.size(); ++i )
      {
         if( modloc.startsWith( paths[i] ) )
         {
            String sSysPath = modloc.subString( paths[i].size() + 1 );
            if( sSysPath != "" )
            {
               tgtPath.setFullLocation( tgtPath.get() + "/" + sSysPath );
            }
            break;
         }
      }
   }

   // store it: make sure the target directory chain exists
   int fsStatus;
   if ( ! Sys::fal_mkdir( tgtPath.getFullLocation(), fsStatus, true ) )
   {
      error( String("Can't create ") + tgtPath.getFullLocation() );
      return false;
   }

   tgtPath.setFilename( modPath.getFilename() );

   // should we store just sources, just fam or both?
   if( modPath.getExtension() != "fam" && modPath.getExtension() != DllLoader::dllExt() )
   {
      // it's a source file.
      if ( ! options.m_bStripSources )
      {
         if( ! copyFile( modPath.get(), tgtPath.get() ) )
         {
            error( String("Can't copy \"") + modPath.get() + "\" into \"" + tgtPath.get() + "\"" );
            return false;
         }
      }

      // should we save the fam?
      if( options.m_bStripSources || options.m_bPackFam )
      {
         // serialize the already-loaded module next to (or instead of) the source
         tgtPath.setExtension("fam");
         FileStream famFile;
         if ( ! famFile.create( tgtPath.get(), (Falcon::BaseFileStream::t_attributes) 0644 )
              || ! mod->save(&famFile) )
         {
            error( "Can't create \"" + tgtPath.get() + "\"" );
            return false;
         }
         famFile.flush();
         famFile.close();
      }
   }
   else
   {
      // just blindly copy everything else.
      if( ! copyFile( modPath.get(), tgtPath.get() ) )
      {
         error( "Can't copy \"" + modPath.get() + "\" into \"" + tgtPath.get() + "\"" );
         return false;
      }
   }

   // now copy .ftr files, if any.
   modPath.setExtension( "ftr" );
   FileStat ftrStat;
   if ( Sys::fal_stats( modPath.get(), ftrStat ) )
   {
      message( "Copying translation file " + modPath.get() );
      tgtPath.setExtension( "ftr" );
      // a missing/failed .ftr copy is only a warning, not a fatal error
      if( ! copyFile( modPath.get(), tgtPath.get() ) )
      {
         warning( "Can't copy \"" + modPath.get() + "\" into \"" + tgtPath.get() + "\"\n" );
      }
   }

   // Should we store .ftt as well?
   if ( ! options.m_bStripSources )
   {
      copyFtr( modPath, tgtPath );
   }

   // and now, the resources.
   std::vector<String> reslist;
   if( getAttribute( mod, "resources", reslist ) )
   {
      for ( uint32 i = 0; i < reslist.size(); ++i )
      {
         copyResource( options, reslist[i], modPath, tgtPath );
      }
   }

   // and finally, the dynamic libraries associated with this module.
   std::vector<String> dynliblist;
   if( getAttribute( mod, "dynlib", dynliblist ) )
   {
      copyDynlibs( options, mod->path(), dynliblist );
   }

   return true;
}
/// @copydoc ResourceHandler::CacheResource()
///
/// Caches one shader variant: decodes the shader type ('v'/'p') and user
/// option-set index from the variant's name, builds the preprocessor token
/// list for that user option set, loads the shader source, then compiles
/// every system option set for every profile on every available platform.
/// PC shader model 4 is always compiled first because its reflection data
/// (constant buffer layout) is reused for all other targets.
bool ShaderVariantResourceHandler::CacheResource(
    ObjectPreprocessor* pObjectPreprocessor,
    Resource* pResource,
    const String& rSourceFilePath )
{
    HELIUM_ASSERT( pObjectPreprocessor );
    HELIUM_ASSERT( pResource );

    ShaderVariant* pVariant = Reflect::AssertCast< ShaderVariant >( pResource );

    // Parse the shader type and user option index from the variant name.
    // The name is "<type char><decimal index>", e.g. "v12" or "p0".
    Name variantName = pVariant->GetName();
    const tchar_t* pVariantNameString = *variantName;
    HELIUM_ASSERT( pVariantNameString );
    tchar_t shaderTypeCharacter = pVariantNameString[ 0 ];
    HELIUM_ASSERT( shaderTypeCharacter != TXT( '\0' ) );

    RShader::EType shaderType;
    switch( shaderTypeCharacter )
    {
        case TXT( 'v' ):
        {
            shaderType = RShader::TYPE_VERTEX;
            break;
        }

        case TXT( 'p' ):
        {
            shaderType = RShader::TYPE_PIXEL;
            break;
        }

        default:
        {
            HELIUM_TRACE(
                TRACE_ERROR,
                ( TXT( "ShaderVariantResourceHandler: Failed to determine shader type from the name of object " )
                TXT( "\"%s\".\n" ) ),
                *pVariant->GetPath().ToString() );

            return false;
        }
    }

    uint32_t userOptionIndex = 0;
    ++pVariantNameString;
    int parseResult;
// Pick the scanf flavor matching the build's character width and compiler.
#if HELIUM_UNICODE
#if HELIUM_CC_CL
    parseResult = swscanf_s( pVariantNameString, TXT( "%" ) TSCNu32, &userOptionIndex );
#else
    parseResult = swscanf( pVariantNameString, TXT( "%" ) TSCNu32, &userOptionIndex );
#endif
#else
#if HELIUM_CC_CL
    parseResult = sscanf_s( pVariantNameString, TXT( "%" ) TSCNu32, &userOptionIndex );
#else
    parseResult = sscanf( pVariantNameString, TXT( "%" ) TSCNu32, &userOptionIndex );
#endif
#endif
    if( parseResult != 1 )
    {
        HELIUM_TRACE(
            TRACE_ERROR,
            ( TXT( "ShaderVariantResourceHandler: Failed to parse user shader option set index from the name of " )
            TXT( "option \"%s\".\n" ) ),
            *pVariant->GetPath().ToString() );

        return false;
    }

    // Get the parent shader.
    Shader* pShader = Reflect::AssertCast< Shader >( pVariant->GetOwner() );
    HELIUM_ASSERT( pShader );
    HELIUM_ASSERT( pShader->GetAnyFlagSet( GameObject::FLAG_PRECACHED ) );

    // Acquire the user preprocessor option set associated with the target shader type and user option set index.
    const Shader::Options& rUserOptions = pShader->GetUserOptions();

    DynArray< Name > toggleNames;
    DynArray< Shader::SelectPair > selectPairs;
    rUserOptions.GetOptionSetFromIndex( shaderType, userOptionIndex, toggleNames, selectPairs );

    // Every enabled toggle/selection becomes a "#define <name> 1" token.
    DynArray< PlatformPreprocessor::ShaderToken > shaderTokens;

    size_t userToggleNameCount = toggleNames.GetSize();
    for( size_t toggleNameIndex = 0; toggleNameIndex < userToggleNameCount; ++toggleNameIndex )
    {
        PlatformPreprocessor::ShaderToken* pToken = shaderTokens.New();
        HELIUM_ASSERT( pToken );
        StringConverter< tchar_t, char >::Convert( pToken->name, *toggleNames[ toggleNameIndex ] );
        pToken->definition = "1";
    }

    size_t userSelectPairCount = selectPairs.GetSize();
    for( size_t selectPairIndex = 0; selectPairIndex < userSelectPairCount; ++selectPairIndex )
    {
        const Shader::SelectPair& rPair = selectPairs[ selectPairIndex ];

        // Both the selection name and the chosen value are defined.
        PlatformPreprocessor::ShaderToken* pToken = shaderTokens.New();
        HELIUM_ASSERT( pToken );
        StringConverter< tchar_t, char >::Convert( pToken->name, *rPair.name );
        pToken->definition = "1";

        pToken = shaderTokens.New();
        HELIUM_ASSERT( pToken );
        StringConverter< tchar_t, char >::Convert( pToken->name, *rPair.choice );
        pToken->definition = "1";
    }

    // Remember where the user tokens end; system tokens appended per option
    // set below are trimmed back to this count after each pass.
    size_t userShaderTokenCount = shaderTokens.GetSize();

    // Load the entire shader resource into memory.
    FileStream* pSourceFileStream = File::Open( rSourceFilePath, FileStream::MODE_READ );
    if( !pSourceFileStream )
    {
        HELIUM_TRACE(
            TRACE_ERROR,
            ( TXT( "ShaderVariantResourceHandler: Source file for shader variant resource \"%s\" failed to open " )
            TXT( "properly.\n" ) ),
            *pVariant->GetPath().ToString() );

        return false;
    }

    int64_t size64 = pSourceFileStream->GetSize();
    HELIUM_ASSERT( size64 != -1 );
    HELIUM_ASSERT( static_cast< uint64_t >( size64 ) <= static_cast< size_t >( -1 ) );
    // Reject files larger than a size_t can address on this build.
    if( size64 > static_cast< uint64_t >( static_cast< size_t >( -1 ) ) )
    {
        HELIUM_TRACE(
            TRACE_ERROR,
            ( TXT( "ShaderVariantResourceHandler: Source file for shader resource \"%s\" is too large to fit " )
            TXT( "into memory for preprocessing.\n" ) ),
            *pShader->GetPath().ToString() );

        delete pSourceFileStream;

        return false;
    }

    size_t size = static_cast< size_t >( size64 );

    DefaultAllocator allocator;
    void* pShaderSource = allocator.Allocate( size );
    HELIUM_ASSERT( pShaderSource );
    if( !pShaderSource )
    {
        HELIUM_TRACE(
            TRACE_ERROR,
            ( TXT( "ShaderVariantResourceHandler: Failed to allocate %" ) TPRIuSZ TXT( " bytes for loading the " )
            TXT( "source data of \"%s\" for preprocessing.\n" ) ),
            size,
            *pShader->GetPath().ToString() );

        delete pSourceFileStream;

        return false;
    }

    BufferedStream( pSourceFileStream ).Read( pShaderSource, 1, size );

    delete pSourceFileStream;

    // Compile each variant of system options for each shader profile in each supported target platform.
    const Shader::Options& rSystemOptions = pShader->GetSystemOptions();

    size_t systemOptionSetCount = rSystemOptions.ComputeOptionSetCount( shaderType );
    if( systemOptionSetCount > UINT32_MAX )
    {
        HELIUM_TRACE(
            TRACE_ERROR,
            ( TXT( "ShaderVariantResourceHandler: System option set count (%" ) TPRIuSZ TXT( ") in shader \"%s\" " )
            TXT( "exceeds the maximum supported (%" ) TPRIuSZ TXT( ").\n" ) ),
            systemOptionSetCount,
            *pShader->GetPath().ToString(),
            static_cast< size_t >( UINT32_MAX ) );

        allocator.Free( pShaderSource );

        return false;
    }

    uint32_t systemOptionSetCount32 = static_cast< uint32_t >( systemOptionSetCount );

    // Pre-size each platform's sub-data buffer array: one compiled-code slot
    // per (profile, system option set) pair.
    for( size_t platformIndex = 0; platformIndex < static_cast< size_t >( Cache::PLATFORM_MAX ); ++platformIndex )
    {
        PlatformPreprocessor* pPreprocessor = pObjectPreprocessor->GetPlatformPreprocessor(
            static_cast< Cache::EPlatform >( platformIndex ) );
        if( !pPreprocessor )
        {
            continue;
        }

        Resource::PreprocessedData& rPreprocessedData = pVariant->GetPreprocessedData(
            static_cast< Cache::EPlatform >( platformIndex ) );

        ShaderVariant::PersistentResourceData persistentResourceData;
        persistentResourceData.m_resourceCount = systemOptionSetCount32;
        SaveObjectToPersistentDataBuffer(&persistentResourceData, rPreprocessedData.persistentDataBuffer);

        size_t shaderProfileCount = pPreprocessor->GetShaderProfileCount();
        size_t shaderCount = shaderProfileCount * systemOptionSetCount;

        DynArray< DynArray< uint8_t > >& rSubDataBuffers = rPreprocessedData.subDataBuffers;
        rSubDataBuffers.Reserve( shaderCount );
        rSubDataBuffers.Resize( 0 );
        rSubDataBuffers.Resize( shaderCount );
        rSubDataBuffers.Trim();

        rPreprocessedData.bLoaded = true;
    }

    // DynArray< uint8_t > compiledCodeBuffer;
    // DynArray< ShaderConstantBufferInfo > constantBuffers, pcSm4ConstantBuffers;
    // DynArray< ShaderSamplerInfo > samplerInputs;
    // DynArray< ShaderTextureInfo > textureInputs;
    CompiledShaderData csd_pc_sm4;

    for( size_t systemOptionSetIndex = 0; systemOptionSetIndex < systemOptionSetCount; ++systemOptionSetIndex )
    {
        // Append this system option set's tokens after the user tokens.
        rSystemOptions.GetOptionSetFromIndex( shaderType, systemOptionSetIndex, toggleNames, selectPairs );

        size_t systemToggleNameCount = toggleNames.GetSize();
        for( size_t toggleNameIndex = 0; toggleNameIndex < systemToggleNameCount; ++toggleNameIndex )
        {
            PlatformPreprocessor::ShaderToken* pToken = shaderTokens.New();
            HELIUM_ASSERT( pToken );
            StringConverter< tchar_t, char >::Convert( pToken->name, *toggleNames[ toggleNameIndex ] );
            pToken->definition = "1";
        }

        size_t systemSelectPairCount = selectPairs.GetSize();
        for( size_t selectPairIndex = 0; selectPairIndex < systemSelectPairCount; ++selectPairIndex )
        {
            const Shader::SelectPair& rPair = selectPairs[ selectPairIndex ];

            PlatformPreprocessor::ShaderToken* pToken = shaderTokens.New();
            HELIUM_ASSERT( pToken );
            StringConverter< tchar_t, char >::Convert( pToken->name, *rPair.name );
            pToken->definition = "1";

            pToken = shaderTokens.New();
            HELIUM_ASSERT( pToken );
            StringConverter< tchar_t, char >::Convert( pToken->name, *rPair.choice );
            pToken->definition = "1";
        }

        // Compile for PC shader model 4 first so that we can get the constant buffer information.
        PlatformPreprocessor* pPreprocessor = pObjectPreprocessor->GetPlatformPreprocessor( Cache::PLATFORM_PC );
        HELIUM_ASSERT( pPreprocessor );

        csd_pc_sm4.compiledCodeBuffer.Resize( 0 );
        bool bCompiled = CompileShader(
            pVariant,
            pPreprocessor,
            Cache::PLATFORM_PC,
            ShaderProfile::PC_SM4,
            shaderType,
            pShaderSource,
            size,
            shaderTokens,
            csd_pc_sm4.compiledCodeBuffer );
        if( !bCompiled )
        {
            HELIUM_TRACE(
                TRACE_ERROR,
                ( TXT( "ShaderVariantResourceHandler: Failed to compile shader for PC shader model 4, which is " )
                TXT( "needed for reflection purposes. Additional shader targets will not be built.\n" ) ) );
        }
        else
        {
            csd_pc_sm4.constantBuffers.Resize( 0 );
            csd_pc_sm4.samplerInputs.Resize( 0 );
            csd_pc_sm4.textureInputs.Resize( 0 );

            bool bReadConstantBuffers = pPreprocessor->FillShaderReflectionData(
                ShaderProfile::PC_SM4,
                csd_pc_sm4.compiledCodeBuffer.GetData(),
                csd_pc_sm4.compiledCodeBuffer.GetSize(),
                csd_pc_sm4.constantBuffers,
                csd_pc_sm4.samplerInputs,
                csd_pc_sm4.textureInputs );
            if( !bReadConstantBuffers )
            {
                HELIUM_TRACE(
                    TRACE_ERROR,
                    ( TXT( "ShaderVariantResourceHandler: Failed to read reflection information for PC shader " )
                    TXT( "model 4. Additional shader targets will not be built.\n" ) ) );
            }
            else
            {
                // Cache the PC SM4 result in its slot.
                Resource::PreprocessedData& rPcPreprocessedData = pVariant->GetPreprocessedData( Cache::PLATFORM_PC );
                DynArray< DynArray< uint8_t > >& rPcSubDataBuffers = rPcPreprocessedData.subDataBuffers;
                DynArray< uint8_t >& rPcSm4SubDataBuffer =
                    rPcSubDataBuffers[ ShaderProfile::PC_SM4 * systemOptionSetCount + systemOptionSetIndex ];

                Cache::WriteCacheObjectToBuffer(csd_pc_sm4, rPcSm4SubDataBuffer);

                // FOR EACH PLATFORM
                for( size_t platformIndex = 0;
                     platformIndex < static_cast< size_t >( Cache::PLATFORM_MAX );
                     ++platformIndex )
                {
                    PlatformPreprocessor* pPreprocessor = pObjectPreprocessor->GetPlatformPreprocessor(
                        static_cast< Cache::EPlatform >( platformIndex ) );
                    if( !pPreprocessor )
                    {
                        continue;
                    }

                    // GET PLATFORM'S SUBDATA BUFFER
                    Resource::PreprocessedData& rPreprocessedData = pVariant->GetPreprocessedData(
                        static_cast< Cache::EPlatform >( platformIndex ) );
                    DynArray< DynArray< uint8_t > >& rSubDataBuffers = rPreprocessedData.subDataBuffers;

                    size_t shaderProfileCount = pPreprocessor->GetShaderProfileCount();
                    for( size_t shaderProfileIndex = 0;
                         shaderProfileIndex < shaderProfileCount;
                         ++shaderProfileIndex )
                    {
                        CompiledShaderData csd;

                        // Already cached PC shader model 4...
                        if( shaderProfileIndex == ShaderProfile::PC_SM4 && platformIndex == Cache::PLATFORM_PC )
                        {
                            continue;
                        }

                        bCompiled = CompileShader(
                            pVariant,
                            pPreprocessor,
                            platformIndex,
                            shaderProfileIndex,
                            shaderType,
                            pShaderSource,
                            size,
                            shaderTokens,
                            csd.compiledCodeBuffer );
                        if( !bCompiled )
                        {
                            continue;
                        }

                        // Seed with the PC SM4 constant buffer layout so all
                        // targets share it, then refresh reflection data.
                        csd.constantBuffers = csd_pc_sm4.constantBuffers;
                        csd.samplerInputs.Resize( 0 );
                        csd.textureInputs.Resize( 0 );

                        bReadConstantBuffers = pPreprocessor->FillShaderReflectionData(
                            shaderProfileIndex,
                            csd.compiledCodeBuffer.GetData(),
                            csd.compiledCodeBuffer.GetSize(),
                            csd.constantBuffers,
                            csd.samplerInputs,
                            csd.textureInputs );
                        if( !bReadConstantBuffers )
                        {
                            continue;
                        }

                        DynArray< uint8_t >& rTargetSubDataBuffer =
                            rSubDataBuffers[ shaderProfileIndex * systemOptionSetCount + systemOptionSetIndex ];

                        Cache::WriteCacheObjectToBuffer(csd, rTargetSubDataBuffer);
                    }
                }
            }
        }

        // Trim the system tokens off the shader token list for the next pass.
        shaderTokens.Resize( userShaderTokenCount );
    }

    allocator.Free( pShaderSource );

    return true;
}
// Imports a League of Legends .anm animation file into mLOLAnimation.
// Supports the legacy layout (versions 0-3: per-bone frame data) and
// version 4 (indexed position/orientation pools). Exits the process on
// open failure. Version 4 parsing is only partially implemented — the
// per-frame bone lookup below is still commented out.
void LOLExporter::ImportAnm( const String& anmFilename )
{
	FileStream file;
	if (!file.Open(anmFilename))
	{
		printf("ERROR: could not open %s\n", anmFilename.c_str());
		exit(1);
	}

	char id[8];
	file.Read(id, sizeof(id));

	uint32_t version = file.ReadUInt();
	mLOLAnimation.Version = version;

	// Version 0, 1, 2, 3 Code
	if (version == 0 || version == 1 || version == 2 || version == 3)
	{
		uint32_t magic = file.ReadUInt();
		uint32_t numBones = file.ReadUInt();
		uint32_t numFrames = file.ReadUInt();
		uint32_t playbackFPS = file.ReadUInt();

		char nameBuffer[32];

		// Read in all the bones
		mLOLAnimation.Clip.AnimationTracks.resize(numBones);
		for (uint32_t i = 0; i < numBones; ++i)
		{
			LOLAnimation::AnimationClip::AnimationTrack& animTrack = mLOLAnimation.Clip.AnimationTracks[i];

			file.Read(nameBuffer, sizeof(nameBuffer));
			animTrack.BoneName = nameBuffer;

			// Unknown
			uint32_t boneType = file.ReadUInt();

			// For each bone, read in its value at each frame in the animation.
			animTrack.KeyFrames.resize(numFrames);
			for (LOLAnimation::AnimationClip::KeyFrame& frame : animTrack.KeyFrames)
			{
				// Read in the frame's quaternion.
				// NOTE(review): the destination indices (x->3, y->1, z->2,
				// w->0) don't match a plain xyzw layout — presumably this
				// remaps the file's component order to the engine's
				// quaternion storage; verify against the engine convention.
				frame.Orientation[3] = file.ReadFloat(); // x
				frame.Orientation[1] = file.ReadFloat(); // y
				frame.Orientation[2] = file.ReadFloat(); // z
				frame.Orientation[0] = file.ReadFloat(); // w

				// Read in the frame's position.
				file.Read(&frame.Position, sizeof(float3));
			}
		}
	}
	else if (version == 4)
	{
		uint32_t magic = file.ReadUInt();

		// Not sure what any of these mean.
		float unknown = file.ReadFloat();
		unknown = file.ReadFloat();
		unknown = file.ReadFloat();

		uint32_t numBones = file.ReadUInt();
		uint32_t numFrames = file.ReadUInt();

		// File stores frame duration; convert to rounded frames-per-second.
		uint32_t playbackFPS = (uint32_t)(1.0f / file.ReadFloat() + 0.5f);

		// These are offsets to specific data sections in the file.
		uint32_t unknownOffset = file.ReadUInt();
		unknownOffset = file.ReadUInt();
		unknownOffset = file.ReadUInt();

		uint32_t positionOffset = file.ReadUInt();
		uint32_t orientationOffset = file.ReadUInt();
		uint32_t indexOffset = file.ReadUInt();

		// These last three values are confusing.
		// They aren't a vector and they throw off the offset values
		// by 12 bytes. Just ignore them and keep reading.
		unknownOffset = file.ReadUInt();
		unknownOffset = file.ReadUInt();
		unknownOffset = file.ReadUInt();

		//
		// Vector section.
		//
		std::vector<float> positions;
		uint32_t numPositions = (orientationOffset - positionOffset) / sizeof(float);
		for (uint32_t i = 0; i < numPositions; ++i)
			positions.push_back(file.ReadFloat());

		//
		// Quaternion section.
		//
		std::vector<float> orientations;
		uint32_t numOrientations = (indexOffset - orientationOffset) / sizeof(float);
		for (uint32_t i = 0; i < numOrientations; ++i)
			orientations.push_back(file.ReadFloat());

		//
		// Offset section.
		//
		// Note: Unlike versions 0-3, data in this version is
		// Frame 1:
		//     Bone 1:
		//     Bone 2:
		//     ...
		// Frame 2:
		//     Bone 1:
		//     ...
		//

		//Dictionary<UInt32, ANMBone> boneMap = new Dictionary<UInt32, ANMBone>();
		for (uint32_t i = 0; i < numBones; ++i)
		{
			// The first frame is a special case since we are allocating bones
			// as we read them in.

			// Read in the offset data.
			uint32_t boneID = file.ReadUInt();
			uint16_t positionID = file.ReadUShort();
			uint16_t unknownIndex = file.ReadUShort(); // Unknown.
			uint16_t orientationID = file.ReadUShort();
			unknownIndex = file.ReadUShort(); // Unknown. Seems to always be zero.

			// Allocate the bone.
			//ANMBone bone = new ANMBone();
			//bone.id = boneID;

			//// Allocate all the frames for the bone.
			//for (int j = 0; j < numBones; ++j)
			//{
			//	bone.frames.Add(new ANMFrame());
			//}

			//// Retrieve the data for the first frame.
			//ANMFrame frame = bone.frames[0];
			//frame.position = LookUpVector(positionID, positions);
			//frame.orientation = LookUpQuaternion(orientationID, orientations);

			//// Store the bone in the dictionary by bone ID.
			//boneMap[boneID] = bone;
		}
	}
}
// Closes the CSV output stream when the sampler backend is destroyed.
~CSVSamplerBackend() { mStream.close(); }
// Imports a League of Legends .skl skeleton file into mLOLSkeleton.
// Versions 1/2 store bones as name + parent + scale + 3x4 transform matrix;
// version 0 is an offset-table layout with separate bone, ID-map, animation
// index and string sections. Exits the process on open failure.
void LOLExporter::ImportSkl( const String& sklFilename )
{
	FileStream stream;
	if (!stream.Open(sklFilename))
	{
		printf("ERROR: could not open %s\n", sklFilename.c_str());
		exit(1);
	}

	char id[8];
	stream.Read(id, 8);

	mLOLSkeleton.Version = stream.ReadUInt();

	if (mLOLSkeleton.Version == 1 || mLOLSkeleton.Version == 2)
	{
		uint32_t designerID = stream.ReadUInt();

		char nameBuffer[32];
		float matrix[12];

		// Read in the bones.
		uint32_t numBones = stream.ReadUInt();
		mLOLSkeleton.Bones.resize(numBones);
		for (uint32_t i = 0; i < numBones; ++i)
		{
			LOLBone& bone = mLOLSkeleton.Bones[i];

			stream.Read(nameBuffer, 32);
			bone.Name = nameBuffer;

			bone.Index = i;
			bone.ParentIndex = stream.ReadInt();
			bone.Scale = stream.ReadFloat();

			// Read in transform matrix (3x4, row data stored column-major
			// in the file — hence the transposed indexing below).
			stream.Read(matrix, sizeof(matrix));

			float4x4 rotation(
				matrix[0], matrix[4], matrix[8],  0.0f,
				matrix[1], matrix[5], matrix[9],  0.0f,
				matrix[2], matrix[6], matrix[10], 0.0f,
				0.0f,      0.0f,      0.0f,       1.0f);

			bone.Orientation = QuaternionFromRotationMatrix(rotation);
			bone.Position = float3(matrix[3], matrix[7], matrix[11]);
		}

		// Version two contains bone IDs.
		if (mLOLSkeleton.Version == 2)
		{
			uint32_t numBoneIDs = stream.ReadUInt();
			for (uint32_t i = 0; i < numBoneIDs; ++i)
				mLOLSkeleton.BoneIDs.push_back(stream.ReadUInt());
		}
	}
	else if (mLOLSkeleton.Version == 0)
	{
		// Header: counts followed by section offsets into the file.
		uint16_t zero = stream.ReadUShort();
		uint16_t numBones = stream.ReadUShort();
		uint32_t numBoneIDs = stream.ReadUInt();
		uint16_t offsetToVertexData = stream.ReadUShort(); // Should be 64.

		int unknown = stream.ReadShort(); // ?

		int offset1 = stream.ReadInt();
		int offsetToAnimationIndices = stream.ReadInt();
		int offset2 = stream.ReadInt();
		int offset3 = stream.ReadInt();
		int offsetToStrings = stream.ReadInt();

		// Not sure what this data represents.
		// I think it's padding incase more header data is required later.
		//stream.Seek(stream.GetPosition() + 20);

		mLOLSkeleton.Bones.resize(numBones);

		stream.Seek(offsetToVertexData);
		for (int i = 0; i < numBones; ++i)
		{
			LOLBone& bone = mLOLSkeleton.Bones[i];

			// The old scale was always 0.1. For now, just go with it.
			bone.Scale = 0.1f;

			zero = stream.ReadShort(); // ?
			bone.Index = stream.ReadShort();
			bone.ParentIndex = stream.ReadShort();
			unknown = stream.ReadShort(); // ?

			int namehash = stream.ReadInt();

			float twoPointOne = stream.ReadFloat();

			stream.Read(&bone.Position, sizeof(float3));

			float one = stream.ReadFloat(); // ? Maybe scales for X, Y, and Z
			one = stream.ReadFloat();
			one = stream.ReadFloat();

			stream.Read(&bone.Orientation, sizeof(Quaternionf));

			float ctx = stream.ReadFloat(); // ctx
			float cty = stream.ReadFloat(); // cty
			float ctz = stream.ReadFloat(); // ctz

			// The rest of the bone data is unknown. Maybe padding?
			stream.Seek(stream.GetPosition() + 32);
		}

		stream.Seek(offset1);
		for (uint32_t i = 0; i < numBones; ++i) // Inds for version 4 animation.
		{
			// 8 bytes: maps an animation bone ID to a skeleton bone ID.
			uint32_t sklID = stream.ReadUInt();
			uint32_t anmID = stream.ReadUInt();

			mLOLSkeleton.BoneIDMap[anmID] = sklID;
		}

		stream.Seek(offsetToAnimationIndices);
		for (uint32_t i = 0; i < numBoneIDs; ++i) // Inds for animation
		{
			// 2 bytes
			uint16_t boneID = stream.ReadUShort();
			mLOLSkeleton.BoneIDs.push_back(boneID);
		}

		// Bone names are stored as a packed, NUL-terminated string table
		// read in 4-byte chunks.
		stream.Seek(offsetToStrings);
		char nameBuffer[4];
		for (int i = 0; i < numBones; ++i)
		{
			bool finished = false;
			do
			{
				stream.Read(nameBuffer, 4);
				for (char c : nameBuffer)
				{
					if (c == '\0')
					{
						finished = true;
						break;
					}
					mLOLSkeleton.Bones[i].Name.push_back(c);
				}
			} while (!finished);
		}
	}
}
void LOLExporter::ImportSkn( const String& sknFilename ) { FileStream file; if (!file.Open(sknFilename)) { printf("ERROR: could not open %s\n", sknFilename.c_str()); exit(1); } uint32_t magic = file.ReadInt(); uint16_t version = file.ReadUShort(); uint16_t numObjects = file.ReadUShort(); mLOLSkinMesh.Version = version; if (version == 1 || version == 2) { // Contains material headers. uint32_t numParts = file.ReadUInt(); char nameBuffer[64]; mLOLSkinMesh.MeshParts.resize(numParts); for (uint32_t i = 0; i < numParts; ++i) { // Read in the headers. LOLSkinMesh::MeshPart& meshPart = mLOLSkinMesh.MeshParts[i]; file.Read(nameBuffer, sizeof(nameBuffer)); meshPart.Material = nameBuffer; meshPart.StartVertex = file.ReadInt(); meshPart.VertexCount = file.ReadUInt(); meshPart.StartIndex = file.ReadInt(); meshPart.IndexCount = file.ReadUInt(); } uint32_t numIndices = file.ReadUInt(); uint32_t numVertices = file.ReadUInt(); mLOLSkinMesh.Indices.resize(numIndices); file.Read(&mLOLSkinMesh.Indices[0], numIndices * sizeof(uint16_t)); mLOLSkinMesh.Verteces.resize(numVertices); for (LOLSkinMesh::Vertex& vertex : mLOLSkinMesh.Verteces) { file.Read(&vertex.Position, sizeof(float3)); file.Read(&vertex.BoneIndices, sizeof(uint8_t)*4); file.Read(&vertex.BoneWeights, sizeof(float)*4); file.Read(&vertex.Normal, sizeof(float3)); file.Read(&vertex.Texcoords, sizeof(float2)); // Check SkinModelVertex /*float totalWeight = 0.0f; for (int i = 0; i < 4; ++i) { if (vertex.BoneIndices[i] >= mBones.size()) printf("Bone Index Out of Range!"); totalWeight += vertex.weights[i]; } if ( fabsf(totalWeight - 1.0f) > 0.001) printf("Unnormalized Bone Weights!"); if ( vertex.texcoords[0] < 0.0f || vertex.texcoords[0] > 1.0f || vertex.texcoords[1] < 0.0f || vertex.texcoords[1] > 1.0f ) printf("Texcoords Index Out of Range!");*/ } for ( size_t i = 0; i < mLOLSkinMesh.MeshParts.size(); ++i ) { LOLSkinMesh::MeshPart& lolMeshPart = mLOLSkinMesh.MeshParts[i]; const int32_t StartIndex = lolMeshPart.StartIndex; 
const int32_t EndIndex = lolMeshPart.StartIndex + lolMeshPart.IndexCount; for (int32_t j = StartIndex; j < EndIndex; ++j) { uint16_t index = mLOLSkinMesh.Indices[j]; const LOLSkinMesh::Vertex& vertex = mLOLSkinMesh.Verteces[index]; lolMeshPart.Bound.Merge(vertex.Position); } mLOLSkinMesh.Bound.Merge(lolMeshPart.Bound); } } else { printf("Unsupported Skn format!\n"); exit(1); } printf("SkinnedMesh %s\n", sknFilename.c_str()); printf("Version: %d\n", mLOLSkinMesh.Version); printf("Number of Objects: %d\n", numObjects); printf("Number of Material Headers: %d\n", mLOLSkinMesh.MeshParts.size()); printf("Number of Vertices: %d\n", mLOLSkinMesh.Verteces.size()); printf("Number of Indices: %d\n", mLOLSkinMesh.Indices.size()); }
/*
================
Map_LoadFile

Loads a map file into the editor: frees the current map, parses the new one
via the map module, validates the result (worldspawn present, load not
cancelled), then restores the view and refreshes UI state. Timing of the
load is reported to the console.
================
*/
void Map_LoadFile( const char *filename ){
	clock_t start, finish;
	double elapsed_time;
	start = clock();

	Sys_BeginWait();
	Select_Deselect();
	/*!
	   \todo FIXME TTimo why is this commented out?
	   stability issues maybe? or duplicate feature?
	   forcing to show the console during map load was a good thing IMO
	 */
	//SetInspectorMode(W_CONSOLE);
	Sys_Printf( "Loading map from %s\n", filename );

	// Tear down the previous map and reset parse counters.
	Map_Free();
	//++timo FIXME: maybe even easier to have Group_Init called from Map_Free?
	Group_Init();
	g_qeglobals.d_num_entities = 0;
	g_qeglobals.d_parsed_brushes = 0;

	// cancel the map loading process
	// used when conversion between standard map format and BP format is required and the user cancels the process
	g_bCancel_Map_LoadFile = false;

	strcpy( currentmap, filename );

	g_bScreenUpdates = false; // leo: avoid redraws while loading the map (see fenris:1952)

	// prepare to let the map module do the parsing.
	// The file extension (without the dot) selects the import format.
	FileStream file;
	const char* type = strrchr( filename,'.' );
	if ( type != NULL ) {
		type++;
	}
	// NOTE TTimo opening as binary doesn't make a lot of sense
	// but opening as text confuses the scriptlib parser
	// this may be a problem if we "rb" and use the XML parser, might have an incompatibility
	if ( file.Open( filename, "rb" ) ) {
		Map_Import( &file, type );
	}
	else{
		Sys_FPrintf( SYS_ERR, "ERROR: failed to open %s for read\n", filename );
	}
	file.Close();

	g_bScreenUpdates = true;

	// User aborted (e.g. declined a map-format conversion): fall back to a
	// fresh empty map.
	if ( g_bCancel_Map_LoadFile ) {
		Sys_Printf( "Map_LoadFile canceled\n" );
		Map_New();
		Sys_EndWait();
		return;
	}

	// A map without a worldspawn entity is unusable; start a new map instead.
	if ( !world_entity ) {
		Sys_Printf( "No worldspawn in map.\n" );
		Map_New();
		Sys_EndWait();
		return;
	}
	finish = clock();
	elapsed_time = (double)( finish - start ) / CLOCKS_PER_SEC;

	Sys_Printf( "--- LoadMapFile ---\n" );
	Sys_Printf( "%s\n", filename );
	Sys_Printf( "%5i brushes\n", g_qeglobals.d_parsed_brushes );
	Sys_Printf( "%5i entities\n", g_qeglobals.d_num_entities );
	Sys_Printf( "%5.2f second(s) load time\n", elapsed_time );

	Sys_EndWait();

	Map_RestoreBetween();

	//
	// move the view to a start position
	//
	Map_StartPosition();

	Map_RegionOff();

	modified = false;
	Sys_SetTitle( filename );

	Texture_ShowInuse();
	QERApp_SortActiveShaders();

	Sys_UpdateWindows( W_ALL );
}
REGISTER_TESTS_END

// TestStaleDynamicDeps
//------------------------------------------------------------------------------
// Verifies that dynamically discovered dependencies which become stale
// (a generated header is deleted between builds) are handled correctly:
// after deleting FileB.h, an incremental build from the saved database must
// rebuild only the affected object, not everything.
void TestObject::TestStaleDynamicDeps() const
{
	const char* fileA = "../../../../ftmp/Test/Object/StaleDynamicDeps/GeneratedInput/FileA.h";
	const char* fileB = "../../../../ftmp/Test/Object/StaleDynamicDeps/GeneratedInput/FileB.h";
	const char* fileC = "../../../../ftmp/Test/Object/StaleDynamicDeps/GeneratedInput/FileC.h";
	const char* database = "../../../../ftmp/Test/Object/StaleDynamicDeps/fbuild.fdb";

	// Generate some header files
	{
		// Need FBuild for CleanPath
		FBuildOptions options;
		FBuild fBuild( options );

		// Ensure output path exists
		AStackString<> fullOutputPath;
		NodeGraph::CleanPath( AStackString<>( fileA ), fullOutputPath );
		TEST_ASSERT( Node::EnsurePathExistsForFile( fullOutputPath ) );

		// Create files (empty headers are sufficient for this test)
		FileStream f;
		TEST_ASSERT( f.Open( fileA, FileStream::WRITE_ONLY ) );
		f.Close();
		TEST_ASSERT( f.Open( fileB, FileStream::WRITE_ONLY ) );
		f.Close();
		TEST_ASSERT( f.Open( fileC, FileStream::WRITE_ONLY ) );
		f.Close();
	}

	// Build CPP Generator
	{
		// Init
		FBuildOptions options;
		options.m_ConfigFile = "Data/TestObject/StaleDynamicDeps/cppgenerator.bff";
		options.m_ForceCleanBuild = true;
		FBuild fBuild( options );
		TEST_ASSERT( fBuild.Initialize() );

		// Compile
		TEST_ASSERT( fBuild.Build( AStackString<>( "CPPGenerator" ) ) );
	}

	// Build using CPP Generator (clean)
	{
		// Init
		FBuildOptions options;
		options.m_ConfigFile = "Data/TestObject/StaleDynamicDeps/staledeps.bff";
		options.m_ForceCleanBuild = true;
		options.m_ShowSummary = true; // required to generate stats for node count checks
		FBuild fBuild( options );
		TEST_ASSERT( fBuild.Initialize() );

		// Compile
		TEST_ASSERT( fBuild.Build( AStackString<>( "StaleDynamicDeps" ) ) );

		// Save DB so the incremental build below starts from this state
		TEST_ASSERT( fBuild.SaveDependencyGraph( database ) );

		// Check stats
		//						Seen,	Built,	Type
		CheckStatsNode ( 	1,		1,		Node::DIRECTORY_LIST_NODE );
		CheckStatsNode ( 	2,		2,		Node::COMPILER_NODE );
		CheckStatsNode ( 	4,		4,		Node::OBJECT_NODE ); // 3xCPPGen + 1xUnity
	}

	// Delete one of the generated headers to make the cached deps stale
	EnsureFileDoesNotExist( fileB );

	// Work around poor time resolution of file system on OSX by waiting at least 1 second
	// TODO:C Changes to the way dependencies are managed might make this unnecessary
	#if defined( __OSX__ )
		Thread::Sleep(1001);
	#endif

	// Build Again
	{
		// Init
		FBuildOptions options;
		options.m_ConfigFile = "Data/TestObject/StaleDynamicDeps/staledeps.bff";
		options.m_ShowSummary = true; // required to generate stats for node count checks
		FBuild fBuild( options );
		TEST_ASSERT( fBuild.Initialize( database ) );

		// Compile
		TEST_ASSERT( fBuild.Build( AStackString<>( "StaleDynamicDeps" ) ) );

		// Check stats: only one object rebuilds, compilers are reused
		//						Seen,	Built,	Type
		CheckStatsNode ( 	1,		1,		Node::DIRECTORY_LIST_NODE );
		CheckStatsNode ( 	2,		0,		Node::COMPILER_NODE );
		CheckStatsNode ( 	3,		1,		Node::OBJECT_NODE ); // 3xCPPGen + 1xUnity, rebuild of unity
	}
}
// Processes the engine's command line: publishes the args to the console
// ($Game::argc / $Game::argv*), optionally hands control to the Winterleaf
// C# entry callback, handles the player "-project" redirect, then locates,
// loads and evaluates the entry script (main.cs by default, or a script
// named as the first argument). Returns false if no entry script could be
// found/opened; true once the script has been evaluated.
bool StandardMainLoop::handleCommandLine( S32 argc, const char **argv )
{
   // Allow the window manager to process command line inputs; this is
   // done to let web plugin functionality happen in a fairly transparent way.
   PlatformWindowManager::get()->processCmdLineArgs(argc, argv);

   Process::handleCommandLine( argc, argv );

   // Set up the command line args for the console scripts...
   Con::setIntVariable("Game::argc", argc);
   U32 i;
   std::vector<const char*> arguments;
   arguments.push_back(StringTable->insert("main"));
   for (i = 1; i < argc; i++)
   {
      Con::setVariable(avar("Game::argv%d", i), argv[i]);
      // NOTE(review): i starts at 1, so this condition is always true.
      if (i > 0)
      {
         arguments.push_back(argv[i]);
      }
   }
   //WLE - Vince
   // If a managed (C#) global entry point is registered, give it first shot
   // at handling the command line; a truthy result string means it took over.
   if (Winterleaf_EngineCallback::mWLE_GlobalFunction!=0)
   {
      char sbuffer[8000];
      Winterleaf_EngineCallback::mWLE_GlobalFunction(argc , &arguments[0], sbuffer);
      //If you can find the main routine in C# then use it, if it fails use stock.
      if (atoi(sbuffer))
      {
         BuildCacheCRC();
         return true;
      }
   }

#ifdef TORQUE_PLAYER
   // "-project <path>": switch CWD and the game:/ mount to the given project,
   // then shift those two args off the command line.
   if(argc > 2 && dStricmp(argv[1], "-project") == 0)
   {
      char playerPath[1024];
      Platform::makeFullPathName(argv[2], playerPath, sizeof(playerPath));
      Platform::setCurrentDirectory(playerPath);

      argv += 2;
      argc -= 2;

      // Re-locate the game:/ asset mount.
      Torque::FS::Unmount( "game" );
      Torque::FS::Mount( "game", Platform::FS::createNativeFS( playerPath ) );
   }
#endif

   // Executes an entry script file. This is "main.cs"
   // by default, but any file name (with no whitespace
   // in it) may be run if it is specified as the first
   // command-line parameter. The script used, default
   // or otherwise, is not compiled and is loaded here
   // directly because the resource system restricts
   // access to the "root" directory.

#ifdef TORQUE_ENABLE_VFS
   Zip::ZipArchive *vfs = openEmbeddedVFSArchive();
   bool useVFS = vfs != NULL;
#endif

   Stream *mainCsStream = NULL;

   // The working filestream.
   FileStream str;

   const char *defaultScriptName = "main.cs";
   bool useDefaultScript = true;

   // Check if any command-line parameters were passed (the first is just the app name).
   if (argc > 1)
   {
      // If so, check if the first parameter is a file to open.
      if ( (dStrcmp(argv[1], "") != 0 ) && (str.open(argv[1], Torque::FS::File::Read)) )
      {
         // If it opens, we assume it is the script to run.
         useDefaultScript = false;
#ifdef TORQUE_ENABLE_VFS
         useVFS = false;
#endif
         mainCsStream = &str;
      }
   }

   if (useDefaultScript)
   {
      bool success = false;

#ifdef TORQUE_ENABLE_VFS
      if(useVFS)
         success = (mainCsStream = vfs->openFile(defaultScriptName, Zip::ZipArchive::Read)) != NULL;
      else
#endif
         success = str.open(defaultScriptName, Torque::FS::File::Read);

#if defined( TORQUE_DEBUG ) && defined (TORQUE_TOOLS) && !defined( _XBOX )
      // Tools/debug builds: let the user browse for main.cs interactively.
      if (!success)
      {
         OpenFileDialog ofd;
         FileDialogData &fdd = ofd.getData();
         fdd.mFilters = StringTable->insert("Main Entry Script (main.cs)|main.cs|");
         fdd.mTitle = StringTable->insert("Locate Game Entry Script");

         // Get the user's selection
         if( !ofd.Execute() )
            return false;

         // Process and update CWD so we can run the selected main.cs
         S32 pathLen = dStrlen( fdd.mFile );
         FrameTemp<char> szPathCopy( pathLen + 1);

         dStrcpy( szPathCopy, fdd.mFile );
         //forwardslash( szPathCopy );

         const char *path = dStrrchr(szPathCopy, '/');
         if(path)
         {
            U32 len = path - (const char*)szPathCopy;
            szPathCopy[len+1] = 0;

            Platform::setCurrentDirectory(szPathCopy);

            // Re-locate the game:/ asset mount.
            Torque::FS::Unmount( "game" );
            Torque::FS::Mount( "game", Platform::FS::createNativeFS( ( const char* ) szPathCopy ) );

            success = str.open(fdd.mFile, Torque::FS::File::Read);
            if(success)
               defaultScriptName = fdd.mFile;
         }
      }
#endif
      if( !success )
      {
         char msg[1024];
         dSprintf(msg, sizeof(msg), "Failed to open \"%s\".", defaultScriptName);
         Platform::AlertOK("Error", msg);
#ifdef TORQUE_ENABLE_VFS
         closeEmbeddedVFSArchive();
#endif
         return false;
      }

#ifdef TORQUE_ENABLE_VFS
      if(! useVFS)
#endif
         mainCsStream = &str;
   }

   // This should rarely happen, but lets deal with
   // it gracefully if it does.
   if ( mainCsStream == NULL )
      return false;

   // Slurp the whole script into memory and NUL-terminate it.
   U32 size = mainCsStream->getStreamSize();
   char *script = new char[size + 1];
   mainCsStream->read(size, script);

#ifdef TORQUE_ENABLE_VFS
   if(useVFS)
      vfs->closeFile(mainCsStream);
   else
#endif
      str.close();

   script[size] = 0;

   // Make the script's directory the main.cs dir and the CWD before evaluating.
   char buffer[1024], *ptr;
   Platform::makeFullPathName(useDefaultScript ? defaultScriptName : argv[1], buffer, sizeof(buffer), Platform::getCurrentDirectory());
   ptr = dStrrchr(buffer, '/');
   if(ptr != NULL)
      *ptr = 0;
   Platform::setMainDotCsDir(buffer);
   Platform::setCurrentDirectory(buffer);

   Con::evaluate(script, false, useDefaultScript ? defaultScriptName : argv[1]);
   delete[] script;

#ifdef TORQUE_ENABLE_VFS
   closeEmbeddedVFSArchive();
#endif

   BuildCacheCRC();
   return true;
}
bool GFXGLShader::_loadShaderFromStream( GLuint shader, const Torque::Path &path, FileStream *s, const Vector<GFXShaderMacro> ¯os ) { Vector<char*> buffers; Vector<U32> lengths; // The GLSL version declaration must go first! const char *versionDecl = "#version 150\r\n"; buffers.push_back( dStrdup( versionDecl ) ); lengths.push_back( dStrlen( versionDecl ) ); if(gglHasExtension(EXT_gpu_shader4)) { const char *extension = "#extension GL_EXT_gpu_shader4 : enable\r\n"; buffers.push_back( dStrdup( extension ) ); lengths.push_back( dStrlen( extension ) ); } if(gglHasExtension(ARB_gpu_shader5)) { const char *extension = "#extension GL_ARB_gpu_shader5 : enable\r\n"; buffers.push_back( dStrdup( extension ) ); lengths.push_back( dStrlen( extension ) ); } const char *newLine = "\r\n"; buffers.push_back( dStrdup( newLine ) ); lengths.push_back( dStrlen( newLine ) ); // Now add all the macros. for( U32 i = 0; i < macros.size(); i++ ) { if(macros[i].name.isEmpty()) // TODO OPENGL continue; String define = String::ToString( "#define %s %s\n", macros[i].name.c_str(), macros[i].value.c_str() ); buffers.push_back( dStrdup( define.c_str() ) ); lengths.push_back( define.length() ); } // Now finally add the shader source. U32 shaderLen = s->getStreamSize(); char *buffer = _handleIncludes(path, s); if ( !buffer ) return false; buffers.push_back(buffer); lengths.push_back(shaderLen); glShaderSource(shader, buffers.size(), (const GLchar**)const_cast<const char**>(buffers.address()), NULL); #if defined(TORQUE_DEBUG) && defined(TORQUE_DEBUG_GFX) FileStream stream; if ( !stream.open( path.getFullPath()+"_DEBUG", Torque::FS::File::Write ) ) { AssertISV(false, avar("GFXGLShader::initShader - failed to write debug shader '%s'.", path.getFullPath().c_str())); } for(int i = 0; i < buffers.size(); ++i) stream.writeText(buffers[i]); #endif // Cleanup the shader source buffer. for ( U32 i=0; i < buffers.size(); i++ ) dFree( buffers[i] ); glCompileShader(shader); return true; }
void newline() { mStream.write( 1, "\n" ); }
// One iteration of the renderer's main loop: polls the network for a packet
// (non-blocking), handles data-cache resource deliveries and new pipeline
// definitions, then (re-)executes the current pipeline root block.
// NOTE(review): 'packet' does not appear to be freed in this function -
// confirm ownership (the networking layer may reclaim it).
void Renderer::Execute()
{
	Network::Packet* packet = m_Net->RecvNonBlocking(0);
	if (packet != NULL)
	{
		Kernel::GetInstance()->Log(m_LogTag | Logger::IS_INFORMATION, "Got package: %s", packet->GetID());

		// Some sort of data we need for rendering future pipeline
		if (packet->GetTag() == m_NetTag_Datacache)
		{
			if (StringCompare(packet->GetID(), "res") == 0)
			{
				// Resource payload layout: [0]=hash, [1]=filename,
				// [2]=data length, [3]=data bytes.
				unsigned long hash = packet->GetObject<unsigned long>(0);
				char* filename = packet->GetArray<char*>(packet->ObjectSize(1), 1);
				unsigned long datalen = packet->GetObject<unsigned long>(2);
				char* data = packet->GetArray<char*>(packet->ObjectSize(3) ,3);

				// Persist the resource under a hash-prefixed cache path.
				char location[256];
				Format(location, "datacache/%X_%s", (unsigned int)hash, filename);

				Kernel::GetInstance()->Log(m_LogTag | Logger::IS_INFORMATION, "Saving data to cache: %s", location);

				FileStream stream;
				stream.OpenWriteBinary(location);
				stream.Write(data, datalen);
				stream.Close();

				delete [] filename;
				delete [] data;

				// Acknowledge receipt with a running count of resources.
				m_NumRecieved++;
				Network::Packet* ackpack = m_NetDevice->CreateEmptyPacket("ackres", m_NetTag_Datacache);
				ackpack->PushInt(m_NumRecieved);
				m_Net->SendAllPacket(ackpack);
				delete ackpack;
			}
			else if (StringCompare(packet->GetID(), "ack") == 0)
			{
				// has_everything_lets_do_some_rendering()
			}
		}
		// New pipeline to render
		else if (packet->GetTag() == m_NetTag_Pipeline)
		{
			// Replace the current pipeline: copy the JSON payload, rebuild
			// the block graph, and run it once with previews enabled.
			m_SendPreviews = true;
			CleanUp();
			m_JsonDataSize = packet->GetLength();
			m_JsonData = new char[m_JsonDataSize];
			StringCopy(m_JsonData, packet->GetData(), m_JsonDataSize);
			//Kernel::GetInstance()->Log(m_LogTag | Logger::IS_INFORMATION, "Got new pipeline data, loading JSON and building graph: %s", m_JsonData);
			Kernel::GetInstance()->Log(m_LogTag | Logger::IS_INFORMATION, "Got new pipeline data, loading JSON and building graph.");
			LoadJson();
			BuildGraph();
			m_NumRecieved = 0;
			if (m_RootBlock)
			{
				m_RootBlock->ResetPerformed();
				m_RootBlock->Execute(m_SendPreviews);
				m_SendPreviews = false;
				/*if (m_Net->NumClients() > 0)
				{
					Resource::Image* img = m_gfx->CreateImageFromTexture(GetResult());
					Network::Packet* imgpacket = m_NetDevice->CreateEmptyPacket("imgdata", m_NetTag_Preview);
					imgpacket->PushInt(img->Width());
					imgpacket->PushInt(img->Height());
					imgpacket->PushInt(img->Channels());
					imgpacket->PushString((const char*)img->Ptr(), img->Height()*img->Width()*img->Channels());
					Kernel::GetInstance()->Log(m_LogTag | Logger::IS_INFORMATION, "Sending final image to client '%d'", packet->GetSender());
					m_Net->SendAllPacket(imgpacket);
					m_NumRecieved = 0;
					delete imgpacket;
					delete img;
				}*/
			}
			else
			{
				Kernel::GetInstance()->Log(m_LogTag | Logger::IS_CRITICAL, "Failed to execute root block since it's NULL.");
			}
		}
	}

	// Unconditional per-frame execution of the pipeline (in addition to the
	// immediate run above when a new pipeline arrives).
	if (m_RootBlock)
	{
		m_RootBlock->ResetPerformed();
		m_RootBlock->Execute(m_SendPreviews);
		m_SendPreviews = false;
	}
	else
	{
		Kernel::GetInstance()->Log(m_LogTag | Logger::IS_CRITICAL, "Failed to execute root block since it's NULL.");
	}
}
void close() { #if XL_LOGGING_ENABLED s_logFile.close(); #endif }
// Regenerates the imposter (billboard) atlas for this detail level: renders
// the shape from a grid of polar/equatorial angles into per-mip diffuse and
// normal-map atlases, DXT-compresses them, and writes the results to disk.
void TSLastDetail::_update()
{
   // We're gonna render... make sure we can.
   bool sceneBegun = GFX->canCurrentlyRender();
   if ( !sceneBegun )
      GFX->beginScene();

   _validateDim();

   Vector<GBitmap*> bitmaps;
   Vector<GBitmap*> normalmaps;

   // We need to create our own instance to render with.
   TSShapeInstance *shape = new TSShapeInstance( mShape, true );

   // Animate the shape once.
   shape->animate( mDl );

   // So we don't have to change it everywhere.
   const GFXFormat format = GFXFormatR8G8B8A8;

   // Total snapshots: a ring of equator steps for each polar step,
   // plus optional top/bottom pole captures.
   S32 imposterCount = ( ((2*mNumPolarSteps) + 1 ) * mNumEquatorSteps ) + ( mIncludePoles ? 2 : 0 );

   // Figure out the optimal texture size: shrink from the max until the
   // atlas is as small as possible while still fitting all imposters.
   Point2I texSize( smMaxTexSize, smMaxTexSize );
   while ( true )
   {
      Point2I halfSize( texSize.x / 2, texSize.y / 2 );
      U32 count = ( halfSize.x / mDim ) * ( halfSize.y / mDim );
      if ( count < imposterCount )
      {
         // Try half of the height.
         count = ( texSize.x / mDim ) * ( halfSize.y / mDim );
         if ( count >= imposterCount )
            texSize.y = halfSize.y;
         break;
      }

      texSize = halfSize;
   }

   GBitmap *imposter = NULL;
   GBitmap *normalmap = NULL;
   GBitmap destBmp( texSize.x, texSize.y, true, format );
   GBitmap destNormal( texSize.x, texSize.y, true, format );

   U32 mipLevels = destBmp.getNumMipLevels();

   ImposterCapture *imposterCap = new ImposterCapture();

   F32 equatorStepSize = M_2PI_F / (F32)mNumEquatorSteps;

   static const MatrixF topXfm( EulerF( -M_PI_F / 2.0f, 0, 0 ) );
   static const MatrixF bottomXfm( EulerF( M_PI_F / 2.0f, 0, 0 ) );

   MatrixF angMat;

   F32 polarStepSize = 0.0f;
   if ( mNumPolarSteps > 0 )
      polarStepSize = -( 0.5f * M_PI_F - mDegToRad( mPolarAngle ) ) / (F32)mNumPolarSteps;

   PROFILE_START(TSLastDetail_snapshots);

   // Capture and pack the full angle grid once per mip level, halving the
   // per-imposter cell size each time.
   S32 currDim = mDim;
   for ( S32 mip = 0; mip < mipLevels; mip++ )
   {
      if ( currDim < 1 )
         currDim = 1;

      dMemset( destBmp.getWritableBits(mip), 0, destBmp.getWidth(mip) * destBmp.getHeight(mip) * GFXFormat_getByteSize( format ) );
      dMemset( destNormal.getWritableBits(mip), 0, destNormal.getWidth(mip) * destNormal.getHeight(mip) * GFXFormat_getByteSize( format ) );

      bitmaps.clear();
      normalmaps.clear();

      F32 rotX = 0.0f;
      if ( mNumPolarSteps > 0 )
         rotX = -( mDegToRad( mPolarAngle ) - 0.5f * M_PI_F );

      // We capture the images in a particular order which must
      // match the order expected by the imposter renderer.

      imposterCap->begin( shape, mDl, currDim, mRadius, mCenter );

      for ( U32 j=0; j < (2 * mNumPolarSteps + 1); j++ )
      {
         F32 rotZ = -M_PI_F / 2.0f;

         for ( U32 k=0; k < mNumEquatorSteps; k++ )
         {
            angMat.mul( MatrixF( EulerF( rotX, 0, 0 ) ),
                        MatrixF( EulerF( 0, 0, rotZ ) ) );

            imposterCap->capture( angMat, &imposter, &normalmap );

            bitmaps.push_back( imposter );
            normalmaps.push_back( normalmap );

            rotZ += equatorStepSize;
         }

         rotX += polarStepSize;

         if ( mIncludePoles )
         {
            imposterCap->capture( topXfm, &imposter, &normalmap );

            bitmaps.push_back(imposter);
            normalmaps.push_back( normalmap );

            imposterCap->capture( bottomXfm, &imposter, &normalmap );

            bitmaps.push_back( imposter );
            normalmaps.push_back( normalmap );
         }
      }

      imposterCap->end();

      Point2I texSize( destBmp.getWidth(mip), destBmp.getHeight(mip) );

      // Ok... pack in bitmaps till we run out. Imposters are placed
      // left-to-right, top-to-bottom in capture order.
      for ( S32 y=0; y+currDim <= texSize.y; )
      {
         for ( S32 x=0; x+currDim <= texSize.x; )
         {
            // Copy the next bitmap to the dest texture.
            GBitmap* bmp = bitmaps.first();
            bitmaps.pop_front();
            destBmp.copyRect( bmp, RectI( 0, 0, currDim, currDim ), Point2I( x, y ), 0, mip );
            delete bmp;

            // Copy the next normal to the dest texture.
            GBitmap* normalmap = normalmaps.first();
            normalmaps.pop_front();
            destNormal.copyRect( normalmap, RectI( 0, 0, currDim, currDim ), Point2I( x, y ), 0, mip );
            delete normalmap;

            // Did we finish?
            if ( bitmaps.empty() )
               break;

            x += currDim;
         }

         // Did we finish?
         if ( bitmaps.empty() )
            break;

         y += currDim;
      }

      // Next mip...
      currDim /= 2;
   }

   PROFILE_END(); // TSLastDetail_snapshots

   delete imposterCap;
   delete shape;

   // Should we dump the images?
   if ( Con::getBoolVariable( "$TSLastDetail::dumpImposters", false ) )
   {
      String imposterPath = mCachePath + ".imposter.png";
      String normalsPath = mCachePath + ".imposter_normals.png";

      FileStream stream;
      if ( stream.open( imposterPath, Torque::FS::File::Write  ) )
         destBmp.writeBitmap( "png", stream );
      stream.close();

      if ( stream.open( normalsPath, Torque::FS::File::Write ) )
         destNormal.writeBitmap( "png", stream );
      stream.close();
   }

   // DEBUG: Some code to force usage of a test image.
   //GBitmap* tempMap = GBitmap::load( "./forest/data/test1234.png" );
   //tempMap->extrudeMipLevels();
   //mTexture.set( tempMap, &GFXDefaultStaticDiffuseProfile, false );
   //delete tempMap;

   // Compress: DXT3 for diffuse (sharp alpha), DXT5 for normals.
   DDSFile *ddsDest = DDSFile::createDDSFileFromGBitmap( &destBmp );
   DDSUtil::squishDDS( ddsDest, GFXFormatDXT3 );

   DDSFile *ddsNormals = DDSFile::createDDSFileFromGBitmap( &destNormal );
   DDSUtil::squishDDS( ddsNormals, GFXFormatDXT5 );

   // Finally save the imposters to disk.
   FileStream fs;
   if ( fs.open( _getDiffuseMapPath(), Torque::FS::File::Write ) )
   {
      ddsDest->write( fs );
      fs.close();
   }
   if ( fs.open( _getNormalMapPath(), Torque::FS::File::Write ) )
   {
      ddsNormals->write( fs );
      fs.close();
   }

   delete ddsDest;
   delete ddsNormals;

   // If we did a begin then end it now.
   if ( !sceneBegun )
      GFX->endScene();
}
void ShaderGen::generateShader( const MaterialFeatureData &featureData, char *vertFile, char *pixFile, F32 *pixVersion, const GFXVertexFormat *vertexFormat, const char* cacheName, Vector<GFXShaderMacro> ¯os ) { PROFILE_SCOPE( ShaderGen_GenerateShader ); mFeatureData = featureData; mVertexFormat = vertexFormat; _uninit(); _init(); char vertShaderName[256]; char pixShaderName[256]; // Note: We use a postfix of _V/_P here so that it sorts the matching // vert and pixel shaders together when listed alphabetically. dSprintf( vertShaderName, sizeof(vertShaderName), "shadergen:/%s_V.%s", cacheName, mFileEnding.c_str() ); dSprintf( pixShaderName, sizeof(pixShaderName), "shadergen:/%s_P.%s", cacheName, mFileEnding.c_str() ); dStrcpy( vertFile, vertShaderName ); dStrcpy( pixFile, pixShaderName ); // this needs to change - need to optimize down to ps v.1.1 *pixVersion = GFX->getPixelShaderVersion(); if ( !Con::getBoolVariable( "ShaderGen::GenNewShaders", true ) ) { // If we are not regenerating the shader we will return here. // But we must fill in the shader macros first! _processVertFeatures( macros, true ); _processPixFeatures( macros, true ); return; } // create vertex shader //------------------------ FileStream* s = new FileStream(); if(!s->open(vertShaderName, Torque::FS::File::Write )) { AssertFatal(false, "Failed to open Shader Stream" ); return; } mOutput = new MultiLine; mInstancingFormat.clear(); _processVertFeatures(macros); _printVertShader( *s ); delete s; ((ShaderConnector*)mComponents[C_CONNECTOR])->reset(); LangElement::deleteElements(); // create pixel shader //------------------------ s = new FileStream(); if(!s->open(pixShaderName, Torque::FS::File::Write )) { AssertFatal(false, "Failed to open Shader Stream" ); return; } mOutput = new MultiLine; _processPixFeatures(macros); _printPixShader( *s ); delete s; LangElement::deleteElements(); }
// Re-renders the terrain's composite base texture by blending the layer
// textures on the GPU into a render target, then optionally caches the
// result to disk (DDS/DXT1 or a plain bitmap, per mBaseTexFormat).
// When not caching, mBaseTex is pointed at the live render target so
// realtime painting shows up immediately.
void TerrainBlock::_updateBaseTexture(bool writeToCache)
{
   if ( !mBaseShader && !_initBaseShader() )
      return;

   // This can sometimes occur outside a begin/end scene.
   const bool sceneBegun = GFX->canCurrentlyRender();
   if ( !sceneBegun )
      GFX->beginScene();

   GFXDEBUGEVENT_SCOPE( TerrainBlock_UpdateBaseTexture, ColorI::GREEN );

   PROFILE_SCOPE( TerrainBlock_UpdateBaseTexture );

   GFXTransformSaver saver;

   // Clamp the requested base texture size to the next power of two that
   // the hardware supports.
   const U32 maxTextureSize = GFX->getCardProfiler()->queryProfile( "maxTextureSize", 1024 );

   U32 baseTexSize = getNextPow2( mBaseTexSize );
   baseTexSize = getMin( maxTextureSize, baseTexSize );
   Point2I destSize( baseTexSize, baseTexSize );

   // Setup geometry: a fullscreen quad offset by the device's fill
   // convention so texels map 1:1 onto pixels.
   GFXVertexBufferHandle<GFXVertexPT> vb;
   {
      F32 copyOffsetX = 2.0f * GFX->getFillConventionOffset() / (F32)destSize.x;
      F32 copyOffsetY = 2.0f * GFX->getFillConventionOffset() / (F32)destSize.y;

      GFXVertexPT points[4];
      points[0].point = Point3F(1.0 - copyOffsetX, -1.0 + copyOffsetY, 0.0);
      points[0].texCoord = Point2F(1.0, 1.0f);
      points[1].point = Point3F(1.0 - copyOffsetX, 1.0 + copyOffsetY, 0.0);
      points[1].texCoord = Point2F(1.0, 0.0f);
      points[2].point = Point3F(-1.0 - copyOffsetX, -1.0 + copyOffsetY, 0.0);
      points[2].texCoord = Point2F(0.0, 1.0f);
      points[3].point = Point3F(-1.0 - copyOffsetX, 1.0 + copyOffsetY, 0.0);
      points[3].texCoord = Point2F(0.0, 0.0f);

      vb.set( GFX, 4, GFXBufferTypeVolatile );
      GFXVertexPT *ptr = vb.lock();
      if(ptr)
      {
         dMemcpy( ptr, points, sizeof(GFXVertexPT) * 4 );
         vb.unlock();
      }
   }

   GFXTexHandle blendTex;

   // If the base texture is already a valid render target then
   // use it to render to else we create one.
   if (  mBaseTex.isValid() &&
         mBaseTex->isRenderTarget() &&
         mBaseTex->getFormat() == GFXFormatR8G8B8A8 &&
         mBaseTex->getWidth() == destSize.x &&
         mBaseTex->getHeight() == destSize.y )
      blendTex = mBaseTex;
   else
      blendTex.set( destSize.x, destSize.y, GFXFormatR8G8B8A8, &GFXDefaultRenderTargetProfile, "" );

   GFX->pushActiveRenderTarget();

   // Set our shader stuff
   GFX->setShader( mBaseShader );
   GFX->setShaderConstBuffer( mBaseShaderConsts );
   GFX->setStateBlock( mBaseShaderSB );
   GFX->setVertexBuffer( vb );

   mBaseTarget->attachTexture( GFXTextureTarget::Color0, blendTex );
   GFX->setActiveRenderTarget( mBaseTarget );

   GFX->clear( GFXClearTarget, ColorI(0,0,0,255), 1.0f, 0 );

   GFX->setTexture( 0, mLayerTex );
   mBaseShaderConsts->setSafe( mBaseLayerSizeConst, (F32)mLayerTex->getWidth() );

   // One additive pass per base material layer.
   for ( U32 i=0; i < mBaseTextures.size(); i++ )
   {
      GFXTextureObject *tex = mBaseTextures[i];
      if ( !tex )
         continue;

      GFX->setTexture( 1, tex );

      F32 baseSize = mFile->mMaterials[i]->getDiffuseSize();
      F32 scale = 1.0f;
      if ( !mIsZero( baseSize ) )
         scale = getWorldBlockSize() / baseSize;

      // A mistake early in development means that texture
      // coords are not flipped correctly.  To compensate
      // we flip the y scale here.
      mBaseShaderConsts->setSafe( mBaseTexScaleConst, Point2F( scale, -scale ) );
      mBaseShaderConsts->setSafe( mBaseTexIdConst, (F32)i );

      GFX->drawPrimitive( GFXTriangleStrip, 0, 2 );
   }

   mBaseTarget->resolve();

   GFX->setShader( NULL );
   //GFX->setStateBlock( NULL ); // WHY NOT?
   GFX->setShaderConstBuffer( NULL );
   GFX->setVertexBuffer( NULL );

   GFX->popActiveRenderTarget();

   // End it if we begun it... Yeehaw!
   if ( !sceneBegun )
      GFX->endScene();

   /// Do we cache this sucker?
   if (mBaseTexFormat == NONE || !writeToCache)
   {
      // We didn't cache the result, so set the base texture
      // to the render target we updated.  This should be good
      // for realtime painting cases.
      mBaseTex = blendTex;
   }
   else if (mBaseTexFormat == DDS)
   {
      // NOTE(review): cachePath is computed but the fs.open call below
      // re-invokes _getBaseTexCacheFileName() instead of using it.
      String cachePath = _getBaseTexCacheFileName();

      FileStream fs;
      if ( fs.open( _getBaseTexCacheFileName(), Torque::FS::File::Write ) )
      {
         // Read back the render target, dxt compress it, and write it to disk.
         GBitmap blendBmp( destSize.x, destSize.y, false, GFXFormatR8G8B8A8 );

         blendTex.copyToBmp( &blendBmp );

         /*
         // Test code for dumping uncompressed bitmap to disk.
         {
            FileStream fs;
            if ( fs.open( "./basetex.png", Torque::FS::File::Write ) )
            {
               blendBmp.writeBitmap( "png", fs );
               fs.close();
            }
         }
         */

         blendBmp.extrudeMipLevels();

         DDSFile *blendDDS = DDSFile::createDDSFileFromGBitmap( &blendBmp );
         DDSUtil::squishDDS( blendDDS, GFXFormatDXT1 );

         // Write result to file stream
         blendDDS->write( fs );

         delete blendDDS;
      }
      fs.close();
   }
   else
   {
      FileStream stream;
      if (!stream.open(_getBaseTexCacheFileName(), Torque::FS::File::Write))
      {
         // Could not write the cache; keep the live render target instead.
         mBaseTex = blendTex;
         return;
      }

      GBitmap bitmap(blendTex->getWidth(), blendTex->getHeight(), false, GFXFormatR8G8B8);
      blendTex->copyToBmp(&bitmap);
      bitmap.writeBitmap(formatToExtension(mBaseTexFormat), stream);
   }
}
// DoBuild //------------------------------------------------------------------------------ /*virtual*/ Node::BuildResult TestNode::DoBuild( Job * job ) { // If the workingDir is empty, use the current dir for the process const char * workingDir = m_TestWorkingDir.IsEmpty() ? nullptr : m_TestWorkingDir.Get(); EmitCompilationMessage( workingDir ); // spawn the process Process p; bool spawnOK = p.Spawn( GetTestExecutable()->GetName().Get(), m_TestArguments.Get(), workingDir, FBuild::Get().GetEnvironmentString() ); if ( !spawnOK ) { FLOG_ERROR( "Failed to spawn process for '%s'", GetName().Get() ); return NODE_RESULT_FAILED; } // capture all of the stdout and stderr AutoPtr< char > memOut; AutoPtr< char > memErr; uint32_t memOutSize = 0; uint32_t memErrSize = 0; bool timedOut = !p.ReadAllData( memOut, &memOutSize, memErr, &memErrSize, m_TestTimeOut ); if ( timedOut ) { FLOG_ERROR( "Test timed out after %u ms (%s)", m_TestTimeOut, m_TestExecutable.Get() ); return NODE_RESULT_FAILED; } ASSERT( !p.IsRunning() ); // Get result int result = p.WaitForExit(); if ( result != 0 ) { // something went wrong, print details Node::DumpOutput( job, memOut.Get(), memOutSize ); Node::DumpOutput( job, memErr.Get(), memErrSize ); } // write the test output (saved for pass or fail) FileStream fs; if ( fs.Open( GetName().Get(), FileStream::WRITE_ONLY ) == false ) { FLOG_ERROR( "Failed to open test output file '%s'", GetName().Get() ); return NODE_RESULT_FAILED; } if ( ( memOut.Get() && ( fs.Write( memOut.Get(), memOutSize ) != memOutSize ) ) || ( memErr.Get() && ( fs.Write( memErr.Get(), memErrSize ) != memErrSize ) ) ) { FLOG_ERROR( "Failed to write test output file '%s'", GetName().Get() ); return NODE_RESULT_FAILED; } fs.Close(); // did the test fail? 
if ( result != 0 ) { FLOG_ERROR( "Test failed (error %i) '%s'", result, GetName().Get() ); return NODE_RESULT_FAILED; } // test passed // we only keep the "last modified" time of the test output for passed tests m_Stamp = FileIO::GetFileLastWriteTime( m_Name ); return NODE_RESULT_OK; }
int OggSoundFile::cb_close(void* source) { FileStream *file = static_cast<FileStream*>(source); file->close(); return 0; }
// ------------------------------------------ void PCPStream::readChanAtoms(AtomStream &atom, int numc, BroadcastState &bcs) { /* Channel *ch=NULL; ChanHitList *chl=NULL; ChanInfo newInfo; ch = chanMgr->findChannelByID(bcs.chanID); chl = chanMgr->findHitListByID(bcs.chanID); if (ch) newInfo = ch->info; else if (chl) newInfo = chl->info;*/ Channel *ch = NULL; ChanHitList *chl = NULL; ChanInfo newInfo, chaInfo; ch = this->parent; if (ch) { newInfo = ch->info; chaInfo = ch->info; } for (int i = 0; i < numc; i++) { int c, d; ID4 id = atom.read(c, d); if ((id == PCP_CHAN_PKT) && (ch)) { readPktAtoms(ch, atom, c, bcs); } else if (id == PCP_CHAN_INFO) { newInfo.readInfoAtoms(atom, c); } else if (id == PCP_CHAN_TRACK) { newInfo.readTrackAtoms(atom, c); } else if (id == PCP_CHAN_BCID) { atom.readBytes(newInfo.bcID.id, 16); } else if (id == PCP_CHAN_KEY) // depreciated { atom.readBytes(newInfo.bcID.id, 16); newInfo.bcID.id[0] = 0; // clear flags } else if (id == PCP_CHAN_ID) { atom.readBytes(newInfo.id.id, 16); ch = chanMgr->findChannelByID(newInfo.id); chl = chanMgr->findHitListByID(newInfo.id); } else { LOG_DEBUG("PCP skip: %s,%d,%d", id.getString().str(), c, d); atom.skip(c, d); } } chl = chanMgr->findHitList(newInfo); if (!chl) chl = chanMgr->addHitList(newInfo); if (chl) { chl->info.update(newInfo); if (!servMgr->chanLog.isEmpty()) { //if (chl->numListeners()) { try { FileStream file; file.openWriteAppend(servMgr->chanLog.cstr()); XML::Node *rn = new XML::Node("update time=\"%d\"", sys->getTime()); XML::Node *n = chl->info.createChannelXML(); n->add(chl->createXML(false)); n->add(chl->info.createTrackXML()); rn->add(n); rn->write(file, 0); delete rn; file.close(); } catch (StreamException &e) { LOG_ERROR("Unable to update channel log: %s", e.msg); } } } } if (ch && !ch->isBroadcasting()) ch->updateInfo(newInfo); }
size_t OggSoundFile::cb_read(void* ptr, size_t size, size_t nmemb, void* source) { FileStream *file = static_cast<FileStream*>(source); return file->read(ptr, size, nmemb); }
// Opens and parses a Chitin KEY index file: validates the "KEY V1" signature,
// reads the BIF table and the resource table, and registers every BIF file
// and resource locator with this importer. Returns false (with console
// diagnostics) on any open or format error.
bool KEYImporter::Open(const char *resfile, const char *desc)
{
    // Replace any previous description with a copy of the new one.
    free(description);
    description = strdup(desc);
    if (!core->IsAvailable( IE_BIF_CLASS_ID )) {
        print( "[ERROR]\nAn Archive Plug-in is not Available\n" );
        return false;
    }
    unsigned int i;
    // NOTE: Interface::Init has already resolved resfile.
    printMessage("KEYImporter", "Opening %s...", WHITE, resfile);
    FileStream* f = FileStream::OpenFile(resfile);
    if (!f) {
        // Check for backslashes (false escape characters)
        // this check probably belongs elsewhere (e.g. ResolveFilePath)
        if (strstr( resfile, "\\ " )) {
            print("%s", "\nEscaped space(s) detected in path!. Do not escape spaces in your GamePath! " );
        }
        printStatus( "ERROR", LIGHT_RED );
        printMessage( "KEYImporter", "Cannot open Chitin.key\n", LIGHT_RED );
        textcolor( WHITE );
        return false;
    }
    printStatus( "OK", LIGHT_GREEN );
    printMessage( "KEYImporter", "Checking file type...", WHITE );
    // 8-byte signature identifies a version 1 KEY file.
    char Signature[8];
    f->Read( Signature, 8 );
    if (strncmp( Signature, "KEY V1 ", 8 ) != 0) {
        printStatus( "ERROR", LIGHT_RED );
        printMessage( "KEYImporter", "File has an Invalid Signature.\n", LIGHT_RED );
        textcolor( WHITE );
        delete( f );
        return false;
    }
    printStatus( "OK", LIGHT_GREEN );
    printMessage( "KEYImporter", "Reading Resources...\n", WHITE );
    // Header: entry counts and absolute byte offsets of the two tables.
    ieDword BifCount, ResCount, BifOffset, ResOffset;
    f->ReadDword( &BifCount );
    f->ReadDword( &ResCount );
    f->ReadDword( &BifOffset );
    f->ReadDword( &ResOffset );
    printMessage( "KEYImporter", " ", WHITE );
    print( "BIF Files Count: %d (Starting at %d Bytes)\n", BifCount, BifOffset );
    printMessage("KEYImporter", "RES Count: %d (Starting at %d Bytes)\n", WHITE, ResCount, ResOffset);
    f->Seek( BifOffset, GEM_STREAM_START );
    ieDword BifLen, ASCIIZOffset;
    ieWord ASCIIZLen;
    // BIF table: each 12-byte entry gives the BIF length, the offset/length
    // of its NUL-terminated name, and a locator word.
    for (i = 0; i < BifCount; i++) {
        BIFEntry be;
        f->Seek( BifOffset + ( 12 * i ), GEM_STREAM_START );
        f->ReadDword( &BifLen );
        f->ReadDword( &ASCIIZOffset );
        f->ReadWord( &ASCIIZLen );
        f->ReadWord( &be.BIFLocator );
        // assumes ASCIIZLen includes the NUL terminator — TODO confirm;
        // NOTE(review): malloc result is not checked.
        be.name = ( char * ) malloc( ASCIIZLen );
        f->Seek( ASCIIZOffset, GEM_STREAM_START );
        f->Read( be.name, ASCIIZLen );
        for (int p = 0; p < ASCIIZLen; p++) {
            //some MAC versions use : as delimiter
            if (be.name[p] == '\\' || be.name[p] == ':')
                be.name[p] = PathDelimiter;
        }
        if (be.name[0] == PathDelimiter) {
            // totl has '\data\zcMHar.bif' in the key file, and the CaseSensitive
            // code breaks with that extra slash, so simple fix: remove it
            // (shifts the name, including its terminator, left by one)
            ASCIIZLen--;
            for (int p = 0; p < ASCIIZLen; p++)
                be.name[p] = be.name[p + 1];
            // (if you change this, try moving to ar9700 for testing)
        }
        FindBIF(&be);
        biffiles.push_back( be );
    }
    f->Seek( ResOffset, GEM_STREAM_START );
    // Resource table: resref + type + locator per entry.
    ieResRef ResRef;
    ieWord Type;
    ieDword ResLocator;
    for (i = 0; i < ResCount; i++) {
        f->ReadResRef(ResRef);
        f->ReadWord(&Type);
        f->ReadDword(&ResLocator);
        // seems to be always the last entry?
        if (ResRef[0] != 0)
            resources.set(ResRef, ResLocator, Type);
    }
    printMessage( "KEYImporter", "Resources Loaded...", WHITE );
    printStatus( "OK", LIGHT_GREEN );
    delete( f );
    return true;
}
/** * * Loads all connection settings from a configuration file formatted as format * * @param format The format of the file * @param filename The filename to load * @return An array of connection settings pointers */ std::vector<ConnectionSettings *> ConnectionSettings::load( FileFormat format, std::string filename) { std::vector<ConnectionSettings *> connectionList; ConnectionSettings *connectionSettings; FileStream *stream; Variant data; switch (format) { case BINARY: stream = new BinaryFileStream(); break; case JSON: stream = new JsonFileStream(); break; } // Open the file if (!stream->open(filename, std::ios::in)) { connectionSettings = createDefaultSettings(); connectionList.push_back(connectionSettings); // Failed to open file return connectionList; } // Load stream *stream >> data; if (data.getType() == D_VARIANTVECTOR) { auto connections = data.toVariantVector(); for (auto it = connections.begin(); it != connections.end(); ++it) { if (it->getType() == D_VARIANTMAP) { // Initialize settings connectionSettings = new ConnectionSettings(); // Load from map *connectionSettings << it->toVariantMap(); // Add to collection connectionList.push_back(connectionSettings); // Reparent the children and add to the connectionList // connection reparentChildren(connectionList, connectionSettings); } } } // Close stream stream->close(); // Free memory delete stream; if (connectionList.size() == 0) { connectionSettings = createDefaultSettings(); connectionList.push_back(connectionSettings); } return connectionList; }
// Recursively expands #include "file" directives in a GLSL shader stream.
// Returns a dMalloc'd, NUL-terminated buffer with every include spliced in
// (caller frees), or NULL if any include file cannot be opened.
char* GFXGLShader::_handleIncludes( const Torque::Path& path, FileStream *s )
{
    // TODO: The #line pragma on GLSL takes something called a
    // "source-string-number" which it then never explains.
    //
    // Until i resolve this mystery i disabled this.
    //
    //String linePragma = String::ToString( "#line 1 \r\n");
    //U32 linePragmaLen = linePragma.length();

    // Slurp the whole stream into a NUL-terminated buffer.
    U32 shaderLen = s->getStreamSize();
    char* buffer = (char*)dMalloc(shaderLen + 1);
    //dStrncpy( buffer, linePragma.c_str(), linePragmaLen );
    s->read(shaderLen, buffer);
    buffer[shaderLen] = 0;

    char* p = dStrstr(buffer, "#include");
    while(p)
    {
        // q marks the start of the directive; p walks past "#include" to the
        // quoted filename. [q, p) is the span to replace after parsing.
        char* q = p;
        p += 8;
        if(dIsspace(*p))
        {
            // Parse the quoted include filename.
            U32 n = 0;
            while(dIsspace(*p))
                ++p;
            AssertFatal(*p == '"', "Bad #include directive");
            ++p;
            static char includeFile[256];
            while(*p != '"')
            {
                AssertFatal(*p != 0, "Bad #include directive");
                includeFile[n++] = *p++;
                AssertFatal(n < sizeof(includeFile), "#include directive too long");
            }
            ++p;
            includeFile[n] = 0;

            // First try it as a local file.
            Torque::Path includePath = Torque::Path::Join(path.getPath(), '/', includeFile);
            includePath = Torque::Path::CompressPath(includePath);

            FileStream includeStream;
            if ( !includeStream.open( includePath, Torque::FS::File::Read ) )
            {
                // Try again assuming the path is absolute
                // and/or relative.
                includePath = String( includeFile );
                includePath = Torque::Path::CompressPath(includePath);
                if ( !includeStream.open( includePath, Torque::FS::File::Read ) )
                {
                    AssertISV(false, avar("failed to open include '%s'.", includePath.getFullPath().c_str()));
                    if ( smLogErrors )
                        Con::errorf( "GFXGLShader::_handleIncludes - Failed to open include '%s'.",
                            includePath.getFullPath().c_str() );
                    // Fail... don't return the buffer.
                    dFree(buffer);
                    return NULL;
                }
            }

            // Recurse so nested includes are expanded too.
            char* includedText = _handleIncludes(includePath, &includeStream);

            // If a sub-include fails... cleanup and return.
            if ( !includedText )
            {
                dFree(buffer);
                return NULL;
            }

            // TODO: Disabled till this is fixed correctly.
            //
            // Count the number of lines in the file
            // before the include.
            /*
            U32 includeLine = 0;
            {
               char* nl = dStrstr( buffer, "\n" );
               while ( nl )
               {
                  includeLine++;
                  nl = dStrstr( nl, "\n" );
                  if(nl)
                     ++nl;
               }
            }
            */

            // Splice the included text over the directive span [q, p).
            String manip(buffer);
            manip.erase(q-buffer, p-q);
            String sItx(includedText);

            // TODO: Disabled till this is fixed correctly.
            //
            // Add a new line pragma to restore the proper
            // file and line number after the include.
            //sItx += String::ToString( "\r\n#line %d \r\n", includeLine );

            dFree(includedText);
            manip.insert(q-buffer, sItx);

            // Rebase p into the newly built buffer before continuing the
            // scan, then swap the buffers.
            char* manipBuf = dStrdup(manip.c_str());
            p = manipBuf + (p - buffer);
            dFree(buffer);
            buffer = manipBuf;
        }
        p = dStrstr(p, "#include");
    }

    return buffer;
}
// Generates "tables.asm" for an 8086 demo effect: a precomputed picture
// bitmap plus per-frame motion offsets (two word pointers per frame) and a
// spiral "transition" table over a cell grid.
// NOTE(review): the frame count presumably derives from CGA/NTSC timing
// constants (13125000*14 / (11*76*262)) — confirm against target hardware.
void run()
{
    Vector screenSize(80, 48);
    int frames = 13125000 * 14 / (11 * 76 * 262);
    int maxRadius = 20;
    // Center of screen
    Vector2<double> c = Vector2Cast<double>(screenSize) / 2;
    // Positions of screen top-left relative to centre of each picture
    Array<Vector> p1s(frames);
    Array<Vector> p2s(frames);
    int minX = 0, maxX = 0;
    for (int t = 0; t < frames; ++t) {
        double f = static_cast<double>(t) / frames;
        double r = maxRadius; // *(1 - cos(f * tau)) / 2;
        Rotor2<double> z1(f * 6);
        Rotor2<double> z2(f * 7);
        // Lissajous-style paths for the two picture centres.
        Vector2<double> a1(r*cos(f*tau * 6), r*sin(f*tau * 7));
        Vector2<double> a2(r*cos(f*tau * 5), r*sin(f*tau * 4));
        // Positions of picture centres relative to screen top-left
        Vector p1 = -Vector2Cast<int>(c + a1);
        Vector p2 = -Vector2Cast<int>(c + a2);
        p1s[t] = p1;
        p2s[t] = p2;
        minX = min(min(minX, p1.x), p2.x);
        maxX = max(max(maxX, p1.x + screenSize.x), p2.x + screenSize.x);
    }
    // Row stride rounded up to a multiple of 4.
    int stride = (3 + maxX - minX) & ~3;
    // Offset in picture from start of screen to end
    int ss = (screenSize.y - 1)*stride + screenSize.x;
    Array<int> o1s(frames);
    Array<int> o2s(frames);
    int minO = 0, maxO = 0;
    for (int t = 0; t < frames; ++t) {
        Vector p1 = p1s[t];
        Vector p2 = p2s[t];
        // Offsets of screen top-left into pictures relative to pictures
        // center.
        int o1 = p1.y*stride + p1.x;
        int o2 = p2.y*stride + p2.x;
        int o1e = o1 + ss;
        int o2e = o2 + ss;
        // Picture bounds
        minO = min(min(minO, o1), o2);
        maxO = max(max(maxO, o1e), o2e);
        o1s[t] = o1;
        o2s[t] = o2;
    }
    // Align the offset range to even (word) boundaries.
    minO &= -2;
    maxO = (maxO + 1) & -2;
    FileStream output = File("tables.asm").openWrite();
    /* output.write("cpu 8086\n"
       "segment _DATA public class = DATA\n"
       "\n"
       "global _picture, _motion\n"
       "\n"
       "\n"); */
    int d = ((-minO) / stride + 1)*stride + stride/2;
    int bytes = (maxO + 1 - minO) / 2;
    int xs = (minO + d) % stride;
    int ys = (minO - xs) / stride;
    console.write("First position: (" + decimal(xs) + ", " + decimal(ys) + ")\n");
    xs = (maxO + d) % stride;
    ys = (maxO - xs) / stride;
    console.write("Last position: (" + decimal(xs) + ", " + decimal(ys) + ")\n");
    console.write("Picture size: " + decimal(bytes) + "\n");
    console.write("Motion size: " + decimal(4 * frames) + "\n");
    output.write("frames equ " + decimal(frames) + "\n");
    output.write("stride equ " + decimal(stride/2) + "\n");
    output.write("p equ picture\n");
    output.write("p2 equ pictureEnd+(pictureEnd-picture)+(headerEnd-header)\n\n");
    // Motion table: per frame, two word offsets into the picture data;
    // odd byte offsets are redirected into the second half-picture.
    output.write("motion:");
    for (int t = 0; t < frames; ++t) {
        int o1 = o1s[t] - minO;
        int o2 = o2s[t] - minO;
        int sp = o1 / 2;
        if ((o1 & 1) != 0)
            sp += bytes;
        int bp = o2 / 2;
        if ((o2 & 1) != 0)
            bp += bytes;
        if (t % 3 == 0)
            output.write("\n dw ");
        else
            output.write(", ");
        output.write("p+" + hex(sp, 4) + ", p+" + hex(bp, 4));
    }
    int lastX = 20;
    output.write("\n\n");
    int p2 = (maxO + 1 - minO) / 2;
    p2 += p2 - 1;
    // Transition table: visit the 20x13 cell grid in an inward spiral,
    // emitting two word addresses per cell, each cell at most once.
    output.write("transition:");
    Array<bool> cleared(20 * 13);
    for (int p = 0; p < 20 * 13; ++p)
        cleared[p] = false;
    int pp = 0;
    for (int t = 0; t < 1000000; ++t) {
        int r = 999 - t / 1000;
        int theta = t % 1000;
        Vector2<double> z = Vector2<double>(r/20.0, 0)*Rotor2<double>(theta / 1000.0);
        Vector p = Vector2Cast<int>(z + Vector2<double>(10, 6));
        if (p.x >= 0 && p.x < 20 && p.y >= 0 && p.y < 12) {
            int aa = p.y * 20 + p.x;
            if (cleared[aa])
                continue;
            int a = p.y * 206 * 4 + p.x * 10;
            if (pp % 3 == 0)
                output.write("\n dw ");
            else
                output.write(", ");
            ++pp;
            output.write("p2+" + hex(a, 4) + ", ");
            // NOTE(review): this branch is unreachable — the enclosing guard
            // requires p.y < 12, so p.y == 12 can never hold here.
            if (p.y == 12)
                output.write("p2+" + hex(a, 4));
            else
                output.write("p2+" + hex(a + 206*2, 4));
            cleared[aa] = true;
        }
    }
    console.write("pp = " + decimal(pp) + " \n");
    // Picture data: one packed byte (two 4-bit colours) per word offset.
    output.write("\n\npicture:");
    for (int o = minO; o < maxO + 1; o += 2) {
        int x = (o + d) % stride;
        int y = (o + d - x) / stride - d/stride;
        if (lastX == 20) {
            output.write("\n db ");
            lastX = 0;
        }
        else
            output.write(", ");
        for (; lastX < x % 20; lastX += 2)
            output.write(" ");
        Vector p(x - stride / 2, y);
        int cL = colour(p);
        int cR = colour(p + Vector(1, 0));
        int b = cL | (cR << 4);
        output.write(String(hex(b, 2)));
        lastX += 2;
    }
    output.write("\n");
    output.write("pictureEnd:\n");
}
void SuiteFileStream::Test() { const TUint kBytes = 256; Bws<kBytes> b; // Populate each position in the buffer with it's index. for (TUint i=0; i<kBytes; i++) { b.Append((TChar)i); } FileBrx f(b); FileStream stream; stream.SetFile(&f); TEST(stream.Bytes() == kBytes); // test basic reading Bws<kBytes> buf; stream.Read(buf); TEST(buf == b); TEST(stream.Tell() == kBytes); // test that reads on a full buffer and at the end of a file throw TEST_THROWS(stream.Read(buf), ReaderError); buf.SetBytes(0); TEST_THROWS(stream.Read(buf), ReaderError); // test seeking stream.Seek(0); TEST(stream.Tell() == 0); // test a stream can be (un)interrupted stream.ReadInterrupt(); Bws<10> buf2; TEST_THROWS(stream.Read(buf2), ReaderError); stream.Interrupt(false); stream.Read(buf2); TEST(buf2.Bytes() == buf2.MaxBytes()); for (TUint i=0; i<10; i++) { TEST(buf2[i] == b[i]); } // test that Read appends to a buffer buf.SetBytes(0); buf.Append(buf2); stream.Read(buf); TEST(buf.Bytes() == kBytes); TEST(buf == b); }