// Loads the whole of iFile into memory, splits the 8-bit contents into
// lines and appends every non-empty line (widened into a new 16-bit heap
// descriptor) to iArgs, which takes ownership of each line.
// @return the number of non-empty lines appended
// Leaves on out-of-memory or stream-read failure (Symbian "L" convention).
TInt CKeytoolFileView::SplitFileInputToArrayL()
	{
	TInt fSize;
	iFile.Size(fSize);
	// Raw file buffer; NewLC leaves it on the cleanup stack until the end.
	HBufC8* fileContents = HBufC8::NewLC(fSize);
	TPtr8 ptr(fileContents->Des());
	ptr.SetLength(fSize);
	// create file stream and Read the content from the file
	RFileReadStream inputFileStream(iFile);
	CleanupClosePushL(inputFileStream);
	inputFileStream.ReadL(ptr, fSize);
	CleanupStack::PopAndDestroy(&inputFileStream); // stream closed; buffer now holds the data
	TInt readPos = 0;          // cursor into fileContents, advanced by ReadLine()
	TPtrC8 lineContents;       // receives one line per ReadLine() call
	TInt lineCount = 0;
	// NOTE(review): loop assumes ReadLine() returns 0/EFalse while more data
	// remains and non-zero at end of input -- confirm against its declaration.
	while (!ReadLine(*fileContents, readPos, lineContents))
		{
		TInt lineLength = lineContents.Length();
		if (lineLength) // skip empty lines entirely
			{
			lineCount++;
			// Copy() widens the 8-bit line into the freshly allocated 16-bit buffer.
			HBufC* currentLine = HBufC::NewLC(lineLength);
			currentLine->Des().Copy(lineContents);
			iArgs.AppendL(currentLine);
			// Ownership transferred to iArgs, so only Pop (no destroy).
			CleanupStack::Pop(currentLine);
			}
		}
	CleanupStack::PopAndDestroy(fileContents);
	return lineCount;
	}
// Reads at most `readRowCount` rows from m_filename (all rows when
// readRowCount is zero/negative or exceeds the row count), splitting each
// line on m_delimiter. Side effects: refreshes m_fileSize and m_rowCount
// and logs the row count to stdout.
// @param readRowCount maximum number of rows to read
// @return one vector<string> of fields per row read (empty if the file
//         could not be opened)
vector <vector <string> > FileReader::ReadData(int readRowCount)
{
    vector <vector <string> > vectorTable;
    string line;
    ifstream inputFileStream(m_filename);

    // BUG FIX: tellg() was previously called immediately after opening, when
    // the read position is still 0, so m_fileSize was always 0. Seek to the
    // end first, record the position, then rewind before reading.
    inputFileStream.seekg(0, inputFileStream.end);
    m_fileSize = inputFileStream.tellg();
    inputFileStream.seekg(0, inputFileStream.beg);

    if (inputFileStream.is_open())
    {
        int rowCount = 0;
        while (getline(inputFileStream, line))
        {
            vectorTable.push_back(split(line, m_delimiter));
            rowCount++;
            if (rowCount == readRowCount)
            {
                // Row limit reached (never triggers for readRowCount <= 0).
                break;
            }
        }
        m_rowCount = rowCount;
        // BUG FIX: missing space made the log read "File<name>".
        cout << "File " << m_filename << " row count " << rowCount << endl;
        inputFileStream.close();
    }
    else
    {
        cout << "Unable to open file";
    }
    return vectorTable;
}
// Collective check that this process's share of the stored mapping exists on
// disk. The master broadcasts how many processes the mapping was partitioned
// for; each process then contributes 1 (all its files present) or 0 through
// an MPI_MIN reduction, so the master learns whether EVERY required part
// exists. Must be called by all slaves together with the master's matching
// MPI_Bcast/MPI_Reduce calls.
// @param processID rank of this process; also the partition index probed
void SlaveMapper::checkMappingExistence(int processID)
{
	int numberOfRequiredProcesses;
	// Receive the partition count the stored mapping was created with.
	MPI_Bcast(&numberOfRequiredProcesses, 1, MPI_INT, GlobalConstants::MASTER, MPI_COMM_WORLD);
	bool result = true;
	std::stringstream stream;
	stream << processID; // partition suffix used in the three filenames below
	// Only processes that own a partition have files to verify.
	if (processID < numberOfRequiredProcesses)
	{
		hasToLoad = true; // remember that this slave must later load its part
		// Probe the three per-partition files; good() right after construction
		// (or open) tells us whether the file could be opened.
		// NOTE(review): reusing one ifstream across open() calls relies on
		// open() clearing the error state, guaranteed only since C++11 --
		// confirm the build's language level.
		std::ifstream inputFileStream((PreprocessingFolderStructureManager::getMappingFolderPath(true) + "MappingNodesPart" + stream.str() + ".dat").c_str(), std::ios::binary);
		result = result && inputFileStream.good();
		inputFileStream.close();
		inputFileStream.open((PreprocessingFolderStructureManager::getMappingFolderPath(true) + "MappingPredicatesPart" + stream.str() + ".dat").c_str(), std::ios::binary);
		result = result && inputFileStream.good();
		inputFileStream.close();
		inputFileStream.open((PreprocessingFolderStructureManager::getMappingFolderPath(true) + "MappingTriplesPart" + stream.str() + ".dat").c_str(), std::ios::binary);
		result = result && inputFileStream.good();
		inputFileStream.close();
	}
	int decision = (result) ? 1 : 0;
	// MPI_MIN over all ranks: the master sees 1 only if every part exists.
	// Non-root ranks pass NULL for the receive buffer as MPI requires.
	MPI_Reduce(&decision, NULL, 1, MPI_INT, MPI_MIN, GlobalConstants::MASTER, MPI_COMM_WORLD);
}
// Opens `fileName` as a text file and extracts its contents into
// `placeToWriteInput` via the type's operator>>. Fails hard (assert_true)
// when the file cannot be opened.
// NOTE(review): closeFileStream() is a project helper -- presumably it
// validates the stream state before closing; confirm against its definition.
static void readFromTextFile(const std::string& fileName, PlaceToWriteInput& placeToWriteInput)
{
	std::ifstream inputFileStream(fileName);
	assert_true(inputFileStream.is_open());
	inputFileStream >> placeToWriteInput;
	closeFileStream(inputFileStream);
}
void ProjectImpl::readTargetsByFilename(const std::string& filename) { // Open the input stream const std::string absoluteFilename = mProjectDirectory + filename; std::ifstream inputFileStream(absoluteFilename, std::ios::binary); // Parse JSON if (nullptr == mRapidJsonDocument) { mRapidJsonDocument = new rapidjson::Document(); } JsonHelper::parseDocumentByInputFileStream(*mRapidJsonDocument, inputFileStream, absoluteFilename, "Targets", "1"); }
// Constructs a FileReader bound to `fileName`, remembering the delimiter
// used by the row parsers and caching the file size in m_fileSize.
// The file itself is reopened by the read methods; nothing is kept open here.
// @param fileName  path of the file to read
// @param delimiter field separator later handed to split()
FileReader::FileReader(const char* fileName, string delimiter)
{
    m_filename = fileName;
    m_delimiter = delimiter;

    // Determine the file size: seek to the end, record the position, rewind.
    ifstream inputFileStream(fileName);
    inputFileStream.seekg(0, inputFileStream.end);
    m_fileSize = inputFileStream.tellg();
    inputFileStream.seekg(0, inputFileStream.beg);

    // BUG FIX: the previous version leaked two heap buffers
    // (`new char[length]` and `new char[m_bufferSize]`) on every
    // construction, and `length` was -1 when the file failed to open,
    // making `new char[length]` undefined behaviour. Those buffers, the
    // stray debug prints (one of which printed the std::ios seekdir enum
    // constant, not file data) and the commented-out prototype code were
    // all unused, so they have been removed.
}
bool MasterDegreesComputer::executeDegreesComputation(int numberOfProcesses, std::string degreesComputationConfigurationFileURI, int globalIterator, int numberOfIterations) { DegreesComputationConfigurationFileManager *dcfm = DegreesComputationConfigurationFileManager::getInstance(); int numberOfRequiredProcesses; if (!dcfm->loadConfigurationFile(degreesComputationConfigurationFileURI)) { terminateDegreesComputation(); return false; } std::stringstream globalIteratorStream; globalIteratorStream << globalIterator; std::ifstream inputFileStream((PreprocessingFolderStructureManager::getDegreesComputationFolderPath(true) + "DegreesComputation.dat").c_str(), std::ios::binary); if (!inputFileStream.good() || globalIterator >= numberOfIterations) { inputFileStream.close(); createDegreesComputation(numberOfProcesses, globalIterator); terminateDegreesComputation(); return true; } inputFileStream.seekg(globalIterator * sizeof(int), std::ios::beg); inputFileStream.read((char*)&numberOfRequiredProcesses, sizeof(int)); if (numberOfRequiredProcesses > numberOfProcesses) { std::cout << "\nError: at least " << numberOfRequiredProcesses << " are required for attempting to load the degrees computation\n\n"; inputFileStream.close(); terminateDegreesComputation(); return false; } if (numberOfRequiredProcesses < numberOfProcesses) { std::cout << "Warning: only " << numberOfRequiredProcesses << " processes are required, but " << numberOfProcesses << " were provided instead\n"; } if (checkDegreesComputationExistence(numberOfRequiredProcesses)) { loadDegreesComputation(); } else { createDegreesComputation(numberOfProcesses, globalIterator); } terminateDegreesComputation(); return true; }
//[-------------------------------------------------------] //[ Public virtual RendererRuntime::IResourceLoader methods ] //[-------------------------------------------------------] void CompositorWorkspaceResourceLoader::onDeserialization() { try { std::ifstream inputFileStream(mAsset.assetFilename, std::ios::binary); // Read in the compositor workspace header v1CompositorWorkspace::Header compositorWorkspaceHeader; inputFileStream.read(reinterpret_cast<char*>(&compositorWorkspaceHeader), sizeof(v1CompositorWorkspace::Header)); // Read in the compositor workspace resource nodes ::detail::nodesDeserialization(inputFileStream, *mCompositorWorkspaceResource); } catch (const std::exception& e) { RENDERERRUNTIME_OUTPUT_ERROR_PRINTF("Renderer runtime failed to load compositor workspace asset %d: %s", mAsset.assetId, e.what()); } }
/**
 * Loads a file into a vector of strings, one element per line.
 * Throws FileDoesntExistException if the file cannot be opened.
 * @param filename name of the file to load
 * @param fileData vector that receives the lines (cleared first)
 */
void Diff::loadFile(string filename, vector <string>& fileData) throw(FileDoesntExistException) {
	fileData.clear();
	ifstream inputFileStream(filename.c_str());
	if (!inputFileStream.is_open()) {
		stringstream ss;
		ss << "O ficheiro com o nome " << filename << " nao existe!\n";
		throw FileDoesntExistException(ss.str().c_str());
	}
	// BUG FIX: the old `while (!eof())` loop tested eof() before getline()
	// had failed, which appended one spurious empty string after the final
	// line of any file ending in a newline. Looping on getline() itself
	// reads exactly the lines that exist.
	string temp;
	while (getline(inputFileStream, temp)) {
		fileData.push_back(temp);
	}
}
void ProjectImpl::readAssetsByFilename(const std::string& filename) { // Open the input stream const std::string absoluteFilename = mProjectDirectory + filename; std::ifstream inputFileStream(absoluteFilename, std::ios::binary); // Parse JSON rapidjson::Document rapidJsonDocument; JsonHelper::parseDocumentByInputFileStream(rapidJsonDocument, inputFileStream, absoluteFilename, "Assets", "1"); // Get the asset package name (includes "/" at the end) mAssetPackageDirectoryName = STD_FILESYSTEM_PATH(filename).parent_path().generic_string() + '/'; // Read project data const rapidjson::Value& rapidJsonValueAssets = rapidJsonDocument["Assets"]; const size_t numberOfAssets = rapidJsonValueAssets.MemberCount(); RendererRuntime::AssetPackage::SortedAssetVector& sortedAssetVector = mAssetPackage.getWritableSortedAssetVector(); sortedAssetVector.resize(numberOfAssets); size_t currentAssetIndex = 0; for (rapidjson::Value::ConstMemberIterator rapidJsonMemberIteratorAssets = rapidJsonValueAssets.MemberBegin(); rapidJsonMemberIteratorAssets != rapidJsonValueAssets.MemberEnd(); ++rapidJsonMemberIteratorAssets) { // Get asset data const RendererRuntime::AssetId assetId = static_cast<uint32_t>(std::atoi(rapidJsonMemberIteratorAssets->name.GetString())); const std::string assetFilename = mAssetPackageDirectoryName + rapidJsonMemberIteratorAssets->value.GetString(); if (assetFilename.length() > RendererRuntime::Asset::MAXIMUM_ASSET_FILENAME_LENGTH) { const std::string message = "Asset filename \"" + assetFilename + "\" of asset ID " + std::to_string(assetId) + " is too long. 
Maximum allowed asset filename number of bytes is " + std::to_string(RendererRuntime::Asset::MAXIMUM_ASSET_FILENAME_LENGTH); throw std::runtime_error(message); } // Copy asset data RendererRuntime::Asset& asset = sortedAssetVector[currentAssetIndex]; asset.assetId = assetId; strcpy(asset.assetFilename, assetFilename.c_str()); // Next asset, please ++currentAssetIndex; } std::sort(sortedAssetVector.begin(), sortedAssetVector.end(), ::detail::orderByAssetId); // Build the source asset ID to compiled asset ID map buildSourceAssetIdToCompiledAssetId(); }
// Reads the entire file `filename` into memory and hands it to the parser.
// @param filename path of the source file to parse
// @return the parsed component tree from parseInput(), or nullptr when the
//         file could not be opened (an error is written to stderr)
ast::Component* parseFile(std::string filename)
{
	std::ifstream sourceStream(filename);

	// Guard clause: bail out early when the file could not be opened.
	if (!sourceStream.is_open())
	{
		std::cerr << "File input error" << std::endl;
		return nullptr;
	}

	// Pre-size the buffer to the file length, then slurp the whole file.
	std::string sourceText;
	sourceStream.seekg(0, std::ios::end);
	sourceText.reserve(sourceStream.tellg());
	sourceStream.seekg(0, std::ios::beg);
	sourceText.assign((std::istreambuf_iterator<char>(sourceStream)), std::istreambuf_iterator<char>());

	return parseInput(sourceText);
}
//[-------------------------------------------------------] //[ Public virtual RendererToolkit::IProject methods ] //[-------------------------------------------------------] void ProjectImpl::loadByFilename(const char* filename) { // Clear the previous project clear(); // Open the input stream std::ifstream inputFileStream(filename, std::ios::binary); // Parse JSON rapidjson::Document rapidJsonDocument; JsonHelper::parseDocumentByInputFileStream(rapidJsonDocument, inputFileStream, filename, "Project", "1"); // Read project metadata const rapidjson::Value& rapidJsonValueProject = rapidJsonDocument["Project"]; mProjectName = rapidJsonValueProject["ProjectMetadata"]["Name"].GetString(); { // Read project data mProjectDirectory = STD_FILESYSTEM_PATH(filename).parent_path().generic_string() + '/'; readAssetsByFilename(rapidJsonValueProject["AssetsFilename"].GetString()); readTargetsByFilename(rapidJsonValueProject["TargetsFilename"].GetString()); } }
void ProjectImpl::buildSourceAssetIdToCompiledAssetId() { assert(0 == mSourceAssetIdToCompiledAssetId.size()); assert(0 == mSourceAssetIdToAbsoluteFilename.size()); const RendererRuntime::AssetPackage::SortedAssetVector& sortedAssetVector = mAssetPackage.getSortedAssetVector(); const size_t numberOfAssets = sortedAssetVector.size(); for (size_t i = 0; i < numberOfAssets; ++i) { const RendererRuntime::Asset& asset = sortedAssetVector[i]; // Open the input stream const std::string absoluteAssetFilename = mProjectDirectory + asset.assetFilename; std::ifstream inputFileStream(absoluteAssetFilename, std::ios::binary); // Parse JSON rapidjson::Document rapidJsonDocument; JsonHelper::parseDocumentByInputFileStream(rapidJsonDocument, inputFileStream, absoluteAssetFilename, "Asset", "1"); // Mandatory main sections of the asset const rapidjson::Value& rapidJsonValueAsset = rapidJsonDocument["Asset"]; const rapidjson::Value& rapidJsonValueAssetMetadata = rapidJsonValueAsset["AssetMetadata"]; // Get the relevant asset metadata parts const std::string assetCategory = rapidJsonValueAssetMetadata["AssetCategory"].GetString(); const std::string assetType = rapidJsonValueAssetMetadata["AssetType"].GetString(); const std::string assetName = rapidJsonValueAssetMetadata["AssetName"].GetString(); // Construct the asset ID as string const std::string compiledAssetIdAsString = mProjectName + '/' + assetType + '/' + assetCategory + '/' + assetName; // Hash the asset ID and put it into the map mSourceAssetIdToCompiledAssetId.emplace(asset.assetId, RendererRuntime::StringId(compiledAssetIdAsString.c_str())); mSourceAssetIdToAbsoluteFilename.emplace(asset.assetId, mProjectDirectory + asset.assetFilename); } }
// Unit test for VTextTailRunner: writes lines into a file while a tailer
// thread follows it, and asserts the handler saw exactly the expected lines
// (or code points). Timing-sensitive: relies on 1-2 second sleeps to give
// the tailer thread time to catch up.
void VStreamsUnit::_testStreamTailer() {
    /* example: how to tail the system log for a minute: */
    /*
    VFSNode f("/var/log/system.log");
    TestTailHandler sysLogTestHandler;
    VTextTailRunner sysLogTailRunner(f, sysLogTestHandler);
    sysLogTailRunner.start();
    VThread::sleep(10 * VDuration::SECOND());
    sysLogTailRunner.stop(); // Calling stop() is optional; can just destruct.
    */

    // Work in a scratch directory recreated from scratch on every run.
    VFSNode tempDir = VFSNode::getKnownDirectoryNode(VFSNode::CACHED_DATA_DIRECTORY, "vault", "unittest");
    VFSNode testDirRoot(tempDir, "vstreamsunit_temp");
    (void) testDirRoot.rm();
    testDirRoot.mkdirs();

    // Create a test file and open it for writing, and create an output text stream for it.
    VFSNode testFileNode(testDirRoot, "tailed_file.txt");
    VBufferedFileStream outputFileStream(testFileNode);
    outputFileStream.openWrite();
    VTextIOStream outputStream(outputFileStream, VTextIOStream::kUseUnixLineEndings); // assertions below assume 1 code point written for line endings, so don't write DOS 2-byte line endings even on Windows

    // First, write 3 lines of initial content.
    outputStream.writeLine("zero");
    outputStream.writeLine("one");
    outputStream.writeLine("two");
    outputStream.flush();

    // Open the file read-only and create an input text stream for it.
    VBufferedFileStream inputFileStream(testFileNode);
    inputFileStream.openReadOnly();

    // Now create a file tailer.
    // It should "immediately" (separate thread) read the existing data (since our read mark is at the start).
    bool processByLine = true; // switch to test other mode
    bool callStop = false; // switch to test other mode
    TestTailHandler testHandler;
    VTextTailRunner tailRunner(inputFileStream, testHandler, processByLine);
    tailRunner.start();

    // Give the tailer thread a second to consume the 3 pre-existing lines.
    VThread::sleep(VDuration::SECOND());
    if (processByLine) {
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getNumProcessedLines(), 3, "3 initial lines");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedLine(0), "zero", "line zero");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedLine(1), "one", "line one");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedLine(2), "two", "line two");
    } else {
        // 13 = "zero\n" (5) + "one\n" (4) + "two\n" (4) code points.
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getNumProcessedCodePoints(), 13, "13 initial code points");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedCodePoint(0), VCodePoint('z'), "code point [0]");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedCodePoint(5), VCodePoint('o'), "code point [5]");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedCodePoint(9), VCodePoint('t'), "code point [9]");
    }

    // Write two more lines and verify they are processed.
    outputStream.writeLine("three");
    outputStream.writeLine("four");
    outputStream.flush();
    VThread::sleep(2 * VDuration::SECOND());
    if (processByLine) {
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getNumProcessedLines(), 5, "5 total lines");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedLine(3), "three", "line three");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedLine(4), "four", "line four");
    } else {
        // 24 = 13 initial + "three\n" (6) + "four\n" (5) code points.
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getNumProcessedCodePoints(), 24, "24 initial code points");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedCodePoint(13), VCodePoint('t'), "code point [13]");
        VUNIT_ASSERT_EQUAL_LABELED(testHandler.getProcessedCodePoint(19), VCodePoint('f'), "code point [19]");
    }

    if (callStop) {
        tailRunner.stop(); // Calling stop() is optional; can just destruct.
    }
}
int main(int argc, char * argv[]) { // first argument - config file // second argument - filelist // using namespace std; //const int CutNumb = 8; //string CutList[CutNumb]={"No cut","Trigger","1l","lept-Veto","b-Veto","MET $>$ 50","MET $>$ 100","dPhi $>$ 1"}; // **** configuration Config cfg(argv[1]); string Channel="mutau"; // kinematic cuts on electrons bool fillplots= false; bool Wtemplate= true; const bool isData = cfg.get<bool>("IsData"); const bool applyPUreweighting = cfg.get<bool>("ApplyPUreweighting"); const bool applyPUreweighting_vertices = cfg.get<bool>("ApplyPUreweighting_vertices"); const bool applyPUreweighting_official = cfg.get<bool>("ApplyPUreweighting_official"); const bool applyLeptonSF = cfg.get<bool>("ApplyLeptonSF"); const bool InvertTauIso = cfg.get<bool>("InvertTauIso"); const bool InvertLeptonIso = cfg.get<bool>("InvertLeptonIso"); const bool InvertMET = cfg.get<bool>("InvertMET"); const double ptElectronLowCut = cfg.get<double>("ptElectronLowCut"); const double ptElectronHighCut = cfg.get<double>("ptElectronHighCut"); const double etaElectronCut = cfg.get<double>("etaElectronCut"); const double dxyElectronCut = cfg.get<double>("dxyElectronCut"); const double dzElectronCut = cfg.get<double>("dzElectronCut"); const double isoElectronLowCut = cfg.get<double>("isoElectronLowCut"); const double isoElectronHighCut = cfg.get<double>("isoElectronHighCut"); const bool applyElectronId = cfg.get<bool>("ApplyElectronId"); // vertex cuts const double ndofVertexCut = cfg.get<double>("NdofVertexCut"); const double zVertexCut = cfg.get<double>("ZVertexCut"); const double dVertexCut = cfg.get<double>("DVertexCut"); // kinematic cuts on muons const double ptMuonLowCut = cfg.get<double>("ptMuonLowCut"); const double ptMuonHighCut = cfg.get<double>("ptMuonHighCut"); const double etaMuonCut = cfg.get<double>("etaMuonCut"); const double dxyMuonCut = cfg.get<double>("dxyMuonCut"); const double dzMuonCut = cfg.get<double>("dzMuonCut"); const double 
isoMuonLowCut = cfg.get<double>("isoMuonLowCut"); const double isoMuonHighCut = cfg.get<double>("isoMuonHighCut"); const double isoMuonHighCutQCD = cfg.get<double>("isoMuonHighCutQCD"); const bool applyMuonId = cfg.get<bool>("ApplyMuonId"); const double ptTauLowCut = cfg.get<double>("ptTauLowCut"); const double etaTauCut = cfg.get<double>("etaTauCut"); const string dataBaseDir = cfg.get<string>("DataBaseDir"); string TrigLeg ; if (!isData) TrigLeg = cfg.get<string>("Mu17LegMC"); if (isData) TrigLeg = cfg.get<string>("Mu18LegData"); const string Mu17Tau20MuLegA = cfg.get<string>("Mu17Tau20MuLegA"); const string Mu17Tau20MuLegB = cfg.get<string>("Mu17Tau20MuLegB"); const string Mu17Tau20TauLegA = cfg.get<string>("Mu17Tau20TauLegA"); const string Mu17Tau20TauLegB = cfg.get<string>("Mu17Tau20TauLegB"); const string SingleMuonTriggerFile = cfg.get<string>("Muon17TriggerEff"); const float singleMuonTriggerPtCut = cfg.get<float>("SingleMuonTriggerPtCut"); const float singleMuonTriggerEtaCut = cfg.get<float>("SingleMuonTriggerEtaCut"); const string Region = cfg.get<string>("Region"); const string Sign = cfg.get<string>("Sign"); const double leadchargedhadrcand_dz = cfg.get<double>("leadchargedhadrcand_dz"); const double leadchargedhadrcand_dxy = cfg.get<double>("leadchargedhadrcand_dxy"); // kinematic cuts on Jets const double etaJetCut = cfg.get<double>("etaJetCut"); const double ptJetCut = cfg.get<double>("ptJetCut"); // topological cuts const double dRleptonsCutmutau = cfg.get<double>("dRleptonsCutmutau"); const double dZetaCut = cfg.get<double>("dZetaCut"); const double deltaRTrigMatch = cfg.get<double>("DRTrigMatch"); const bool oppositeSign = cfg.get<bool>("oppositeSign"); const bool isIsoR03 = cfg.get<bool>("IsIsoR03"); // tau const double taupt = cfg.get<double>("taupt"); const double taueta = cfg.get<double>("taueta"); const double decayModeFinding = cfg.get<double>("decayModeFinding"); const double decayModeFindingNewDMs = 
cfg.get<double>("decayModeFindingNewDMs"); const double againstElectronVLooseMVA5 = cfg.get<double>("againstElectronVLooseMVA5"); const double againstMuonTight3 = cfg.get<double>("againstMuonTight3"); const double vertexz = cfg.get<double>("vertexz"); const double byCombinedIsolationDeltaBetaCorrRaw3Hits = cfg.get<double>("byCombinedIsolationDeltaBetaCorrRaw3Hits"); const unsigned int RunRangeMin = cfg.get<unsigned int>("RunRangeMin"); const unsigned int RunRangeMax = cfg.get<unsigned int>("RunRangeMax"); // vertex distributions filenames and histname const string vertDataFileName = cfg.get<string>("VertexDataFileName"); const string vertMcFileName = cfg.get<string>("VertexMcFileName"); const string vertHistName = cfg.get<string>("VertexHistName"); // lepton scale factors const string muonSfDataBarrel = cfg.get<string>("MuonSfDataBarrel"); const string muonSfDataEndcap = cfg.get<string>("MuonSfDataEndcap"); const string muonSfMcBarrel = cfg.get<string>("MuonSfMcBarrel"); const string muonSfMcEndcap = cfg.get<string>("MuonSfMcEndcap"); const string jsonFile = cfg.get<string>("jsonFile"); string cmsswBase = (getenv ("CMSSW_BASE")); string fullPathToJsonFile = cmsswBase + "/src/DesyTauAnalyses/NTupleMaker/test/json/" + jsonFile; const string MuonIdIsoFile = cfg.get<string>("MuonIdIsoEff"); const string TauFakeRateFile = cfg.get<string>("TauFakeRateEff"); // Run-lumi selector std::vector<Period> periods; if (isData) { // read the good runs std::fstream inputFileStream(fullPathToJsonFile.c_str(), std::ios::in); if (inputFileStream.fail() ) { std::cout << "Error: cannot find json file " << fullPathToJsonFile << std::endl; std::cout << "please check" << std::endl; std::cout << "quitting program" << std::endl; exit(-1); } for(std::string s; std::getline(inputFileStream, s); ) { //std::fstream inputFileStream("temp", std::ios::in); periods.push_back(Period()); std::stringstream ss(s); ss >> periods.back(); } } TString MainTrigger(TrigLeg); TString Muon17Tau20MuLegA 
(Mu17Tau20MuLegA ); TString Muon17Tau20MuLegB (Mu17Tau20MuLegB ); TString Muon17Tau20TauLegA (Mu17Tau20TauLegA ); TString Muon17Tau20TauLegB (Mu17Tau20TauLegB ); const double Lumi = cfg.get<double>("Lumi"); const double bTag = cfg.get<double>("bTag"); const double metcut = cfg.get<double>("metcut"); CutList.clear(); CutList.push_back("No cut"); CutList.push_back("No cut after PU"); CutList.push_back("$\\mu$"); CutList.push_back("$\\tau_h$"); CutList.push_back("Trigger"); CutList.push_back("2nd $\\ell$-Veto"); CutList.push_back("3rd $\\ell$-Veto"); CutList.push_back("Lepton SF"); CutList.push_back("TauFakeRate"); CutList.push_back("topPtRwgt"); CutList.push_back("${M}_T>60"); CutList.push_back("$ E_T^{\\rm miss}>$ 100"); CutList.push_back("Jets $<$3"); CutList.push_back("b-Veto"); CutList.push_back("$40<\\rm{Inv}_M<80"); CutList.push_back("$1.5<\\Delta R<4$"); int CutNumb = int(CutList.size()); xs=1;fact=1;fact2=1; unsigned int RunMin = 9999999; unsigned int RunMax = 0; ifstream ifs("xsecs"); string line; while(std::getline(ifs, line)) // read one line from ifs { fact=fact2=1; istringstream iss(line); // access line as a stream // we only need the first two columns string dt,st1,st2;st1="stau2_1";st2="stau5_2"; iss >> dt >> xs >> fact >> fact2; //ifs >> dt >> xs; // no need to read further //cout<< " "<<dt<<" "<<endl; //cout<< "For sample ========================"<<dt<<" xsecs is "<<xs<<" XSec "<<XSec<<" "<<fact<<" "<<fact2<<endl; //if (dt==argv[2]) { //if (std::string::npos != dt.find(argv[2])) { if ( dt == argv[2]) { XSec= xs*fact*fact2; cout<<" Found the correct cross section "<<xs<<" for Dataset "<<dt<<" XSec "<<XSec<<endl; } /* if ( argv[2] == st1) {ChiMass=100;mIntermediate=200;} else if (argv[2] == st2) {ChiMass=200;mIntermediate=500;} */ if (isData) XSec=1.; ChiMass=0.0; } if (XSec<0&& !isData) {cout<<" Something probably wrong with the xsecs...please check - the input was "<<argv[2]<<endl;return 0;} std::vector<unsigned int> allRuns; allRuns.clear(); 
cout<<" ChiMass is "<<ChiMass<<" "<<mIntermediate<<endl; bool doThirdLeptVeto=true; bool doMuVeto=true; //CutList[CutNumb]=CutListt[CutNumb]; char ff[100]; sprintf(ff,"%s/%s",argv[3],argv[2]); if (applyPUreweighting_vertices and applyPUreweighting_official) {std::cout<<"ERROR: Choose only ONE PU reweighting method (vertices or official, not both!) " <<std::endl; exit(-1);} // reweighting with vertices // reading vertex weights TFile * fileDataNVert = new TFile(TString(cmsswBase)+"/src/"+dataBaseDir+"/"+vertDataFileName); TFile * fileMcNVert = new TFile(TString(cmsswBase)+"/src/"+dataBaseDir+"/"+vertMcFileName); TH1D * vertexDataH = (TH1D*)fileDataNVert->Get(TString(vertHistName)); TH1D * vertexMcH = (TH1D*)fileMcNVert->Get(TString(vertHistName)); float normVertexData = vertexDataH->GetSumOfWeights(); float normVertexMc = vertexMcH->GetSumOfWeights(); vertexDataH->Scale(1/normVertexData); vertexMcH->Scale(1/normVertexMc); PileUp * PUofficial = new PileUp(); TFile * filePUdistribution_data = new TFile(TString(cmsswBase)+"/src/DesyTauAnalyses/NTupleMaker/data/PileUpDistrib/Data_Pileup_2015D_Nov17.root","read"); TFile * filePUdistribution_MC = new TFile (TString(cmsswBase)+"/src/DesyTauAnalyses/NTupleMaker/data/PileUpDistrib/MC_Spring15_PU25_Startup.root", "read"); TH1D * PU_data = (TH1D *)filePUdistribution_data->Get("pileup"); TH1D * PU_mc = (TH1D *)filePUdistribution_MC->Get("pileup"); PUofficial->set_h_data(PU_data); PUofficial->set_h_MC(PU_mc); TFile *f10= new TFile(TString(cmsswBase)+"/src/DesyTauAnalyses/NTupleMaker/data/"+muonSfDataBarrel); // mu SF barrel data TFile *f11 = new TFile(TString(cmsswBase)+"/src/DesyTauAnalyses/NTupleMaker/data/"+muonSfDataEndcap); // mu SF endcap data TFile *f12= new TFile(TString(cmsswBase)+"/src/DesyTauAnalyses/NTupleMaker/data/"+muonSfMcBarrel); // mu SF barrel MC TFile *f13 = new TFile(TString(cmsswBase)+"/src/DesyTauAnalyses/NTupleMaker/data/"+muonSfMcEndcap); // mu SF endcap MC TGraphAsymmErrors *hEffBarrelData = 
(TGraphAsymmErrors*)f10->Get("ZMassBarrel"); TGraphAsymmErrors *hEffEndcapData = (TGraphAsymmErrors*)f11->Get("ZMassEndcap"); TGraphAsymmErrors *hEffBarrelMC = (TGraphAsymmErrors*)f12->Get("ZMassBarrel"); TGraphAsymmErrors *hEffEndcapMC = (TGraphAsymmErrors*)f13->Get("ZMassEndcap"); double * dataEffBarrel = new double[10]; double * dataEffEndcap = new double[10]; double * mcEffBarrel = new double[10]; double * mcEffEndcap = new double[10]; dataEffBarrel = hEffBarrelData->GetY(); dataEffEndcap = hEffEndcapData->GetY(); mcEffBarrel = hEffBarrelMC->GetY(); mcEffEndcap = hEffEndcapMC->GetY(); // Lepton Scale Factors TH1D * MuSF_IdIso_Mu1H = new TH1D("MuIdIsoSF_Mu1H", "MuIdIsoSF_Mu1", 100, 0.5,1.5); ScaleFactor * SF_muonIdIso; if (applyLeptonSF) { SF_muonIdIso = new ScaleFactor(); SF_muonIdIso->init_ScaleFactor(TString(cmsswBase)+"/src/"+TString(MuonIdIsoFile)); } ScaleFactor * SF_muonTrigger = new ScaleFactor(); SF_muonTrigger->init_ScaleFactor(TString(cmsswBase)+"/src/"+TString(SingleMuonTriggerFile)); //////// cout<<" Will try to initialize the TFR now.... 
"<<endl; ScaleFactor * SF_TFR; bool applyTFR = true; if (applyTFR) { SF_TFR = new ScaleFactor(); SF_TFR->init_ScaleFactorb(TString(cmsswBase)+"/src/"+TString(TauFakeRateFile),applyTFR); } double Weight=0; int nTotalFiles = 0; int iCut=0; double CFCounter[CutNumb]; double statUnc[CutNumb]; int iCFCounter[CutNumb]; for (int i=0;i < CutNumb; i++){ CFCounter[i] = 0; iCFCounter[i] = 0; statUnc[i] =0; } // file name and tree name std::string rootFileName(argv[2]); //std::ifstream fileList(argv[2]); std::ifstream fileList(ff); //std::ifstream fileList0(argv[2]); std::ifstream fileList0(ff); std::string ntupleName("makeroottree/AC1B"); std::string initNtupleName("initroottree/AC1B"); TString era=argv[3]; TString invMuStr,invTauStr,invMETStr; if(InvertLeptonIso) invMuStr = "_InvMuIso_"; if(InvertTauIso) invTauStr = "_InvTauIso_"; if(InvertMET) invMETStr = "_InvMET_"; TString TStrName(rootFileName+invMuStr+invTauStr+invMETStr+"_"+Region+"_"+Sign); std::cout <<" The filename will be "<<TStrName <<std::endl; // output fileName with histograms TFile * file; if (isData) file = new TFile(era+"/"+TStrName+TString("_DataDriven.root"),"update"); if (!isData) file = new TFile(era+"/"+TStrName+TString(".root"),"update"); file->mkdir(Channel.c_str()); file->cd(Channel.c_str()); int nFiles = 0; int nEvents = 0; int selEvents = 0; int selEventsAllMuons = 0; int selEventsIdMuons = 0; int selEventsIsoMuons = 0; bool lumi=false; bool isLowIsoMu=false; bool isHighIsoMu = false; bool isLowIsoTau=false; bool isHighIsoTau = false; std::string dummy; // count number of files ---> while (fileList0 >> dummy) nTotalFiles++; SetupHists(CutNumb); if (argv[4] != NULL && atoi(argv[4])< nTotalFiles) nTotalFiles=atoi(argv[4]); //if (nTotalFiles>50) nTotalFiles=50; //nTotalFiles = 10; for (int iF=0; iF<nTotalFiles; ++iF) { std::string filen; fileList >> filen; std::cout << "file " << iF+1 << " out of " << nTotalFiles << " filename : " << filen << std::endl; TFile * file_ = TFile::Open(TString(filen)); 
TH1D * histoInputEvents = NULL; histoInputEvents = (TH1D*)file_->Get("makeroottree/nEvents"); if (histoInputEvents==NULL) continue; int NE = int(histoInputEvents->GetEntries()); for (int iE=0;iE<NE;++iE) inputEventsH->Fill(0.); std::cout << " number of input events = " << NE << std::endl; TTree * _inittree = NULL; _inittree = (TTree*)file_->Get(TString(initNtupleName)); if (_inittree==NULL) continue; Float_t genweight; if (!isData) _inittree->SetBranchAddress("genweight",&genweight); Long64_t numberOfEntriesInitTree = _inittree->GetEntries(); std::cout << " number of entries in Init Tree = " << numberOfEntriesInitTree << std::endl; for (Long64_t iEntry=0; iEntry<numberOfEntriesInitTree; iEntry++) { _inittree->GetEntry(iEntry); if (isData) histWeightsH->Fill(0.,1.); else histWeightsH->Fill(0.,genweight); } TTree * _tree = NULL; _tree = (TTree*)file_->Get(TString(ntupleName)); if (_tree==NULL) continue; Long64_t numberOfEntries = _tree->GetEntries(); std::cout << " number of entries in Tree = " << numberOfEntries << std::endl; AC1B analysisTree(_tree); // if (std::string::npos != rootFileName.find("TTJetsLO") || std::string::npos != rootFileName.find("TTPow")) //numberOfEntries = 1000; // numberOfEntries = 1000; for (Long64_t iEntry=0; iEntry<numberOfEntries; ++iEntry) { Float_t weight = 1; Float_t puweight = 1; //float topptweight = 1; analysisTree.GetEntry(iEntry); nEvents++; iCut = 0; //std::cout << " number of entries in Tree = " << numberOfEntries <<" starting weight "<<weight<< std::endl; if (nEvents%50000==0) cout << " processed " << nEvents << " events" << endl; if (fabs(analysisTree.primvertex_z)>zVertexCut) continue; if (analysisTree.primvertex_ndof<ndofVertexCut) continue; double dVertex = (analysisTree.primvertex_x*analysisTree.primvertex_x+ analysisTree.primvertex_y*analysisTree.primvertex_y); if (dVertex>dVertexCut) continue; if (analysisTree.primvertex_count<2) continue; //isData= false; bool lumi=false; isLowIsoMu=false; isHighIsoMu = false; 
isLowIsoTau=false; isHighIsoTau = false; Float_t genweights; float topPt = 0; float antitopPt = 0; bool isZTT = false; if(!isData) { /* TTree *genweightsTree = (TTree*)file_->Get("initroottree/AC1B"); genweightsTree->SetBranchAddress("genweight",&genweights); Long64_t numberOfEntriesInit = genweightsTree->GetEntries(); for (Long64_t iEntryInit=0; iEntryInit<numberOfEntriesInit; ++iEntryInit) { genweightsTree->GetEntry(iEntryInit); histWeightsH->Fill(0.,genweights); } */ /* for (unsigned int igent=0; igent < analysisTree.gentau_count; ++igent) { if (analysisTree.gentau_isPrompt[igent]) isZTT = true; } */ for (unsigned int igen=0; igen<analysisTree.genparticles_count; ++igen) { // cout<< " info = " << int(analysisTree.genparticles_count) <<" "<<int(analysisTree.genparticles_pdgid[igen])<<endl; if (analysisTree.genparticles_pdgid[igen]==6) topPt = TMath::Sqrt(analysisTree.genparticles_px[igen]*analysisTree.genparticles_px[igen]+ analysisTree.genparticles_py[igen]*analysisTree.genparticles_py[igen]); if (analysisTree.genparticles_pdgid[igen]==-6) antitopPt = TMath::Sqrt(analysisTree.genparticles_px[igen]*analysisTree.genparticles_px[igen]+ analysisTree.genparticles_py[igen]*analysisTree.genparticles_py[igen]); } weight *= analysisTree.genweight; lumi=true; //cout<<" weight from init "<<genweights<< " "<<analysisTree.genweight<<" "<<weight<<endl; /* if (applyPUreweighting) { int binNvert = vertexDataH->FindBin(analysisTree.primvertex_count); float_t dataNvert = vertexDataH->GetBinContent(binNvert); float_t mcNvert = vertexMcH->GetBinContent(binNvert); if (mcNvert < 1e-10){mcNvert=1e-10;} float_t vertWeight = dataNvert/mcNvert; weight *= vertWeight; // cout << "NVert = " << analysisTree.primvertex_count << " weight = " << vertWeight << endl; } */ } if (isData) { XSec = 1.; histRuns->Fill(analysisTree.event_run); ///////////////according to dimuons int n=analysisTree.event_run; int lum = analysisTree.event_luminosityblock; std::string num = std::to_string(n); std::string 
lnum = std::to_string(lum); for(const auto& a : periods) { if ( num.c_str() == a.name ) { //std::cout<< " Eureka "<<num<<" "<<a.name<<" "; // std::cout <<"min "<< last->lower << "- max last " << last->bigger << std::endl; for(auto b = a.ranges.begin(); b != std::prev(a.ranges.end()); ++b) { // cout<<b->lower<<" "<<b->bigger<<endl; if (lum >= b->lower && lum <= b->bigger ) lumi = true; } auto last = std::prev(a.ranges.end()); // std::cout <<"min "<< last->lower << "- max last " << last->bigger << std::endl; if ( (lum >=last->lower && lum <= last->bigger )) lumi=true; } } if (!lumi) continue; //if (lumi ) cout<<" ============= Found good run"<<" "<<n<<" "<<lum<<endl; } if (analysisTree.event_run<RunMin) RunMin = analysisTree.event_run; if (analysisTree.event_run>RunMax) RunMax = analysisTree.event_run; //std::cout << " Run : " << analysisTree.event_run << std::endl; bool isNewRun = true; if (allRuns.size()>0) { for (unsigned int iR=0; iR<allRuns.size(); ++iR) { if (analysisTree.event_run==allRuns.at(iR)) { isNewRun = false; break; } } } if (isNewRun) allRuns.push_back(analysisTree.event_run); if (!lumi) continue; JetsMV.clear(); ElMV.clear(); TauMV.clear(); MuMV.clear(); LeptMV.clear(); mu_index=-1; tau_index=-1; el_index=-1; double MET = sqrt ( analysisTree.pfmet_ex*analysisTree.pfmet_ex + analysisTree.pfmet_ey*analysisTree.pfmet_ey); METV.SetPx(analysisTree.pfmet_ex); METV.SetPy(analysisTree.pfmet_ey); METV.SetPz(analysisTree.pfmet_ez); METV.SetPhi(analysisTree.pfmet_phi); if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; for (unsigned int ijj = 0; ijj<analysisTree.pfjet_count; ++ijj) { JetsV.SetPxPyPzE(analysisTree.pfjet_px[ijj], analysisTree.pfjet_py[ijj], analysisTree.pfjet_pz[ijj], analysisTree.pfjet_e[ijj]); JetsMV.push_back(JetsV); } for (unsigned int imm = 0; imm<analysisTree.muon_count; ++imm) { 
MuV.SetPtEtaPhiM(analysisTree.muon_pt[imm], analysisTree.muon_eta[imm], analysisTree.muon_phi[imm], muonMass); MuMV.push_back(MuV); // mu_index=0; } for (unsigned int ie = 0; ie<analysisTree.electron_count; ++ie) { ElV.SetPtEtaPhiM(analysisTree.electron_pt[ie], analysisTree.electron_eta[ie], analysisTree.electron_phi[ie], electronMass); ElMV.push_back(ElV); // el_index=0; } for (unsigned int itt = 0; itt<analysisTree.tau_count; ++itt) { TauV.SetPtEtaPhiM(analysisTree.tau_pt[itt], analysisTree.tau_eta[itt], analysisTree.tau_phi[itt], tauMass); TauMV.push_back(TauV); // tau_index=0; } if (!isData ) { if (applyPUreweighting) { puweight = float(PUofficial->get_PUweight(double(analysisTree.numtruepileupinteractions))); weight *=puweight; } } // vector <string> ss; ss.push_back(.c_str()); if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; //selecTable.Fill(1,0, weight ); bool trigAccept = false; unsigned int nMainTrigger = 0; bool isMainTrigger = false; unsigned int nfilters = analysisTree.run_hltfilters->size(); // std::cout << "nfiltres = " << nfilters << std::endl; for (unsigned int i=0; i<nfilters; ++i) { // std::cout << "HLT Filter : " << i << " = " << analysisTree.run_hltfilters->at(i) << std::endl; TString HLTFilter(analysisTree.run_hltfilters->at(i)); if (HLTFilter==MainTrigger) { nMainTrigger = i; isMainTrigger = true; } } if (!isMainTrigger) { std::cout << "HLT filter for Mu20 " << MainTrigger << " not found" << std::endl; return(-1); } /////now clear the Mu.El.Jets again to fill them again after cleaning MuMV.clear(); ElMV.clear(); TauMV.clear(); LeptMV.clear(); double isoMuMin = 9999; bool mu_iso=false; vector<int> muons; muons.clear(); for (unsigned int im = 0; im<analysisTree.muon_count; ++im) { if (analysisTree.muon_pt[im]<ptMuonLowCut) continue; if (fabs(analysisTree.muon_eta[im])>etaMuonCut) continue; if 
(fabs(analysisTree.muon_dxy[im])>dxyMuonCut) continue; if (fabs(analysisTree.muon_dz[im])>dzMuonCut) continue; double absIso= analysisTree.muon_r03_sumChargedHadronPt[im] + max(analysisTree.muon_r03_sumNeutralHadronEt[im] + analysisTree.muon_r03_sumPhotonEt[im] - 0.5 * analysisTree.muon_r03_sumPUPt[im],0.0); double relIso = absIso/analysisTree.muon_pt[im]; if (relIso<isoMuonLowCut) continue; if (applyMuonId && !analysisTree.muon_isMedium[im]) continue; //cout<<" after muIso index "<<int(mu_index)<<" pT "<<analysisTree.muon_pt[im]<<" relIso "<<relIso<<" isoMuMin "<<isoMuMin<<" muon_count "<<analysisTree.muon_count<<" im "<<im<<" event "<<iEntry<<endl; if (double(relIso)<double(isoMuMin)) { isoMuMin = relIso; mu_index = int(im); mu_iso=true; //cout<<" after muIso index "<<int(mu_index)<<" pT "<<analysisTree.muon_pt[im]<<" relIso "<<relIso<<" isoMuMin "<<isoMuMin<<" muon_count "<<analysisTree.muon_count<<" im "<<im<<" event "<<iEntry<<endl; muons.push_back(im); MuV.SetPtEtaPhiM(analysisTree.muon_pt[mu_index], analysisTree.muon_eta[mu_index], analysisTree.muon_phi[mu_index], muonMass); MuMV.push_back(MuV); LeptMV.push_back(MuV); } //cout<<" Indexes here "<<im<<" "<<mu_index<<endl; if (relIso == isoMuMin && im != mu_index) { //cout<<" found a pair for muons " <<relIso <<" mu_index "<<mu_index<<" pT "<<analysisTree.muon_pt[int(mu_index)]<<" new index "<<im<<" pT "<<analysisTree.muon_pt[int(im)]<<" event "<<iEntry<<endl; analysisTree.muon_pt[im] > analysisTree.muon_pt[mu_index] ? 
mu_index = int(im) : mu_index = mu_index; } } if (muons.size()==0 || !mu_iso ) continue; double absIso= analysisTree.muon_r03_sumChargedHadronPt[mu_index] + max(analysisTree.muon_r03_sumNeutralHadronEt[mu_index] + analysisTree.muon_r03_sumPhotonEt[mu_index] - 0.5 * analysisTree.muon_r03_sumPUPt[mu_index],0.0); double relIso = absIso/analysisTree.muon_pt[mu_index]; if (relIso>isoMuonHighCut && !InvertLeptonIso) continue; if (relIso>isoMuonHighCutQCD ) { isHighIsoMu=true ;isLowIsoMu=false;} else { isHighIsoMu = false;isLowIsoMu=true;} sort(LeptMV.begin(), LeptMV.end(),ComparePt); if (LeptMV.size() == 0 ) continue; if (InvertLeptonIso && !isHighIsoMu) continue; if (!InvertLeptonIso && isHighIsoMu) continue; if (InvertLeptonIso && isLowIsoMu) continue; //cout<<" Iso check "<<relIso<<" InvertLeptonIso "<<InvertLeptonIso<<" isHighIsoMu "<<isHighIsoMu<<" isLowIsoMu "<<isLowIsoMu<<" cutQCD "<<isoMuonHighCutQCD<<endl; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; double isoTauMin = 999; bool tau_iso = false; vector<int> tau; tau.clear(); for (unsigned int it = 0; it<analysisTree.tau_count; ++it) { if (analysisTree.tau_pt[it] < ptTauLowCut || fabs(analysisTree.tau_eta[it])> etaTauCut) continue; if (analysisTree.tau_decayModeFindingNewDMs[it]<decayModeFindingNewDMs) continue; if ( fabs(analysisTree.tau_leadchargedhadrcand_dz[it])> leadchargedhadrcand_dz) continue; if (analysisTree.tau_againstElectronVLooseMVA5[it]<againstElectronVLooseMVA5) continue; if (analysisTree.tau_againstMuonTight3[it]<againstMuonTight3) continue; //cout<<" "<<analysisTree.tau_byMediumCombinedIsolationDeltaBetaCorr3Hits[it]<<endl; if (!InvertTauIso && analysisTree.tau_byCombinedIsolationDeltaBetaCorrRaw3Hits[it] > byCombinedIsolationDeltaBetaCorrRaw3Hits ) continue; //if (!InvertTauIso && 
analysisTree.tau_byMediumCombinedIsolationDeltaBetaCorr3Hits[it] < 0.5 ) continue; double tauIso = analysisTree.tau_byCombinedIsolationDeltaBetaCorrRaw3Hits[it]; if (tauIso<isoTauMin ) { // cout<<" there was a chenge "<<tauIso<<" "<<isoTauMin<<" it "<<it<<" tau_index "<<tau_index<<" "<<analysisTree.tau_count<<endl; isoTauMin = tauIso; tau_iso=true; tau_index = (int)it; tau.push_back(tau_index); TauV.SetPtEtaPhiM(analysisTree.tau_pt[tau_index], analysisTree.tau_eta[tau_index], analysisTree.tau_phi[tau_index], tauMass); TauMV.push_back(TauV); } continue; if (tauIso==isoTauMin && it != tau_index) { //analysisTree.tau_pt[it] > analysisTree.tau_pt[tau_index] ? tau_index = it : tau_index = tau_index; if (analysisTree.tau_pt[it] > analysisTree.tau_pt[tau_index] ) tau_index = (int)it ; //cout<<" found a pair " <<tauIso <<" "<<tau_index<<" "<<it<<endl; } } if (tau.size()==0 || !tau_iso ) continue; // cout<< " Lets check "<<mu_index <<" "<<tau_index <<" "<<endl; //cout<<" "<<endl; ////////////////////change to new tau inverted definition double tauIsoI = analysisTree.tau_byMediumCombinedIsolationDeltaBetaCorr3Hits[tau_index]; if (tauIsoI > 0.5 && InvertTauIso) {isHighIsoTau =true;} //else {isHighIsoTau =false ; isLowIsoTau=true;} //if (isHighIsoTau && tauIso > 2*byCombinedIsolationDeltaBetaCorrRaw3Hits ) continue; if (InvertTauIso && !isHighIsoTau) continue; if (!InvertTauIso && isHighIsoTau) continue; //if (InvertTauIso && isLowIsoTau) continue; /* continue; double isoTauMin = 999; bool tau_iso = false; vector<int> tau; tau.clear(); for (unsigned int it = 0; it<analysisTree.tau_count; ++it) { if (analysisTree.tau_pt[it] < ptTauLowCut || fabs(analysisTree.tau_eta[it])> etaTauCut) continue; if (analysisTree.tau_decayModeFindingNewDMs[it]<decayModeFindingNewDMs) continue; if ( fabs(analysisTree.tau_leadchargedhadrcand_dz[it])> leadchargedhadrcand_dz) continue; if (analysisTree.tau_againstElectronVLooseMVA5[it]<againstElectronVLooseMVA5) continue; if 
(analysisTree.tau_againstMuonTight3[it]<againstMuonTight3) continue; //if (!InvertTauIso && analysisTree.tau_byCombinedIsolationDeltaBetaCorrRaw3Hits[it] > byCombinedIsolationDeltaBetaCorrRaw3Hits ) continue; cout<<" "<<analysisTree.tau_byMediumCombinedIsolationDeltaBetaCorr3Hits[it]<<endl; //aif (!InvertTauIso && analysisTree.tau_byMediumCombinedIsolationDeltaBetaCorr3Hits[it] < 0.5 ) continue; double tauIso = analysisTree.tau_byCombinedIsolationDeltaBetaCorrRaw3Hits[it]; if (tauIso<isoTauMin ) { // cout<<" there was a chenge "<<tauIso<<" "<<isoTauMin<<" it "<<it<<" tau_index "<<tau_index<<" "<<analysisTree.tau_count<<endl; isoTauMin = tauIso; tau_iso=true; tau_index = int(it); tau.push_back(tau_index); TauV.SetPtEtaPhiM(analysisTree.tau_pt[tau_index], analysisTree.tau_eta[tau_index], analysisTree.tau_phi[tau_index], tauMass); TauMV.push_back(TauV); } if (tauIso==isoTauMin && it != tau_index) { analysisTree.tau_pt[it] > analysisTree.tau_pt[tau_index] ? tau_index = int(it) : tau_index = tau_index; //cout<<" found a pair " <<tauIso <<" "<<tau_index<<" "<<it<<endl; } } if (tau.size()==0 || !tau_iso ) continue; double tauIsoI = analysisTree.tau_byMediumCombinedIsolationDeltaBetaCorr3Hits[tau_index]; if (tauIsoI > 0.5 && InvertTauIso) {isHighIsoTau =true;} //else {isHighIsoTau =false ; isLowIsoTau=true;} //if (isHighIsoTau && tauIso > 2*byCombinedIsolationDeltaBetaCorrRaw3Hits ) continue; if (InvertTauIso && !isHighIsoTau) continue; if (!InvertTauIso && isHighIsoTau) continue; //if (InvertTauIso && isLowIsoTau) continue; */ double q = analysisTree.tau_charge[tau_index] * analysisTree.muon_charge[mu_index]; if (q>0 && Sign=="OS" ) continue; if (q<0 && Sign=="SS" ) continue; bool regionB = (q<0 && isLowIsoMu); bool regionA = (q>0 && isLowIsoMu); bool regionC = (q<0 && isHighIsoMu); bool regionD = (q>0 && isHighIsoMu); if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); 
CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; //cout<<" HOW MANY MUONS DO I HAVE ?? "<<muons.size()<<endl; bool isdRLeptonMatched = false; for (unsigned int iT=0; iT<analysisTree.trigobject_count; ++iT) { if (analysisTree.trigobject_filters[iT][nMainTrigger]) { // Mu17 Leg double dRtrig = deltaR(analysisTree.muon_eta[mu_index],analysisTree.muon_phi[mu_index], analysisTree.trigobject_eta[iT],analysisTree.trigobject_phi[iT]); if (!isData && analysisTree.trigobject_filters[iT][nMainTrigger] && analysisTree.trigobject_pt[iT]>singleMuonTriggerPtCut && dRtrig<deltaRTrigMatch) isdRLeptonMatched = true; if (isData && dRtrig<deltaRTrigMatch) isdRLeptonMatched=true; } } if (!isdRLeptonMatched) continue; double dR = deltaR(analysisTree.tau_eta[tau_index],analysisTree.tau_phi[tau_index], analysisTree.muon_eta[mu_index],analysisTree.muon_phi[mu_index]); if (dR<dRleptonsCutmutau) continue; double ptMu1 = (double)analysisTree.muon_pt[mu_index]; double etaMu1 = (double)analysisTree.muon_eta[mu_index]; float trigweight=1.; float Mu17EffData = (float)SF_muonTrigger->get_EfficiencyData(double(ptMu1),double(etaMu1)); float Mu17EffMC = (float)SF_muonTrigger->get_EfficiencyMC(double(ptMu1),double(etaMu1)); if (!isData) { if (Mu17EffMC>1e-6) trigweight = Mu17EffData / Mu17EffMC; weight *= trigweight; // cout<<" Trigger weight "<<trigweight<<endl; } if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; //Set this flag if there is an opposite-charge muon pair in the event with muons separated by DR>0.15 and both passing the loose selection: bool MuVeto=false; if (doMuVeto){ if (muons.size()>1){ for (unsigned int imv = 0; imv<analysisTree.muon_count; ++imv) { if ( imv != mu_index ){ double absIso= analysisTree.muon_r03_sumChargedHadronPt[imv] + max(analysisTree.muon_r03_sumNeutralHadronEt[imv] + analysisTree.muon_r03_sumPhotonEt[imv] - 
0.5 * analysisTree.muon_r03_sumPUPt[imv],0.0); double relIso = absIso/analysisTree.muon_pt[imv]; double dRr = deltaR(analysisTree.muon_eta[mu_index],analysisTree.muon_phi[mu_index], analysisTree.muon_eta[imv],analysisTree.muon_phi[imv]); bool OSCharge = false; if ( imv != mu_index && analysisTree.muon_charge[imv] != analysisTree.muon_charge[mu_index] ) OSCharge=true; //if ( analysisTree.muon_charge[imv] != analysisTree.muon_charge[mu_index] && analysisTree.muon_isGlobal[imv] && analysisTree.muon_isTracker[imv] && analysisTree.muon_isPF[imv] if ( analysisTree.muon_charge[imv] != analysisTree.muon_charge[mu_index] && analysisTree.muon_isGlobal[imv] && analysisTree.muon_isTracker[imv] && analysisTree.muon_isPF[imv] && analysisTree.muon_pt[imv]> 15 && fabs(analysisTree.muon_eta[imv])< 2.4 && fabs(analysisTree.muon_dxy[imv])<0.045 && fabs(analysisTree.muon_dz[imv] < 0.2 && relIso< 0.3 && analysisTree.muon_isMedium[imv]) && dRr > 0.15 && OSCharge) //removed from last recipe MuVeto=true; } } } } if (MuVeto) continue; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; bool ThirdLeptVeto=false; if (doThirdLeptVeto){ if (analysisTree.electron_count>0) { for (unsigned int iev = 0; iev<analysisTree.electron_count; ++iev) { /* double neutralIsoV = analysisTree.electron_r03_sumNeutralHadronEt[iev] + analysisTree.electron_r03_sumNeutralHadronEt[iev] + analysisTree.electron_r03_sumPhotonEt[iev] - 4*TMath::Pi()*(0.3*0.3)*analysisTree.rho; double IsoWithEA = analysisTree.electron_r03_sumChargedHadronPt[iev] + TMath::Max(double(0), neutralIsoV); */ double IsoWithEA = analysisTree.electron_r03_sumChargedHadronPt[iev] + max(analysisTree.electron_r03_sumNeutralHadronEt[iev] + analysisTree.electron_r03_sumPhotonEt[iev] - 0.5 * analysisTree.electron_r03_sumPUPt[iev], 0.0) ; double relIsoV = IsoWithEA/analysisTree.electron_pt[iev]; bool 
electronMvaId = electronMvaIdWP90(analysisTree.electron_pt[iev], analysisTree.electron_superclusterEta[iev], analysisTree.electron_mva_id_nontrigPhys14[iev]); if ( iev != el_index && analysisTree.electron_pt[iev] > 10 && fabs(analysisTree.electron_eta[iev]) < 2.5 && fabs(analysisTree.electron_dxy[iev])<0.045 && fabs(analysisTree.electron_dz[iev]) < 0.2 && relIsoV< 0.3 && electronMvaId && analysisTree.electron_pass_conversion[iev] && analysisTree.electron_nmissinginnerhits[iev] <=1) ThirdLeptVeto=true; } } if (analysisTree.muon_count>0){ for (unsigned int imvv = 0; imvv<analysisTree.muon_count; ++imvv) { // if ( imvv != mu_index && analysisTree.muon_charge[imvv] != analysisTree.muon_charge[mu_index] ){ double absIso= analysisTree.muon_r03_sumChargedHadronPt[imvv] + max(analysisTree.muon_r03_sumNeutralHadronEt[imvv] + analysisTree.muon_r03_sumPhotonEt[imvv] - 0.5 * analysisTree.muon_r03_sumPUPt[imvv],0.0); double relIso = absIso/analysisTree.muon_pt[imvv]; if ( imvv != mu_index && analysisTree.muon_isMedium[imvv] && analysisTree.muon_pt[imvv]> 10 && fabs(analysisTree.muon_eta[imvv])< 2.4 && fabs(analysisTree.muon_dxy[imvv])<0.045 && fabs(analysisTree.muon_dz[imvv] < 0.2 && relIso< 0.3 && analysisTree.muon_isMedium[imvv]) ) ThirdLeptVeto=true; } } } if (ThirdLeptVeto) continue; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; if (!isData && applyLeptonSF) { //leptonSFweight = SF_yourScaleFactor->get_ScaleFactor(pt, eta) double ptMu1 = (double)analysisTree.muon_pt[mu_index]; double etaMu1 = (double)analysisTree.muon_eta[mu_index]; double IdIsoSF_mu1 = SF_muonIdIso->get_ScaleFactor(ptMu1, etaMu1); MuSF_IdIso_Mu1H->Fill(IdIsoSF_mu1); weight = weight*IdIsoSF_mu1; } if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); 
CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; TLorentzVector muVc ; muVc.SetPtEtaPhiM(analysisTree.muon_pt[mu_index], analysisTree.muon_eta[mu_index], analysisTree.muon_phi[mu_index], muonMass); TLorentzVector tauVc; tauVc.SetPtEtaPhiM(analysisTree.tau_pt[tau_index], analysisTree.tau_eta[tau_index], analysisTree.tau_phi[tau_index], tauMass); double MTv = mT(muVc,METV); if (!isData && applyTFR) { //leptonSFweight = SF_yourScaleFactor->get_ScaleFactor(pt, eta) double ptTau1 = (double)analysisTree.tau_pt[tau_index]; double etaTau1 = (double)analysisTree.tau_eta[tau_index]; double TFRSF_mu1 = SF_TFR->get_ScaleFactor(ptTau1, etaTau1); MuSF_IdIso_Mu1H->Fill(TFRSF_mu1); weight = weight*TFRSF_mu1; //cout<<" "<<TFRSF_mu1<<" for eta "<<etaTau1<< " pT "<< ptTau1<<endl; } if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; if (!isData && ( string::npos != filen.find("TTJets") || string::npos != filen.find("TTPowHeg")) ) //if (!isData ) { if (topPt>0.&&antitopPt>0.) 
{ float topptweight = topPtWeight(topPt,antitopPt); // cout<<" "<<topPt<<" "<<antitopPt<<endl; weight *= topptweight; } } if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; if (MTv<60 ) continue; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; // for (unsigned int j=0;j<LeptMV.size();++j) cout<<" j "<<j<<" "<<LeptMV.at(j).Pt()<<endl; // cout<<""<<endl; ////////jets cleaning vector<int> jets; jets.clear(); TLorentzVector leptonsV, muonJ, jetsLV; // continue; //JetsV.SetPxPyPzE(analysisTree.pfjet_px[ij], analysisTree.pfjet_py[ij], analysisTree.pfjet_pz[ij], analysisTree.pfjet_e[ij]); //double ETmiss = TMath::Sqrt(analysisTree.pfmet_ex*analysisTree.pfmet_ex + analysisTree.pfmet_ey*analysisTree.pfmet_ey); double ETmiss = METV.Pt();//TMath::Sqrt(analysisTree.pfmet_ex*analysisTree.pfmet_ex + analysisTree.pfmet_ey*analysisTree.pfmet_ey); if (InvertMET && ETmiss > 100. ) continue; if (!InvertMET && ETmiss < 100. 
) continue; //that is the nominal selection ie MET > 100 if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; double ptScalarSum = -1; bool btagged= false; JetsMV.clear(); float jetEtaCut = 2.4; float DRmax = 0.5; int countjets = 0; for (unsigned int jet=0; jet<analysisTree.pfjet_count; ++jet) { float absJetEta = fabs(analysisTree.pfjet_eta[jet]); if (absJetEta > etaJetCut) continue; if (fabs(analysisTree.pfjet_pt[jet])<ptJetCut) continue; //double Dr= deltaR(LeptMV.at(il).Eta(), LeptMV.at(il).Phi(), bool isPFJetId = false ; isPFJetId =looseJetiD(analysisTree,jet); if (!isPFJetId) continue; //for (unsigned int lep=0;LeptMV.size();lep++){ //double Dr=(LeptMV.at(lep).Eta(),LeptMV.at(lep).Phi(), double Dr=deltaR(analysisTree.muon_eta[mu_index],analysisTree.muon_phi[mu_index], analysisTree.pfjet_eta[jet],analysisTree.pfjet_phi[jet]); if ( Dr < DRmax) continue; double Drr=deltaR(analysisTree.tau_eta[tau_index],analysisTree.tau_phi[tau_index], analysisTree.pfjet_eta[jet],analysisTree.pfjet_phi[jet]); if ( Drr < DRmax) continue; if (analysisTree.pfjet_btag[jet][0] > bTag) btagged = true; JetsV.SetPxPyPzE(analysisTree.pfjet_px[jet], analysisTree.pfjet_py[jet], analysisTree.pfjet_pz[jet], analysisTree.pfjet_e[jet]); JetsMV.push_back(JetsV); countjets++; } if (countjets >2 ) continue; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; if (btagged ) continue; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; // pt Scalar //cout<<" "<<mu_index<<" "<<tau_index<<" "<<MuMV.at(mu_index).M()<<" "<<TauMV.at(tau_index).M()<<endl; 
TLorentzVector diL = muVc + tauVc; if ( diL.M() < 100 ) continue; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; /* if (ETmiss < 100) continue; if (ETmiss < 120) continue; FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; // topological cut //if (DZeta<dZetaCut) continue; */ //double dRr = deltaR(diL.Eta(), diL.Phi(), METV.Eta(), METV.Phi()); double dRr = deltaR(muVc.Eta(), muVc.Phi(), tauVc.Eta(), tauVc.Phi()); if (dRr>3 ) continue; if(fillplots) FillMainHists(iCut, weight, ElMV, MuMV, TauMV,JetsMV,METV, ChiMass,mIntermediate,analysisTree, Channel, mu_index,el_index,tau_index); CFCounter[iCut]+= weight; iCFCounter[iCut]++; iCut++; FillTree(); selEvents++; } // end of file processing (loop over events in one file) nFiles++; delete _tree; file_->Close(); delete file_; } cout<<"done"<<endl; cout<<" Total events "<<nEvents<<" Will use weight "<<histWeightsH->GetSumOfWeights()<<" Norm Factor for a Lumi of "<<Lumi<<"/pb is "<<XSec*Lumi/( histWeightsH->GetSumOfWeights())<<endl; cout<<" First content "<<CFCounter[0]<<endl; cout<<" Run range from -----> "<<RunMin<<" to "<<RunMax<<endl; /* for (int i=0;i<CutNumb;++i){ CFCounter[i] *= double(XSec*Lumi/( histWeights->GetSumOfWeights())); if (iCFCounter[i] <0.2) statUnc[i] =0; else statUnc[i] = CFCounter[i]/sqrt(iCFCounter[i]); } */ //write out cutflow ofstream tfile; // TString outname = argv[argc-1]; TString outname=argv[2]; TString textfilename = "cutflow_"+outname+"_"+Channel+"_"+argv[3]+".txt"; // tfile.open(textfilename); // tfile << "########################################" << endl; for(int ci = 0; ci < CutNumb; ci++) { // tfile << CutList[ci]<<"\t & \t" // << CFCounter[ci] <<"\t & \t"<< statUnc[ci] <<"\t & \t"<< iCFCounter[ci] << 
endl; CutFlowUnW->SetBinContent(1+ci,0); CutFlow->SetBinContent(1+ci,0); CutFlowUnW->SetBinContent(1+ci,float(CFCounter[ci]) ); CFCounter[ci] *= double(XSec*Lumi/( histWeightsH->GetSumOfWeights())); CutFlow->SetBinContent(1+ci,float(CFCounter[ci])); cout << " i "<<ci<<" "<<iCFCounter[ci]<<" "<<XSec*Lumi/( histWeightsH->GetSumOfWeights())<<" "<<CutFlowUnW->GetBinContent(1+ci)<<" "<<CutFlow->GetBinContent(1+ci)<<endl; if (iCFCounter[ci] <0.2) statUnc[ci] =0; //else statUnc[i] = CFCounter[i]/sqrt(iCFCounter[i]); else statUnc[ci] = sqrt(CFCounter[ci]); } //ofstream tfile1; //TString textfile_Con = "CMG_cutflow_Con_Mu_"+outname+".txt"; //tfile1.open(textfile_Con); //tfile1 << "########################################" << endl; //tfile << "Cut efficiency numbers:" << endl; // tfile << " Cut "<<"\t & \t"<<"#Evnts for "<<Lumi/1000<<" fb-1 & \t"<<" Uncertainty \t"<<" cnt\t"<<endl; // tfile.close(); std::cout << std::endl; int allEvents = int(inputEventsH->GetEntries()); std::cout << "Total number of input events = " << allEvents << std::endl; std::cout << "Total number of events in Tree = " << nEvents << std::endl; std::cout << "Total number of selected events = " << selEvents << std::endl; std::cout << std::endl; file->cd(Channel.c_str()); WriteTree(); hxsec->Fill(XSec); hxsec->Write(); inputEventsH->Write(); histWeightsH->Write(); histRuns->Write(); CutFlowUnW->Write(); CutFlow->Write(); MuSF_IdIso_Mu1H->Write(); file->Write(); file->Close(); delete file; }
void ShaderPieceAssetCompiler::compile(const Input& input, const Configuration& configuration, Output& output) { // Input, configuration and output const std::string& assetInputDirectory = input.assetInputDirectory; const std::string& assetOutputDirectory = input.assetOutputDirectory; RendererRuntime::AssetPackage& outputAssetPackage = *output.outputAssetPackage; // Get the JSON asset object const rapidjson::Value& rapidJsonValueAsset = configuration.rapidJsonDocumentAsset["Asset"]; // Read configuration // TODO(co) Add required properties std::string inputFile; { // Read shader piece asset compiler configuration const rapidjson::Value& rapidJsonValueShaderPieceAssetCompiler = rapidJsonValueAsset["ShaderPieceAssetCompiler"]; inputFile = rapidJsonValueShaderPieceAssetCompiler["InputFile"].GetString(); } // Open the input file std::ifstream inputFileStream(assetInputDirectory + inputFile, std::ios::binary); const std::string assetName = rapidJsonValueAsset["AssetMetadata"]["AssetName"].GetString(); const std::string outputAssetFilename = assetOutputDirectory + assetName + ".shader_piece"; std::ofstream outputFileStream(outputAssetFilename, std::ios::binary); { // Shader piece // Get file size and file data std::string sourceCode; { std::string originalSourceCode; inputFileStream.seekg(0, std::ifstream::end); const std::streampos numberOfBytes = inputFileStream.tellg(); inputFileStream.seekg(0, std::ifstream::beg); originalSourceCode.resize(static_cast<size_t>(numberOfBytes)); inputFileStream.read(const_cast<char*>(originalSourceCode.c_str()), numberOfBytes); // Strip comments from source code StringHelper::stripCommentsFromSourceCode(originalSourceCode, sourceCode); } const std::streampos numberOfBytes = sourceCode.length(); { // Shader piece header RendererRuntime::v1ShaderPiece::Header shaderPieceHeader; shaderPieceHeader.formatType = RendererRuntime::v1ShaderPiece::FORMAT_TYPE; shaderPieceHeader.formatVersion = RendererRuntime::v1ShaderPiece::FORMAT_VERSION; 
shaderPieceHeader.numberOfShaderSourceCodeBytes = static_cast<uint32_t>(numberOfBytes); // Write down the shader piece header outputFileStream.write(reinterpret_cast<const char*>(&shaderPieceHeader), sizeof(RendererRuntime::v1ShaderPiece::Header)); } // Dump the unchanged content into the output file stream outputFileStream.write(sourceCode.c_str(), numberOfBytes); } { // Update the output asset package const std::string assetCategory = rapidJsonValueAsset["AssetMetadata"]["AssetCategory"].GetString(); const std::string assetIdAsString = input.projectName + "/ShaderPiece/" + assetCategory + '/' + assetName; // Output asset RendererRuntime::Asset outputAsset; outputAsset.assetId = RendererRuntime::StringId(assetIdAsString.c_str()); strcpy(outputAsset.assetFilename, outputAssetFilename.c_str()); // TODO(co) Buffer overflow test outputAssetPackage.getWritableSortedAssetVector().push_back(outputAsset); } }
void SceneAssetCompiler::compile(const Input& input, const Configuration& configuration, Output& output) { // Input, configuration and output const std::string& assetInputDirectory = input.assetInputDirectory; const std::string& assetOutputDirectory = input.assetOutputDirectory; RendererRuntime::AssetPackage& outputAssetPackage = *output.outputAssetPackage; // Get the JSON asset object const rapidjson::Value& rapidJsonValueAsset = configuration.rapidJsonDocumentAsset["Asset"]; // Read configuration // TODO(co) Add required properties std::string inputFile; { // Read scene asset compiler configuration const rapidjson::Value& rapidJsonValueSceneAssetCompiler = rapidJsonValueAsset["SceneAssetCompiler"]; inputFile = rapidJsonValueSceneAssetCompiler["InputFile"].GetString(); } // Open the input file const std::string inputFilename = assetInputDirectory + inputFile; std::ifstream inputFileStream(inputFilename, std::ios::binary); const std::string assetName = rapidJsonValueAsset["AssetMetadata"]["AssetName"].GetString(); const std::string outputAssetFilename = assetOutputDirectory + assetName + ".scene"; std::ofstream outputFileStream(outputAssetFilename, std::ios::binary); { // Scene // Parse JSON rapidjson::Document rapidJsonDocument; JsonHelper::parseDocumentByInputFileStream(rapidJsonDocument, inputFileStream, inputFilename, "SceneAsset", "1"); { // Write down the scene resource header RendererRuntime::v1Scene::Header sceneHeader; sceneHeader.formatType = RendererRuntime::v1Scene::FORMAT_TYPE; sceneHeader.formatVersion = RendererRuntime::v1Scene::FORMAT_VERSION; outputFileStream.write(reinterpret_cast<const char*>(&sceneHeader), sizeof(RendererRuntime::v1Scene::Header)); } // Mandatory main sections of the material blueprint const rapidjson::Value& rapidJsonValueSceneAsset = rapidJsonDocument["SceneAsset"]; const rapidjson::Value& rapidJsonValueNodes = rapidJsonValueSceneAsset["Nodes"]; { // Write down the scene nodes RendererRuntime::v1Scene::Nodes nodes; 
nodes.numberOfNodes = rapidJsonValueNodes.MemberCount(); outputFileStream.write(reinterpret_cast<const char*>(&nodes), sizeof(RendererRuntime::v1Scene::Nodes)); // Loop through all scene nodes for (rapidjson::Value::ConstMemberIterator rapidJsonMemberIteratorNodes = rapidJsonValueNodes.MemberBegin(); rapidJsonMemberIteratorNodes != rapidJsonValueNodes.MemberEnd(); ++rapidJsonMemberIteratorNodes) { const rapidjson::Value& rapidJsonValueNode = rapidJsonMemberIteratorNodes->value; const rapidjson::Value* rapidJsonValueItems = rapidJsonValueNode.HasMember("Items") ? &rapidJsonValueNode["Items"] : nullptr; { // Write down the scene node RendererRuntime::v1Scene::Node node; // Get the scene node transform node.transform.scale = glm::vec3(1.0f, 1.0f, 1.0f); if (rapidJsonValueNode.HasMember("Properties")) { const rapidjson::Value& rapidJsonValueProperties = rapidJsonValueNode["Properties"]; // Position, rotation and scale JsonHelper::optionalFloatNProperty(rapidJsonValueProperties, "Position", &node.transform.position.x, 3); JsonHelper::optionalFloatNProperty(rapidJsonValueProperties, "Rotation", &node.transform.rotation.x, 4); JsonHelper::optionalFloatNProperty(rapidJsonValueProperties, "Scale", &node.transform.scale.x, 3); } // Write down the scene node node.numberOfItems = (nullptr != rapidJsonValueItems) ? 
rapidJsonValueItems->MemberCount() : 0; outputFileStream.write(reinterpret_cast<const char*>(&node), sizeof(RendererRuntime::v1Scene::Node)); } // Write down the scene items if (nullptr != rapidJsonValueItems) { for (rapidjson::Value::ConstMemberIterator rapidJsonMemberIteratorItems = rapidJsonValueItems->MemberBegin(); rapidJsonMemberIteratorItems != rapidJsonValueItems->MemberEnd(); ++rapidJsonMemberIteratorItems) { const rapidjson::Value& rapidJsonValueItem = rapidJsonMemberIteratorItems->value; const RendererRuntime::SceneItemTypeId typeId = RendererRuntime::StringId(rapidJsonMemberIteratorItems->name.GetString()); // Get the scene item type specific data number of bytes // TODO(co) Make this more generic via scene factory uint32_t numberOfBytes = 0; if (RendererRuntime::CameraSceneItem::TYPE_ID == typeId) { // Nothing here } else if (RendererRuntime::MeshSceneItem::TYPE_ID == typeId) { numberOfBytes = sizeof(RendererRuntime::v1Scene::MeshItem); } { // Write down the scene item header RendererRuntime::v1Scene::ItemHeader itemHeader; itemHeader.typeId = typeId; itemHeader.numberOfBytes = numberOfBytes; outputFileStream.write(reinterpret_cast<const char*>(&itemHeader), sizeof(RendererRuntime::v1Scene::ItemHeader)); } // Write down the scene item type specific data, if there is any if (0 != numberOfBytes) { if (RendererRuntime::CameraSceneItem::TYPE_ID == typeId) { // Nothing here } else if (RendererRuntime::MeshSceneItem::TYPE_ID == typeId) { // Map the source asset ID to the compiled asset ID const RendererRuntime::AssetId compiledAssetId = JsonHelper::getCompiledAssetId(input, rapidJsonValueItem, "MeshAssetId"); // Write the mesh item data RendererRuntime::v1Scene::MeshItem meshItem; meshItem.meshAssetId = compiledAssetId; outputFileStream.write(reinterpret_cast<const char*>(&meshItem), sizeof(RendererRuntime::v1Scene::MeshItem)); } } } } } } } { // Update the output asset package const std::string assetCategory = 
rapidJsonValueAsset["AssetMetadata"]["AssetCategory"].GetString(); const std::string assetIdAsString = input.projectName + "/Scene/" + assetCategory + '/' + assetName; // Output asset RendererRuntime::Asset outputAsset; outputAsset.assetId = RendererRuntime::StringId(assetIdAsString.c_str()); strcpy(outputAsset.assetFilename, outputAssetFilename.c_str()); // TODO(co) Buffer overflow test outputAssetPackage.getWritableSortedAssetVector().push_back(outputAsset); } }
int main(int argc, char *argv[]) { argList::addOption("file","fileName","specify the input file"); argList::addOption("fileIn","fileName","similar to file option"); argList::addOption("field","fieldName","specify the output file"); argList::addOption("fileOut","fileName","specify the output file"); argList::addOption("folder","constant","specify the folder"); argList::addOption("offset","0","add offset to interpolated value"); Foam::argList args(argc,argv); word matrixFile = "default"; word nameField = "default"; if (args.optionFound("file")) { matrixFile = args.option("file"); } else if (args.optionFound("fileIn")) { WarningIn("setFieldsFromXY.C") << "option fileIn deprecated, use option -file instead" << nl << endl; matrixFile = args.option("fileIn"); } else { FatalErrorIn("setFieldsFromXY.C") << "no input file specified, use option -file" << exit(FatalError); } if (args.optionFound("field")) { nameField = args.option("field"); } else if (args.optionFound("fileOut")) { WarningIn("setFieldsFromXY.C") << "option fileOut deprecated, use option -field instead" << nl << endl; nameField = args.option("fileOut"); } else { FatalErrorIn("setFieldsFromXY.C") << "no field specified, use option -field" << exit(FatalError); } scalar offset = args.optionLookupOrDefault<scalar>("offset",0.); #include "createTime.H" #include "createMesh.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // //- read inputFile permeability matrix IFstream inputFileStream(matrixFile); RectangularMatrix<scalar> inputFile(inputFileStream()); word fileDir = "constant"; if (args.optionFound("folder")) { fileDir = args.option("folder"); } volScalarField outputFile ( IOobject ( nameField, fileDir, mesh, IOobject::MUST_READ, IOobject::AUTO_WRITE ), mesh ); forAll(outputFile,celli) { label id1=-1; label id2=-1; label id3=-1; scalar dist1 = VGREAT; scalar dist2 = VGREAT; scalar dist3 = VGREAT; for(label i=0;i<inputFile.m();i++) { scalar dist = 
Foam::sqrt(pow(inputFile[i][0]-mesh.C()[celli].x(),2)+pow(inputFile[i][1]-mesh.C()[celli].y(),2)); if ( dist < dist1) { //Info << "1 *** " << dist << " " << dist1 << " " << dist2 << " " << dist3 << endl; id3 = id2; dist3 = dist2; id2 = id1; dist2 = dist1; id1 = i; dist1 = dist; } else if ( dist < dist2) { // Info << "2 *** " << dist << " " << dist1 << " " << dist2 << " " << dist3 << endl; id3 = id2; dist3 = dist2; id2 = i; dist2 = dist; } else if ( dist < dist3) { // Info << "3 *** " << dist << " " << dist1 << " " << dist2 << " " << dist3 << endl; id3 = i; dist3 = dist; } } if ( (id1 == -1) || (id2 == -1) || (id3 == -1)) { Info << nl << "Error : three point are not found for interpolation" << nl << id1 << " / " << id2 << " / " << id3 << endl; } outputFile[celli] = ( dist1*inputFile[id1][2] + dist2*inputFile[id2][2] + dist3*inputFile[id3][2] ) / (dist1+dist2+dist3) + offset; }
// -----------------------------------------------------------------------------
// CHelloS60AppUi::HandleCommandL()
// Takes care of command handling.
// -----------------------------------------------------------------------------
//
void CHelloS60AppUi::HandleCommandL(TInt aCommand)
    {
    switch (aCommand)
        {
        case EEikCmdExit:
        case EAknSoftkeyExit:
            // System exit and softkey exit both terminate the application.
            Exit();
            break;
        case ECommand1:
            {
            // Load a string from the resource file and display it
            HBufC* textResource = StringLoader::LoadLC(R_COMMAND1_TEXT);
            CAknInformationNote* informationNote;
            informationNote = new (ELeave) CAknInformationNote;
            // Show the information Note with
            // textResource loaded with StringLoader.
            // ExecuteLD() destroys the note itself ("LD" suffix: may leave, deletes).
            informationNote->ExecuteLD(*textResource);
            // Pop HBuf from CleanUpStack and Destroy it.
            CleanupStack::PopAndDestroy(textResource);
            }
            break;
        case ECommand2:
            {
            RFile rFile;
            //Open file where the stream text is
            User::LeaveIfError(rFile.Open(CCoeEnv::Static()->FsSession(), KFileName, EFileStreamText));//EFileShareReadersOnly));// EFileStreamText));
            CleanupClosePushL(rFile);
            // copy stream from file to RFileStream object
            RFileReadStream inputFileStream(rFile);
            CleanupClosePushL(inputFileStream);
            // HBufC descriptor is created from the RFileStream object.
            // NOTE(review): 32 appears to be the maximum length read from the
            // stream - TODO confirm against the actual file content size.
            HBufC* fileData = HBufC::NewLC(inputFileStream, 32);
            CAknInformationNote* informationNote;
            informationNote = new (ELeave) CAknInformationNote;
            // Show the information Note
            informationNote->ExecuteLD(*fileData);
            // Pop loaded resources from the cleanup stack
            // (count must match the pushes above, in reverse push order).
            CleanupStack::PopAndDestroy(3); // filedata, inputFileStream, rFile
            }
            break;
        case EHelp:
            {
            // Launch the platform help application with this app's help context.
            CArrayFix<TCoeHelpContext>* buf = CCoeAppUi::AppHelpContextL();
            HlpLauncher::LaunchHelpApplicationL(iEikonEnv->WsSession(), buf);
            }
            break;
        case EAbout:
            {
            // Build and run the "About" message query dialog from resources.
            CAknMessageQueryDialog* dlg = new (ELeave) CAknMessageQueryDialog();
            // PrepareLC() pushes the dialog onto the cleanup stack; RunLD()
            // below pops and destroys it.
            dlg->PrepareLC(R_ABOUT_QUERY_DIALOG);
            HBufC* title = iEikonEnv->AllocReadResourceLC(R_ABOUT_DIALOG_TITLE);
            dlg->QueryHeading()->SetTextL(*title);
            CleanupStack::PopAndDestroy(); //title
            HBufC* msg = iEikonEnv->AllocReadResourceLC(R_ABOUT_DIALOG_TEXT);
            dlg->SetMessageTextL(*msg);
            CleanupStack::PopAndDestroy(); //msg
            dlg->RunLD();
            }
            break;
        default:
            // Unknown command ID - treat as a programming error.
            Panic( EHelloS60Ui);
            break;
        }
    }
/*
 * Loads configuration from a "name=value" properties file and stores each
 * recognized parameter into the corresponding member variable.
 *
 * Lines without a '=' are silently ignored, as are unrecognized parameter
 * names. Numeric values are parsed with strtol/strtod, so a malformed number
 * silently becomes 0 (inherited behavior, intentionally kept).
 *
 * Fix: the read loop previously tested inputFileStream.good() BEFORE calling
 * getline(), which iterates once more after the last successful read (and on
 * pre-C++11 libraries can process the final line twice, since a failed
 * getline() left the buffer unchanged). The loop now tests the getline()
 * result itself.
 */
void Properties::readProperties(string propertiesFileName){
    string bufor;          // raw text of the current line
    string parameterName;  // text left of '='
    string parameterValue; // text right of '='
    size_t found;
    this->propertiesFileName = propertiesFileName;
    ifstream inputFileStream (propertiesFileName.c_str());
    if (!inputFileStream){
        cerr<<"Error: properties file: "<<propertiesFileName<<" could not be opened"<<endl;
        // Nothing to parse - leave all parameters at their current values.
        return;
    }
    /*
     * Iterate all file text lines.
     */
    if(inputFileStream.is_open()){
        // Test the getline() result directly so we never act on a failed read.
        while(getline(inputFileStream,bufor)){
            found = bufor.find('=');
            if(found!=string::npos){
                parameterName = bufor.substr(0,found);
                // The length argument may exceed the remainder; substr clamps it.
                parameterValue = bufor.substr(found+1,bufor.size());
                if(parameterName==ALGORITHM_TYPE_PARAMETER_NAME){
                    algorithmType = parameterValue;
                }
                else if(parameterName==ALGORITHM_NAME_PARAMETER_NAME){
                    algorithmName = parameterValue;
                    algorithmNameId = getAlgorithmNameId(algorithmName);
                    algorithmGroup = getAlgorithmGroupName(algorithmName);
                    algorithmGroupId = getAlgorithmGroupNameId(algorithmGroup);
                    // The algorithm name implies a type unless one was given explicitly.
                    if(algorithmType == "")
                        algorithmType = getAlgorithmType(algorithmName);
                }
                else if(parameterName==EPS_PARAMETER_NAME){
                    eps = strtod(parameterValue.c_str(), NULL);
                }
                else if(parameterName==MIN_PTS_PARAMETER_NAME){
                    minPts = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==K_PARAMETER_NAME){
                    k = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==USE_COSINE_SIMILARITY_PARAMETER_NAME){
                    if(parameterValue==TRUE){
                        useCosineSimilarity = true;
                    }
                    else if(parameterValue==FALSE){
                        useCosineSimilarity = false;
                    }
                }
                else if(parameterName==IS_DATASET_FILE_FORMAT_DENSE_PARAMETER_NAME){
                    if(parameterValue==DENSE){
                        isDatasetFileFormatDense = true;
                    }
                    else if(parameterValue==SPARSE){
                        isDatasetFileFormatDense = false;
                    }
                }
                else if(parameterName==DATASET_FILE_PATH_PARAMETER_NAME){
                    datasetFilePath = parameterValue;
                }
                else if(parameterName==DATASET_DIMENSION_PARAMETER_NAME){
                    datasetDimension = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==DATASET_DIMENSION_VALUE_TRESHOLD_PARAMETER_NAME){
                    datasetDimensionValueTreshold = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==DATASET_ELEMENTS_NUMBER_PARAMETER_NAME){
                    datasetElementsNumber = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==IS_DATASET_INTERNAL_REPRESENTATION_DENSE_PARAMETER_NAME){
                    if(parameterValue==DENSE){
                        isDatasetInternalFormatDense = true;
                    }
                    else if(parameterValue==SPARSE){
                        isDatasetInternalFormatDense = false;
                    }
                }
                else if(parameterName==REFERENCE_POINT_PARAMETER_NAME){
                    referencePointsString = parameterValue;
                }
                else if(parameterName==IS_REFERENCE_POINT_FORMAT_DENSE_PARAMETER_NAME){
                    if(parameterValue==DENSE){
                        isReferencePointFormatDense = true;
                    }
                    else if(parameterValue==SPARSE){
                        isReferencePointFormatDense = false;
                    }
                }
                else if(parameterName==PROJECTION_DIMENSIONS_PARAMETER_NAME){
                    // Kept as a raw string; parsing happens elsewhere.
                    projectionDimensionsString = parameterValue;
                }
                else if(parameterName==PROJECTION_SOURCE_SEQUENCE_PARAMETER_NAME){
                    // Comma-separated tokens of the form "<char><number>";
                    // the leading char is the criterion, the digits its index.
                    size_t size = parameterValue.size();
                    size_t begin = 0;
                    size_t end;
                    projectionSortingCriteriaString = parameterValue;
                    while(1){
                        end = parameterValue.find(',', begin);
                        if(end==string::npos){
                            // Last token: take everything to the end of the string.
                            string value = parameterValue.substr(begin+1,size - begin);
                            projectionSourceSequence.push_back(pair<char, unsigned long>(parameterValue.at(begin),strtol(value.c_str(), NULL, 10)));
                            break;
                        }
                        else{
                            // strtol() stops at the trailing ',' included by this length.
                            string value = parameterValue.substr(begin+1,end - begin);
                            projectionSourceSequence.push_back(pair<char, unsigned long>(parameterValue.at(begin),strtol(value.c_str(), NULL, 10)));
                            begin = end + 1;
                        }
                    }
                }
                else if(parameterName==CLASSIFICATION_SUBSET_FACTOR_PARAMETER_NAME){
                    classificationSubsetFactor = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==USE_BINARY_PLACEMENT_PARAMETER_NAME){
                    if(parameterValue == TRUE){
                        useBinaryPlacement = true;
                    }
                    else if(parameterValue == FALSE){
                        useBinaryPlacement = false;
                    }
                }
                else if(parameterName==P_SAMPLE_INDEX_PARAMETER_NAME){
                    pSampleIndex = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==S_SAMPLE_INDEX_PARAMETER_NAME){
                    sSampleIndex = strtol(parameterValue.c_str(), NULL, 10);
                }
                else if(parameterName==SEARCH_METHOD_PARAMETER_NAME){
                    searchMethod = parameterValue;
                }
                else if(parameterName==IS_USE_DATASET_USE_INDEX){
                    if(parameterValue==TRUE){
                        isUseDatasetIndexAcess = true;
                    }
                    else if(parameterValue==FALSE){
                        isUseDatasetIndexAcess = false;
                    }
                }
                else if(parameterName==IS_USE_BOUNDARIES){
                    if(parameterValue==TRUE){
                        isUseBoundaries = true;
                    }
                    else if(parameterValue==FALSE){
                        isUseBoundaries = false;
                    }
                }
            }
        }
        inputFileStream.close();
    }
}
void loadObj(const string& fileName, const D3DXCOLOR& color, float scale, vector<Vertex>& vertices, unsigned int normalCount, unsigned int positionCount, unsigned int texCoordCount, unsigned int vertexCount) { vertices.reserve(vertexCount); ifstream inputFileStream(fileName.c_str()); if (inputFileStream.fail()) { throw exception(); } char* rawSplit = new char[MAX_SPLIT_LENGTH]; vector<string> lines = splitString(rawSplit, inputFileStream, '\n', normalCount + positionCount + texCoordCount + vertexCount / 3 + 50); vector<vector<string> > faces; faces.reserve(vertexCount / 3); vector<D3DXVECTOR3> normals; normals.reserve(normalCount); vector<D3DXVECTOR3> positions; positions.reserve(positionCount); vector<D3DXVECTOR2> texCoords; texCoords.reserve(texCoordCount); for (unsigned int lineIndex = 0; lineIndex < lines.size(); lineIndex++) { if (lines[lineIndex].empty()) { continue; } istringstream inputLineStream(lines[lineIndex]); vector<string> splitLine = splitString(rawSplit, inputLineStream, ' ', 4); if (splitLine[0] == "v") { D3DXVECTOR3 position( (float) atof(splitLine[1].c_str()) * scale, (float) atof(splitLine[2].c_str()) * scale, (float) atof(splitLine[3].c_str()) * scale); positions.push_back(position); } else if (splitLine[0] == "vn") { D3DXVECTOR3 normal( (float) atof(splitLine[1].c_str()), (float) atof(splitLine[2].c_str()), (float) atof(splitLine[3].c_str())); normals.push_back(normal); } else if (splitLine[0] == "vt") { D3DXVECTOR2 texCoord( (float) atof(splitLine[1].c_str()), 1.0f - (float) atof(splitLine[2].c_str())); texCoords.push_back(texCoord); } else if (splitLine[0] == "f") { faces.push_back(splitLine); } } // Read the faces from the file and populate the arrays. 
for (unsigned int faceIndex = 0; faceIndex < faces.size(); faceIndex++) { vector<string> face = faces[faceIndex]; for (unsigned int vertexIndex = 1; vertexIndex < face.size() - 1; vertexIndex++) { istringstream inputFaceStream(face[vertexIndex]); vector<string> splitVertex = splitString(rawSplit, inputFaceStream, '/', 3); Vertex vertex; vertex.color = color; if (!normals.empty()) { vertex.normal = normals[atoi(splitVertex[2].c_str()) - 1]; } vertex.position = positions[atoi(splitVertex[0].c_str()) - 1]; if (!texCoords.empty()) { vertex.texCoord = texCoords[atoi(splitVertex[1].c_str()) - 1]; } vertices.push_back(vertex); } // Create face normals if none were provided. if (normals.empty()) { unsigned int vertexCount = vertices.size(); D3DXVECTOR3 edge0 = vertices[vertexCount - 1].position - vertices[vertexCount - 2].position; D3DXVECTOR3 edge1 = vertices[vertexCount - 1].position - vertices[vertexCount - 3].position; D3DXVECTOR3 faceNormal; D3DXVec3Cross(&faceNormal, &edge0, &edge1); D3DXVec3Normalize(&faceNormal, &faceNormal); vertices[vertexCount - 1].normal = faceNormal; vertices[vertexCount - 2].normal = faceNormal; vertices[vertexCount - 3].normal = faceNormal; } } delete rawSplit; }
/**
 * Compiles a single source asset into its runtime format for the given renderer target
 * and registers the result in the output asset package.
 *
 * @param asset               Source asset to compile; "assetFilename" is relative to the project directory
 * @param rendererTarget      Name of the renderer target to compile for
 * @param outputAssetPackage  Receives the compiled asset registration
 *
 * @throws std::runtime_error if the asset ID inside the asset file does not match the
 *         asset package entry, or if the asset type is unknown
 */
void ProjectImpl::compileAsset(const RendererRuntime::Asset& asset, const char* rendererTarget, RendererRuntime::AssetPackage& outputAssetPackage)
{
    // Open the input stream
    const std::string absoluteAssetFilename = mProjectDirectory + asset.assetFilename;
    std::ifstream inputFileStream(absoluteAssetFilename, std::ios::binary);

    // Parse JSON ("Asset" format, version "1")
    rapidjson::Document rapidJsonDocument;
    JsonHelper::parseDocumentByInputFileStream(rapidJsonDocument, inputFileStream, absoluteAssetFilename, "Asset", "1");

    // Mandatory main sections of the asset
    const rapidjson::Value& rapidJsonValueAsset = rapidJsonDocument["Asset"];
    const rapidjson::Value& rapidJsonValueAssetMetadata = rapidJsonValueAsset["AssetMetadata"];

    // Check asset ID match: A sanity check in here doesn't hurt
    // NOTE(review): std::atoi silently yields 0 on a non-numeric "AssetId" string;
    // the mismatch check below would then report it as asset ID 0.
    const RendererRuntime::AssetId assetId = static_cast<uint32_t>(std::atoi(rapidJsonValueAssetMetadata["AssetId"].GetString()));
    if (assetId != asset.assetId)
    {
        const std::string message = "Failed to compile asset with filename \"" + std::string(asset.assetFilename) + "\": According to the asset package it should be asset ID " + std::to_string(asset.assetId) + " but inside the asset file it's asset ID " + std::to_string(assetId);
        throw std::runtime_error(message);
    }

    // Dispatch asset compiler
    // TODO(co) Add multithreading support: Add compiler queue which is processed in the background, ensure compiler instances are reused

    // Get the asset input directory and asset output directory
    const std::string assetInputDirectory = STD_FILESYSTEM_PATH(absoluteAssetFilename).parent_path().generic_string() + '/';
    const std::string assetType = rapidJsonValueAssetMetadata["AssetType"].GetString();
    const std::string assetCategory = rapidJsonValueAssetMetadata["AssetCategory"].GetString();
    const std::string assetOutputDirectory = "../" + getRenderTargetDataRootDirectory(rendererTarget) + mAssetPackageDirectoryName + assetType + '/' + assetCategory + '/';

    // Ensure that the asset output directory exists, else creating output file streams will fail
    // (pre-C++17 filesystem: MSVC shipped it as std::tr2::sys, others as std::experimental::filesystem)
#ifdef WIN32
    std::tr2::sys::create_directories(assetOutputDirectory);
#else
    std::experimental::filesystem::create_directories(assetOutputDirectory);
#endif

    // Asset compiler input
    IAssetCompiler::Input input(mProjectName, assetInputDirectory, assetOutputDirectory, mSourceAssetIdToCompiledAssetId, mSourceAssetIdToAbsoluteFilename);

    // Asset compiler configuration: combines this asset's document with the
    // project-wide "Targets" section of the project settings document
    assert(nullptr != mRapidJsonDocument);
    const IAssetCompiler::Configuration configuration(rapidJsonDocument, (*mRapidJsonDocument)["Targets"], rendererTarget);

    // Asset compiler output
    IAssetCompiler::Output output;
    output.outputAssetPackage = &outputAssetPackage;

    // Evaluate the asset type and continue with the processing in the asset type specific way
    // TODO(co) Currently this is fixed build in, later on me might want to have this dynamic so we can plugin additional asset compilers
    const AssetCompilerTypeId assetCompilerTypeId(assetType.c_str());
    if (TextureAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        TextureAssetCompiler().compile(input, configuration, output);
    }
    else if (ShaderPieceAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        ShaderPieceAssetCompiler().compile(input, configuration, output);
    }
    else if (ShaderBlueprintAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        ShaderBlueprintAssetCompiler().compile(input, configuration, output);
    }
    else if (MaterialBlueprintAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        MaterialBlueprintAssetCompiler().compile(input, configuration, output);
    }
    else if (MaterialAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        MaterialAssetCompiler().compile(input, configuration, output);
    }
    else if (SkeletonAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        SkeletonAssetCompiler().compile(input, configuration, output);
    }
    else if (MeshAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        MeshAssetCompiler().compile(input, configuration, output);
    }
    else if (SceneAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        SceneAssetCompiler().compile(input, configuration, output);
    }
    else if (CompositorNodeAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        CompositorNodeAssetCompiler().compile(input, configuration, output);
    }
    else if (CompositorWorkspaceAssetCompiler::TYPE_ID == assetCompilerTypeId)
    {
        CompositorWorkspaceAssetCompiler().compile(input, configuration, output);
    }
    else
    {
        const std::string message = "Failed to compile asset with filename \"" + std::string(asset.assetFilename) + "\" and ID " + std::to_string(asset.assetId) + ": Asset type \"" + assetType + "\" is unknown";
        throw std::runtime_error(message);
    }
}