BlockStreamIteratorBase* Filter::getIteratorTree(const unsigned& blocksize){
  BlockStreamIteratorBase* child_iterator=child_->getIteratorTree(blocksize);
  ExpandableBlockStreamFilter::State state;
  state.block_size_=blocksize;
  state.child_=child_iterator;
  state.comparator_list_=comparator_list_;
  Dataflow dataflow=getDataflow();
  state.schema_=getSchema(dataflow.attribute_list_);
  BlockStreamIteratorBase* filter=new ExpandableBlockStreamFilter(state);
  return filter;
}
BlockStreamIteratorBase* Filter::getIteratorTree(const unsigned& blocksize){
  Dataflow dataflow=getDataflow();
  BlockStreamIteratorBase* child_iterator=child_->getIteratorTree(blocksize);
  ExpandableBlockStreamFilter::State state;
  state.block_size_=blocksize;
  state.child_=child_iterator;
  state.v_ei_=exprArray_;
  state.qual_=qual_;
  state.colindex_=colindex_;
  // assert(!comparator_list_.empty());
  state.comparator_list_=comparator_list_;
  // assert(!state.comparator_list_.empty());
  state.schema_=getSchema(dataflow.attribute_list_);
  BlockStreamIteratorBase* filter=new ExpandableBlockStreamFilter(state);
  return filter;
}
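For orientation, here is a minimal, hypothetical sketch of how these getIteratorTree() calls are driven from the top of a logical plan. The LogicalOperator base type, the buildPhysicalPlan() wrapper, and the block-size value are illustrative assumptions rather than part of the code above; the only entry point used is getIteratorTree(block_size).

/* Hypothetical driver, for illustration only. Assumes the logical operators
 * (Filter, EqualJoin, LogicalScan, ...) share a LogicalOperator base class that
 * declares getIteratorTree(); the block size below is an arbitrary example. */
BlockStreamIteratorBase* buildPhysicalPlan(LogicalOperator* logical_plan_root){
  const unsigned block_size=64*1024;  /* illustrative block size in bytes */
  /* each operator builds its own physical iterator after recursively asking
   * its children to build theirs, yielding one physical iterator tree */
  return logical_plan_root->getIteratorTree(block_size);
}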
BlockStreamIteratorBase* EqualJoin::getIteratorTree(const unsigned& block_size){
  if(dataflow_==0){
    getDataflow();
  }
  BlockStreamJoinIterator* join_iterator=0;  /* remains 0 if no branch below matches */
  BlockStreamIteratorBase* child_iterator_left=left_child_->getIteratorTree(block_size);
  BlockStreamIteratorBase* child_iterator_right=right_child_->getIteratorTree(block_size);
  Dataflow dataflow_left=left_child_->getDataflow();
  Dataflow dataflow_right=right_child_->getDataflow();

  BlockStreamJoinIterator::State state;
  state.block_size_=block_size;
  state.ht_nbuckets=1024*1024;
  // state.ht_nbuckets=1024;
  state.input_schema_left=getSchema(dataflow_left.attribute_list_);
  state.input_schema_right=getSchema(dataflow_right.attribute_list_);
  state.ht_schema=getSchema(dataflow_left.attribute_list_);
  /* the bucket size is 64-byte-aligned */
  // state.ht_bucketsize=((state.input_schema_left->getTupleMaxSize()-1)/64+1)*64;
  /*
   * In the initial implementation, I rounded the bucket size up to the cache
   * line size (64 bytes). I later realized that, unlike aggregation, the hash
   * table buckets in the build phase of a hash join fill up very quickly, so a
   * relatively large bucket size reduces the number of overflow buckets and
   * avoids the random memory accesses caused by reading them.
   */
  state.ht_bucketsize=128;
  state.output_schema=getSchema(dataflow_->attribute_list_);
  state.joinIndex_left=getLeftJoinKeyIndexList();
  state.joinIndex_right=getRightJoinKeyIndexList();
  state.payload_left=getLeftPayloadIndexList();
  state.payload_right=getRightPayloadIndexList();

  switch(join_police_){
    case no_repartition:{
      state.child_left=child_iterator_left;
      state.child_right=child_iterator_right;
      join_iterator=new BlockStreamJoinIterator(state);
      break;
    }
    case left_repartition:{
      BlockStreamExpander::State expander_state;
      expander_state.block_count_in_buffer_=EXPANDER_BUFFER_SIZE;
      expander_state.block_size_=block_size;
      expander_state.init_thread_count_=Config::initial_degree_of_parallelism;
      expander_state.child_=child_iterator_left;
      expander_state.schema_=getSchema(dataflow_left.attribute_list_);
      BlockStreamIteratorBase* expander=new BlockStreamExpander(expander_state);

      NodeTracker* node_tracker=NodeTracker::getInstance();
      ExpandableBlockStreamExchangeEpoll::State exchange_state;
      exchange_state.block_size_=block_size;
      exchange_state.child_=expander;  //child_iterator_left;
      exchange_state.exchange_id_=IDsGenerator::getInstance()->generateUniqueExchangeID();
      std::vector<NodeID> upper_id_list=getInvolvedNodeID(dataflow_->property_.partitioner);
      exchange_state.upper_ip_list_=convertNodeIDListToNodeIPList(upper_id_list);
      std::vector<NodeID> lower_id_list=getInvolvedNodeID(dataflow_left.property_.partitioner);
      exchange_state.lower_ip_list_=convertNodeIDListToNodeIPList(lower_id_list);

      const Attribute right_partition_key=dataflow_->property_.partitioner.getPartitionKey();
      /* get the left attribute that corresponds to the partition key */
      Attribute left_partition_key=joinkey_pair_list_[getIndexInRightJoinKeyList(right_partition_key)].first;
      exchange_state.partition_schema_=partition_schema::set_hash_partition(getIndexInAttributeList(dataflow_left.attribute_list_,left_partition_key));
      // exchange_state.schema=getSchema(dataflow_left.attribute_list_,dataflow_right.attribute_list_);
      exchange_state.schema_=getSchema(dataflow_left.attribute_list_);
      BlockStreamIteratorBase* exchange=new ExpandableBlockStreamExchangeEpoll(exchange_state);

      state.child_left=exchange;
      state.child_right=child_iterator_right;
      join_iterator=new BlockStreamJoinIterator(state);
      break;
    }
    case right_repartition:{
      BlockStreamExpander::State expander_state;
      expander_state.block_count_in_buffer_=EXPANDER_BUFFER_SIZE;
      expander_state.block_size_=block_size;
      expander_state.init_thread_count_=Config::initial_degree_of_parallelism;
      expander_state.child_=child_iterator_right;
      expander_state.schema_=getSchema(dataflow_right.attribute_list_);
      BlockStreamIteratorBase* expander=new BlockStreamExpander(expander_state);

      NodeTracker* node_tracker=NodeTracker::getInstance();
      ExpandableBlockStreamExchangeEpoll::State exchange_state;
      exchange_state.block_size_=block_size;
      exchange_state.child_=expander;
      exchange_state.exchange_id_=IDsGenerator::getInstance()->generateUniqueExchangeID();
      std::vector<NodeID> upper_id_list=getInvolvedNodeID(dataflow_->property_.partitioner);
      exchange_state.upper_ip_list_=convertNodeIDListToNodeIPList(upper_id_list);
      std::vector<NodeID> lower_id_list=getInvolvedNodeID(dataflow_right.property_.partitioner);
      exchange_state.lower_ip_list_=convertNodeIDListToNodeIPList(lower_id_list);

      const Attribute output_partition_key=dataflow_->property_.partitioner.getPartitionKey();
      /* get the right attribute that corresponds to the partition key */
      Attribute right_repartition_key;
      if(dataflow_->property_.partitioner.hasShadowPartitionKey()){
        right_repartition_key=joinkey_pair_list_[getIndexInLeftJoinKeyList(output_partition_key,dataflow_->property_.partitioner.getShadowAttributeList())].second;
      }
      else{
        right_repartition_key=joinkey_pair_list_[getIndexInLeftJoinKeyList(output_partition_key)].second;
      }
      exchange_state.partition_schema_=partition_schema::set_hash_partition(getIndexInAttributeList(dataflow_right.attribute_list_,right_repartition_key));
      exchange_state.schema_=getSchema(dataflow_right.attribute_list_);
      BlockStreamIteratorBase* exchange=new ExpandableBlockStreamExchangeEpoll(exchange_state);

      state.child_left=child_iterator_left;
      state.child_right=exchange;
      join_iterator=new BlockStreamJoinIterator(state);
      break;
    }
    case complete_repartition:{
      /* build the left input */
      BlockStreamExpander::State expander_state_l;
      expander_state_l.block_count_in_buffer_=EXPANDER_BUFFER_SIZE;
      expander_state_l.block_size_=block_size;
      expander_state_l.init_thread_count_=Config::initial_degree_of_parallelism;
      expander_state_l.child_=child_iterator_left;
      expander_state_l.schema_=getSchema(dataflow_left.attribute_list_);
      BlockStreamIteratorBase* expander_l=new BlockStreamExpander(expander_state_l);

      ExpandableBlockStreamExchangeEpoll::State l_exchange_state;
      l_exchange_state.block_size_=block_size;
      l_exchange_state.child_=expander_l;
      l_exchange_state.exchange_id_=IDsGenerator::getInstance()->generateUniqueExchangeID();
      std::vector<NodeID> lower_id_list=getInvolvedNodeID(dataflow_left.property_.partitioner);
      l_exchange_state.lower_ip_list_=convertNodeIDListToNodeIPList(lower_id_list);
      std::vector<NodeID> upper_id_list=getInvolvedNodeID(dataflow_->property_.partitioner);
      l_exchange_state.upper_ip_list_=convertNodeIDListToNodeIPList(upper_id_list);
      const Attribute left_partition_key=dataflow_->property_.partitioner.getPartitionKey();
      l_exchange_state.partition_schema_=partition_schema::set_hash_partition(getIndexInAttributeList(dataflow_left.attribute_list_,left_partition_key));
      l_exchange_state.schema_=getSchema(dataflow_left.attribute_list_);
      BlockStreamIteratorBase* l_exchange=new ExpandableBlockStreamExchangeEpoll(l_exchange_state);

      /* build the right input */
      BlockStreamExpander::State expander_state_r;
      expander_state_r.block_count_in_buffer_=EXPANDER_BUFFER_SIZE;
      expander_state_r.block_size_=block_size;
      expander_state_r.init_thread_count_=Config::initial_degree_of_parallelism;
      expander_state_r.child_=child_iterator_right;
      expander_state_r.schema_=getSchema(dataflow_right.attribute_list_);
      BlockStreamIteratorBase* expander_r=new BlockStreamExpander(expander_state_r);

      ExpandableBlockStreamExchangeEpoll::State r_exchange_state;
      r_exchange_state.block_size_=block_size;
      r_exchange_state.child_=expander_r;
      r_exchange_state.exchange_id_=IDsGenerator::getInstance()->generateUniqueExchangeID();
      lower_id_list=getInvolvedNodeID(dataflow_right.property_.partitioner);
      r_exchange_state.lower_ip_list_=convertNodeIDListToNodeIPList(lower_id_list);
      upper_id_list=getInvolvedNodeID(dataflow_->property_.partitioner);
      r_exchange_state.upper_ip_list_=convertNodeIDListToNodeIPList(upper_id_list);
      const Attribute right_partition_key=joinkey_pair_list_[getIndexInLeftJoinKeyList(left_partition_key)].second;
      r_exchange_state.partition_schema_=partition_schema::set_hash_partition(getIndexInAttributeList(dataflow_right.attribute_list_,right_partition_key));
      r_exchange_state.schema_=getSchema(dataflow_right.attribute_list_);
      BlockStreamIteratorBase* r_exchange=new ExpandableBlockStreamExchangeEpoll(r_exchange_state);

      /* finally, build the join iterator itself */
      state.child_left=l_exchange;
      state.child_right=r_exchange;
      join_iterator=new BlockStreamJoinIterator(state);
      break;
    }
    default:{
      break;
    }
  }
  return join_iterator;
}
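The left_repartition, right_repartition and complete_repartition branches above all wrap a child iterator in the same Expander-plus-Exchange pair; only the schema, the partition key index and the node lists differ. The helper below is a hedged refactoring sketch of that shared wiring, not part of the source: its name, parameter list and the Schema* type are assumptions, while every call it makes appears verbatim in the branches above.

/* Hypothetical helper (not in the source): wraps `child` in a BlockStreamExpander
 * followed by an ExpandableBlockStreamExchangeEpoll that hash-partitions on
 * `partition_key_index` and ships blocks from `lower_id_list` to `upper_id_list`. */
BlockStreamIteratorBase* EqualJoin::repartition(BlockStreamIteratorBase* child,
    Schema* schema, unsigned partition_key_index,
    const std::vector<NodeID>& upper_id_list,
    const std::vector<NodeID>& lower_id_list,
    const unsigned block_size){
  BlockStreamExpander::State expander_state;
  expander_state.block_count_in_buffer_=EXPANDER_BUFFER_SIZE;
  expander_state.block_size_=block_size;
  expander_state.init_thread_count_=Config::initial_degree_of_parallelism;
  expander_state.child_=child;
  expander_state.schema_=schema;
  BlockStreamIteratorBase* expander=new BlockStreamExpander(expander_state);

  ExpandableBlockStreamExchangeEpoll::State exchange_state;
  exchange_state.block_size_=block_size;
  exchange_state.child_=expander;
  exchange_state.exchange_id_=IDsGenerator::getInstance()->generateUniqueExchangeID();
  exchange_state.upper_ip_list_=convertNodeIDListToNodeIPList(upper_id_list);
  exchange_state.lower_ip_list_=convertNodeIDListToNodeIPList(lower_id_list);
  exchange_state.partition_schema_=partition_schema::set_hash_partition(partition_key_index);
  exchange_state.schema_=schema;
  return new ExpandableBlockStreamExchangeEpoll(exchange_state);
}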
bool LogicalScan::GetOptimalPhysicalPlan(Requirement requirement,PhysicalPlanDescriptor& physical_plan_descriptor, const unsigned & block_size){
  Dataflow dataflow=getDataflow();
  NetworkTransfer transfer=requirement.requireNetworkTransfer(dataflow);

  ExpandableBlockStreamProjectionScan::State state;
  state.block_size_=block_size;
  state.projection_id_=target_projection_->getProjectionID();
  state.schema_=getSchema(dataflow_->attribute_list_);
  state.sample_rate_=sample_rate_;
  PhysicalPlan scan=new ExpandableBlockStreamProjectionScan(state);

  if(transfer==NONE){
    physical_plan_descriptor.plan=scan;
    physical_plan_descriptor.dataflow=dataflow;
    physical_plan_descriptor.cost+=0;
  }
  else{
    physical_plan_descriptor.cost+=dataflow.getAggregatedDatasize();

    ExpandableBlockStreamExchangeEpoll::State state;
    state.block_size_=block_size;
    state.child_=scan;
    state.exchange_id_=IDsGenerator::getInstance()->generateUniqueExchangeID();
    state.schema_=getSchema(dataflow.attribute_list_);

    std::vector<NodeID> lower_id_list=getInvolvedNodeID(dataflow.property_.partitioner);
    state.lower_id_list_=lower_id_list;

    std::vector<NodeID> upper_id_list;
    if(requirement.hasRequiredLocations()){
      upper_id_list=requirement.getRequiredLocations();
    }
    else{
      if(requirement.hasRequiredPartitionFunction()){
        /* the partition function determines the number of partitions */
        PartitionFunction* partition_function=requirement.getPartitionFunction();
        upper_id_list=std::vector<NodeID>(NodeTracker::getInstance()->getNodeIDList().begin(),NodeTracker::getInstance()->getNodeIDList().begin()+partition_function->getNumberOfPartitions()-1);
      }
      else{
        //TODO: decide the degree of parallelism
        upper_id_list=NodeTracker::getInstance()->getNodeIDList();
      }
    }
    state.upper_id_list_=upper_id_list;
    state.partition_schema_=partition_schema::set_hash_partition(getIndexInAttributeList(dataflow.attribute_list_,requirement.getPartitionKey()));
    assert(state.partition_schema_.partition_key_index>=0);
    BlockStreamIteratorBase* exchange=new ExpandableBlockStreamExchangeEpoll(state);

    Dataflow new_dataflow;
    new_dataflow.attribute_list_=dataflow.attribute_list_;
    new_dataflow.property_.partitioner.setPartitionKey(requirement.getPartitionKey());
    new_dataflow.property_.partitioner.setPartitionFunction(PartitionFunctionFactory::createBoostHashFunction(state.upper_id_list_.size()));

    const unsigned total_size=dataflow.getAggregatedDatasize();
    const unsigned degree_of_parallelism=state.upper_id_list_.size();
    std::vector<DataflowPartition> dataflow_partition_list;
    for(unsigned i=0;i<degree_of_parallelism;i++){
      const NodeID location=upper_id_list[i];
      /* Currently, the output size cannot be predicted due to the absence of data
       * statistics, so we simply use the following magic number. */
      const unsigned datasize=total_size/degree_of_parallelism;
      DataflowPartition dfp(i,datasize,location);
      dataflow_partition_list.push_back(dfp);
    }
    new_dataflow.property_.partitioner.setPartitionList(dataflow_partition_list);

    physical_plan_descriptor.plan=exchange;
    physical_plan_descriptor.dataflow=new_dataflow;
    physical_plan_descriptor.cost+=new_dataflow.getAggregatedDatasize();
  }

  if(requirement.passLimits(physical_plan_descriptor.cost))
    return true;
  else
    return false;
}
bool Filter::GetOptimalPhysicalPlan(Requirement requirement,PhysicalPlanDescriptor& physical_plan_descriptor, const unsigned & block_size){
  PhysicalPlanDescriptor physical_plan;
  std::vector<PhysicalPlanDescriptor> candidate_physical_plans;

  /* no requirement is imposed on the child */
  if(child_->GetOptimalPhysicalPlan(Requirement(),physical_plan)){
    NetworkTransfer transfer=requirement.requireNetworkTransfer(physical_plan.dataflow);
    if(transfer==NONE){
      ExpandableBlockStreamFilter::State state;
      state.block_size_=block_size;
      state.child_=physical_plan.plan;
      state.qual_=qual_;
      state.colindex_=colindex_;
      state.comparator_list_=comparator_list_;
      state.v_ei_=exprArray_;
      Dataflow dataflow=getDataflow();
      state.schema_=getSchema(dataflow.attribute_list_);
      BlockStreamIteratorBase* filter=new ExpandableBlockStreamFilter(state);
      physical_plan.plan=filter;
      candidate_physical_plans.push_back(physical_plan);
    }
    else if((transfer==OneToOne)||(transfer==Shuffle)){
      /* The input data flow must be transferred over the network to meet the requirement.
       * TODO: implement OneToOne Exchange.
       */
      ExpandableBlockStreamFilter::State state_f;
      state_f.block_size_=block_size;
      state_f.child_=physical_plan.plan;
      state_f.v_ei_=exprArray_;
      state_f.qual_=qual_;
      state_f.colindex_=colindex_;
      state_f.comparator_list_=comparator_list_;
      Dataflow dataflow=getDataflow();
      state_f.schema_=getSchema(dataflow.attribute_list_);
      BlockStreamIteratorBase* filter=new ExpandableBlockStreamFilter(state_f);
      physical_plan.plan=filter;
      physical_plan.cost+=physical_plan.dataflow.getAggregatedDatasize();

      ExpandableBlockStreamExchangeEpoll::State state;
      state.block_size_=block_size;
      state.child_=physical_plan.plan;
      state.exchange_id_=IDsGenerator::getInstance()->generateUniqueExchangeID();
      state.schema_=getSchema(physical_plan.dataflow.attribute_list_);

      std::vector<NodeID> upper_id_list;
      if(requirement.hasRequiredLocations()){
        upper_id_list=requirement.getRequiredLocations();
      }
      else{
        if(requirement.hasRequiredPartitionFunction()){
          /* the partition function determines the number of partitions */
          PartitionFunction* partition_function=requirement.getPartitionFunction();
          upper_id_list=std::vector<NodeID>(NodeTracker::getInstance()->getNodeIDList().begin(),NodeTracker::getInstance()->getNodeIDList().begin()+partition_function->getNumberOfPartitions()-1);
        }
        else{
          //TODO: decide the degree of parallelism
          upper_id_list=NodeTracker::getInstance()->getNodeIDList();
        }
      }
      state.upper_ip_list_=convertNodeIDListToNodeIPList(upper_id_list);

      assert(requirement.hasReuiredPartitionKey());
      state.partition_schema_=partition_schema::set_hash_partition(this->getIndexInAttributeList(physical_plan.dataflow.attribute_list_,requirement.getPartitionKey()));
      assert(state.partition_schema_.partition_key_index>=0);

      std::vector<NodeID> lower_id_list=getInvolvedNodeID(physical_plan.dataflow.property_.partitioner);
      state.lower_ip_list_=convertNodeIDListToNodeIPList(lower_id_list);

      BlockStreamIteratorBase* exchange=new ExpandableBlockStreamExchangeEpoll(state);
      physical_plan.plan=exchange;
    }
    candidate_physical_plans.push_back(physical_plan);
  }

  if(child_->GetOptimalPhysicalPlan(requirement,physical_plan)){
    ExpandableBlockStreamFilter::State state;
    state.block_size_=block_size;
    state.child_=physical_plan.plan;
    state.v_ei_=exprArray_;
    state.qual_=qual_;
    state.colindex_=colindex_;
    state.comparator_list_=comparator_list_;
    Dataflow dataflow=getDataflow();
    state.schema_=getSchema(dataflow.attribute_list_);
    BlockStreamIteratorBase* filter=new ExpandableBlockStreamFilter(state);
    physical_plan.plan=filter;
    candidate_physical_plans.push_back(physical_plan);
  }

  physical_plan_descriptor=getBestPhysicalPlanDescriptor(candidate_physical_plans);

  if(requirement.passLimits(physical_plan_descriptor.cost))
    return true;
  else
    return false;
}
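For context, a hedged sketch of how a caller might drive this candidate search; chooseOptimalPlan(), root_filter and the choice of block size are illustrative assumptions, while GetOptimalPhysicalPlan(), Requirement() and the PhysicalPlanDescriptor fields (plan, cost) are taken from the code above.

/* Hypothetical caller, for illustration only. */
BlockStreamIteratorBase* chooseOptimalPlan(Filter* root_filter,const unsigned block_size){
  PhysicalPlanDescriptor descriptor;
  /* an empty Requirement places no location or partitioning constraint on the root */
  if(root_filter->GetOptimalPhysicalPlan(Requirement(),descriptor,block_size)){
    /* the cheapest candidate whose cost passed the requirement's limits */
    return descriptor.plan;
  }
  return 0;  /* no candidate satisfied the cost limits */
}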
int main( int argc, char* argv[] ) {
  // Try to process the command line
  if ( !ConfigKeeper::getSingleton().processCommandLineArguments( argc, argv ) ) {
    std::cout << ConfigKeeper::getSingleton().getDescription();
    return 0;
  }

  // See if help was requested
  if ( ConfigKeeper::getSingleton().getCount( "help" ) ) {
    std::cout << ConfigKeeper::getSingleton().getDescription();
    return 0;
  }

  // Make sure an input was specified
  if ( !ConfigKeeper::getSingleton().getCount( "slsf-file" ) ) {
    std::cerr << "Input file must be specified." << std::endl;
    std::cerr << ConfigKeeper::getSingleton().getDescription();
    return 1;
  }

  // Set the generation language
  bool generateC = true;
  if ( ConfigKeeper::getSingleton().getCount( "java" ) ) generateC = false;
  if ( ConfigKeeper::getSingleton().getCount( "c" ) ) generateC = true;
  bool annotations = ConfigKeeper::getSingleton().getCount( "annotations" );

  // Set the output directory
  std::string outputDirectory;
  if ( ConfigKeeper::getSingleton().getCount( "output-directory" ) ) {
    outputDirectory = ConfigKeeper::getSingleton().getStringValue( "output-directory" );
  }

  DirectoryVector directoryVector;
  if ( ConfigKeeper::getSingleton().getCount( "libdir" ) ) {
    directoryVector = ConfigKeeper::getSingleton().getStringVector( "libdir" );
  }
  addMFileDirectories( directoryVector );

  // Determine the input file name and derive the project name from it
  std::string inputFilename = ConfigKeeper::getSingleton().getStringValue( "slsf-file" );
  std::string projectName = inputFilename.substr( 0, inputFilename.rfind( "." ) );
  {
    std::string::size_type lastSlashPos = projectName.rfind( "/" );
    std::string::size_type lastBackslashPos = projectName.rfind( "\\" );
    if ( lastSlashPos == std::string::npos ) lastSlashPos = lastBackslashPos;
    if ( lastBackslashPos == std::string::npos ) lastBackslashPos = lastSlashPos;
    std::string::size_type delimPos = std::min( lastSlashPos, lastBackslashPos );
    if ( delimPos != std::string::npos ) projectName = projectName.substr( delimPos + 1 );
  }

  std::string arg1 = SLSF_PARADIGM_NAME " = \"" + inputFilename + "\" ";
  // std::string arg2 = "SFC = \"" + std::string(argv[2]) + string(".xml") + "\" !\"SFC.xsd\" ";
  InputFileRegistry ifr;
  ifr.registerFile( arg1 );

  try {
    // Open SLSF (read)
    Udm::SmartDataNetwork sdnSLSF_fb0( SLSF::diagram );
    sdnSLSF_fb0.OpenExisting( ifr.getFileName( SLSF_PARADIGM_NAME ), UseXSD()( ifr.getFileName( SLSF_PARADIGM_NAME ) ) ? ifr.getXsdName( SLSF_PARADIGM_NAME ) : SLSF_PARADIGM_NAME, Udm::CHANGES_LOST_DEFAULT );
    Udm::StaticDataNetworkSpecifier sdns_SLSF_fb1( ifr.getFileName( SLSF_PARADIGM_NAME ), &sdnSLSF_fb0 );

    ModelsFolderMap modelsFolderMap = getModelsFolderMap( sdnSLSF_fb0 );
    for( ModelsFolderMap::iterator mfmItr = modelsFolderMap.begin() ; mfmItr != modelsFolderMap.end() ; ++mfmItr ) {
      int modelsFolderID = mfmItr->first;
      std::string modelsFolderName = mfmItr->second.name();
      if ( outputDirectory.empty() ) outputDirectory = modelsFolderName;

      std::vector< Udm::StaticDataNetworkSpecifier > dnsvec;
      dnsvec.push_back( sdns_SLSF_fb1 );

      if ( !boost::filesystem::create_directories( outputDirectory ) ) {
        if ( !boost::filesystem::is_directory( outputDirectory ) ) {
          std::cerr << "ERROR: Could not create \"" + outputDirectory + "\" directory." << std::endl;
          continue;
        }
      }

      std::string sfcFilename = outputDirectory + "/" + modelsFolderName + "_SFC.xml";
      std::string arg2 = "SFC = \"" + sfcFilename + "\" !\"SFC.xsd\" ";
      ifr.registerFile( arg2 );

      // Open SFC (write)
      UdmDom::DomDataNetwork &sdnSFC_fb3 = SFCUdmEngine::get_singleton().getDomDataNetwork();
      sdnSFC_fb3.CreateNew( ifr.getFileName( "SFC" ), UseXSD()( ifr.getFileName( "SFC" ) ) ? ifr.getXsdName( "SFC" ) : "SFC", SFC::Project::meta, Udm::CHANGES_LOST_DEFAULT );
      Udm::StaticDataNetworkSpecifier sdns_SFC_fb4( ifr.getFileName( "SFC" ), &sdnSFC_fb3 );
      dnsvec.push_back( sdns_SFC_fb4 );

      // Create the project
      Udm::StaticUdmProject prj( dnsvec, ESM2SLC::diagram );
      Udm::DataNetwork& SLSF_ref_fb2 = prj.GetDataNetwork( ifr.getFileName( SLSF_PARADIGM_NAME ) );
      Udm::DataNetwork& sFC_ref_fb5 = prj.GetDataNetwork( ifr.getFileName( "SFC" ) );

      Packets_t projects_1;
      Packets_t rootDataflows_3;

      // Get objects from the data networks
      ModelsFolderMap modelsFolderMap2 = getModelsFolderMap( SLSF_ref_fb2 );
      ModelsFolderMap::iterator mfmItr2 = modelsFolderMap2.find( modelsFolderID );
      if ( mfmItr2 == modelsFolderMap2.end() ) {
        std::cerr << "ERROR: Could not find ModelsFolder with id \"" << modelsFolderID << "\"" << std::endl;
      }
      else {
        SLSF::Dataflow dataflow = getDataflow( mfmItr2->second );
        rootDataflows_3.push_back( dataflow );

        // Get access to temporary root object(s).
        // transformation
        SFC::Project rootSFC_257f = SFC::Project::Cast( sFC_ref_fb5.GetRootObject() );
        rootSFC_257f.name() = projectName;
        projects_1.push_back( rootSFC_257f );

        for( Packets_t::const_iterator it = rootDataflows_3.begin() ; it != rootDataflows_3.end() ; ++it ) {
          Packets_t oneRootState;
          oneRootState.push_back( *it );
          TransClass tL_0;
          // tL_0( projects_1, oneRootState );
          tL_0( oneRootState, projects_1 );
          // rootSFC_257f.numInstance() = ni>0 ? ni : 1;

          boost::filesystem::path path = boost::filesystem::current_path();
          boost::filesystem::current_path( outputDirectory );
          if ( generateC )
            CPrinter::print( rootSFC_257f );
          else
            JPrinter::printProject( rootSFC_257f, annotations );
          boost::filesystem::current_path( path );

          // Print the S-Function wrapper
          // printSFuncWrapper( SFUtils::convertToCPlusPlusName( stateName ), rootSFC_257f );

          // Delete program
        }
      }

      // Close the project
      prj.Close();

      // Close SFC (write)
      sdnSFC_fb3.CloseWithUpdate();

      if ( SFUtils::getPortTypeError() )
        boost::filesystem::remove( sfcFilename );
    }
    modelsFolderMap.clear();

    // Close SLSF (read)
    sdnSLSF_fb0.CloseNoUpdate();
  }
  catch( udm_exception &e ) {
    std::cout << e.what() << std::endl;
    return 2;
  }
  catch( std::exception &e ) {
    std::cout << e.what() << std::endl;
    return 3;
  }
  catch( ... ) {
    std::cout << "Unknown exception (sorry)" << std::endl;
    return 4;
  }

  if ( CodeGenerator::getSingleton().getError() )
    return 5;

  return 0;
}