void MassAddDialog::handleTickle() { if (_scanStack.empty()) return; // We have finished scanning uint32 t = g_system->getMillis(); // Perform a breadth-first scan of the filesystem. while (!_scanStack.empty() && (g_system->getMillis() - t) < kMaxScanTime) { Common::FSNode dir = _scanStack.pop(); Common::FSList files; if (!dir.getChildren(files, Common::FSNode::kListAll)) { continue; } // Run the detector on the dir GameList candidates(EngineMan.detectGames(files)); // Just add all detected games / game variants. If we get more than one, // that either means the directory contains multiple games, or the detector // could not fully determine which game variant it was seeing. In either // case, let the user choose which entries he wants to keep. // // However, we only add games which are not already in the config file. for (GameList::const_iterator cand = candidates.begin(); cand != candidates.end(); ++cand) { GameDescriptor result = *cand; Common::String path = dir.getPath(); // Remove trailing slashes while (path != "/" && path.lastChar() == '/') path.deleteLastChar(); // Check for existing config entries for this path/gameid/lang/platform combination if (_pathToTargets.contains(path)) { bool duplicate = false; const StringArray &targets = _pathToTargets[path]; for (StringArray::const_iterator iter = targets.begin(); iter != targets.end(); ++iter) { // If the gameid, platform and language match -> skip it Common::ConfigManager::Domain *dom = ConfMan.getDomain(*iter); assert(dom); if ((*dom)["gameid"] == result["gameid"] && (*dom)["platform"] == result["platform"] && (*dom)["language"] == result["language"]) { duplicate = true; break; } } if (duplicate) { _oldGamesCount++; break; // Skip duplicates } } result["path"] = path; _games.push_back(result); _list->append(result.description()); } // Recurse into all subdirs for (Common::FSList::const_iterator file = files.begin(); file != files.end(); ++file) { if (file->isDirectory()) { _scanStack.push(*file); } } _dirsScanned++; } // Update the dialog char buf[256]; if (_scanStack.empty()) { // Enable the OK button _okButton->setEnabled(true); snprintf(buf, sizeof(buf), "%s", _("Scan complete!")); _dirProgressText->setLabel(buf); snprintf(buf, sizeof(buf), _("Discovered %d new games, ignored %d previously added games."), _games.size(), _oldGamesCount); _gameProgressText->setLabel(buf); } else { snprintf(buf, sizeof(buf), _("Scanned %d directories ..."), _dirsScanned); _dirProgressText->setLabel(buf); snprintf(buf, sizeof(buf), _("Discovered %d new games, ignored %d previously added games ..."), _games.size(), _oldGamesCount); _gameProgressText->setLabel(buf); } if (_games.size() > 0) { _list->scrollToEnd(); } drawDialog(); }
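// Illustrative sketch (not the ScummVM API): the time-budgeted scan pattern used by
// handleTickle() above, rewritten with std::filesystem. Names such as ScanState,
// scanTick and kMaxScanTimeMs are hypothetical; the point is that each call does at
// most kMaxScanTimeMs of work and leaves the remaining directories on the stack for
// the next tick.
#include <chrono>
#include <filesystem>
#include <stack>
#include <system_error>

struct ScanState {
    std::stack<std::filesystem::path> scanStack;
    unsigned dirsScanned = 0;
};

// Returns true while there is still work left for a later tick.
bool scanTick(ScanState &state, long long kMaxScanTimeMs = 50) {
    using clock = std::chrono::steady_clock;
    const auto start = clock::now();
    while (!state.scanStack.empty() &&
           std::chrono::duration_cast<std::chrono::milliseconds>(clock::now() - start).count()
               < kMaxScanTimeMs) {
        const std::filesystem::path dir = state.scanStack.top();
        state.scanStack.pop();
        std::error_code ec;
        for (const auto &entry : std::filesystem::directory_iterator(dir, ec)) {
            if (entry.is_directory())
                state.scanStack.push(entry.path());   // visited on a later iteration/tick
            // ... run detection on this directory's files here ...
        }
        ++state.dirsScanned;
    }
    return !state.scanStack.empty();
}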
plim_program compile_for_plim( const mig_graph& mig, const properties::ptr& settings, const properties::ptr& statistics ) { /* settings */ const auto verbose = get( settings, "verbose", false ); const auto progress = get( settings, "progress", false ); const auto enable_cost_function = get( settings, "enable_cost_function", true ); const auto generator_strategy = get( settings, "generator_strategy", 0u ); /* 0u: LIFO, 1u: FIFO */ /* timing */ properties_timer t( statistics ); plim_program program; const auto& info = mig_info( mig ); boost::dynamic_bitset<> computed( num_vertices( mig ) ); std::unordered_map<mig_function, memristor_index> func_to_rram; auto_index_generator<memristor_index> memristor_generator( generator_strategy == 0u ? auto_index_generator<memristor_index>::request_strategy::lifo : auto_index_generator<memristor_index>::request_strategy::fifo ); /* constant and all PIs are computed */ computed.set( info.constant ); for ( const auto& input : info.inputs ) { computed.set( input ); func_to_rram.insert( {{input, false}, memristor_generator.request()} ); } /* keep a priority queue for candidates invariant: candidates elements' children are all computed */ compilation_compare cmp( mig, enable_cost_function ); std::priority_queue<mig_node, std::vector<mig_node>, compilation_compare> candidates( cmp ); /* find initial candidates */ for ( const auto& node : boost::make_iterator_range( vertices( mig ) ) ) { /* PI and constant cannot be candidate */ if ( out_degree( node, mig ) == 0 ) { continue; } if ( all_children_computed( node, mig, computed ) ) { candidates.push( node ); } } const auto parent_edges = precompute_ingoing_edges( mig ); null_stream ns; std::ostream null_out( &ns ); boost::progress_display show_progress( num_vertices( mig ), progress ? std::cout : null_out ); /* synthesis loop */ while ( !candidates.empty() ) { ++show_progress; /* pick the best candidate */ auto candidate = candidates.top(); candidates.pop(); L( "[i] compute node " << candidate ); /* perform computation (e.g. 
mark which RRAM is used for this node) */ const auto children = get_children( mig, candidate ); boost::dynamic_bitset<> children_compl( 3u ); for ( const auto& f : index( children ) ) { children_compl.set( f.index, f.value.complemented ); } /* indexes and registers */ auto i_src_pos = 3u, i_src_neg = 3u, i_dst = 3u; plim_program::operand_t src_pos; plim_program::operand_t src_neg; memristor_index dst; /* find the inverter */ /* if there is one inverter */ if ( children_compl.count() == 1u ) { i_src_neg = children_compl.find_first(); if ( children[i_src_neg].node == 0u ) { src_neg = false; } else { src_neg = func_to_rram.at( {children[i_src_neg].node, false} ); } } /* if there are more than one inverters, but one of them is a constant */ else if ( children_compl.count() > 1u && children[children_compl.find_first()].node == 0u ) { i_src_neg = children_compl.find_next( children_compl.find_first() ); src_neg = func_to_rram.at( {children[i_src_neg].node, false} ); } /* if there is no inverter but a constant */ else if ( children_compl.count() == 0u && children[0u].node == 0u ) { i_src_neg = 0u; src_neg = !children[0u].complemented; } /* if there are more than one inverters */ else if ( children_compl.count() > 1u ) { do /* in order to escape early */ { /* pick an input that has multiple fanout */ for ( auto i = 0u; i < 3u; ++i ) { if ( !children_compl[i] ) continue; if ( cmp.fanout_count( children[i].node ) > 1u ) { i_src_neg = i; src_neg = func_to_rram.at( {children[i_src_neg].node, false} ); break; } } if ( i_src_neg < 3u ) { break; } i_src_neg = children_compl.find_first(); src_neg = func_to_rram.at( {children[i_src_neg].node, false} ); } while ( false ); } /* if there is no inverter */ else { do /* in order to escape early */ { /* pick an input that has multiple fanout */ for ( auto i = 0u; i < 3u; ++i ) { const auto it_reg = func_to_rram.find( {children[i].node, true} ); if ( it_reg != func_to_rram.end() ) { i_src_neg = i; src_neg = it_reg->second; break; } } if ( i_src_neg < 3u ) { break; } /* pick an input that has multiple fanout */ for ( auto i = 0u; i < 3u; ++i ) { if ( cmp.fanout_count( children[i].node ) > 1u ) { i_src_neg = i; break; } } /* or pick the first one */ if ( i_src_neg == 3u ) { i_src_neg = 0u; } /* create new register for inversion */ const auto inv_result = memristor_generator.request(); program.invert( inv_result, func_to_rram.at( {children[i_src_neg].node, false} ) ); func_to_rram.insert( {{children[i_src_neg].node, true}, inv_result} ); src_neg = inv_result; } while ( false ); } children_compl.reset( i_src_neg ); /* find the destination */ unsigned oa, ob; std::tie( oa, ob ) = three_without( i_src_neg ); /* if there is a child with one fan-out */ /* check whether they fulfill the requirements (non-constant and one fan-out) */ const auto oa_c = children[oa].node != 0u && cmp.fanout_count( children[oa].node ) == 1u; const auto ob_c = children[ob].node != 0u && cmp.fanout_count( children[ob].node ) == 1u; if ( oa_c || ob_c ) { /* first check for complemented cases (to avoid them for last operand) */ std::unordered_map<mig_function, memristor_index>::const_iterator it; if ( oa_c && children[oa].complemented && ( it = func_to_rram.find( {children[oa].node, true} ) ) != func_to_rram.end() ) { i_dst = oa; dst = it->second; } else if ( ob_c && children[ob].complemented && ( it = func_to_rram.find( {children[ob].node, true} ) ) != func_to_rram.end() ) { i_dst = ob; dst = it->second; } else if ( oa_c && !children[oa].complemented ) { i_dst = oa; dst = func_to_rram.at( 
{children[oa].node, false} ); } else if ( ob_c && !children[ob].complemented ) { i_dst = ob; dst = func_to_rram.at( {children[ob].node, false} ); } } /* no destination found yet? */ if ( i_dst == 3u ) { /* create new work RRAM */ dst = memristor_generator.request(); /* is there a constant (if, then it's the first one) */ if ( children[oa].node == 0u ) { i_dst = oa; program.read_constant( dst, children[oa].complemented ); } /* is there another inverter, then load it with that one? */ else if ( children_compl.count() > 0u ) { i_dst = children_compl.find_first(); program.invert( dst, func_to_rram.at( {children[i_dst].node, false} ) ); } /* otherwise, pick first one */ else { i_dst = oa; program.assign( dst, func_to_rram.at( {children[i_dst].node, false} ) ); } } /* positive operand */ i_src_pos = 3u - i_src_neg - i_dst; const auto node = children[i_src_pos].node; if ( node == 0u ) { src_pos = children[i_src_pos].complemented; } else if ( children[i_src_pos].complemented ) { const auto it_reg = func_to_rram.find( {node, true} ); if ( it_reg == func_to_rram.end() ) { /* create new register for inversion */ const auto inv_result = memristor_generator.request(); program.invert( inv_result, func_to_rram.at( {node, false} ) ); func_to_rram.insert( {{node, true}, inv_result} ); src_pos = inv_result; } else { src_pos = it_reg->second; } } else { src_pos = func_to_rram.at( {node, false} ); } program.compute( dst, src_pos, src_neg ); func_to_rram.insert( {{candidate, false}, dst} ); /* free free registers */ for ( const auto& c : children ) { if ( cmp.remove_fanout( c.node ) == 0u && c.node != 0u ) { const auto reg = func_to_rram.at( {c.node, false} ); if ( reg != dst ) { memristor_generator.release( reg ); } const auto it_reg = func_to_rram.find( {c.node, true} ); if ( it_reg != func_to_rram.end() && it_reg->second != dst ) { memristor_generator.release( it_reg->second ); } } } /* update computed and find new candidates */ computed.set( candidate ); const auto it = parent_edges.find( candidate ); if ( it != parent_edges.end() ) /* if it has parents */ { for ( const auto& e : it->second ) { const auto parent = boost::source( e, mig ); if ( !computed[parent] && all_children_computed( parent, mig, computed ) ) { candidates.push( parent ); } } } L( " - src_pos: " << i_src_pos << std::endl << " - src_neg: " << i_src_neg << std::endl << " - dst: " << i_dst << std::endl ); } set( statistics, "step_count", (int)program.step_count() ); set( statistics, "rram_count", (int)program.rram_count() ); std::vector<int> write_counts( program.write_counts().begin(), program.write_counts().end() ); set( statistics, "write_counts", write_counts ); return program; }
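// Minimal sketch of the scheduling skeleton used by compile_for_plim() above: keep a
// priority queue of candidate nodes whose children are all computed, pop the best one,
// mark it computed, and promote any parent whose children are now all done. The Dag
// representation and the cost comparator are assumptions for illustration, not the
// cirkit/MIG API.
#include <cstddef>
#include <queue>
#include <vector>

struct Dag {
    std::vector<std::vector<int>> children;  // children[v] = child node ids
    std::vector<std::vector<int>> parents;   // parents[v]  = parent node ids
};

std::vector<int> schedule(const Dag &g, const std::vector<double> &cost) {
    const std::size_t n = g.children.size();
    std::vector<bool> computed(n, false);
    auto cmp = [&](int a, int b) { return cost[a] > cost[b]; };   // cheapest node first
    std::priority_queue<int, std::vector<int>, decltype(cmp)> candidates(cmp);

    auto ready = [&](int v) {
        for (int c : g.children[v]) if (!computed[c]) return false;
        return true;
    };
    // inputs/constants are "computed" without emitting anything
    for (std::size_t v = 0; v < n; ++v)
        if (g.children[v].empty()) computed[v] = true;
    // initial candidates: uncomputed nodes whose children are all computed
    for (std::size_t v = 0; v < n; ++v)
        if (!computed[v] && ready(static_cast<int>(v))) candidates.push(static_cast<int>(v));

    std::vector<int> order;
    while (!candidates.empty()) {
        const int v = candidates.top(); candidates.pop();
        if (computed[v]) continue;       // guard against duplicate pushes
        computed[v] = true;
        order.push_back(v);
        for (int p : g.parents[v])
            if (!computed[p] && ready(p)) candidates.push(p);
    }
    return order;
}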
InputIterator find_extrema_with_reduce(InputIterator first, InputIterator last, Compare compare, const bool find_minimum, command_queue &queue) { typedef typename std::iterator_traits<InputIterator>::difference_type difference_type; typedef typename std::iterator_traits<InputIterator>::value_type input_type; const context &context = queue.get_context(); const device &device = queue.get_device(); // Getting information about used queue and device const size_t compute_units_no = device.get_info<CL_DEVICE_MAX_COMPUTE_UNITS>(); const size_t max_work_group_size = device.get_info<CL_DEVICE_MAX_WORK_GROUP_SIZE>(); const size_t count = detail::iterator_range_size(first, last); std::string cache_key = std::string("__boost_find_extrema_with_reduce_") + type_name<input_type>(); // load parameters boost::shared_ptr<parameter_cache> parameters = detail::parameter_cache::get_global_cache(device); // get preferred work group size and preferred number // of work groups per compute unit size_t work_group_size = parameters->get(cache_key, "wgsize", 256); size_t work_groups_per_cu = parameters->get(cache_key, "wgpcu", 100); // calculate work group size and number of work groups work_group_size = (std::min)(max_work_group_size, work_group_size); size_t work_groups_no = compute_units_no * work_groups_per_cu; work_groups_no = (std::min)( work_groups_no, static_cast<size_t>(std::ceil(float(count) / work_group_size)) ); // phase I: finding candidates for extremum // device buffors for extremum candidates and their indices // each work-group computes its candidate vector<input_type> candidates(work_groups_no, context); vector<uint_> candidates_idx(work_groups_no, context); // finding candidates for first extremum and their indices find_extrema_with_reduce( first, count, candidates.begin(), candidates_idx.begin(), work_groups_no, work_group_size, compare, find_minimum, queue ); // phase II: finding extremum from among the candidates // zero-copy buffers for final result (value and index) vector<input_type, ::boost::compute::pinned_allocator<input_type> > result(1, context); vector<uint_, ::boost::compute::pinned_allocator<uint_> > result_idx(1, context); // get extremum from among the candidates find_extrema_with_reduce( candidates.begin(), candidates_idx.begin(), work_groups_no, result.begin(), result_idx.begin(), 1, work_group_size, compare, find_minimum, true, queue ); // mapping extremum index to host uint_* result_idx_host_ptr = static_cast<uint_*>( queue.enqueue_map_buffer( result_idx.get_buffer(), command_queue::map_read, 0, sizeof(uint_) ) ); return first + static_cast<difference_type>(*result_idx_host_ptr); }
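// Sketch of the launch-size arithmetic above in plain C++ (no OpenCL): the number of
// work-groups is capped both by a per-device budget (compute units x work-groups per CU)
// and by how many groups the input can actually occupy. The default parameter values are
// illustrative, not tuned numbers.
#include <algorithm>
#include <cstddef>

std::size_t work_groups_for(std::size_t count,
                            std::size_t compute_units,
                            std::size_t work_group_size = 256,
                            std::size_t groups_per_cu   = 100) {
    const std::size_t by_input  = (count + work_group_size - 1) / work_group_size; // ceil
    const std::size_t by_device = compute_units * groups_per_cu;
    return std::min(by_device, by_input);
}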
InputIterator find_extrema_with_reduce(InputIterator first, InputIterator last, ::boost::compute::less< typename std::iterator_traits< InputIterator >::value_type > compare, const bool find_minimum, command_queue &queue) { typedef typename std::iterator_traits<InputIterator>::difference_type difference_type; typedef typename std::iterator_traits<InputIterator>::value_type input_type; const context &context = queue.get_context(); const device &device = queue.get_device(); // Getting information about used queue and device const size_t compute_units_no = device.get_info<CL_DEVICE_MAX_COMPUTE_UNITS>(); const size_t max_work_group_size = device.get_info<CL_DEVICE_MAX_WORK_GROUP_SIZE>(); const size_t count = detail::iterator_range_size(first, last); std::string cache_key = std::string("__boost_find_extrema_with_reduce_") + type_name<input_type>(); // load parameters boost::shared_ptr<parameter_cache> parameters = detail::parameter_cache::get_global_cache(device); // get preferred work group size and preferred number // of work groups per compute unit size_t work_group_size = parameters->get(cache_key, "wgsize", 256); size_t work_groups_per_cu = parameters->get(cache_key, "wgpcu", 64); // calculate work group size and number of work groups work_group_size = (std::min)(max_work_group_size, work_group_size); size_t work_groups_no = compute_units_no * work_groups_per_cu; work_groups_no = (std::min)( work_groups_no, static_cast<size_t>(std::ceil(float(count) / work_group_size)) ); // phase I: finding candidates for extremum // device buffors for extremum candidates and their indices // each work-group computes its candidate // zero-copy buffers are used to eliminate copying data back to host vector<input_type, ::boost::compute::pinned_allocator<input_type> > candidates(work_groups_no, context); vector<uint_, ::boost::compute::pinned_allocator <uint_> > candidates_idx(work_groups_no, context); // finding candidates for first extremum and their indices find_extrema_with_reduce( first, count, candidates.begin(), candidates_idx.begin(), work_groups_no, work_group_size, compare, find_minimum, queue ); // phase II: finding extremum from among the candidates // mapping candidates and their indices to host input_type* candidates_host_ptr = static_cast<input_type*>( queue.enqueue_map_buffer( candidates.get_buffer(), command_queue::map_read, 0, work_groups_no * sizeof(input_type) ) ); uint_* candidates_idx_host_ptr = static_cast<uint_*>( queue.enqueue_map_buffer( candidates_idx.get_buffer(), command_queue::map_read, 0, work_groups_no * sizeof(uint_) ) ); input_type* i = candidates_host_ptr; uint_* idx = candidates_idx_host_ptr; uint_* extremum_idx = idx; input_type extremum = *candidates_host_ptr; i++; idx++; // find extremum (serial) from among the candidates on host if(!find_minimum) { while(idx != (candidates_idx_host_ptr + work_groups_no)) { input_type next = *i; bool compare_result = next > extremum; bool equal = next == extremum; extremum = compare_result ? next : extremum; extremum_idx = compare_result ? idx : extremum_idx; extremum_idx = equal ? ((*extremum_idx < *idx) ? extremum_idx : idx) : extremum_idx; idx++, i++; } } else { while(idx != (candidates_idx_host_ptr + work_groups_no)) { input_type next = *i; bool compare_result = next < extremum; bool equal = next == extremum; extremum = compare_result ? next : extremum; extremum_idx = compare_result ? idx : extremum_idx; extremum_idx = equal ? ((*extremum_idx < *idx) ? 
extremum_idx : idx) : extremum_idx; idx++, i++; } } return first + static_cast<difference_type>(*extremum_idx); }
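// Sketch of the host-side pass above: scan the per-work-group candidates once and keep
// the extremum, breaking ties in favour of the smallest original index so the result is
// deterministic. This is generic C++, not Boost.Compute; GroupCandidate is a hypothetical
// value/index pair standing in for the mapped candidate buffers.
#include <cstdint>
#include <vector>

struct GroupCandidate { float value; std::uint32_t index; };

// Precondition: c is not empty.
std::uint32_t pick_extremum(const std::vector<GroupCandidate> &c, bool find_minimum) {
    std::size_t best = 0;
    for (std::size_t i = 1; i < c.size(); ++i) {
        const bool better = find_minimum ? (c[i].value < c[best].value)
                                         : (c[i].value > c[best].value);
        const bool tie    = c[i].value == c[best].value && c[i].index < c[best].index;
        if (better || tie)
            best = i;
    }
    return c[best].index;
}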
PClassifier TTreeLearner::operator()(PExampleGenerator ogen, const int &weight) { if (!ogen) raiseError("invalid example generator"); PVariable &classVar = ogen->domain->classVar; if (!classVar) raiseError("class-less domain"); bool tempSplit = !split; if (tempSplit) if (classVar->varType == TValue::INTVAR) split = defaultDiscreteTreeSplitConstructor; else if (classVar->varType == TValue::FLOATVAR) split = defaultContinuousTreeSplitConstructor; else raiseError("invalid class type (discrete or continuous expected)"); bool tempStop = !stop; if (tempStop) stop = defaultStop; bool tempSplitter = !exampleSplitter; if (tempSplitter) exampleSplitter = mlnew TTreeExampleSplitter_UnknownsAsSelector; try { PExampleGenerator examples; /* If we don't intend to store them, we'll copy them if they're not in a table. If we must store examples, we'll copy them in any case... */ if (storeExamples) examples = mlnew TExampleTable(ogen); else examples = toExampleTable(ogen); PDistribution apriorClass = getClassDistribution (examples); if (apriorClass->abs == 0) raiseError("no examples"); vector<bool> candidates(examples->domain->attributes->size(), true); PTreeNode root = call(examples, weight, apriorClass, candidates, 0); if (storeExamples) root->examples = examples; if (tempSplit) split = PTreeSplitConstructor(); if (tempStop) stop = PTreeStopCriteria(); if (tempSplitter) exampleSplitter = PTreeExampleSplitter(); return mlnew TTreeClassifier(examples->domain, root, descender ? descender : mlnew TTreeDescender_UnknownMergeAsSelector); } catch (exception &) { if (tempSplit) split = PTreeSplitConstructor(); if (tempStop) stop = PTreeStopCriteria(); if (tempSplitter) exampleSplitter = PTreeExampleSplitter(); throw; } }
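// Design note (a sketch, not the Orange API): the reset code duplicated in the normal and
// exception paths above can be written once with a small scope guard that restores a member
// on scope exit, whether the function returns or throws.
#include <utility>

template <typename T>
class restore_on_exit {
public:
    restore_on_exit(T &slot, T temporary) : slot_(slot), old_(slot) { slot_ = std::move(temporary); }
    ~restore_on_exit() { slot_ = std::move(old_); }
    restore_on_exit(const restore_on_exit &) = delete;
    restore_on_exit &operator=(const restore_on_exit &) = delete;
private:
    T &slot_;
    T  old_;
};
// Usage, assuming `component` is a smart-pointer-like member and makeDefault() builds a
// default instance (both hypothetical names):
//     restore_on_exit<Component> guard(component, component ? component : makeDefault());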
//--------------------------------------------------------- // Count of all the weapons in the world of my type and // see if we have a surplus. If there is a surplus, try // to find suitable candidates for removal. // // Right now we just remove the first weapons we find that // are behind the player, or are out of the player's PVS. // Later, we may want to score the results so that we // removed the farthest gun that's not in the player's // viewcone, etc. // // Some notes and thoughts: // // This code is designed NOT to remove weapons that are // hand-placed by level designers. It should only clean // up weapons dropped by dead NPCs, which is useful in // situations where enemies are spawned in for a sustained // period of time. // // Right now we PREFER to remove weapons that are not in the // player's PVS, but this could be opposite of what we // really want. We may only want to conduct the cleanup on // weapons that are IN the player's PVS. //--------------------------------------------------------- void CGameWeaponManager::Think() { int i; // Don't have to think all that often. SetNextThink( gpGlobals->curtime + 2.0 ); const char *pszWeaponName = STRING( m_iszWeaponName ); CUtlVector<CBaseEntity *> candidates( 0, 64 ); if ( m_bExpectingWeapon ) { CBaseCombatWeapon *pWeapon = NULL; // Firstly, count the total number of weapons of this type in the world. // Also count how many of those can potentially be removed. pWeapon = assert_cast<CBaseCombatWeapon *>(gEntList.FindEntityByClassname( pWeapon, pszWeaponName )); while( pWeapon ) { if( !pWeapon->IsEffectActive( EF_NODRAW ) && pWeapon->IsRemoveable() ) { candidates.AddToTail( pWeapon ); } pWeapon = assert_cast<CBaseCombatWeapon *>(gEntList.FindEntityByClassname( pWeapon, pszWeaponName )); } } else { for ( i = 0; i < m_ManagedNonWeapons.Count(); i++) { CBaseEntity *pEntity = m_ManagedNonWeapons[i]; if ( pEntity ) { Assert( pEntity->m_iClassname == m_iszWeaponName ); if ( !pEntity->IsEffectActive( EF_NODRAW ) ) { candidates.AddToTail( pEntity ); } } else { m_ManagedNonWeapons.FastRemove( i-- ); } } } // Calculate the surplus. int surplus = candidates.Count() - m_iMaxPieces; // Based on what the player can see, try to clean up the world by removing weapons that // the player cannot see right at the moment. CBaseEntity *pCandidate; for ( i = 0; i < candidates.Count() && surplus > 0; i++ ) { bool fRemovedOne = false; pCandidate = candidates[i]; Assert( !pCandidate->IsEffectActive( EF_NODRAW ) ); // if ( gpGlobals->maxClients == 1 ) { CBasePlayer *pPlayer = UTIL_GetNearestVisiblePlayer(pCandidate); // Nodraw serves as a flag that this weapon is already being removed since // all we're really doing inside this loop is marking them for removal by // the entity system. We don't want to count the same weapon as removed // more than once. if( !UTIL_FindClientInPVS( pCandidate->edict() ) ) { fRemovedOne = true; } else if( !pPlayer->FInViewCone( pCandidate ) ) { fRemovedOne = true; } else if ( UTIL_DistApprox( pPlayer->GetAbsOrigin(), pCandidate->GetAbsOrigin() ) > (30*12) ) { fRemovedOne = true; } } // else // { // fRemovedOne = true; // } if( fRemovedOne ) { pCandidate->AddEffects( EF_NODRAW ); UTIL_Remove( pCandidate ); DevMsg( 2, "Surplus %s removed\n", pszWeaponName); surplus--; } } }
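// Generic sketch of the surplus-cleanup pattern in Think() above: given a candidate list
// and a cap, remove candidates that satisfy a "safe to remove" predicate until the count is
// back under the cap. The callbacks and types are placeholders, not Source-engine classes.
#include <cstddef>
#include <functional>
#include <vector>

template <typename T>
int remove_surplus(std::vector<T> &candidates, int max_allowed,
                   const std::function<bool(const T &)> &safe_to_remove,
                   const std::function<void(T &)> &remove) {
    int surplus = static_cast<int>(candidates.size()) - max_allowed;
    int removed = 0;
    for (std::size_t i = 0; i < candidates.size() && surplus > 0; ++i) {
        if (safe_to_remove(candidates[i])) {
            remove(candidates[i]);   // e.g. hide and schedule for deletion
            --surplus;
            ++removed;
        }
    }
    return removed;
}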
int main(int argc, char **argv) { if(argc==1 || (argc==2 && strcmp(argv[1],"-h")==0) || (argc==2 && strcmp(argv[1],"--help")==0) || (argc==2 && strcmp(argv[1],"--h")==0) ) { usage(argv[0]); exit(-1); } time_t global_start=clock(); int i,j,new_node, num_pushes=0, seed=0, nsplits=0, root=-1,subtree_root=-1,new_ms,max_bits=-1, num_spawns=0; // Set default p to 1/3 double p=0.3333; list<int>::iterator ii; char *DIMACS_file=NULL; bool has_graph=false,verbose=false,do_push=true,do_two_sep=true, do_init_push=false; Graph *G=NULL; time_t start,stop; //for printing out the graphviz representations of each move on the bd bool gviz_all = false; bool gviz_el = false; //edge labels on bool gviz_th = true; //thicknesses off char gvizfile[20]; int step = 0; // time_t begin=clock(); // Controller object has some methods which are used independently from // graph objects. goblinController *CT; CT = new goblinController(); //Turn off printing of dots. CT->traceLevel = 0; // Parse arguments for(i=0;i<argc;i++) { if(strcmp(argv[i],"-v")==0) verbose=true; if(strcmp(argv[i],"-f")==0) { DIMACS_file=argv[i+1]; // Read in the file G=new Graph(DIMACS_file, true); G->write_graphviz_file("orig.gviz"); char metis_file[100]; sprintf(metis_file,"%s.metis",argv[i+1]); G->write_METIS_file(metis_file); sprintf(metis_file,"%s.hmetis",argv[i+1]); G->write_HMETIS_file(metis_file); has_graph=true; if(verbose) { print_message(0,"Read in graph from file: %s\n",DIMACS_file); cout << *G; } } if(strcmp(argv[i],"-p")==0) p=atof(argv[i+1]); if(strcmp(argv[i],"-nopush")==0) do_push=false; if(strcmp(argv[i],"-no2sep")==0) do_two_sep=false; if(strcmp(argv[i],"-seed")==0) seed=atoi(argv[i+1]); if(strcmp(argv[i],"-root")==0) root=atoi(argv[i+1]); if(strcmp(argv[i],"-subtree")==0) subtree_root=atoi(argv[i+1]); if(strcmp(argv[i],"-exhaust")==0) max_bits=atoi(argv[i+1]); } // Make sure we have a graph if(!has_graph) fatal_error("Did not load graph\n"); if(!G->check_connected()) fatal_error("Graph is not connected\n"); if(!G->check_two_connected()) fatal_error("Graph is not 2 connected!\n"); // "seed" the rng for(i=0;i<seed;i++) lcgrand(0); // Create the tree BDTree btree(G); // Initialize to the star configuration btree.create_star(); if(gviz_all) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } // Find candidate pushes if(do_init_push) { num_pushes=btree.push(0); print_message(0,"Found %d initial pushes\n",num_pushes); } if(gviz_all && num_pushes > 0) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } if(do_two_sep) { // Look for 2-separations list<int> X1, Y1; bool ts = false; int bv = 0; bool found; while(!ts) { //we need to try to separate a vertex with at least 4 edges adjacent to it found = false; while(found == false && bv < btree.num_nodes) { if(btree.nodes[bv].edges.size() >3) found = true; else bv++; } if(!found) { print_message(0, "Did not find a(nother) node of btree to split.\n"); break; } if(verbose) print_message(0,"Running two separation function at vertex %d\n", bv); X1.clear(); Y1.clear(); start = clock(); ts= btree.two_separation(bv, &X1, &Y1, CT); //ts = btree.bf_two_separation(bv, &X1, &Y1); stop = clock(); print_message(0, "Checked for two separation in %f seconds.\n", ((double)(stop-start))/CLOCKS_PER_SEC); if(ts) { print_message(0, "Found a valid 2 separation!\n"); print_message(0, "Splitting node %d\n", bv); // Split the node new_node = btree.split_node(bv,&X1,EDGE_SPLIT,&new_ms); 
print_message(0,"After split - new edge has middle set of size %d\n", new_ms); if(gviz_all) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } if(do_push) { //Push the vertex you split num_pushes=btree.push(bv); print_message(0, "Found %d pushes at %d \n",num_pushes, bv); if(gviz_all && num_pushes > 0) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } //Push the new vertex created num_pushes=btree.push(new_node); print_message(0, "Found %d pushes at %d \n",num_pushes, new_node); if(gviz_all && num_pushes > 0) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } } //Add one to our count, then reset ts and bv so we restart search for 2-seps. nsplits++; ts = false; bv = 0; } else { print_message(0, "No 2 separation at vertex %d!\n", bv); bv++; } } print_message(0, "Finished with two-separations. Split %d nodes.\n", nsplits); } // Now run eigenvector splitting until BD is valid int split_node=0,new_node_1, new_node_2; nsplits=0; list<int> A, B, CA, CB, partition, partition2; vector<int> candidates(btree.num_nodes); while(!btree.is_valid) { // Find a BDTreeNode with at least 4 neighbors // fill candidates with possibilities // This is not smart since we really should just update // the candidates as we split and add nodes... // but it's probably in the noise anyway j=0; split_node = -1; for(i=0;i<btree.num_nodes;i++) { if(btree.nodes[i].edges.size()>=4) { candidates[j]=i; j++; } } print_message(1,"generating random int in 0...%d\n",j-1); split_node=rand_int(0,j-1); split_node=candidates[split_node]; print_message(1,"split_node is %d (degree=%d)\n",split_node,btree.nodes[split_node].edges.size()); A.clear(); B.clear(); CA.clear(); CB.clear(); nsplits++; // Run eigenvector heuristic - if fill_extra=true then we will // we are adding "obvious" edges to A and B within this function! 
start=clock(); if(btree.num_interior_nodes==1) btree.eigenvector_split(split_node,&A, &B, p, false); else btree.eigenvector_leaf_split(split_node,&A, &B, p, false); stop=clock(); print_message(0,"Computed eigenvector with p=%f in %f seconds.\n",p, ((double)(stop-start))/CLOCKS_PER_SEC); print_message(0,"Eigenvector A:\n"); print(0,A); print_message(0,"Eigenvector B:\n"); print(0,B); // Should do this only if A and B require it if(A.size()+B.size() < btree.nodes[split_node].edges.size()) { // Run the max flow to get an actual splitting of the edges start=clock(); btree.split_maxflow_partition(split_node,&A,&B,&CA,&CB, CT); stop = clock(); print_message(0,"Computed partition given eigenvector results in %f seconds.\n", ((double)(stop-start))/CLOCKS_PER_SEC); // Create the edge set partition.clear(); for(ii=A.begin();ii!=A.end();++ii) partition.push_back(*ii); for(ii=CA.begin();ii!=CA.end();++ii) partition.push_back(*ii); partition.sort(); } else { // Just use A partition.clear(); for(ii=A.begin();ii!=A.end();++ii) partition.push_back(*ii); } // Check the size of the exhaust BEFORE splitting the node int exhaust_bits=btree.nodes[split_node].edges.size() - A.size() - B.size(); print_message(1,"Exhaust size is %d bits\n",exhaust_bits); int exhaust_ms_size=-1; time_t exh_start=0, exh_stop=0; list<int> C; // Check to see if we can/want to exhaust if( exhaust_bits <= max_bits) { print_message(0,"Checking exhaust\n"); // Check this exhaustively exh_start=clock(); C.clear(); exhaust_ms_size=btree.best_partition(split_node,&A, &B, &C); exh_stop=clock(); } print_message(1,"Splitting %d with edge partition of size %d\n",split_node,partition.size()); print(1,partition); if(btree.num_interior_nodes==1) { // initial star - use split node new_node_2=-1; new_node_1 = btree.split_node(split_node,&partition,EDGE_SPLIT,&new_ms); btree.write_graphviz_file(true,"init.gviz",true,true); } else { // Later on in the process - use spawn node int ms1,ms2; btree.spawn_nodes(split_node,&partition,EDGE_SPLIT,&ms1,&ms2, &new_node_1, &new_node_2); print_message(0,"After spawn - new middle sets of size %d,%d (max ms is %d)\n",ms1,ms2,btree.max_middle_set_size); print_message(0,"new_nodes: %d,%d\n",new_node_1, new_node_2); //char spawn_file[100]; //sprintf(spawn_file,"spawn_%d.gviz",num_spawns); //btree.write_graphviz_file(true,spawn_file,true,true); num_spawns++; } // Check for validity here!!! if(btree.is_valid) break; // CSG - this fails because we had a spawn of a leaf that was an old node - // so new node1 and 2 are getting set incorrectly in spawn_node if(new_node_1!=-1) { // Try to push the newly created nodes if(btree.nodes[new_node_1].edges.size()>=4 && do_push) { num_pushes=btree.push(new_node_1); print_message(0,"\n\tFound %d pushes for newly introduced node %d\n",num_pushes,new_node_1); if(gviz_all && num_pushes > 0) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } } // Check for validity here!!! if(btree.is_valid) break; } if(new_node_2!=-1) { if(btree.nodes[new_node_2].edges.size()>=4 && do_push) { num_pushes=btree.push(new_node_2); print_message(0,"\n\tFound %d pushes for newly introduced node %d\n",num_pushes,new_node_2); if(gviz_all && num_pushes > 0) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } } } // Check for validity here!!! 
if(btree.is_valid) break; } if(root!=-1) { // This seems to be working when all graphviz flags are off, but something doesn't seem right // if last param is set to 2, probably because middle set is empty and we get 0 pen width??! // BDS - fixed this by making minimum penwidth 1 (i.e. pw = |mid set| unless |mid set| = 0, in which // case, pw = 1. btree.write_graphviz_file(false,"before_root.gviz",false,true); //cout<<btree; btree.root(root); btree.write_graphviz_file(false,"after_root.gviz",false,true); //cout<<btree; } if(verbose) cout<<btree; #if 0 // This section was just for generating a specific plot when running // c:\Users\tcg\PROJECTS\SGD\gaudi\code\trunk\branch_decomposition\Release>BranchDecomposition.exe -p . // 33 -no2sep -root 10 -subtree 330 -f ..\data\ch130.tsp.del.100.dimacs // Check to see if a subtree is desired if(subtree_root!=-1) { int roots[24]={400,328,267,292,263, 403,438,257,251,302, 276,452,294,405,364, 379,349,369,330,443, 338,420,291,425}; char *colors[6]={"red","blue","green","orange","purple","yellow"}; list<int> subtree; for(i=0;i<24;i++) { subtree.clear(); btree.find_subtree(roots[i], &subtree); print_message(1,"Subtree rooted at %d:\n",roots[i]); for(ii=subtree.begin();ii!=subtree.end();++ii) { printf("%d [label=\"\",style=filled,fillcolor=%s,color=%s];\n",*ii,colors[i%6],colors[i%6]); } printf("\n\n"); } } #endif print_message(0,"%d splits performed\n",nsplits); int max_ms=0; for(i=0;i<btree.num_edges;i++) if((int)btree.edges[i].middle_set.size()>max_ms) max_ms=btree.edges[i].middle_set.size(); print_message(0,"max middle set is %d\n",max_ms); vector<int> hist(max_ms+1,0); for(i=0;i<btree.num_edges;i++) hist[btree.edges[i].middle_set.size()]++; print(0,hist); if(gviz_all && num_pushes > 0) { sprintf(gvizfile, "bd.%d.gviz", step); btree.write_graphviz_file(false,gvizfile,gviz_el, gviz_th); step++; } time_t global_stop=clock(); printf("%s %3.3f %3.3f %d %d\n",DIMACS_file,p,(double)(global_stop-global_start)/CLOCKS_PER_SEC,nsplits,max_ms); fflush(stdout); //write a file with thick/thin lines. //btree.write_graphviz_file(false,"final.gviz",false, true); //write a file with thick/thin lines and edge labels //btree.write_graphviz_file(false,"final.gviz",true, true); delete G; delete CT; return 1; }
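// Sketch of the candidate-selection step in the eigenvector-splitting loop above: collect
// every node whose degree is at least 4 and pick one uniformly at random. Plain C++ with
// <random>, independent of the BDTree class.
#include <random>
#include <vector>

int pick_split_node(const std::vector<int> &degree, std::mt19937 &rng) {
    std::vector<int> candidates;
    for (int v = 0; v < static_cast<int>(degree.size()); ++v)
        if (degree[v] >= 4)
            candidates.push_back(v);
    if (candidates.empty())
        return -1;                          // nothing left to split
    std::uniform_int_distribution<std::size_t> pick(0, candidates.size() - 1);
    return candidates[pick(rng)];
}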
// // Check whether or not there are files to be recompiled. // bool Control::IncrementalRecompilation() { // // Empty out the type lookup table so that it does not continue // to point to a type that is deleted here. // type_table.SetEmpty(); SymbolSet candidates(input_java_file_set.Size() + input_class_file_set.Size() + recompilation_file_set.Size()); if (! recompilation_file_set.IsEmpty()) candidates = recompilation_file_set; else { Ostream out; out.StandardOutput(); out << endl << "Incremental: Enter to continue or q + Enter to quit: " << flush; char ch; // See if the user types Q or presses enter/escape or sends an EOF while (1) { cin.get(ch); if (cin.eof() || (ch == U_q) || (ch == U_Q)) { return false; } if ((ch == U_ESCAPE) || (ch == U_LINE_FEED)) { break; } } candidates = input_java_file_set; candidates.Union(input_class_file_set); } if (!candidates.IsEmpty()) { TypeDependenceChecker dependence_checker(this, candidates, type_trash_bin); dependence_checker.PartialOrder(); // // Compute the initial set of files that need to be recompiled. Place // them in recompilation_file_set. // RereadDirectories(); ComputeRecompilationSet(dependence_checker); } // // Starting with the initial recompilation_file_set, complete the // computation of the set of files that need to be recompiled. (Add all // new files to recompilation_file_set). Also, complete the computation of // type_trash_set, the set of files that should be removed from the // database as they will be recompiled. // fprintf(stderr, "%s", (recompilation_file_set.IsEmpty() && expired_file_set.IsEmpty() ? "\nnothing changed...\n" : "\nok...\n")); fflush(stderr); return true; }
TAM::TAM(int numTones, int numRes, Image* stroke) : images(numTones, std::vector<Image*>(numRes)) { std::vector<Image> candidates(numRes); for (int tone = 0; tone < numTones; ++tone) { int imageSize = 1; for (int resolution = 0; resolution < numRes; ++resolution) { images[tone][resolution] = new Image(imageSize, imageSize); static Pixel white = Pixel(1,1,1); images[tone][resolution]->fillImage(white); if (tone == 0) { candidates[resolution] = Image(imageSize, imageSize); } imageSize *= 2; } } double darkestTone =.9; double lightestTone = .15; double toneInterval; if (numTones == 1) { toneInterval = darkestTone - lightestTone; } else { toneInterval = (darkestTone - lightestTone) / (numTones - 1); } for (int toneLevel = 0; toneLevel < numTones; ++toneLevel) { double maxTone = lightestTone + toneInterval * toneLevel; while (fabs(maxTone - images[toneLevel][numRes-1]->getTone()) > (.1/(numRes-3))) { RandomStroke bestStroke; double bestTone = -10000; for (int i = 0; i < 25; ++i) { double toneSum = 0; bool horizontalStroke = maxTone <= .7; RandomStroke currentStroke = getRandomStroke(horizontalStroke); for (int resolution = 3; resolution < numRes; ++resolution) { if (fabs(maxTone - images[toneLevel][resolution]->getTone()) > (.1/(resolution-3+1.))) { drawStroke(stroke, currentStroke, &candidates[resolution]); // Get the effective length of the stroke, given that it might "run off" the edge double withStroke = fabs(maxTone - candidates[resolution].getTone()); double withoutStroke = fabs(maxTone - images[toneLevel][resolution]->getTone()); if (withoutStroke < withStroke) { continue; } double toneContribution = candidates[resolution].getTone() - images[toneLevel][resolution]->getTone(); // // All images are squares, side lengths are equal // const double imageSize = images[toneLevel][resolution]->getWidth(); // // double actualStrokeLength = currentStroke.length * stroke->getWidth(); // double normalizedStrokeLength; // double actualStrokeX = currentStroke.x*imageSize; // double actualStrokeY = currentStroke.y*imageSize; // if(horizontalStroke){ // normalizedStrokeLength = getActualStrokeLength(actualStrokeX, actualStrokeLength, imageSize); // } // else { // normalizedStrokeLength = getActualStrokeLength(actualStrokeY, actualStrokeLength, imageSize); // } toneContribution /= currentStroke.length; toneSum += toneContribution; candidates[resolution] = *images[toneLevel][resolution]; } } if (toneSum > bestTone) { bestTone = toneSum; bestStroke = currentStroke; } } for (int candidate = 3; candidate < numRes; ++candidate) { if (fabs(maxTone - images[toneLevel][candidate]->getTone()) > (.1/(candidate-3 + 1.))) { drawStroke(stroke, bestStroke, images[toneLevel][candidate]); candidates[candidate] = *images[toneLevel][candidate]; } } } for (int resolution=0; resolution<3; resolution++){ float greyLevel = 1-maxTone; Pixel grey(greyLevel, greyLevel, greyLevel); images[toneLevel][resolution]->fillImage(grey); } if (toneLevel != numTones - 1) { for (int resolution = 0; resolution < numRes; ++resolution) { *images[toneLevel+1][resolution] = *images[toneLevel][resolution]; candidates[resolution] = *images[toneLevel][resolution]; } } } }
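// Sketch of the inner "best of 25 random strokes" step in the TAM constructor above:
// generate k random candidates, score each, and keep the best. The Stroke type, the
// generator and the scoring function are placeholders for illustration.
#include <functional>

template <typename Stroke>
Stroke best_of_k(int k,
                 const std::function<Stroke()> &random_stroke,
                 const std::function<double(const Stroke &)> &score) {
    Stroke best = random_stroke();
    double best_score = score(best);
    for (int i = 1; i < k; ++i) {
        Stroke s = random_stroke();
        const double v = score(s);
        if (v > best_score) {
            best_score = v;
            best = s;
        }
    }
    return best;
}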
bool wd177x_format::save(io_generic *io, floppy_image *image) { // Count the number of formats int formats_count; for(formats_count=0; formats[formats_count].form_factor; formats_count++); // Allocate the storage for the list of testable formats for a // given cell size dynamic_array<int> candidates(formats_count); // Format we're finally choosing int chosen_candidate = -1; // Previously tested cell size int min_cell_size = 0; for(;;) { // Build the list of all formats for the immediately superior cell size int cur_cell_size = 0; int candidates_count = 0; for(int i=0; i != formats_count; i++) { if(image->get_form_factor() == floppy_image::FF_UNKNOWN || image->get_form_factor() == formats[i].form_factor) { if(formats[i].cell_size == cur_cell_size) candidates[candidates_count++] = i; else if((!cur_cell_size || formats[i].cell_size < cur_cell_size) && formats[i].cell_size > min_cell_size) { candidates[0] = i; candidates_count = 1; cur_cell_size = formats[i].cell_size; } } } min_cell_size = cur_cell_size; // No candidates with a cell size bigger than the previously // tested one, we're done if(!candidates_count) break; // Filter with track 0 head 0 check_compatibility(image, candidates, candidates_count); // Nobody matches, try with the next cell size if(!candidates_count) continue; // We have a match at that cell size, we just need to find the // best one given the geometry // If there's only one, we're done if(candidates_count == 1) { chosen_candidate = candidates[0]; break; } // Otherwise, find the best int tracks, heads; image->get_actual_geometry(tracks, heads); chosen_candidate = candidates[0]; for(int i=1; i != candidates_count; i++) { const format &cc = formats[chosen_candidate]; const format &cn = formats[candidates[i]]; // Handling enough sides is better than not if(cn.head_count >= heads && cc.head_count < heads) goto change; else if(cc.head_count >= heads && cn.head_count < heads) goto dont_change; // Since we're limited to two heads, at that point head // count is identical for both formats. // Handling enough tracks is better than not if(cn.track_count >= tracks && cc.track_count < tracks) goto change; else if(cc.track_count >= tracks && cn.track_count < tracks) goto dont_change; // Both are on the same side of the track count, so closest is best if(cc.track_count < tracks && cn.track_count > cc.track_count) goto change; if(cc.track_count >= tracks && cn.track_count < cc.track_count) goto change; goto dont_change; change: chosen_candidate = candidates[i]; dont_change: ; } // We have a winner, bail out break; } // No match, pick the first one and be done with it if(chosen_candidate == -1) chosen_candidate = 0; const format &f = formats[chosen_candidate]; int track_size = compute_track_size(f); UINT8 sectdata[40*512]; desc_s sectors[40]; build_sector_description(f, sectdata, sectors); for(int track=0; track < f.track_count; track++) for(int head=0; head < f.head_count; head++) { extract_sectors(image, f, sectors, track, head); io_generic_write(io, sectdata, get_image_offset(f, head, track), track_size); } return true; }
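// Design note (sketch): the goto-based "is candidate cn better than the current choice cc"
// logic in save() above can be phrased as a plain boolean comparator, keeping the criteria
// in the same order: enough heads first, then enough tracks, then closest track count. The
// geometry struct is a stand-in for the format fields actually compared.
struct geometry { int track_count; int head_count; };

bool is_better(const geometry &cn, const geometry &cc, int tracks, int heads) {
    // Handling enough sides is better than not.
    if ((cn.head_count >= heads) != (cc.head_count >= heads))
        return cn.head_count >= heads;
    // Handling enough tracks is better than not.
    if ((cn.track_count >= tracks) != (cc.track_count >= tracks))
        return cn.track_count >= tracks;
    // Both on the same side of the requested track count: closest wins.
    if (cc.track_count < tracks)
        return cn.track_count > cc.track_count;
    return cn.track_count < cc.track_count;
}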
vector<vector<Costumer*> > CVRP::update_list(vector<Route*> &solution, vector<double> &times, int &n){ vector<Costumer*>::iterator itr = clients.begin(); vector<vector<Costumer*> > candidates(solution.size(), vector<Costumer*> ()); for(; itr != clients.end(); itr++){ Costumer* co = *(itr); for(int r = 0; r < solution.size(); r++){ //candidates[r].clear(); //Error here!!! Don't forget!!! if(!solution[r]->clients.empty()){ int c = solution[r]->clients.size()-1; Costumer* org = solution[r]->clients[c]; int cost = calculate_cost(org->coord, co->coord); //Arrival time, used to verify that the vehicle can get to the client before the due time double arrivalTime = times[r] + cost; //cout << arrivalTime << endl; //Verify that the client hasn't been visited and that the vehicle is on time if(!visited[co->id - 1] && co->id != org->id && co->dTime >= arrivalTime){ Point *i = solution[r]->clients[c]->coord; Point *j = co->coord; co->ratio = calculate_cost(j, dep.coord) + cost; co->saving = calculate_cost(j, dep.coord) + calculate_cost(dep.coord, i) - calculate_cost(i, j); co->cost = cost; co->arrTime = arrivalTime; candidates[r].push_back(co); } } } } for(int i = 0; i < candidates.size(); i++){ if(candidates[i].size() != 0){ return candidates; } } candidates.push_back(vector<Costumer*> ()); int k = candidates.size() - 1; for(int i = 0; i < clients.size(); i++){ Costumer *cos = clients[i]; if(!visited[cos->id - 1]){ int cost = calculate_cost(dep.coord, cos->coord); if(candidates[k].empty()){ candidates[k].push_back(cos); }else if(candidates[k][0]->cost > cost){ candidates[k][0] = cos; } } } visited.push_back(false); visited[candidates[k][0]->id - 1] = true; times.push_back(0.0); times[k] += candidates[k][0]->cost + candidates[k][0]->servTime; candidates[k][0]->arrTime = candidates[k][0]->cost; n++; solution.push_back(new Route); solution[solution.size()-1]->clients.push_back(candidates[k][0]); return candidates; }
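// Sketch of the feasibility test used when building the candidate lists above: a customer
// can extend a route only if it has not been visited yet and the vehicle can arrive no
// later than the customer's due time. The struct is a simplified stand-in for Costumer.
struct CustomerInfo {
    bool   visited;
    double due_time;     // latest allowed arrival
    double travel_cost;  // cost/time from the route's current end to this customer
};

bool feasible_extension(const CustomerInfo &c, double route_time) {
    const double arrival = route_time + c.travel_cost;
    return !c.visited && arrival <= c.due_time;
}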
uint64_t searchDatabase(std::vector<std::vector<uint32_t>>& dst, const std::string& database_path, Chain** queries, int32_t queries_length, uint32_t kmer_length, uint32_t max_candidates, uint32_t num_threads) { fprintf(stderr, "** Searching database for candidate sequences **\n"); std::shared_ptr<Hash> query_hash = createHash(queries, queries_length, 0, queries_length, kmer_length); Chain** database = nullptr; int database_length = 0; int database_start = 0; FILE* handle = nullptr; int serialized = 0; readFastaChainsPartInit(&database, &database_length, &handle, &serialized, database_path.c_str()); uint64_t database_cells = 0; std::vector<float> min_scores(queries_length, 1000000.0); std::vector<std::vector<std::vector<Candidate>>> candidates(num_threads); uint32_t part = 1; float part_size = database_chunk / (float) 1000000000; while (true) { int status = 1; status &= readFastaChainsPart(&database, &database_length, handle, serialized, database_chunk); databaseLog(part, part_size, 0); uint32_t database_split_size = (database_length - database_start) / num_threads; std::vector<uint32_t> database_splits(num_threads + 1, database_start); for (uint32_t i = 1; i < num_threads; ++i) { database_splits[i] += i * database_split_size; } database_splits[num_threads] = database_length; std::vector<ThreadPoolTask*> thread_tasks(num_threads, nullptr); for (uint32_t i = 0; i < num_threads; ++i) { auto thread_data = new ThreadSearchData(query_hash, queries_length, min_scores, database, database_splits[i], database_splits[i + 1], kmer_length, max_candidates, candidates[i], i == num_threads - 1, part, part_size); thread_tasks[i] = threadPoolSubmit(threadSearchDatabase, (void*) thread_data); } for (uint32_t i = 0; i < num_threads; ++i) { threadPoolTaskWait(thread_tasks[i]); threadPoolTaskDelete(thread_tasks[i]); } for (int i = database_start; i < database_length; ++i) { database_cells += chainGetLength(database[i]); chainDelete(database[i]); database[i] = nullptr; } // merge candidates from all threads for (int32_t i = 0; i < queries_length; ++i) { for (uint32_t j = 1; j < num_threads; ++j) { if (candidates[j][i].empty()) { continue; } candidates[0][i].insert(candidates[0][i].end(), candidates[j][i].begin(), candidates[j][i].end()); std::vector<Candidate>().swap(candidates[j][i]); } if (num_threads > 1) { std::sort(candidates[0][i].begin(), candidates[0][i].end()); if (candidates[0][i].size() > max_candidates) { std::vector<Candidate> tmp(candidates[0][i].begin(), candidates[0][i].begin() + max_candidates); candidates[0][i].swap(tmp); } } if (!candidates[0][i].empty()) { min_scores[i] = candidates[0][i].back().score; } } databaseLog(part, part_size, 100); ++part; if (status == 0) { break; } database_start = database_length; } fprintf(stderr, "\n\n"); fclose(handle); deleteFastaChains(database, database_length); dst.clear(); dst.resize(queries_length); for (int32_t i = 0; i < queries_length; ++i) { dst[i].reserve(candidates[0][i].size()); for (uint32_t j = 0; j < candidates[0][i].size(); ++j) { dst[i].emplace_back(candidates[0][i][j].id); } std::vector<Candidate>().swap(candidates[0][i]); std::sort(dst[i].begin(), dst[i].end()); } return database_cells; }
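// Sketch of the per-query merge step above: append every thread's candidate list to the
// first thread's list, sort, and keep only the best max_candidates entries. ScoredHit is a
// hypothetical stand-in for the Candidate type used above; ordering best-first via
// operator< is an assumption mirroring the std::sort call in the code.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct ScoredHit {
    float score;
    std::uint32_t id;
    bool operator<(const ScoredHit &o) const { return score > o.score; } // best first (assumed)
};

// Precondition: per_thread has at least one (possibly empty) list.
void merge_thread_candidates(std::vector<std::vector<ScoredHit>> &per_thread,
                             std::size_t max_candidates) {
    auto &merged = per_thread[0];
    for (std::size_t t = 1; t < per_thread.size(); ++t) {
        merged.insert(merged.end(), per_thread[t].begin(), per_thread[t].end());
        std::vector<ScoredHit>().swap(per_thread[t]);   // release the thread's memory
    }
    std::sort(merged.begin(), merged.end());
    if (merged.size() > max_candidates)
        merged.resize(max_candidates);
}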
int main(int argc, char** argv) { register char* s; register Rule_t* r; register List_t* p; int i; int args; int trace; char* t; char* buf; char* tok; Var_t* v; Stat_t st; Stat_t ds; Sfio_t* tmp; /* * initialize dynamic globals */ version = strdup(fmtident(version)); setlocale(LC_ALL, ""); error_info.id = idname; error_info.version = version; error_info.exit = finish; error_info.auxilliary = intercept; if (pathcheck(PATHCHECK, error_info.id, NiL)) return 1; error(-99, "startup"); settypes("*?[]", C_MATCH); settypes("+-|=", C_OPTVAL); settypes(" \t\n", C_SEP); settype(0, C_SEP); settypes(" \t\v\n:+&=;\"\\", C_TERMINAL); settype(0, C_TERMINAL); settypes("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_", C_ID1|C_ID2|C_VARIABLE1|C_VARIABLE2); settypes(".", C_VARIABLE1|C_VARIABLE2); settypes("0123456789", C_ID2|C_VARIABLE2); /* * close garbage fd's -- we'll be tidy from this point on * 3 may be /dev/tty on some systems * 0..9 for user redirection in shell * 10..19 left open by bugs in some shells * error_info.fd interited from parent * any close-on-exec fd's must have been done on our behalf */ i = 3; if (isatty(i)) i++; for (; i < 20; i++) if (i != error_info.fd && !fcntl(i, F_GETFD, 0)) close(i); /* * allocate the very temporary buffer streams */ internal.met = sfstropen(); internal.nam = sfstropen(); internal.tmp = sfstropen(); internal.val = sfstropen(); internal.wrk = sfstropen(); tmp = sfstropen(); sfstrrsrv(tmp, 2 * MAXNAME); /* * initialize the code and hash tables */ initcode(); inithash(); /* * set the default state */ state.alias = 1; state.exec = 1; state.global = 1; state.init = 1; #if DEBUG state.intermediate = 1; #endif state.io[0] = sfstdin; state.io[1] = sfstdout; state.io[2] = sfstderr; state.jobs = 1; state.pid = getpid(); state.readstate = MAXVIEW; state.scan = 1; state.start = CURTIME; state.stateview = -1; state.tabstops = 8; state.targetview = -1; #if BINDINDEX state.view[0].path = makerule("."); #else state.view[0].path = "."; #endif state.view[0].pathlen = 1; state.writeobject = state.writestate = "-"; /* * pwd initialization * * for project management, if . is group|other writeable * then change umask() for similar write protections */ buf = sfstrbase(tmp); internal.pwd = (s = getcwd(buf, MAXNAME)) ? strdup(s) : strdup("."); internal.pwdlen = strlen(internal.pwd); if (stat(".", &st)) error(3, "cannot stat ."); if (S_ISDIR(st.st_mode) && (st.st_mode & (S_IWGRP|S_IWOTH))) umask(umask(0) & ~(st.st_mode & (S_IWGRP|S_IWOTH))); /* * set some variable default values */ hashclear(table.var, HASH_ALLOCATE); setvar(external.make, argv[0], V_import); t = "lib/make"; setvar(external.lib, strdup((s = pathpath(t, argv[0], PATH_EXECUTE, buf, SF_BUFSIZE)) ? 
s : t), V_import); setvar(external.pwd, internal.pwd, V_import); setvar(external.version, version, V_import); hashset(table.var, HASH_ALLOCATE); /* * read the environment */ readenv(); if (v = getvar(external.nproc)) state.jobs = (int)strtol(v->value, NiL, 0); if ((v = getvar(external.pwd)) && !streq(v->value, internal.pwd)) { if (!stat(v->value, &st) && !stat(internal.pwd, &ds) && st.st_ino == ds.st_ino && st.st_dev == ds.st_dev) { free(internal.pwd); internal.pwd = strdup(v->value); internal.pwdlen = strlen(v->value); } else { v->property &= ~V_import; v->property |= V_free; v->value = strdup(internal.pwd); } } /* * initialize the internal rule pointers */ initrule(); /* * read the static initialization script */ sfputr(tmp, initstatic, -1); parse(NiL, sfstruse(tmp), "initstatic", NiL); /* * check and read the args file */ if (s = colonlist(tmp, external.args, 1, ' ')) { i = fs3d(0); tok = tokopen(s, 1); while (s = tokread(tok)) if (vecargs(vecfile(s), &argc, &argv) >= 0) break; else if (errno != ENOENT) error(1, "cannot read args file %s", s); tokclose(tok); fs3d(i); } state.argf = newof(0, int, argc, 0); /* * set the command line options * read the command line assignments * mark the command line scripts and targets */ state.init = 0; state.readonly = 1; state.argv = argv; state.argc = argc; if ((args = scanargs(state.argc, state.argv, state.argf)) < 0) return 1; state.readonly = 0; state.init = 1; if (state.base) state.readstate = 0; if (state.compileonly) { state.forceread = 1; state.virtualdot = 0; } /* * tone down the bootstrap noise */ if ((trace = error_info.trace) == -1) error_info.trace = 0; /* * check explicit environment overrides */ if (s = colonlist(tmp, external.import, 1, ' ')) { tok = tokopen(s, 1); while (s = tokread(tok)) { if (i = *s == '!') s++; if (v = getvar(s)) { if (i) v->property &= ~V_import; else v->property |= V_readonly; } } tokclose(tok); } /* * set up the traps */ inittrap(); /* * announce the version */ if (error_info.trace < 0) { errno = 0; error(error_info.trace, "%s [%d %s]", version, state.pid, timestr(state.start)); } /* * initialize the views */ state.global = 0; state.user = 1; initview(); /* * check for mam */ if (state.mam.out) { if (!state.mam.statix || *state.mam.label) error_info.write = mamerror; if (state.mam.regress || state.regress) { sfprintf(state.mam.out, "%sinfo mam %s %05d\n", state.mam.label, state.mam.type, state.mam.parent); if (state.mam.regress) sfprintf(state.mam.out, "%sinfo start regression\n", state.mam.label); } else { sfprintf(state.mam.out, "%sinfo mam %s %05d 1994-07-17 %s\n", state.mam.label, state.mam.type, state.mam.parent, version); if (!state.mam.statix || *state.mam.label) { sfprintf(state.mam.out, "%sinfo start %lu\n", state.mam.label, CURTIME); if (!state.mam.root || streq(state.mam.root, internal.pwd)) sfprintf(state.mam.out, "%sinfo pwd %s\n", state.mam.label, internal.pwd); else sfprintf(state.mam.out, "%sinfo pwd %s %s\n", state.mam.label, state.mam.root, mamname(makerule(internal.pwd))); buf = sfstrbase(tmp); if (state.fsview && !mount(NiL, buf, FS3D_GET|FS3D_ALL|FS3D_SIZE(sfstrsize(tmp)), NiL)) sfprintf(state.mam.out, "%sinfo view %s\n", state.mam.label, buf); } } } /* * read the dynamic initialization script */ if ((i = error_info.trace) > -20) error_info.trace = 0; sfputr(tmp, initdynamic, -1); parse(NiL, sfstruse(tmp), "initdynamic", NiL); error_info.trace = i; state.user = 0; state.init = 0; /* * read the explicit makefiles * readfile() handles the base and global rules * * NOTE: internal.tmplist is used 
to handle the effects of * load() on internal list pointers */ compref(NiL, 0); if (p = internal.makefiles->prereqs) { p = internal.tmplist->prereqs = listcopy(p); for (; p; p = p->next) readfile(p->rule->name, COMP_FILE, NiL); freelist(internal.tmplist->prereqs); internal.tmplist->prereqs = 0; } /* * if no explicit makefiles then try external.{convert,files} */ if (!state.makefile) { int sep; Sfio_t* exp; Sfio_t* imp; exp = 0; imp = sfstropen(); sep = 0; s = 0; if (*(t = getval(external.convert, VAL_PRIMARY))) { sfputr(tmp, t, 0); sfstrrsrv(tmp, MAXNAME); tok = tokopen(sfstrbase(tmp), 0); while (s = tokread(tok)) { if (!exp) exp = sfstropen(); if (t = colonlist(exp, s, 0, ' ')) { t = tokopen(t, 0); while (s = tokread(t)) { if (readfile(s, COMP_INCLUDE|COMP_DONTCARE, NiL)) break; if (sep) sfputc(imp, ','); else sep = 1; sfputr(imp, s, -1); } tokclose(t); if (s) break; } if (!(s = tokread(tok))) break; } tokclose(tok); sfstrseek(tmp, 0, SEEK_SET); } if (!s && (s = colonlist(tmp, external.files, 1, ' '))) { tok = tokopen(s, 1); while (s = tokread(tok)) { if (readfile(s, COMP_INCLUDE|COMP_DONTCARE, NiL)) break; if (sep) sfputc(imp, ','); else sep = 1; sfputr(imp, s, -1); } tokclose(tok); } if (!s) { /* * this readfile() pulls in the default base rules * that might resolve any delayed self-documenting * options in optcheck() */ if (readfile("-", COMP_FILE, NiL)) optcheck(1); if (*(s = sfstruse(imp))) error(state.errorid ? 1 : 3, "a makefile must be specified when %s omitted", s); else error(state.errorid ? 1 : 3, "a makefile must be specified"); } sfstrclose(imp); if (exp) sfstrclose(exp); } /* * validate external command line options */ optcheck(1); /* * check for listing of variable and rule definitions */ if (state.list) { dump(sfstdout, 0); return 0; } /* * check if makefiles to be compiled */ if (state.compile && !state.virtualdot && state.writeobject) { /* * make the compinit trap */ if (r = getrule(external.compinit)) { state.reading = 1; maketop(r, P_dontcare|P_foreground, NiL); state.reading = 0; } if (state.exec && state.objectfile) { message((-2, "compiling makefile input into %s", state.objectfile)); compile(state.objectfile, NiL); } /* * make the compdone trap */ if (r = getrule(external.compdone)) { state.reading = 1; maketop(r, P_dontcare|P_foreground, NiL); state.reading = 0; } } /* * makefile read cleanup */ if (state.compileonly) return 0; compref(NiL, 0); sfstrclose(tmp); state.compile = COMPILED; if (state.believe) { if (!state.maxview) state.believe = 0; else if (state.fsview) error(3, "%s: option currently works in 2d only", optflag(OPT_believe)->name); } /* * read the state file */ readstate(); /* * place the command line targets in internal.args */ if (internal.main->dynamic & D_dynamic) dynamic(internal.main); internal.args->prereqs = p = 0; for (i = args; i < state.argc; i++) if (state.argf[i] & ARG_TARGET) { List_t* q; q = cons(makerule(state.argv[i]), NiL); if (p) p = p->next = q; else internal.args->prereqs = p = q; } /* * the engine bootstrap is complete -- start user activities */ state.user = 1; /* * make the makeinit trap */ if (r = getrule(external.makeinit)) maketop(r, P_dontcare|P_foreground, NiL); /* * read the command line scripts */ for (i = args; i < state.argc; i++) if (state.argf[i] & ARG_SCRIPT) { state.reading = 1; parse(NiL, state.argv[i], "command line script", NiL); state.reading = 0; } /* * freeze the parameter files and candidate state variables */ state.user = 2; candidates(); /* * make the init trap */ if (r = getrule(external.init)) 
maketop(r, P_dontcare|P_foreground, NiL); /* * internal.args default to internal.main */ if (!internal.args->prereqs && internal.main->prereqs) internal.args->prereqs = listcopy(internal.main->prereqs); /* * turn up the volume again */ if (!error_info.trace) error_info.trace = trace; /* * make the prerequisites of internal.args */ if (internal.args->prereqs) while (internal.args->prereqs) { /* * we explicitly allow internal.args modifications */ r = internal.args->prereqs->rule; internal.making->prereqs = internal.args->prereqs; internal.args->prereqs = internal.args->prereqs->next; internal.making->prereqs->next = 0; maketop(r, 0, NiL); } else if (state.makefile) error(3, "%s: a main target must be specified", state.makefile); /* * finish up */ finish(0); return 0; }
vector<char> Chromosome::GRCsolution(double badConnectionWeight) { //ofstream output; //output.open(string("results/") + to_string(time(0)) + "_GRCsolution.csv"); // Chromosome::gcrCalls++; int size = _nodeList.size(); vector<char> solution(size); int firstPosition = rand() % size; solution[firstPosition] = 1; //swap half of the bits for (int selectedCount = 1; selectedCount < size / 2; selectedCount++){ int toSelect = size - selectedCount; vector<Candidate> candidates(toSelect); int canId = 0; // candidate id for the array, may be faster than vectors for (int i = 0; i < size; ++i) { if (solution[i] == 0) { int connections = 0; int connectionsBad = 0; for (auto link : _nodeList[i]._links) { if (solution[link] == 1) { connections++; } else { connectionsBad++; } } candidates[canId]._connections = connections; candidates[canId]._connectionsBad = connectionsBad; candidates[canId]._score = (1-badConnectionWeight) * connections - badConnectionWeight * connectionsBad; candidates[canId]._id = i; canId++; } } sort(candidates.begin(), candidates.end(), [](const Candidate & a, const Candidate & b) {return a._score > b._score; }); double lowestConnectionCount = candidates[0]._score; int partSize; for (partSize = 1; partSize < toSelect; partSize++){ if (lowestConnectionCount > candidates[partSize]._score){ break; } } int addId = rand() % partSize; solution[candidates[addId]._id] = 1; //output << solution << endl; // cout << candidates[addId]._id << " " << candidates[addId]._connections << endl; } //output.close(); return solution; }
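// Sketch of the greedy-randomized choice in GRCsolution() above: sort the candidates by
// score (best first), take the prefix that shares the top score, and pick one of those at
// random. Plain C++ with <random>; the (id, score) pair representation is an illustration.
#include <algorithm>
#include <cstddef>
#include <random>
#include <utility>
#include <vector>

// Precondition: candidates is not empty.
int pick_greedy_randomized(std::vector<std::pair<int, double>> candidates, std::mt19937 &rng) {
    std::sort(candidates.begin(), candidates.end(),
              [](const auto &a, const auto &b) { return a.second > b.second; });
    std::size_t tied = 1;
    while (tied < candidates.size() && candidates[tied].second == candidates[0].second)
        ++tied;
    std::uniform_int_distribution<std::size_t> pick(0, tied - 1);
    return candidates[pick(rng)].first;
}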
vector<int> count_crossings_candidate_list(int point_index, vector<Punto> &candidate_list, vector<Punto> &puntos) { Punto p(0,0); int pos_point_in_tp = 0; int num_cand = candidate_list.size(); int num_pts = puntos.size(); vector<candidato> candidates(num_cand); vector<int> cr_list (num_cand, 0); vector<int> cr_list2 (num_cand, 0); vector<int> cr_list3 (num_cand, 0); vector<int> count_change_of_list (num_cand, 0); vector<int> count_change_cr_for_q (num_cand, 0); int cr2=0; int cr3=0; for(int i=0; i<num_cand; i++) { candidates[i].pt = candidate_list[i]; candidates[i].index =i; cr_list2[i]=0; cr_list3[i]=0; count_change_of_list[i]=0; count_change_cr_for_q[i]=0; } vector<candidato> temp_pts(num_pts-1); //int centro=0; unused variable vector<candidato> united_points(2*num_pts-3+num_cand); for(int centro = 0; centro<num_pts; centro++) { if(centro != point_index) { p = puntos[centro]; for(int i=0; i<centro; i++) temp_pts[i].pt = puntos[i]; for(int i=centro; i<num_pts-1; i++) temp_pts[i].pt = puntos[i+1]; temp_pts=sort_around_point(p,temp_pts); for(int i=0; i<num_pts-1; i++) { temp_pts[i].index=i; temp_pts[i].original = true; temp_pts[i].antipodal = true; } //nis for p vector<int> nis(num_pts-1); int j=0; for(int i=0; i<num_pts-1; i++) { Punto p0 = temp_pts[i].pt; Punto p1 = temp_pts[(j+1)%(num_pts-1)].pt; while((turn(p,p0,p1)<=0) && ((j+1)%(num_pts-1)!=i)) { j++; p0=temp_pts[i].pt; p1=temp_pts[(j+1)%(num_pts-1)].pt; } if((j-i)%(num_pts-1)>=0) nis[i]=(j-i)%(num_pts-1); else nis[i]=(j-i)%(num_pts-1)+num_pts-1; } ///////////nis computation ends here for(int i=0; i<num_pts-1; i++) //note the -1 here if(temp_pts[i].pt == puntos[point_index]) pos_point_in_tp=i; //Sum 2: cr2 for(int i=0; i<num_pts-1; i++) if(i!=pos_point_in_tp) cr2=cr2+(nis[i]*(nis[i]-1)/2); /////join_pts_antipodal_candidatelist begins here j=0; for(int i=0; i<pos_point_in_tp; i++) { united_points[j] = temp_pts[i]; j=j+1; united_points[j] = temp_pts[i]; united_points[j].pt.x = 2*p.x-temp_pts[i].pt.x; united_points[j].pt.y = 2*p.y-temp_pts[i].pt.y; united_points[j].antipodal = false; j=j+1; } united_points[j].pt = temp_pts[pos_point_in_tp].pt; j=j+1; for(int i=pos_point_in_tp+1; i<num_pts-1; i++) { united_points[j] = temp_pts[i]; j=j+1; united_points[j] = temp_pts[i]; united_points[j].pt.x = 2*p.x-temp_pts[i].pt.x; united_points[j].pt.y = 2*p.y-temp_pts[i].pt.y; united_points[j].original = true; united_points[j].antipodal = false; j=j+1; } for(int i=0; i<num_cand; i++) { united_points[j] = candidates[i]; j=j+1; } united_points=sort_around_point(p,united_points); ////join_pts_antipodal_candidatelist ends here int position_p; for(int i=0; i<2*num_pts-3+num_cand; i++) if(united_points[i].pt == puntos[point_index]) position_p=i; ///// Here we begin change_of_cr_for_list vector<int> aux_nis(num_pts-1); int count_ni=0; int sum_ni=0; for(int i=0; i<num_pts-1; i++) aux_nis[i]=nis[i]; for(int i=1; i<2*num_pts-3+num_cand; i++) { int pos=(position_p+i)%(2*num_pts-3+num_cand); if (united_points[pos].original) { if (united_points[pos].antipodal) { sum_ni=sum_ni+aux_nis[united_points[pos].index]; aux_nis[united_points[pos].index]=aux_nis[united_points[pos].index]+1; count_ni--; } else { sum_ni=sum_ni-aux_nis[united_points[pos].index]+1; aux_nis[united_points[pos].index]=aux_nis[united_points[pos].index]-1; count_ni++; } } else { cr_list2[united_points[pos].index]=cr_list2[united_points[pos].index]+sum_ni; count_change_of_list[united_points[pos].index]=count_ni; } } /////// here we finish change_of_cr_for_list /////////////////Here begins sum 3: cr3///////////////// cr3=cr3+(nis[pos_point_in_tp]*(nis[pos_point_in_tp]-1)/2); ///// cr_list3 for(int i=0; i<num_cand; i++) cr_list3[i]=cr_list3[i]+(count_change_of_list[i]+nis[pos_point_in_tp])*(count_change_of_list[i]+nis[pos_point_in_tp]-1)/2; //////////////////////////////////end of sum 3 } } int total=num_pts*(num_pts-1)*(num_pts-2)*(num_pts-3)/8; for(int i=0; i<num_cand; i++) { cr_list2[i]=cr_list2[i]+cr2; cr_list[i]=cr_list2[i]+2*cr_list3[i]-total; } return cr_list; }