//Complexity : theta(n^3) void maximum_partial(graph g, subgraph max) { int index,i; subgraph forbidden; clock_t time = clock(); for(i=0 ; i<NB_VERTICES ; i++) forbidden[i] = 0; while(!is_maximal(max,g)) { index = vertex_with_less_edges(g,forbidden); forbidden[index] = 1; max[index] = 1; if(!is_desert(max,g)) max[index] = 0; } printf("[maximum_partial/maximum_partial] Execution time : %fs\n",(double)time/CLOCKS_PER_SEC); }
/**
 * Complete a single match between the pairs list and the suffixtree
 * @param m the match all ready to go
 * @param text the text of the new version
 * @param v the new version id
 * @param log the log to save errors in
 * @param popped nonzero to keep the location already stored in the match
 *        (a "popped" state); zero to restart from the suffix-tree root
 * @return 1 if the match was at least 1 char long else 0
 */
int match_single( match *m, UChar *text, int v, plugin_log *log, int popped )
{
    UChar c;
    // go to the deepest match if not the first one (as usual)
    while ( m->next != NULL )
        m = m->next;
    pos *loc = &m->loc;
    // preserve popped location in suffix tree
    if ( !popped )
    {
        // fresh match: start from the root, one position before its start
        loc->v = suffixtree_root( m->st );
        loc->loc = node_start(loc->v)-1;
        m->maximal = 0;
    }
    do
    {
        // next character of the new version at the match's end position
        UChar *data = pair_data(card_pair(m->end.current));
        c = data[m->end.pos];
        if ( suffixtree_advance_pos(m->st,loc,c) )
        {
            // lazily record which versions this match covers
            if ( m->bs == NULL )
                m->bs = bitset_clone(
                    pair_versions(card_pair(m->end.current)) );
            // reaching a leaf fixes the offset in the base text;
            // only then can maximality be decided
            if ( !m->maximal && node_is_leaf(loc->v) )
            {
                m->text_off = m->st_off + node_start(loc->v)-m->len;
                if ( !is_maximal(m,text) )
                    break;
                else
                    m->maximal = 1;
            }
            // we are already matched, so increase length
            m->len++;
            // step the match forward; stops at version/pair boundaries
            if ( !match_advance(m,loc,v,log) )
                break;
        }
        else
            break;  // suffix tree cannot extend the match with c
    }
    while ( 1 );
    return m->maximal;
}
/* Writes every requested output file for the computed cone `Result`:
 * .esp/.tgn/.tri/.out/.egn/.typ/.gen/.ext/.mod/.lat/.cst plus the .inv file
 * and the Stanley decomposition. Which files are produced is controlled by
 * the boolean member flags (esp, tgn, tri, out, egn, typ, gen, ext, mod,
 * lat, cst). NOTE(review): `name`, `rank`, `dim`, `homogeneous`,
 * `lattice_ideal_input`, `of_monoid`, `of_cone`, `of_polyhedron` are
 * presumably members initialized before this call — confirm in the class. */
void Output<Integer>::write_files() const {
    const Sublattice_Representation<Integer>& BasisChange = Result->getSublattice();
    size_t i, nr;
    const Matrix<Integer>& Generators = Result->getGeneratorsMatrix();
    const Matrix<Integer>& Support_Hyperplanes = Result->getSupportHyperplanesMatrix();
    vector<libnormaliz::key_t> rees_ideal_key;

    if (esp && Result->isComputed(ConeProperty::SupportHyperplanes)) {
        //write the suport hyperplanes of the full dimensional cone
        Matrix<Integer> Support_Hyperplanes_Full_Cone = BasisChange.to_sublattice_dual(Support_Hyperplanes);
        // Support_Hyperplanes_Full_Cone.print(name,"esp");
        string esp_string = name+".esp";
        const char* esp_file = esp_string.c_str();
        ofstream esp_out(esp_file);
        Support_Hyperplanes_Full_Cone.print(esp_out);
        esp_out << "inequalities" << endl;
        if (Result->isComputed(ConeProperty::Grading)) {
            // grading written as a 1 x rank matrix in sublattice coordinates
            esp_out << 1 << endl << rank << endl;
            esp_out << BasisChange.to_sublattice_dual(Result->getGrading());
            esp_out << "grading" << endl;
        }
        if (Result->isComputed(ConeProperty::Dehomogenization)) {
            esp_out << 1 << endl << rank << endl;
            esp_out << BasisChange.to_sublattice_dual(Result->getDehomogenization());
            esp_out << "dehomogenization" << endl;
        }
        esp_out.close();
    }
    if (tgn)
        Generators.print(name,"tgn");
    if (tri && Result->isComputed(ConeProperty::Triangulation)) { //write triangulation
        write_tri();
    }

    if (out==true) { //printing .out file
        string name_open=name+".out"; //preparing output files
        const char* file=name_open.c_str();
        // NOTE: this stream deliberately shadows the boolean member `out`
        ofstream out(file);

        // write "header" of the .out file: one summary line per computed item
        size_t nr_orig_gens = 0;
        if (lattice_ideal_input) {
            nr_orig_gens = Result->getNrOriginalMonoidGenerators();
            out << nr_orig_gens <<" original generators of the toric ring"<<endl;
        }
        if (Result->isComputed(ConeProperty::ModuleGenerators)) {
            out << Result->getNrModuleGenerators() <<" module generators" << endl;
        }
        if (Result->isComputed(ConeProperty::HilbertBasis)) {
            out << Result->getNrHilbertBasis() <<" Hilbert basis elements" << of_monoid << endl;
        }
        if (homogeneous && Result->isComputed(ConeProperty::Deg1Elements)) {
            out << Result->getNrDeg1Elements() <<" Hilbert basis elements of degree 1"<<endl;
        }
        if (Result->isComputed(ConeProperty::IsReesPrimary)
            && Result->isComputed(ConeProperty::HilbertBasis)) {
            const Matrix<Integer>& Hilbert_Basis = Result->getHilbertBasisMatrix();
            nr = Hilbert_Basis.nr_of_rows();
            // collect rows whose last coordinate is 1: generators of the
            // integral closure of the Rees ideal
            for (i = 0; i < nr; i++) {
                if (Hilbert_Basis.read(i,dim-1)==1) {
                    rees_ideal_key.push_back(i);
                }
            }
            out << rees_ideal_key.size() <<" generators of integral closure of the ideal"<<endl;
        }
        if (Result->isComputed(ConeProperty::VerticesOfPolyhedron)) {
            out << Result->getNrVerticesOfPolyhedron() <<" vertices of polyhedron" << endl;
        }
        if (Result->isComputed(ConeProperty::ExtremeRays)) {
            out << Result->getNrExtremeRays() <<" extreme rays" << of_cone << endl;
        }
        if(Result->isComputed(ConeProperty::ModuleGeneratorsOverOriginalMonoid)) {
            out << Result->getNrModuleGeneratorsOverOriginalMonoid() <<" module generators over original monoid" << endl;
        }
        if (Result->isComputed(ConeProperty::SupportHyperplanes)) {
            out << Result->getNrSupportHyperplanes() <<" support hyperplanes" << of_polyhedron << endl;
        }
        out<<endl;
        if (Result->isComputed(ConeProperty::ExcludedFaces)) {
            out << Result->getNrExcludedFaces() <<" excluded faces"<<endl;
            out << endl;
        }
        out << "embedding dimension = " << dim << endl;
        if (homogeneous) {
            // is_maximal(rank,dim) presumably renders a "(maximal)" marker
            out << "rank = "<< rank << is_maximal(rank,dim) << endl;
            //out << "index E:G = "<< BasisChange.get_index() << endl;
            out << "external index = "<< BasisChange.getExternalIndex() << endl;
        } else { // now inhomogeneous case
            if (Result->isComputed(ConeProperty::AffineDim))
                out << "affine dimension of the polyhedron = "
                    << Result->getAffineDim() << is_maximal(Result->getAffineDim(),dim-1) << endl;
            if (Result->isComputed(ConeProperty::RecessionRank))
                out << "rank of recession monoid = " << Result->getRecessionRank() << endl;
        }
        if(Result->isComputed(ConeProperty::OriginalMonoidGenerators)){
            out << "internal index = " << Result->getIndex() << endl;
        }
        if (homogeneous && Result->isComputed(ConeProperty::IsIntegrallyClosed)) {
            if (Result->isIntegrallyClosed()) {
                out << "original monoid is integrally closed"<<endl;
            } else {
                out << "original monoid is not integrally closed"<<endl;
            }
        }
        out << endl;
        if (Result->isComputed(ConeProperty::TriangulationSize)) {
            out << "size of ";
            if (Result->isTriangulationNested()) out << "nested ";
            if (Result->isTriangulationPartial()) out << "partial ";
            out << "triangulation = " << Result->getTriangulationSize() << endl;
        }
        if (Result->isComputed(ConeProperty::TriangulationDetSum)) {
            out << "resulting sum of |det|s = " << Result->getTriangulationDetSum() << endl;
        }
        if (Result->isComputed(ConeProperty::TriangulationSize)) {
            out << endl;
        }
        if ( Result->isComputed(ConeProperty::Dehomogenization) ) {
            out << "dehomogenization:" << endl << Result->getDehomogenization() << endl;
        }
        if ( Result->isComputed(ConeProperty::Grading) ) {
            out << "grading:" << endl << Result->getGrading();
            Integer denom = Result->getGradingDenom();
            if (denom != 1) {
                out << "with denominator = " << denom << endl;
            }
            out << endl;
            if (homogeneous && Result->isComputed(ConeProperty::ExtremeRays)) {
                // histogram of extreme-ray degrees (normalized by the denominator)
                out << "degrees of extreme rays:"<<endl;
                map<Integer,long> deg_count;
                vector<Integer> degs = Result->getExtremeRaysMatrix().MxV(Result->getGrading());
                for (i=0; i<degs.size(); ++i) {
                    deg_count[degs[i]/denom]++;
                }
                out << deg_count;
            }
        } else if (Result->isComputed(ConeProperty::IsDeg1ExtremeRays)) {
            if ( !Result->isDeg1ExtremeRays() ) {
                out << "No implicit grading found" << endl;
            }
        }
        out<<endl;
        if (homogeneous && Result->isComputed(ConeProperty::IsDeg1HilbertBasis)
            && Result->isDeg1ExtremeRays() ) {
            if (Result->isDeg1HilbertBasis()) {
                out << "Hilbert basis elements are of degree 1";
            } else {
                out << "Hilbert basis elements are not of degree 1";
            }
            out<<endl<<endl;
        }
        if ( Result->isComputed(ConeProperty::ModuleRank) ) {
            out << "module rank = "<< Result->getModuleRank() << endl;
        }
        if ( Result->isComputed(ConeProperty::Multiplicity) ) {
            out << "multiplicity = "<< Result->getMultiplicity() << endl;
        }
        if ( Result->isComputed(ConeProperty::ModuleRank)
            || Result->isComputed(ConeProperty::Multiplicity)) {
            out << endl;
        }

        if ( Result->isComputed(ConeProperty::HilbertSeries) ) {
            const HilbertSeries& HS = Result->getHilbertSeries();
            out << "Hilbert series:" << endl << HS.getNum();
            map<long, long> HS_Denom = HS.getDenom();
            long nr_factors = 0;
            // total multiplicity of all (1-t^k) factors in the denominator
            for (map<long, long>::iterator it = HS_Denom.begin(); it!=HS_Denom.end(); ++it) {
                nr_factors += it->second;
            }
            out << "denominator with " << nr_factors << " factors:" << endl;
            out << HS.getDenom();
            out << endl;
            if (HS.getShift() != 0) {
                out << "shift = " << HS.getShift() << endl << endl;
            }
            out << "degree of Hilbert Series as rational function = "
                << HS.getDegreeAsRationalFunction() << endl << endl;
            long period = HS.getPeriod();
            if (period == 1) {
                // period 1: an honest polynomial
                out << "Hilbert polynomial:" << endl;
                out << HS.getHilbertQuasiPolynomial()[0];
                out << "with common denominator = ";
                out << HS.getHilbertQuasiPolynomialDenom();
                out << endl<< endl;
            } else {
                // output cyclonomic representation
                out << "Hilbert series with cyclotomic denominator:" << endl;
                out << HS.getCyclotomicNum();
                out << "cyclotomic denominator:" << endl;
                out << HS.getCyclotomicDenom();
                out << endl;
                // Hilbert quasi-polynomial
                HS.computeHilbertQuasiPolynomial();
                if (HS.isHilbertQuasiPolynomialComputed()) {
                    out<<"Hilbert quasi-polynomial of period " << period << ":" << endl;
                    Matrix<mpz_class> HQP(HS.getHilbertQuasiPolynomial());
                    HQP.pretty_print(out,true);
                    out<<"with common denominator = "<<HS.getHilbertQuasiPolynomialDenom();
                }
                out << endl << endl;
            }
        }
        if (Result->isComputed(ConeProperty::IsReesPrimary)) {
            if (Result->isReesPrimary()) {
                out<<"ideal is primary to the ideal generated by the indeterminates"<<endl;
            } else {
                out<<"ideal is not primary to the ideal generated by the indeterminates"<<endl;
            }
            if (Result->isComputed(ConeProperty::ReesPrimaryMultiplicity)) {
                out<<"multiplicity of the ideal = "<<Result->getReesPrimaryMultiplicity()<<endl;
            }
            out << endl;
        }
        if(Result->isComputed(ConeProperty::ClassGroup)) {
            // layout: ClassGroup[0] = free rank, remaining entries = orders
            // of the finite cyclic summands
            vector<Integer> ClassGroup=Result->getClassGroup();
            out << "rank of class group = " << ClassGroup[0] << endl;
            if(ClassGroup.size()==1)
                out << "class group is free" << endl << endl;
            else{
                ClassGroup.erase(ClassGroup.begin());
                out << "finite cyclic summands:" << endl;
                out << count_in_map<Integer,size_t>(ClassGroup);
                out << endl;
            }
        }

        out << "***********************************************************************" << endl << endl;

        // ----- body of the .out file: the actual generator/element lists -----
        if (lattice_ideal_input) {
            out << nr_orig_gens <<" original generators:"<<endl;
            Result->getOriginalMonoidGeneratorsMatrix().pretty_print(out);
            out << endl;
        }
        if (Result->isComputed(ConeProperty::ModuleGenerators)) {
            out << Result->getNrModuleGenerators() <<" module generators:" << endl;
            Result->getModuleGeneratorsMatrix().pretty_print(out);
            out << endl;
        }
        if ( Result->isComputed(ConeProperty::Deg1Elements) ) {
            const Matrix<Integer>& Hom = Result->getDeg1ElementsMatrix();
            write_matrix_ht1(Hom);
            nr=Hom.nr_of_rows();
            out<<nr<<" Hilbert basis elements of degree 1:"<<endl;
            Hom.pretty_print(out);
            out << endl;
        }
        if (Result->isComputed(ConeProperty::HilbertBasis)) {
            const Matrix<Integer>& Hilbert_Basis = Result->getHilbertBasisMatrix();
            if(!Result->isComputed(ConeProperty::Deg1Elements)){
                nr=Hilbert_Basis.nr_of_rows();
                out << nr << " Hilbert basis elements" << of_monoid << ":" << endl;
                Hilbert_Basis.pretty_print(out);
                out << endl;
            } else {
                // degree-1 elements were already listed above; print only the rest
                nr=Hilbert_Basis.nr_of_rows()-Result->getNrDeg1Elements();
                out << nr << " further Hilbert basis elements" << of_monoid << " of higher degree:" << endl;
                Matrix<Integer> HighDeg(nr,dim);
                for(size_t i=0;i<nr;++i)
                    HighDeg[i]=Hilbert_Basis[i+Result->getNrDeg1Elements()];
                HighDeg.pretty_print(out);
                out << endl;
            }
            Matrix<Integer> complete_Hilbert_Basis(0,dim);
            if (gen || egn || typ) { // for these files we append the module generators if there are any
                if (Result->isComputed(ConeProperty::ModuleGenerators)) {
                    complete_Hilbert_Basis.append(Hilbert_Basis);
                    complete_Hilbert_Basis.append(Result->getModuleGeneratorsMatrix());
                    write_matrix_gen(complete_Hilbert_Basis);
                } else {
                    write_matrix_gen(Hilbert_Basis);
                }
            }
            if (egn || typ) {
                // the .egn/.typ files use full-cone (sublattice) coordinates
                Matrix<Integer> Hilbert_Basis_Full_Cone = BasisChange.to_sublattice(Hilbert_Basis);
                if (Result->isComputed(ConeProperty::ModuleGenerators)) {
                    Hilbert_Basis_Full_Cone.append(BasisChange.to_sublattice(Result->getModuleGeneratorsMatrix()));
                }
                if (egn) {
                    string egn_string = name+".egn";
                    const char* egn_file = egn_string.c_str();
                    ofstream egn_out(egn_file);
                    Hilbert_Basis_Full_Cone.print(egn_out);
                    // egn_out<<"cone"<<endl;
                    egn_out.close();
                }
                if (typ && homogeneous) {
                    write_matrix_typ(Hilbert_Basis_Full_Cone.multiplication(BasisChange.to_sublattice_dual(Support_Hyperplanes).transpose()));
                }
            }
            if (Result->isComputed(ConeProperty::IsReesPrimary)) {
                out << rees_ideal_key.size() <<" generators of integral closure of the ideal:"<<endl;
                // drop the homogenizing last coordinate before printing
                Matrix<Integer> Ideal_Gens = Hilbert_Basis.submatrix(rees_ideal_key);
                Ideal_Gens.resize_columns(dim-1);
                Ideal_Gens.pretty_print(out);
                out << endl;
            }
        }
        if (Result->isComputed(ConeProperty::VerticesOfPolyhedron)) {
            out << Result->getNrVerticesOfPolyhedron() <<" vertices of polyhedron:" << endl;
            Result->getVerticesOfPolyhedronMatrix().pretty_print(out);
            out << endl;
        }
        if (Result->isComputed(ConeProperty::ExtremeRays)) {
            out << Result->getNrExtremeRays() << " extreme rays" << of_cone << ":" << endl;
            Result->getExtremeRaysMatrix().pretty_print(out);
            out << endl;
            if (ext) { // for the .gen file we append the vertices of polyhedron if there are any
                if (Result->isComputed(ConeProperty::VerticesOfPolyhedron)) {
                    Matrix<Integer> Extreme_Rays(Result->getExtremeRaysMatrix());
                    Extreme_Rays.append(Result->getVerticesOfPolyhedronMatrix());
                    write_matrix_ext(Extreme_Rays);
                } else {
                    write_matrix_ext(Result->getExtremeRaysMatrix());
                }
            }
        }
        if(Result->isComputed(ConeProperty::ModuleGeneratorsOverOriginalMonoid)) {
            out << Result->getNrModuleGeneratorsOverOriginalMonoid() <<" module generators over original monoid:" << endl;
            Result->getModuleGeneratorsOverOriginalMonoidMatrix().pretty_print(out);
            out << endl;
            if(mod)
                write_matrix_mod(Result->getModuleGeneratorsOverOriginalMonoidMatrix());
        }

        //write constrains (support hyperplanes, congruences, equations)
        if (Result->isComputed(ConeProperty::SupportHyperplanes)) {
            out << Support_Hyperplanes.nr_of_rows() <<" support hyperplanes" << of_polyhedron << ":" << endl;
            Support_Hyperplanes.pretty_print(out);
            out << endl;
        }
        if (Result->isComputed(ConeProperty::ExtremeRays)) {
            //equations
            const Matrix<Integer>& Equations = BasisChange.getEquationsMatrix();
            size_t nr_of_equ = Equations.nr_of_rows();
            if (nr_of_equ > 0) {
                out << nr_of_equ <<" equations:" <<endl;
                Equations.pretty_print(out);
                out << endl;
            }
            //congruences
            const Matrix<Integer>& Congruences = BasisChange.getCongruencesMatrix();
            size_t nr_of_cong = Congruences.nr_of_rows();
            if (nr_of_cong > 0) {
                out << nr_of_cong <<" congruences:" <<endl;
                Congruences.pretty_print(out);
                out << endl;
            }
            //lattice
            const Matrix<Integer>& LatticeBasis = BasisChange.getEmbeddingMatrix();
            size_t nr_of_latt = LatticeBasis.nr_of_rows();
            // only printed when the lattice is a proper sublattice
            if (nr_of_latt < dim || BasisChange.getExternalIndex()!=1) {
                out << nr_of_latt <<" basis elements of lattice:" <<endl;
                LatticeBasis.pretty_print(out);
                out << endl;
            }
            if(lat)
                write_matrix_lat(LatticeBasis);
            //excluded faces
            if (Result->isComputed(ConeProperty::ExcludedFaces)) {
                const Matrix<Integer>& ExFaces = Result->getExcludedFacesMatrix();
                out << ExFaces.nr_of_rows() <<" excluded faces:" <<endl;
                ExFaces.pretty_print(out);
                out << endl;
            }
            if(cst) {
                // the .cst file repeats all constraints in machine-readable form
                string cst_string = name+".cst";
                const char* cst_file = cst_string.c_str();
                ofstream cst_out(cst_file);
                Support_Hyperplanes.print(cst_out);
                cst_out<<"inequalities"<<endl;
                Equations.print(cst_out);
                cst_out<<"equations"<<endl;
                Congruences.print(cst_out);
                cst_out<<"congruences"<<endl;
                if (Result->isComputed(ConeProperty::ExcludedFaces)) {
                    Result->getExcludedFacesMatrix().print(cst_out);
                    cst_out<<"excluded_faces"<<endl;
                }
                if (Result->isComputed(ConeProperty::Grading)) {
                    cst_out << 1 << endl << dim << endl;
                    cst_out << Result->getGrading();
                    cst_out << "grading" << endl;
                }
                if (Result->isComputed(ConeProperty::Dehomogenization)) {
                    cst_out << 1 << endl << dim << endl;
                    cst_out << Result->getDehomogenization();
                    cst_out << "dehomogenization" << endl;
                }
                cst_out.close();
            }
        }
        out.close();
    }   // end of the .out file

    write_inv_file();
    write_Stanley_dec();
}
/*
 * sub_weighted_all()
 *
 * Recursion function for searching for all cliques of given weight.
 *
 *   table    - subset of vertices of graph g
 *   size     - size of table
 *   weight   - total weight of vertices in table
 *   current_weight - weight of clique found so far
 *   prune_low  - ignore all cliques with weight less or equal to this value
 *                (often heaviest clique found so far)  (passed through)
 *   prune_high - maximum weight possible for clique in this subgraph
 *                (passed through)
 *   min_weight - minimum weight of cliques to search for (passed through)
 *                Must be greater than 0.
 *   max_weight - maximum weight of cliques to search for (passed through)
 *                If no upper limit is desired, use eg. INT_MAX
 *   maximal  - search only for maximal cliques
 *   g        - the graph
 *   opts     - storage options
 *
 * All cliques of suitable weight found are stored according to opts.
 *
 * Returns weight of heaviest clique found (prune_low if a heavier clique
 * hasn't been found); if a clique with weight at least min_weight is found
 * then min_weight-1 is returned.  If clique storage failed, -1 is returned.
 *
 * The largest clique found smaller than max_weight is stored in
 * best_clique, if non-NULL.
 *
 * Uses current_clique to store the currently-being-searched clique.
 * clique_size[] for all values in table must be defined and correct,
 * otherwise inaccurate results may occur.
 *
 * To search for a single maximum clique, use min_weight==max_weight==INT_MAX,
 * with best_clique non-NULL.  To search for a single given-weight clique,
 * use opts->clique_list and opts->user_function=false_function.  When
 * searching for all cliques, min_weight should be given the minimum weight
 * desired.
 */
static int sub_weighted_all(int *table, int size, int weight,
                            int current_weight, int prune_low, int prune_high,
                            int min_weight, int max_weight, boolean maximal,
                            graph_t *g, clique_options *opts) {
    int i;
    int v,w;
    int *newtable;
    int *p1, *p2;
    int newweight;

    if (current_weight >= min_weight) {
        if ((current_weight <= max_weight) &&
            ((!maximal) || is_maximal(current_clique,g))) {
            /* We've found one.  Store it. */
            if (!store_clique(current_clique,g,opts)) {
                return -1;  /* storage failed: abort the whole search */
            }
        }
        if (current_weight >= max_weight) {
            /* Clique too heavy. */
            return min_weight-1;
        }
    }
    if (size <= 0) {
        /* current_weight < min_weight, prune_low < min_weight,
         * so return value is always < min_weight. */
        if (current_weight>prune_low) {
            if (best_clique)
                set_copy(best_clique,current_clique);
            if (current_weight < min_weight)
                return current_weight;
            else
                return min_weight-1;
        } else {
            return prune_low;
        }
    }

    /* Dynamic memory allocation with cache */
    if (temp_count) {
        temp_count--;
        newtable=temp_list[temp_count];
    } else {
        newtable=malloc(g->n * sizeof(int));
    }

    for (i = size-1; i >= 0; i--) {
        v = table[i];
        if (current_weight+clique_size[v] <= prune_low) {
            /* Dealing with subset without heavy enough clique. */
            break;
        }
        if (current_weight+weight <= prune_low) {
            /* Even if all elements are added, won't do. */
            break;
        }

        /* Very ugly code, but works faster than "for (i=...)" */
        /* Build the neighbourhood of v within table[0..i-1] into newtable. */
        p1 = newtable;
        newweight = 0;
        for (p2=table; p2 < table+i; p2++) {
            w = *p2;
            if (GRAPH_IS_EDGE(g, v, w)) {
                *p1 = w;
                newweight += g->weights[w];
                p1++;
            }
        }

        w=g->weights[v];
        weight-=w;  /* v no longer available in later iterations */
        /* Avoid a few unneccessary loops */
        if (current_weight+w+newweight <= prune_low) {
            continue;
        }

        SET_ADD_ELEMENT(current_clique,v);
        prune_low=sub_weighted_all(newtable,p1-newtable,
                                   newweight,
                                   current_weight+w,
                                   prune_low,prune_high,
                                   min_weight,max_weight,maximal,
                                   g,opts);
        SET_DEL_ELEMENT(current_clique,v);
        if ((prune_low<0) || (prune_low>=prune_high)) {
            /* Impossible to find larger clique. */
            break;
        }
    }
    /* Return the scratch buffer to the cache (never freed here). */
    temp_list[temp_count++]=newtable;
    return prune_low;
}
/*
 * sub_unweighted_all()
 *
 * Recursion function for searching for all cliques of given size.
 *
 *   table    - subset of vertices of graph g
 *   size     - size of table
 *   min_size - minimum size of cliques to search for (decreased with
 *              every recursion)
 *   max_size - maximum size of cliques to search for (decreased with
 *              every recursion).  If no upper limit is desired, use
 *              eg. INT_MAX
 *   maximal  - require cliques to be maximal (passed through)
 *   g        - the graph
 *   opts     - storage options
 *
 * All cliques of suitable size found are stored according to opts.
 *
 * Returns the number of cliques found.  If user_function returns FALSE,
 * then the number of cliques is returned negative.
 *
 * Uses current_clique to store the currently-being-searched clique.
 * clique_size[] for all values in table must be defined and correct,
 * otherwise inaccurate results may occur.
 */
static int sub_unweighted_all(int *table, int size, int min_size, int max_size,
                              boolean maximal, graph_t *g,
                              clique_options *opts) {
    int i;
    int v;
    int n;
    int *newtable;
    int *p1, *p2;
    int count=0;   /* Amount of cliques found */

    if (min_size <= 0) {
        /* current_clique is already large enough to qualify */
        if ((!maximal) || is_maximal(current_clique,g)) {
            /* We've found one.  Store it. */
            count++;
            if (!store_clique(current_clique,g,opts)) {
                return -count;  /* negative count signals "abort" */
            }
        }
        if (max_size <= 0) {
            /* If we add another element, size will be too big. */
            return count;
        }
    }

    if (size < min_size) {
        /* Not enough vertices left to ever reach min_size. */
        return count;
    }

    /* Dynamic memory allocation with cache */
    if (temp_count) {
        temp_count--;
        newtable=temp_list[temp_count];
    } else {
        newtable=malloc(g->n * sizeof(int));
    }

    for (i=size-1; i>=0; i--) {
        v = table[i];
        if (clique_size[v] < min_size) {
            break;
        }
        if (i+1 < min_size) {
            break;
        }

        /* Very ugly code, but works faster than "for (i=...)" */
        /* Build the neighbourhood of v within table[0..i-1] into newtable. */
        p1 = newtable;
        for (p2=table; p2 < table+i; p2++) {
            int w = *p2;
            if (GRAPH_IS_EDGE(g, v, w)) {
                *p1 = w;
                p1++;
            }
        }

        /* Avoid unneccessary loops (next size == p1-newtable) */
        if (p1-newtable < min_size-1) {
            continue;
        }

        SET_ADD_ELEMENT(current_clique,v);
        n=sub_unweighted_all(newtable,p1-newtable,
                             min_size-1,max_size-1,maximal,g,opts);
        SET_DEL_ELEMENT(current_clique,v);
        if (n < 0) {
            /* Abort. */
            count -= n;
            count = -count;
            break;
        }
        count+=n;
    }
    /* Return the scratch buffer to the cache (never freed here). */
    temp_list[temp_count++]=newtable;
    return count;
}
/**
   \brief Return true if l is eligible for paramodulation: the clause must
   have no selected literals, and l must be maximal with respect to the
   given order, offset and substitution.
*/
bool clause::is_eligible_for_paramodulation(order & o, literal const & l, unsigned offset, substitution * s) const {
    if (has_sel_lit())
        return false;
    return is_maximal(o, l, offset, s);
}
/**
   \brief Return true if l is eligible for resolution: maximal among the
   selected literals when the clause has any, otherwise maximal in the
   whole clause.
*/
bool clause::is_eligible_for_resolution(order & o, literal const & l, unsigned offset, substitution * s) const {
    return has_sel_lit() ? is_sel_maximal(o, l, offset, s)
                         : is_maximal(o, l, offset, s);
}