Hypergraph FKAlgorithmA::transversal (const Hypergraph& H) const {
        BOOST_LOG_TRIVIAL(debug) << "Starting FKA. Hypergraph has "
                                 << H.num_verts() << " vertices and "
                                 << H.num_edges() << " edges.";
        Hypergraph G (H.num_verts());

        Hypergraph Hmin = H.minimization();
        Hypergraph::Edge V = Hmin.verts_covered();

        bool still_searching_for_transversals = true;
        while (still_searching_for_transversals) {
            Hypergraph::Edge omit_set = find_omit_set(Hmin, G);

            if (omit_set.none() and G.num_edges() > 0) {
                BOOST_LOG_TRIVIAL(debug) << "Received empty omit_set, so we're done.";
                still_searching_for_transversals = false;
            } else {
                Hypergraph::Edge new_hs = V - omit_set;
                Hypergraph::Edge new_mhs = minimize_new_hs(H, G, new_hs);
                BOOST_LOG_TRIVIAL(trace) << "Received witness."
                                         << "\nomit_set:\t" << omit_set
                                         << "\nMHS:\t\t" << new_mhs;
                G.add_edge(new_mhs, true);
                BOOST_LOG_TRIVIAL(debug) << "New G size: " << G.num_edges();
            }
        }

        return G;
    }
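A minimal driver for transversal() might look like the sketch below. It assumes only what these listings show (a vertex-count constructor, bitset-valued edges added with add_edge, a default-constructible FKAlgorithmA) and is not part of the original sources.

#include <iostream>

int main() {
    Hypergraph H(4);                    // 4 vertices, no edges yet

    Hypergraph::Edge e1(4), e2(4);
    e1.set(0); e1.set(1);               // edge {0, 1}
    e2.set(2); e2.set(3);               // edge {2, 3}
    H.add_edge(e1);
    H.add_edge(e2);

    FKAlgorithmA alg;
    Hypergraph G = alg.transversal(H);  // for this H: {0,2}, {0,3}, {1,2}, {1,3}
    std::cout << "|Tr(H)| = " << G.num_edges() << std::endl;  // prints 4
    return 0;
}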
    Hypergraph::Edge ParBMAlgorithm::minimize_new_hs (const Hypergraph& H,
                                                      Hypergraph::Edge new_hs) {
        /**
           Given a hypergraph H and a new hitting set new_hs, find an
           inclusion-minimal subset of new_hs which is still a hitting
           set of H.
        **/

        BOOST_LOG_TRIVIAL(trace) << "Attempting minimization of"
                                 << "\nS:\t" << new_hs;

        // Input validation
        assert(H.is_transversed_by(new_hs));

        // Iterate through the vertices of new_hs, checking whether they can be
        // removed
        Hypergraph::EdgeIndex v = new_hs.find_first();
        while (v != Hypergraph::Edge::npos) {
            new_hs.reset(v);
            if (not H.is_transversed_by(new_hs)) {
                new_hs.set(v);
            }

            v = new_hs.find_next(v);
        }

        BOOST_LOG_TRIVIAL(trace) << "Minimized to:"
                                 << "\nS:\t" << new_hs;

        return new_hs;
    }
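The greedy minimization above can be restated independently of the Hypergraph class. The sketch below is an assumption-laden illustration: it treats Hypergraph::Edge as a boost::dynamic_bitset, which is consistent with the find_first/find_next/npos calls in the listing.

#include <boost/dynamic_bitset.hpp>
#include <vector>

using Edge = boost::dynamic_bitset<>;

// True iff hs intersects every edge, i.e. hs is a hitting set.
static bool hits_all(const std::vector<Edge>& edges, const Edge& hs) {
    for (const auto& e: edges) {
        if (not e.intersects(hs)) {
            return false;
        }
    }
    return true;
}

// Greedy minimization: tentatively drop each vertex of hs, restoring it
// only when some edge would no longer be hit.
Edge minimize_hs(const std::vector<Edge>& edges, Edge hs) {
    for (Edge::size_type v = hs.find_first(); v != Edge::npos; v = hs.find_next(v)) {
        hs.reset(v);
        if (not hits_all(edges, hs)) {
            hs.set(v);
        }
    }
    return hs;
}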
Example No. 3

int main()
{
	Hypergraph H;

	H.readBenchHypergraph("c17.bench");

	HypergraphAttributesES HA(H, EdgeStandardType::tree);
	HypergraphLayoutES hlES;

	hlES.setProfile(HypergraphLayoutES::Normal);
	hlES.call(HA);

	GraphIO::writeGML(HA.repGA(), "c17.gml");

	return 0;
}
    Hypergraph ParBMAlgorithm::l5_full_cover (const Hypergraph& H,
                                              const Hypergraph::Edge& base_transversal) {
        /**
           Find a full cover of the dual of H from the given base_transversal
           in accordance with lemma 5 of BM.
        **/
        Hypergraph C (H.num_verts());
        Hypergraph::Edge V = H.verts_covered();

        C.add_edge(base_transversal, false);

        Hypergraph::EdgeIndex i = base_transversal.find_first();
        while (i != Hypergraph::Edge::npos) {
            V.reset(i);
            C.add_edge(V, false);
            V.set(i);
            i = base_transversal.find_next(i);
        }

        return C;
    }
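Concretely: with V = {0,1,2,3} and base_transversal = {0,2}, the cover built above is C = {{0,2}, {1,2,3}, {0,1,3}}: the transversal itself plus, for each of its vertices i, the set V − {i}.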
    Hypergraph ParBMAlgorithm::l4_full_cover (const Hypergraph& H,
                                              const Hypergraph::Edge& base_edge) {
        /**
           Find a full cover of the dual of H from the given base_edge
           in accordance with lemma 4 of BM.
        **/
        Hypergraph C (H.num_verts());
        Hypergraph::Edge V = H.verts_covered();

        for (auto& edge: H) {
            Hypergraph::Edge intersection = edge & base_edge;
            Hypergraph::EdgeIndex i = intersection.find_first();
            while (i != Hypergraph::Edge::npos) {
                Hypergraph::Edge new_edge = V - edge;
                new_edge.set(i);
                C.add_edge(new_edge, false);
                i = intersection.find_next(i);
            }
        }

        return C;
    }
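Concretely: with V = {0,1,2,3}, an edge e = {0,1} of H, and base_edge = {1,2}, the intersection e ∩ base_edge = {1}, so this edge contributes (V − e) ∪ {1} = {1,2,3} to the cover; each edge of H contributes one such set per vertex it shares with base_edge.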
    Hypergraph::Edge ParBMAlgorithm::find_subset_edge (const Hypergraph& H,
                                                       const Hypergraph::Edge& I) {
        /**
           Search for an edge of H which is a subset of I.
           If found, return it; if not, return an empty edge as a signal.
        **/

        for (auto& edge: H) {
            if (edge.is_subset_of(I)) {
                return edge;
            }
        }

        // If we make it this far, no edge was found
        Hypergraph::Edge empty_edge (H.num_verts());
        return empty_edge;
    }
    Hypergraph::Edge ParBMAlgorithm::find_missed_edge (const Hypergraph& H,
                                                       const Hypergraph::Edge& I) {
        /**
           Search for an edge of H which does not intersect I.
           If found, return it; if not, return an empty edge as a signal.
        **/

        for (auto& edge: H) {
            if (not edge.intersects(I)) {
                return edge;
            }
        }

        // If we make it this far, no edge was found
        Hypergraph::Edge empty_edge (H.num_verts());
        return empty_edge;
    }
    void ParBMAlgorithm::find_new_hses_fork (const Hypergraph& H,
                                             const Hypergraph& G,
                                             const Hypergraph& C,
                                             Hypergraph::EdgeQueue& results) const {
        /**
           Find any new hitting sets of H with respect to G,
           splitting the work over the full cover C of Tr(H) and queueing
           the results.
        **/

        Hypergraph::Edge V = C.verts_covered();

        for (auto c: C) {
#pragma omp task shared(H, G, results)
            {
                assert(c.is_subset_of(V));
                find_new_hses(H, G, c, results);
            }
        }
#pragma omp taskwait
    }
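The construct above is the standard OpenMP task fan-out: tasks are spawned from a single thread (the enclosing parallel/master pair appears in ParBMAlgorithm::transversal further down) and taskwait joins them. A freestanding sketch of just that pattern, with nothing specific to the hypergraph code:

#include <omp.h>
#include <cstdio>

int main() {
#pragma omp parallel
#pragma omp master
    {
        for (int i = 0; i < 8; ++i) {
#pragma omp task firstprivate(i)  // each task captures its own copy of i
            std::printf("item %d ran on thread %d\n", i, omp_get_thread_num());
        }
#pragma omp taskwait              // join: wait for all tasks spawned above
    }
    return 0;
}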
Example No. 9
int main(int argc, char** argv) {
	po::variables_map cfg;
	if (!init_params(argc,argv,&cfg)) return 1;

	if (cfg.count("random_seed"))
		rng.reset(new MT19937(cfg["random_seed"].as<uint32_t>()));
	else
		rng.reset(new MT19937);

	// set variables
	lr = cfg["learningrate"].as<double>();
	hope_select = cfg["hope"].as<int>();
	fear_select = cfg["fear"].as<int>();
	optimizer = cfg["optimizer"].as<int>();
	freeze = cfg.count("freeze");
	if (freeze) {
		const vector<string>& ffstrs = cfg["freeze"].as<vector<string> >();
		stringstream ffss;
		ffss << "frozen features: ";
		for (vector<string>::const_iterator ffit=ffstrs.begin();ffit!=ffstrs.end();++ffit) {
			frozen_features.push_back(FD::Convert(*ffit));
			ffss << *ffit << " ";
		}
		cerr << ffss.str() << endl;
	}
	scaling = cfg["scaling"].as<int>();
	scalingfactor = cfg["scalingfactor"].as<double>();
	cerr << "scaling="<< scaling << " scalingfactor=" << scalingfactor << endl;

	// setup decoder
	Decoder* decoder = setupDecoder(cfg);
	if (!decoder) {
		cerr << "error while loading decoder with" << cfg["decoder_config"].as<string>() << "!\n";
		return 1;
	}
	TrainingObserver observer;
	// get reference to decoder weights
	vector<weight_t>& decoder_weights = decoder->CurrentWeightVector();
	// the SMT weights (to be optimized)
	if (cfg.count("weights")) {
		Weights::InitFromFile(cfg["weights"].as<string>(), &decoder_weights);
		Weights::InitSparseVector(decoder_weights, &w);
	} else {
		cerr << "starting with EMPTY weights!\n";
	}
	// the weight vector that gives the oracle
	loadRelevanceWeights(cfg["rweights"].as<string>(), relw);
	negrelw -= relw;
	relw_scaled = relw;
	// initial scaling
	if (scaling != 0) scaleRelevanceWeights(scalingfactor);

	// output some vector stats
	cerr << "W_REL=" << relw << endl;
	cerr << "W_REL_SCALED=" << relw_scaled << endl;
	cerr << "|W_REL|=" << relw_scaled.size() << endl;
	cerr << "|W_SMT|=" << w.size() << endl;

	cerr << "hope selection: " << hope_select << endl;
	const string input = decoder->GetConf()["input"].as<string>();
	cerr << "Reading input from " << ((input == "-") ? "STDIN" : input.c_str()) << endl;
	ReadFile in_read(input);
	istream *in = in_read.stream();
	assert(*in);
	string id, sentence;
	int cur_sent = 0;
	unsigned lc = 0; // line count

	double objective=0;
	double tot_loss = 0;
	WeightVector avg_w = w;
	//SparseVector<double> tot;
	//SparseVector<double> oldw = w;
	//tot.clear();
	//tot += w;

	while(*in >> id) {

		in->ignore(1, '\t');
		getline(*in, sentence);
		if (sentence.empty() || id.empty()) continue;

		cerr << "\nID="<<id << endl;
		decoder->SetId(cur_sent);
		decoder->Decode(sentence, &observer); // decode with decoder_weights
		cur_sent = observer.GetCurrentSent();
		Hypergraph hg = observer.GetCurrentForest();

		vector<boost::shared_ptr<HypothesisInfo> > S;
		MAX_REL = std::numeric_limits<double>::lowest();
		MIN_REL = std::numeric_limits<double>::max();

		// get viterbi
		boost::shared_ptr<HypothesisInfo> viterbi = MakeHypothesisInfo(hg);

		// get the true oracle (sets max_rel)
		hg.Reweight(relw);
		boost::shared_ptr<HypothesisInfo> oracle = MakeHypothesisInfo(hg);
		oracle->oracle = oracle;
		oracle->computeCost();

		// get the worst derivation (to get min_rel)
		hg.Reweight(negrelw);
		boost::shared_ptr<HypothesisInfo> worst = MakeHypothesisInfo(hg);
		worst->oracle = oracle;
		worst->computeCost();

		if (hope_select == 1) { // hope
			hg.Reweight(w + relw_scaled);
			S.push_back(MakeHypothesisInfo(hg));
			S[0]->oracle = oracle;
			S[0]->computeCost();
		} else { // true oracle
			S.push_back(oracle);
		}
		// S contains now ONE (hope/oracle) hypothesis
		S[0]->computeLoss();
		boost::shared_ptr<HypothesisInfo> good = S[0];

		viterbi->oracle = oracle;
		viterbi->computeCost();
		viterbi->computeLoss();

		cerr << "min_rel=" << MIN_REL << " max_rel=" << MAX_REL << endl;
		cerr << "S[0]=" << S[0] << endl;

		boost::shared_ptr<HypothesisInfo> fear;

		if (optimizer == 4) { // PA update (single dual coordinate step)
			cerr << "PA MIRA (single dual coordinate step)\n";

			hg.Reweight(w - relw_scaled);
			fear = MakeHypothesisInfo(hg);
			fear->oracle = oracle;
			fear->computeCost();
			fear->computeLoss();
			cerr << "LOSS: " << fear->loss;
			if (fear->loss > 0.0) {
				double diffsqnorm = (good->features - fear->features).l2norm_sq();
				double delta;
				if (diffsqnorm > 0) {
					delta = fear->loss / (diffsqnorm);
					if (delta > lr) delta = lr;
					w += good->features * delta;
					w -= fear->features * delta;
				}
			}

		} else if (optimizer == 1) {// sgd - nonadapted step size
			cerr << "SGD\n";

			if (fear_select == 1) {
				hg.Reweight(w - relw_scaled);
				fear = MakeHypothesisInfo(hg);
			} else if (fear_select == 2) {
				fear = worst;
			} else if (fear_select == 3) {
				fear = viterbi;
			}
			w += good->features * lr;
			w -= fear->features * lr;

		} else if (optimizer == 2) { // PA MIRA with selection from  cutting plane
			cerr << "PA MIRA with Selection from Cutting Plane\n";

			hg.Reweight(w - relw_scaled);
			fear = MakeHypothesisInfo(hg);
			fear->oracle = oracle;
			fear->computeCost();
			fear->computeLoss();
			if (fear->loss < 0) {
				cerr << "FEAR LOSS < 0! THIS SHOULD NOT HAPPEN!\n";
				abort();
			}
			if (fear->loss > good->loss + SMO_EPS) {
				S.push_back(fear);
				OptimizeSet(S, 1); // only one iteration with a set of two constraints
			} else { cerr << "constraint not violated. fear loss:" << fear->loss << "\n"; }

		} else if (optimizer == 3) { // Cutting Plane MIRA
			cerr << "Cutting Plane MIRA\n";

			unsigned cp_iter=0; // Cutting Plane Iteration
			bool again = true;
			while (again && cp_iter<CP_ITER) {
				again = false;
				cerr << "CuttingPlane: " << cp_iter << endl;
				// find a fear derivation
				hg.Reweight(w - relw_scaled);
				fear = MakeHypothesisInfo(hg);
				fear->oracle = oracle;
				fear->computeCost();
				fear->computeLoss();
				if (fear->loss < 0) {
					cerr << "FEAR LOSS < 0! THIS SHOULD NOT HAPPEN!\n";
					//abort();
				}
				// find max loss hypothesis
				double max_loss_in_set = (*std::max_element(S.begin(), S.end(), lossComp))->loss;
				if (fear->loss > max_loss_in_set + SMO_EPS) {
					cerr << "Adding new fear " << fear << " to S\n";
					S.push_back(fear);
					OptimizeSet(S);
					again = true;
				} else { cerr << "constraint not violated. fear loss:" << fear->loss << "\n"; }
				cp_iter++;
				// update losses
				//for(unsigned i=0;i<S.size();i++) S[i]->computeLoss();
			}
		}

		cerr << "|W|=" << w.size() << endl;
		tot_loss += relscale(viterbi->rel);
		//print objective after this sentence
		//double w_change = (w - oldw).l2norm_sq();
		//double temp_objective = 0.5 * w_change;// + max_step_size * max_fear;
		for (unsigned u = 0; u != S.size(); u++) {
			cerr << "alpha=" << S[u]->alpha << " loss=" << S[u]->loss << endl;
			//temp_objective += S[u]->alpha * S[u]->loss;
		}
		//objective += temp_objective;
		//cerr << "SENT OBJ: " << temp_objective << " NEW OBJ: " << objective << endl;

		//tot += w;
		++lc;
		avg_w *= lc;
		avg_w = (w + avg_w) / (lc+1);

		// set decoder weights for next sentence
		decoder_weights.clear();
		w.init_vector(&decoder_weights);
		// rescale relevance weights to balance with new model after the update
		if (scaling == 2) {
			scaleRelevanceWeights(scalingfactor);
			cerr << "W_REL_SCALED=" << relw_scaled << endl;
		}

		// viterbi 2 for debugging
		//hg.Reweight(w);
		//boost::shared_ptr<HypothesisInfo> viterbi2 = MakeHypothesisInfo(hg);
		//viterbi2->oracle = oracle;
		//viterbi2->computeCost();
		//viterbi2->computeLoss();
		//fear->computeLoss();
		//viterbi->computeLoss();
		//good->computeLoss();
		cerr << "FEAR : " << fear << " \n" << TD::GetString(fear->hyp) << endl;
		cerr << "BEST : " << viterbi << " \n" << TD::GetString(viterbi->hyp) << endl;
		//cerr << "BEST2: " << viterbi2 << " \n" << TD::GetString(viterbi2->hyp) << endl;
		cerr << "HOPE : " << good << " \n" << TD::GetString(good->hyp) << endl;

		cout << id << " ||| " << TD::GetString(fear->hyp) << " ||| " << TD::GetString(viterbi->hyp) << " ||| " << TD::GetString(good->hyp) << endl;

		S.clear();
		fear.reset();
		viterbi.reset();
		//viterbi2.reset();
		good.reset();
		worst.reset();
		oracle.reset();

	}

    //cerr << "FINAL OBJECTIVE: "<< objective << endl;
    cerr << "Translated " << lc << " sentences\n";
    cerr << " [AVG METRIC LAST PASS="******"]\n";
    //tot_loss = 0;

	decoder_weights.clear();
	w.init_vector(&decoder_weights);
	//Weights::ShowLargestFeatures(decoder_weights);
	// write weights
	int node_id = rng->next() * 100000;
	cerr << " Writing model to " << node_id << endl;
	ostringstream os;
	os << cfg["weights_output"].as<string>() << "/last." << node_id;
	string msg = "HGMIRA tuned weights ||| " + boost::lexical_cast<std::string>(node_id) + " ||| " + boost::lexical_cast<std::string>(lc);
	Weights::WriteToFile(os.str(), decoder_weights, true, &msg);

	//SparseVector<double> x = tot;
	//x /= lc+1;
	ostringstream sa;
	string msga = "HGMIRA tuned weights AVERAGED ||| " + boost::lexical_cast<std::string>(node_id) + " ||| " + boost::lexical_cast<std::string>(lc);
	sa << cfg["weights_output"].as<string>() << "/avg." << node_id;
	avg_w.init_vector(&decoder_weights);
	Weights::WriteToFile(sa.str(), decoder_weights, true, &msga);


	delete decoder;
	cerr << "\ndone.\n";
	return 0;

}
    Hypergraph::Edge FKAlgorithmA::find_omit_set (const Hypergraph& F,
                                                  const Hypergraph& G) {
        /**
           Test whether F and G are dual.

           If so, return an empty edge as a notification.
           If not, return an omit_set as per FK.
        **/

        BOOST_LOG_TRIVIAL(trace) << "Beginning run with "
                                 << "|F| = " << F.num_edges()
                                 << " and |G| = " << G.num_edges();

        // Input specification
        assert(F.num_verts() == G.num_verts());

        // Create an empty omit_set to use as temporary storage
        Hypergraph::Edge omit_set (F.num_verts());

        // FK step 1: initialize if G is empty
        if (G.num_edges() == 0) {
            BOOST_LOG_TRIVIAL(trace) << "Returning empty omit_set since G is empty.";
            return omit_set;
        }

        // FK step 2: consistency checks
        // Check 1.1: hitting condition
        omit_set = hitting_condition_check(F, G);
        if (omit_set.any()) {
            return omit_set;
        }

        // Check 1.2: same vertices covered
        omit_set = coverage_condition_check(F, G);
        if (omit_set.any()) {
            return omit_set;
        }

        // Check 1.3: neither F nor G has edges too large
        omit_set = edge_size_check(F, G);
        if (omit_set.any()) {
            return omit_set;
        }

        // Check 2.1: satisfiability count condition
        omit_set = satisfiability_count_check(F, G);
        if (omit_set.any()) {
            return omit_set;
        }

        // FK step 3: Check whether F and G are small
        // If either hypergraph is empty, they cannot be dual
        omit_set = small_hypergraphs_check(F, G);
        if (omit_set.any()) {
            return omit_set;
        }

        // FK step 4: Recurse

        // Find the most frequently occurring vertex
        Hypergraph::EdgeIndex max_freq_vert = most_frequent_vertex(F, G);

        // Then we compute the split hypergraphs F0, F1, G0, and G1
        std::pair<Hypergraph, Hypergraph> Fsplit, Gsplit;
        Hypergraph F0, F1, G0, G1;

        Fsplit = split_hypergraph_over_vertex(F, max_freq_vert);
        F0 = Fsplit.first;
        F1 = Fsplit.second;

        Gsplit = split_hypergraph_over_vertex(G, max_freq_vert);
        G0 = Gsplit.first;
        G1 = Gsplit.second;

        // We will also need the unions F0∪F1 and G0∪G1
        Hypergraph Fnew = minimized_union(F0, F1);
        Hypergraph Gnew = minimized_union(G0, G1);

        // And, finally, fire up the two recursions
        if (F1.num_edges() > 0 and Gnew.num_edges() > 0) {
            BOOST_LOG_TRIVIAL(trace) << "Side 1 recursion.";

            Hypergraph::Edge omit_set = find_omit_set(F1, Gnew);
            if (omit_set.any()) {
                return omit_set;
            }
        }

        if (Fnew.num_edges() > 0 and G1.num_edges() > 0) {
            BOOST_LOG_TRIVIAL(trace) << "Side 2 recursion.";

            Hypergraph::Edge omit_set = find_omit_set(Fnew, G1);
            if (omit_set.any()) {
                omit_set.set(max_freq_vert);
                return omit_set;
            }
        }

        // If we make it this far, we did not find a nonempty
        // omit_set, so the pair is dual
        return omit_set;
    }
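For reference, the cheapest of the consistency checks used above (check 1.1) just verifies that every edge of F intersects every edge of G, a necessary condition for duality. A sketch of the bare predicate follows; the helper name is ours, and the real hitting_condition_check additionally builds an omit_set witness from any disjoint pair it finds.

    bool edges_pairwise_intersect(const Hypergraph& F, const Hypergraph& G) {
        for (const auto& f: F) {
            for (const auto& g: G) {
                if (not f.intersects(g)) {
                    return false;  // f misses g, so F and G cannot be dual
                }
            }
        }
        return true;
    }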
    Hypergraph ParBMAlgorithm::transversal (const Hypergraph& H) const {
        BOOST_LOG_TRIVIAL(debug) << "Starting BM with " << num_threads
                                 << " threads. Hypergraph has "
                                 << H.num_verts() << " vertices and "
                                 << H.num_edges() << " edges.";

        // Set up inputs
        Hypergraph Hmin = H.minimization();
        Hypergraph G (H.num_verts());
        Hypergraph::Edge V = Hmin.verts_covered();

        // Initialize using any HS we can find
        Hypergraph::Edge first_hs = FKAlgorithm::minimize_new_hs(Hmin, G, V);
        G.add_edge(first_hs);

        // Grow G until it covers all vertices
        bool G_has_full_coverage = false;
        while (not G_has_full_coverage) {
            Hypergraph::Edge new_hs = V - coverage_condition_check(H, G);
            if (new_hs.is_proper_subset_of(V)) {
                Hypergraph::Edge new_mhs = FKAlgorithm::minimize_new_hs(Hmin, G, new_hs);
                G.add_edge(new_mhs);
            } else {
                G_has_full_coverage = true;
            }
        }

        // Apply the BM algorithm repeatedly, generating new transversals
        // until duality is confirmed
        bool still_searching_for_transversals = true;
        Hypergraph::EdgeQueue new_hses, new_mhses;
#pragma omp parallel shared(Hmin, G, new_hses, new_mhses) num_threads(num_threads)
#pragma omp master
        while (still_searching_for_transversals) {
            find_new_hses(Hmin, G, Hmin.verts_covered(), new_hses);

            if (new_hses.size_approx() == 0) {
                still_searching_for_transversals = false;
            } else {
                minimize_new_hses(Hmin, G, new_hses, new_mhses);

                Hypergraph::Edge new_mhs;
                while (new_mhses.try_dequeue(new_mhs)) {
                    // The results will all be inclusion-minimal, but
                    // there may be some overlap. Thus, we try to add
                    // them...
                    try {
                        G.add_edge(new_mhs, true);
                    }
                    // But ignore any minimality_violated_exception
                    // that is thrown.
                    catch (minimality_violated_exception& e) {}
                }
                BOOST_LOG_TRIVIAL(debug) << "New |G|: " << G.num_edges();
            }
        }

        return G;
    }
Example No. 12
bool outputNeeded(OperateOn operateOn, Hypergraph const& h, StateId sid)
{
  return operateOn==kOperateOnOutput || (operateOn==kOperateOnInputOutput && !h.outputLabelFollowsInput(sid));
}
Example No. 13
int main(int argc,char** argv)
{
    QApplication a(argc, argv);
    QStringList cmdline_args = QCoreApplication::arguments();
    if(cmdline_args.size() < 5 && !run_demo){
        std::cout << "Not enough arguments." << std::endl;
        std::cout << "Usage: ./hypergraph_filter -original_image -noisy_image -alfa -beta [-demo=yes|no] [-color_image=yes|no]" << std::endl;
        std::cout << "-demo is set to 'yes' by default." << std::endl;
#ifdef WIN32
        system("PAUSE");
#endif
    }else{
        QString original_image_path;
        QString noisy_image_path;
        int alfa = 20;
        int beta = 2;
        bool color_image = false;
        for(int i = 1; i < cmdline_args.size(); i++){
            switch(i){
            case 1:
                original_image_path = cmdline_args.at(i);
                break;
            case 2:
                noisy_image_path = cmdline_args.at(i);
                break;
            case 3:
                alfa = cmdline_args.at(i).toInt();
                break;
            case 4:
                beta = cmdline_args.at(i).toInt();
                break;
            case 5:
                run_demo = false;
                break;
            case 6:
                color_image = true;
                break;
            }
        }

        if(run_demo){
            original_image_path = "test_images/lena.tif";
            noisy_image_path = "test_images/lena_color_512_salt_20.png";
            alfa = 20;
            beta = 2;
            std::cout << "Running demo." << std::endl;
            std::cout << "Noisy image: " << noisy_image_path.toStdString() << std::endl;
            std::cout << "Alfa set to: " << alfa << std::endl;
            std::cout << "Beta set to: " << beta << std::endl;
        }

        QTime timer;
        timer.start();

        Image* original_image = new Image(original_image_path);
        if(!original_image->Loaded()){
            std::cout << "Failed to load image: " << original_image_path.toStdString() << std::endl;
            return -1;
        }

        Image* noisy_image = new Image(noisy_image_path);
        if(!noisy_image->Loaded()){
            std::cout << "Failed to load image: " << noisy_image_path.toStdString() << std::endl;
            return -1;
        }

        if(noisy_image->Width() != original_image->Width() || original_image->Height() != noisy_image->Height()){
            std::cout << "The images are of different size. Aborting." << std::endl;
            return -1;
        }
        original_image->Display("Original Image");
        noisy_image->Display("Noisy Image");
        if(!color_image){
            double noisy_psnr = CalculatePSNR(original_image,noisy_image);
            double noisy_mae = CalculateMAE(original_image, noisy_image);

            std::cout << "Noisy-Original PSNR: " << noisy_psnr << "%" << std::endl;
            std::cout << "Noisy-Original MAE: " << noisy_mae << "%" << std::endl;

            noisy_image->SetColorChannel(0);
            Hypergraph graph;
            graph.Build(noisy_image,alfa,beta);
            SpernerFilter sperner_filter;
            Image* filtered_image = sperner_filter.Apply(&graph,noisy_image);
            filtered_image->Display("Filtered Image");

            double filtered_psnr = CalculatePSNR(original_image,filtered_image);
            double filtered_mae = CalculateMAE(original_image, filtered_image);

            std::cout << "Filtered-Original PSNR: " << filtered_psnr << "%" << std::endl;
            std::cout << "Filtered-Original MAE: " << filtered_mae << "%" << std::endl;
        }else{
            double red_noisy_psnr = CalculatePSNR(original_image,noisy_image,1);
            double red_noisy_mae = CalculateMAE(original_image, noisy_image,1);

            std::cout << "Red Noisy-Original PSNR: " << red_noisy_psnr << "%" << std::endl;
            std::cout << "Red Noisy-Original MAE: " << red_noisy_mae << "%" << std::endl;

            double green_noisy_psnr = CalculatePSNR(original_image,noisy_image,2);
            double green_noisy_mae = CalculateMAE(original_image, noisy_image,2);

            std::cout << "Green Noisy-Original PSNR: " << green_noisy_psnr << "%" << std::endl;
            std::cout << "Green Noisy-Original MAE: " << green_noisy_mae << "%" << std::endl;

            double blue_noisy_psnr = CalculatePSNR(original_image,noisy_image,3);
            double blue_noisy_mae = CalculateMAE(original_image, noisy_image,3);

            std::cout << "Blue Noisy-Original PSNR: " << blue_noisy_psnr << "%" << std::endl;
            std::cout << "Blue Noisy-Original MAE: " << blue_noisy_mae << "%" << std::endl;

            Image* current_image = noisy_image;
            for(int i = 1; i < 4; i++){
                current_image->SetColorChannel(i);
                Hypergraph graph;
                graph.Build(current_image,alfa,beta);
                SpernerFilter sperner_filter;
                current_image = sperner_filter.Apply(&graph,current_image);
            }

            Image* filtered_image = current_image;
            filtered_image->Display("Filtered Image");

            double red_filtered_psnr = CalculatePSNR(original_image,filtered_image,1);
            double red_filtered_mae = CalculateMAE(original_image, filtered_image,1);
            std::cout << "Red Filtered-Original PSNR: " << red_filtered_psnr << "%" << std::endl;
            std::cout << "Red Filtered-Original MAE: " << red_filtered_mae << "%" << std::endl;

            double green_filtered_psnr = CalculatePSNR(original_image,filtered_image,2);
            double green_filtered_mae = CalculateMAE(original_image, filtered_image,2);
            std::cout << "Green Filtered-Original PSNR: " << green_filtered_psnr << "%" << std::endl;
            std::cout << "Green Filtered-Original MAE: " << green_filtered_mae << "%" << std::endl;

            double blue_filtered_psnr = CalculatePSNR(original_image,filtered_image,3);
            double blue_filtered_mae = CalculateMAE(original_image, filtered_image,3);
            std::cout << "Blue Filtered-Original PSNR: " << blue_filtered_psnr << "%" << std::endl;
            std::cout << "Blue Filtered-Original MAE: " << blue_filtered_mae << "%" << std::endl;


        }
        std::cout << "Finished executing in: " << timer.elapsed() << " ms" << std::endl;
        a.exec();
    }
    return 0;
}
Example No. 14
void Manager::SerializeSearchGraphPB(
    long translationId,
    std::ostream& outputStream) const {
  using namespace hgmert;
  std::map<int, bool> connected;
  std::map<int, int> i2hgnode;
  std::vector<const Hypothesis*> connectedList;
  GetConnectedGraph(&connected, &connectedList);
  connected[0] = true;
  Hypergraph hg;
  hg.set_is_sorted(false);
  int num_feats = (*m_search->GetHypothesisStacks().back()->begin())->GetScoreBreakdown().size();
  hg.set_num_features(num_feats);
  StaticData::Instance().GetScoreIndexManager().SerializeFeatureNamesToPB(&hg);
  Hypergraph_Node* goal = hg.add_nodes();    // idx=0: the goal node must have idx 0
  Hypergraph_Node* source = hg.add_nodes();  // idx=1
  i2hgnode[-1] = 1;                          // source node
  const std::vector<HypothesisStack*>& hypoStackColl = m_search->GetHypothesisStacks();
  const HypothesisStack& finalStack = *hypoStackColl.back();
  for (std::vector<HypothesisStack*>::const_iterator iterStack = hypoStackColl.begin();
       iterStack != hypoStackColl.end(); ++iterStack) {
    const HypothesisStack& stack = **iterStack;
    HypothesisStack::const_iterator iterHypo;

    for (iterHypo = stack.begin(); iterHypo != stack.end(); ++iterHypo) {
      const Hypothesis* hypo = *iterHypo;
      bool is_goal = hypo->GetWordsBitmap().IsComplete();
      if (connected.find(hypo->GetId()) != connected.end()) {
        int headNodeIdx;
        Hypergraph_Node* headNode = GetHGNode(hypo, &i2hgnode, &hg, &headNodeIdx);
        if (is_goal) {
          Hypergraph_Edge* ge = hg.add_edges();
          ge->set_head_node(0);  // goal
          ge->add_tail_nodes(headNodeIdx);
          ge->mutable_rule()->add_trg_words("[X,1]");
        }
        Hypergraph_Edge* edge = hg.add_edges();
        SerializeEdgeInfo(hypo, edge);
        edge->set_head_node(headNodeIdx);
        const Hypothesis* prev = hypo->GetPrevHypo();
        int tailNodeIdx = 1;  // source
        if (prev)
          tailNodeIdx = i2hgnode.find(prev->GetId())->second;
        edge->add_tail_nodes(tailNodeIdx);

        const ArcList* arcList = hypo->GetArcList();
        if (arcList != NULL) {
          ArcList::const_iterator iterArcList;
          for (iterArcList = arcList->begin(); iterArcList != arcList->end(); ++iterArcList) {
            const Hypothesis* loserHypo = *iterArcList;
            assert(connected[loserHypo->GetId()]);
            Hypergraph_Edge* edge = hg.add_edges();
            SerializeEdgeInfo(loserHypo, edge);
            edge->set_head_node(headNodeIdx);
            tailNodeIdx = i2hgnode.find(loserHypo->GetPrevHypo()->GetId())->second;
            edge->add_tail_nodes(tailNodeIdx);
          }
        }  // end if arcList != NULL
      }  // end if connected
    }  // end for iterHypo
  }  // end for iterStack
  hg.SerializeToOstream(&outputStream);
}
Example No. 15
int main(int argc, char** argv) {
	po::variables_map cfg;
	if (!init_params(argc,argv,&cfg)) return 1;

	if (cfg.count("random_seed"))
		rng.reset(new MT19937(cfg["random_seed"].as<uint32_t>()));
	else
		rng.reset(new MT19937);


	// setup decoder
	Decoder* decoder = setupDecoder(cfg);
	if (!decoder) {
		cerr << "error while loading decoder with" << cfg["decoder_config"].as<string>() << "!\n";
		return 1;
	}
	TrainingObserver observer;
	// get reference to decoder weights
	vector<weight_t>& decoder_weights = decoder->CurrentWeightVector();
	// setup weights
	WeightVector w, w_hope, w_fear;
	// the SMT weights (to be optimized)
	Weights::InitFromFile(cfg["weights"].as<string>(), &decoder_weights);
	Weights::InitSparseVector(decoder_weights, &w);
	loadWeights(cfg["rweights"].as<string>(), w_hope);
	WeightVector w_inv = w*-1;
	WeightVector w_hope_inv = w_hope*-1;

	//cerr << "W    " << w << endl;
	//cerr << "WINV " << w_inv << endl;
	//cerr << "R    " << w_hope << endl;
	//cerr << "RINV " << w_hope_inv << endl;

	const string input = decoder->GetConf()["input"].as<string>();
	//cerr << "Reading input from " << ((input == "-") ? "STDIN" : input.c_str()) << endl << endl;
	ReadFile in_read(input);
	istream *in = in_read.stream();
	assert(*in);
	string id, sentence;
	std::vector<HypergraphSampler::Hypothesis> samples;

	while(*in >> id) {

		in->ignore(1, '\t');
		getline(*in, sentence);
		if (sentence.empty() || id.empty()) continue;

		//decoder->SetId(id);
		decoder->Decode(sentence, &observer); // decode with decoder_weights
		Hypergraph hg = observer.GetCurrentForest();

		// get max model score
		double max_tscore = ViterbiFeatures(hg).dot(w);
		// get min model score
		hg.Reweight(w_inv);
		double min_tscore = -ViterbiFeatures(hg).dot(w_inv);
		// get max rel score
		hg.Reweight(w_hope);
		double max_rscore = ViterbiFeatures(hg).dot(w_hope);
		// get min rel_score
		hg.Reweight(w_hope_inv);
		double min_rscore = -ViterbiFeatures(hg).dot(w_hope_inv);

		//cerr << max_tscore << " " << min_tscore << " " << max_rscore << " " << min_rscore << endl;

		if (cfg.count("sample")) {

			HypergraphSampler::sample_hypotheses(hg, cfg["sample"].as<int>(), &(*rng), &samples);
			for (unsigned s=0;s<samples.size();++s) {
				const HypergraphSampler::Hypothesis& h = samples[s];
				cout << id << "\t" << "S\t" << vscale(h.fmap.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(h.fmap.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(h.words) << endl;
			}

		} else if (cfg.count("kbest")) {
			typedef KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,KBest::FilterUnique> K;
			// get kbest model score derivations
			hg.Reweight(w);
			K kbest2(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest2.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KBT\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

			// get kworst model score derivations
			hg.Reweight(w_inv);
			K kbest3(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest3.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KWT\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

			// get kbest rel score derivations
			hg.Reweight(w_hope);
			K kbest4(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest4.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KBR\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

			// get kworst rel score derivations
			hg.Reweight(w_hope_inv);
			K kbest(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KWR\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

		}


	}

	delete decoder;
	return 0;

}
    void ParBMAlgorithm::find_new_hses (const Hypergraph& H,
                                        const Hypergraph& G,
                                        const Hypergraph::Edge& c,
                                        Hypergraph::EdgeQueue& results) const {
        /**
           Find any new hitting sets of H^c with respect to G_c, queueing the
           results.
        **/

        // Construct the reduced hypergraphs
        Hypergraph Hc = H.contraction(c, false);
        Hypergraph Gc = G.restriction(c);

        BOOST_LOG_TRIVIAL(trace) << "Starting transversal build with |Hc| = " << Hc.num_edges()
                                 << " and |Gc| = " << Gc.num_edges();

        // Initialize a candidate hitting set to store intermediate results
        Hypergraph::Edge new_hs = Hc.verts_covered();

        // Step 1: Initialize Gc if it is empty by returning the set of all vertices
        if (Gc.num_edges() == 0) {
            // If any edge of Hc is empty, this is a dead end
            for (const auto& e: Hc) {
                if (e.none()) {
                    return;
                }
            }

            // Otherwise, the support of Hc is a new hitting set
            results.enqueue(new_hs);
            return;
        }

        // Step 2: Handle |Gc| = 1
        if (Gc.num_edges() == 1) {
            assert(Hc.num_edges() != 0);
            // Check whether H has a singleton for every vertex it covers
            Hypergraph::Edge Hc_verts_to_cover = Gc[0];
            for (const auto& e: Hc) {
                if (e.count() == 1) {
                    Hc_verts_to_cover -= e;
                }
            }

            if (Hc_verts_to_cover.none()) {
                // If so, this is a dead end
                return;
            } else {
                // If not, we choose some vertex that was hit by Gc but was
                // not a singleton in Hc
                Hypergraph::EdgeIndex uncovered_vertex = Hc_verts_to_cover.find_first();
                new_hs.reset(uncovered_vertex);
                results.enqueue(new_hs);
                return;
            }

            // We definitely shouldn't ever be here!
            throw std::runtime_error("invalid state in |Gc|=1 case.");
        }

        // Step 3: Split up subcases using a full cover
        // Construct I according to lemmas 7 and 8
        Hypergraph::Edge I = Gc.vertices_with_degree_above_threshold(0.5);

        // Per lemma 7, look for an edge that does not intersect I
        Hypergraph::Edge missed_edge = find_missed_edge(Hc, I);
        // If found, form a full cover
        if (missed_edge.any()) {
            Hypergraph C = l4_full_cover(Hc, missed_edge);
            find_new_hses_fork(H, G, C, results);
            return;
        }

        // Per lemma 8, look for a transversal that covers I
        Hypergraph::Edge subtrans_edge = find_subset_edge(Gc, I);
        // If found, form a full cover
        if (subtrans_edge.any()) {
            Hypergraph C = l5_full_cover(Hc, subtrans_edge);
            find_new_hses_fork(H, G, C, results);
            return;
        }

        // If we reach this point, I itself is a new HS
        results.enqueue(I);
        return;
    }
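For intuition on step 3: vertices_with_degree_above_threshold(0.5) collects the vertices lying in more than half of Gc's edges, so for Gc = {{0,1}, {0,2}, {3,4}} only vertex 0 (in 2 of 3 edges) enters I. The code then follows lemmas 7 and 8 of BM: an edge of Hc disjoint from I yields a full cover (l4_full_cover), an edge of Gc contained in I yields a full cover (l5_full_cover), and otherwise I itself is queued as a new hitting set.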
Example No. 17
int main(int argc, char ** argv) {
  GOOGLE_PROTOBUF_VERIFY_VERSION;
  google::ParseCommandLineFlags(&argc, &argv, true);

  int sent = 0;
  int last_nonterm_node = -1;
  string name = FLAGS_hypergraph_prefix;
  Hypergraph * h = NULL;
  vector <Hypergraph_Node *> nodes(10);
  int cur_edge_id = 0;
  int cur_word_node_id = 0;
  ifstream in(FLAGS_joshua_out_file.c_str(), ios::in | ios::binary);
  Hypergraph_Node * node;
  Hypergraph_Edge * edge;
  int sent_num, length, num_nodes,num_edges;
  //CodedOutputStream::SetTotalBytesLimit(5000000000, 5000000000);
  
  while (in) {
    string blank;
    string t1;
    in >> t1;
    //t = l.strip().split();
    if (t1 == "#SENT:") {
      // flush last sent
      if (sent!= 0) {

        // need to add <s> and </s>
        int subroot = last_nonterm_node;
        int newroot;

        {
          node = h->add_node();
          node->set_label("NEW ROOT");
          edge = node->add_edge();
          Hypergraph_Node * wnode;
          for (int start=0; start < 2; start++) {
            wnode = h->add_node();
            wnode->set_id(cur_word_node_id);
            wnode->set_label("Front");
            wnode->SetExtension(is_word, true);
            wnode->SetExtension(word, "<s>");
            edge->add_tail_node_ids(cur_word_node_id);
            cur_word_node_id++;
          }
          edge->add_tail_node_ids(subroot);
          for (int end=0; end < 2; end++) {
            wnode = h->add_node();
            wnode->set_id(cur_word_node_id);
            wnode->set_label("Back");
            wnode->SetExtension(is_word, true);
            wnode->SetExtension(word, "</s>");
            edge->add_tail_node_ids(cur_word_node_id);
            cur_word_node_id++;
          }
          node->set_id(cur_word_node_id);
          edge->set_id(cur_edge_id);
          cur_edge_id++;
          h->set_root(cur_word_node_id);
          cur_word_node_id++;
        }

        stringstream file_name;
        file_name << name << sent;
        fstream output(file_name.str().c_str(), ios::out | ios::binary);
        h->SerializeToOstream(&output);
        output.close();
      }

      // prep new sent
      h = new Hypergraph();
      cur_edge_id = 0 ;
      sent +=1;
      nodes.clear();

      string buf;
      in >> sent_num >> length >> num_nodes >> num_edges; 
      // need to add nodes for each word
      cur_word_node_id = num_nodes;
      getline(in, buf);

    } else if (t1 == "#I") {
      node = h->add_node();
      int id, left_span, right_span;
      string sym;
      string ig1, ig2, ig3;
      in >> id >> left_span >> right_span >> sym >> ig1 >> ig2 >> ig3;

      stringstream label;
      label << id -1 << " ["<< left_span << ", "<<right_span << "] " <<sym;
      node->set_id(id-1);
      node->set_label(label.str());
      if (nodes.size()<=node->id()) { 
        nodes.resize(node->id()+1);
      }
      nodes[node->id()] = node;
      last_nonterm_node = node->id();      
      cout << "Node id " << node->id() << endl;
    } else {
Example No. 18
DecompositionPtr TreeDecomposer::decompose(const Instance& instance) const
{
	std::unique_ptr<htd::LibraryInstance> htd(htd::createManagementInstance(htd::Id::FIRST));

	// Which algorithm to use?
	if(optEliminationOrdering.getValue() == "min-degree")
		htd->orderingAlgorithmFactory().setConstructionTemplate(new htd::MinDegreeOrderingAlgorithm(htd.get()));
	else {
		assert(optEliminationOrdering.getValue() == "min-fill");
		htd->orderingAlgorithmFactory().setConstructionTemplate(new htd::MinFillOrderingAlgorithm(htd.get()));
	}

	Hypergraph graph = buildNamedHypergraph(*htd, instance);
	std::unique_ptr<htd::TreeDecompositionOptimizationOperation> operation(new htd::TreeDecompositionOptimizationOperation(htd.get()));
	operation->setManagementInstance(htd.get());

	// Add transformation to path decomposition
	if(optPathDecomposition.isUsed())
		operation->addManipulationOperation(new htd::JoinNodeReplacementOperation(htd.get()));

	// Add empty leaves
	if(optNoEmptyLeaves.isUsed() == false)
		operation->addManipulationOperation(new htd::AddEmptyLeavesOperation(htd.get()));

	// Add empty root
	if(optNoEmptyRoot.isUsed() == false)
		operation->addManipulationOperation(new htd::AddEmptyRootOperation(htd.get()));

	// Normalize
	if(optNormalization.getValue() == "semi")
		operation->addManipulationOperation(new htd::SemiNormalizationOperation(htd.get()));
	else if(optNormalization.getValue() == "weak")
		operation->addManipulationOperation(new htd::WeakNormalizationOperation(htd.get()));
	else if(optNormalization.getValue() == "normalized")
		operation->addManipulationOperation(new htd::NormalizationOperation(htd.get()));

	if(optPostJoin.isUsed())
		operation->addManipulationOperation(new htd::AddIdenticalJoinNodeParentOperation(htd.get()));

	// Set up fitness function to find a "good" TD
	FitnessCriterion fitnessCriterion;
	if(optFitnessCriterion.getValue() == "join-bag-avg")
		fitnessCriterion = FitnessCriterion::AVERAGE_JOIN_NODE_BAG_SIZE;
	else if(optFitnessCriterion.getValue() == "join-bag-median")
		fitnessCriterion = FitnessCriterion::MEDIAN_JOIN_NODE_BAG_SIZE;
	else if(optFitnessCriterion.getValue() == "num-joins")
		fitnessCriterion = FitnessCriterion::NUM_JOIN_NODES;
	else {
		assert(optFitnessCriterion.getValue() == "width");
		fitnessCriterion = FitnessCriterion::WIDTH;
	}
	FitnessFunction fitnessFunction(fitnessCriterion);

	std::unique_ptr<htd::ITreeDecompositionAlgorithm> baseAlgorithm(htd->treeDecompositionAlgorithmFactory().createInstance());
	baseAlgorithm->addManipulationOperation(operation.release());
	htd::IterativeImprovementTreeDecompositionAlgorithm algorithm(htd.get(), baseAlgorithm.release(), fitnessFunction);

	int iterationCount = DEFAULT_ITERATION_COUNT;
	if(optIterationCount.isUsed())
		iterationCount = util::strToInt(optIterationCount.getValue(), "Invalid iteration count");
	algorithm.setIterationCount(iterationCount);
	algorithm.setNonImprovementLimit(-1);

	// Compute decomposition
	std::unique_ptr<htd::ITreeDecomposition> decomposition{algorithm.computeDecomposition(graph.internalGraph())};

	// Transform htd's tree decomposition into our format
	DecompositionPtr result = transformTd(*decomposition, graph, app);
	return result;
}