Esempio n. 1
0
	// Worker body for one dumper thread: disassembles this thread's share of
	// every ELF section and appends the text lines into arr[id][section].
	//
	// Work split: instruction words are dealt round-robin over `cores`
	// threads; thread `id` (0-based, see the "+ 1" in the progress text)
	// handles words id, id+cores, id+2*cores, ...
	virtual void Task()
	{
		ConLog.Write("Start dump in thread %d!", (int)id);
		const u32 max_value = prog_dial->GetMaxValue(id);
		// Section count depends on which ELF class the caller loaded.
		const u32 shdr_count = ElfType64 ? shdr_arr_64->GetCount() : shdr_arr_32->GetCount();

		for(u32 sh=0, vsize=0; sh<shdr_count; ++sh)
		{
			// sh_size: section size converted from bytes to 4-byte words.
			const u64 sh_size = (ElfType64 ? (*shdr_arr_64)[sh].sh_size : (*shdr_arr_32)[sh].sh_size) / 4;
			const u64 sh_addr = (ElfType64 ? (*shdr_arr_64)[sh].sh_addr : (*shdr_arr_32)[sh].sh_addr);

			// Even split; the first (sh_size % cores) threads take one extra word.
			u64 d_size = sh_size / cores;
			const u64 s_fix = sh_size % cores;
			// BUGFIX: was `id <= s_fix`, which gave an extra word to one thread
			// too many (and to thread 0 even when the split was exact), making
			// that thread disassemble past the end of the section.
			if(id < s_fix) d_size++;

			for(u64 off = id * 4, size=0; size<d_size; vsize++, size++)
			{
				prog_dial->Update(id, vsize,
					wxString::Format("%d thread: %d of %d", (int)id + 1, vsize, max_value));

				disasm->dump_pc = sh_addr + off;
				decoder->Decode(Memory.Read32(disasm->dump_pc));

				arr[id][sh].Add(disasm->last_opcode);

				// Advance one full round of `cores` words (the original wrote
				// this as two complementary increments; combined here).
				off += cores * 4;
			}
		}

		ConLog.Write("Finish dump in thread %d!", (int)id);

		*done = true;
	}
Esempio n. 2
0
// Entry point: decodes a video file with error concealment.
// Usage: decoder_with_error_concealment.exe <in> <out> <error_pattern> <conceal_method>
int main(int argc, char* argv[])
{
	Decoder dec;

	// Exactly four user-supplied arguments are required.
	if (argc != 5)
	{
		printf("\nUSAGE: decoder_with_error_concealment.exe <inputfile> <outputfile> <error_pattern> <conceal_method>\n\n");
		return 1;
	}

	/**
	 * Simple way to measure decoding speed: record the clock before and
	 * after the whole run. The measurement includes more than the pure
	 * decoding work, but that extra part is only a constant overhead.
	 * Dividing the total time by the number of frames and then by the
	 * number of macroblocks yields a figure that can be compared across
	 * videos of different lengths and sizes.
	 */
	const clock_t start_ticks = clock();
	std::cout << "Timer started" << "\n";
	dec.Decode(argv[1], argv[2], argv[3], atoi(argv[4]));
	std::cout << "Time elapsed: " << clock() - start_ticks << "\n";

	return 0;
}
// Decodes the next batch of frames into fBuffer and publishes it as a chunk.
// On success, *chunkBuffer/*chunkSize describe the decoded data; on failure
// the output parameters are left untouched and the decoder status is returned.
status_t
RawDecoderChunkProvider::GetNextChunk(const void **chunkBuffer, size_t *chunkSize,
                                      media_header *mediaHeader)
{
    int64 frameCount;
    media_decode_info decodeInfo;
    const status_t status = fDecoder->Decode(fBuffer, &frameCount, mediaHeader, &decodeInfo);
    if (status != B_OK) {
        ERROR("RawDecoderChunkProvider::GetNextChunk failed\n");
        return status;
    }
    *chunkBuffer = fBuffer;
    *chunkSize = frameCount * fFrameSize;
//	printf("RawDecoderChunkProvider::GetNextChunk, %lld frames, %ld bytes, start-time %lld\n", frameCount, *chunkSize, mediaHeader->start_time);
    return status;
}
Esempio n. 4
0
// Emits the standard epilogue of a dynamically recompiled block: subtracts
// the block's cycle count from the cycle counter (w27), branches to the
// shared exit stub if the counter went negative, otherwise branches to the
// dispatcher; finally the buffer is finalized and the caches are flushed.
void armv_end(void* codestart, u32 cycl)
{
	//Normal block end
	//cycle counter rv

	//pop registers & return
	// Subtract this block's cycle cost; Subs updates the condition flags,
	// which the `mi` (negative) branch below tests.
	assembler->Subs(w27, w27, cycl);
	// arm_exit lives outside the code buffer, so express it as an offset
	// from the buffer start and bind a label at that offset to branch to it.
	ptrdiff_t offset = reinterpret_cast<uintptr_t>(arm_exit) - assembler->GetBuffer()->GetStartAddress<uintptr_t>();
	Label arm_exit_label;
	assembler->BindToOffset(&arm_exit_label, offset);
	assembler->B(&arm_exit_label, mi);	//statically predicted as not taken

	// Unconditional branch to the dispatcher to continue with the next block.
	offset = reinterpret_cast<uintptr_t>(arm_dispatch) - assembler->GetBuffer()->GetStartAddress<uintptr_t>();
	Label arm_dispatch_label;
	assembler->BindToOffset(&arm_dispatch_label, offset);
	assembler->B(&arm_dispatch_label);

	assembler->FinalizeCode();
	verify(assembler->GetBuffer()->GetCursorOffset() <= assembler->GetBuffer()->GetCapacity());
	// Flush caches over the freshly generated code range.
	// NOTE(review): both argument pairs are (codestart, buffer end) --
	// presumably the dcache and icache ranges; confirm against the
	// vmem_platform_flush_cache contract.
	vmem_platform_flush_cache(
		codestart, assembler->GetBuffer()->GetEndAddress<void*>(),
		codestart, assembler->GetBuffer()->GetEndAddress<void*>());
	// Advance the global code-cache pointer past the emitted bytes.
	icPtr += assembler->GetBuffer()->GetSizeInBytes();

#if 0
	// Debug aid (disabled): disassemble and print the freshly emitted code.
	Instruction* instr_start = (Instruction *)codestart;
	Instruction* instr_end = assembler->GetBuffer()->GetEndAddress<Instruction*>();
	Decoder decoder;
	Disassembler disasm;
	decoder.AppendVisitor(&disasm);
	Instruction* instr;
	for (instr = instr_start; instr < instr_end; instr += kInstructionSize) {
		decoder.Decode(instr);
		printf("arm64 arec\t %p:\t%s\n",
				   reinterpret_cast<void*>(instr),
				   disasm.GetOutput());
	}
#endif
	// The per-block assembler instance is discarded after every block.
	delete assembler;
	assembler = NULL;
}
// First pass over the program: starting from startFromAddress, discover all
// reachable basic-block start addresses, mark every byte of each decoded
// operation as code, and register interpreted block starts with the
// simulator engine. Visiting an operation's outgoing edges (Accept) drives
// address_, isEndOfBasicBlockFound_ and the discovery queue.
void BlockCompilerFirstPass::Perform(State* s, Address startFromAddress, Decoder& decoder, std::unordered_map<Address, std::shared_ptr<SimulationUnit>>* simulationUnitTable, BlockCompilerSimulatorEngine& simulatorEngine)
{
	s_ = s;
	simulationUnitTable_ = simulationUnitTable;

	// Reset discovery state left over from any previous run.
	discoveredStartAddresses_.clear();
	while (!toProcessStartAddresses_.empty())
		toProcessStartAddresses_.pop();

	AddToDiscoveredQueue_(startFromAddress);

	// Drain the worklist of discovered block start addresses.
	while (!toProcessStartAddresses_.empty())
	{
		address_ = toProcessStartAddresses_.front();
		toProcessStartAddresses_.pop();

		simulatorEngine.CreateInterpretedBlockStart(address_);

		// Decode linearly until the edge visitor flags the end of the block.
		isEndOfBasicBlockFound_ = false;
		while (!isEndOfBasicBlockFound_)
		{
			auto decodedOp = decoder.Decode(address_);
			if (decodedOp == nullptr)
			{
				// Undecodable input terminates the current basic block.
				isEndOfBasicBlockFound_ = true;
				continue;
			}

			// Flag each byte covered by this operation as code.
			const int opLength = decodedOp->GetMetadata().GetLengthInBytes();
			for (int b = 0; b < opLength; ++b)
				s_->m[address_ + (Address)b].isCode = true;

			// Accept() advances address_ and/or ends the block via this visitor.
			decodedOp->GetOutgoingEdges()->Accept(*this);
		}
	}
}
// Builds the control-flow graph rooted at startAddress into `cfg`.
// Uses two nested worklists: the outer one holds discovered method start
// addresses, the inner one basic-block start addresses within the current
// method. Visiting an operation's outgoing edges (Accept) is what advances
// address_, sets isEndOfBasicBlockFound_ and feeds both worklists.
// Returns false when decoding fails or ShouldAbort_() fires; in that case
// the cfg is left only partially populated.
bool MethodCfgGenerator::CreateCfg(Decoder & decoder, Address startAddress, std::shared_ptr<Cfg> cfg)
{
	cfg_ = cfg;
	cfg_->Clear();
	
	address_ = startAddress;
	// Reset the per-run method worklist and bookkeeping.
	while (toProcessMethodRecords_.empty() == false)
		toProcessMethodRecords_.pop();
	discoveredMethodRecords_.clear();
	numberOfOperations_ = 0;

	// Seed discovery with the root method.
	MethodRecord methodRecord(startAddress);
	methodRecord.SetStartAddress(startAddress);
	discoveredMethodRecords_.insert(std::make_pair(startAddress, methodRecord));
	toProcessMethodRecords_.push(startAddress);

	while (toProcessMethodRecords_.empty() == false)
	{
		Address currentMethodStartAddress = toProcessMethodRecords_.front();
		toProcessMethodRecords_.pop();

		currentMethodRecordIterator_ = discoveredMethodRecords_.find(currentMethodStartAddress);

		// Fresh basic-block worklist for this method.
		while (toProcessBasicBlockRecords_.empty() == false)
			toProcessBasicBlockRecords_.pop();
		discoveredBasicBlockRecords_.clear();
		AddToDiscoveredBasicBlockRecordQueue_(currentMethodStartAddress);
		
		while (toProcessBasicBlockRecords_.empty() == false)
		{
			address_ = toProcessBasicBlockRecords_.front();
			toProcessBasicBlockRecords_.pop();

			// Block keys are derived from (method start, block start).
			std::string key = MakeBasicBlockRecordKey_(currentMethodStartAddress, address_);
			currentBasicBlockRecord_ = std::make_shared<BasicBlockRecord>(key, address_);

			if (cfg_->GetBasicBlockRecords().empty() == true)
			{
				// first basic block record is also entry block
				cfg_->GetEntryBasicBlockRecords().push_back(currentBasicBlockRecord_->GetKey());
			}
			cfg_->GetBasicBlockRecords().insert(std::make_pair(currentBasicBlockRecord_->GetKey(), currentBasicBlockRecord_));

			// Decode operations linearly until the edge visitor marks the
			// end of this basic block.
			isEndOfBasicBlockFound_ = false;
			while (isEndOfBasicBlockFound_ == false)
			{
				auto operation = decoder.Decode(address_);
				if (!operation)
					return false;	// undecodable input aborts CFG construction
				operation_ = std::shared_ptr<Operation>(std::move(operation));
				currentBasicBlockRecord_->GetOperations().push_back(operation_);
				++numberOfOperations_;
				if (ShouldAbort_() == true)
					return false;	// presumably an operation budget -- TODO confirm
				// Accept() advances address_, ends the block and/or queues successors.
				auto outgoingEdges = operation_->GetOutgoingEdges();
				outgoingEdges->Accept(*this);
			}
		}

		// Wire every return site of this method to every recorded return target.
		for (auto returnFromBasicBlockRecordKey : currentMethodRecordIterator_->second.GetReturnFromBasicBlockRecords())
		{
			auto returnFromBasicBlockRecord = cfg_->GetBasicBlockRecords().find(returnFromBasicBlockRecordKey);
			for (auto returnToBasicBlockRecordKey : currentMethodRecordIterator_->second.GetReturnToBasicBlockRecords())
				returnFromBasicBlockRecord->second->GetOutgoingEdges().push_back(returnToBasicBlockRecordKey);
		}
	}

	return true;
}
Esempio n. 7
0
// Disassembles every section of the currently loaded ELF into a text file
// chosen by the user. Handles both 32- and 64-bit ELF images and both PPU
// and SPU instruction sets (chosen from the first CPU thread's type).
void DisAsmFrame::Dump(wxCommandEvent& WXUNUSED(event)) 
{
	wxFileDialog ctrl( this, L"Select output file...",
		wxEmptyString, "DisAsm.txt", "*.txt", wxFD_SAVE);

	if(ctrl.ShowModal() == wxID_CANCEL) return;

	// NOTE(review): this stream, the loaders and the disasm/decoder below are
	// heap-allocated and never freed. The loaders may be referenced globally
	// (shdr_arr_* point into them), so the pre-existing leaks are left as-is.
	vfsStream& f_elf = *new vfsLocalFile(Emu.m_path);
	ConLog.Write("path: %s", Emu.m_path);
	Elf_Ehdr ehdr;
	ehdr.Load(f_elf);

	if(!ehdr.CheckMagic())
	{
		ConLog.Error("Corrupted ELF!");
		return;
	}
	wxArrayString name_arr;

	// Load the section headers according to the ELF class.
	switch(ehdr.GetClass())
	{
	case CLASS_ELF64:
		ElfType64 = true;
		l_elf64 = new ELF64Loader(f_elf);
		if(!l_elf64->LoadInfo())
		{
			delete l_elf64;
			return;
		}
		name_arr = l_elf64->shdr_name_arr;
		shdr_arr_64 = &l_elf64->shdr_arr;
		if(l_elf64->shdr_arr.GetCount() <= 0) return;
	break;

	case CLASS_ELF32:
		ElfType64 = false;
		l_elf32 = new ELF32Loader(f_elf);
		if(!l_elf32->LoadInfo())
		{
			delete l_elf32;
			return;
		}

		name_arr = l_elf32->shdr_name_arr;
		shdr_arr_32 = &l_elf32->shdr_arr;
		if(l_elf32->shdr_arr.GetCount() <= 0) return;
	break;

	default: ConLog.Error("Corrupted ELF!"); return;
	}

	DisAsm* disasm;
	Decoder* decoder;

	// NOTE(review): the disassemblers are constructed with a dereferenced
	// null PPCThread reference (pre-existing pattern; technically UB) --
	// they apparently never touch the thread in DumpMode.
	if(Emu.GetCPU().GetThreads()[0].IsSPU())
	{
		SPU_DisAsm& dis_asm = *new SPU_DisAsm(*(PPCThread*)NULL, DumpMode);
		decoder = new SPU_Decoder(dis_asm);
		disasm = &dis_asm;
	}
	else
	{
		PPU_DisAsm& dis_asm = *new PPU_DisAsm(*(PPCThread*)NULL, DumpMode);
		decoder = new PPU_Decoder(dis_asm);
		disasm = &dis_asm;
	}

	const u32 shdr_count = ElfType64 ? shdr_arr_64->GetCount() : shdr_arr_32->GetCount();

	// Total instruction count across all sections, for the progress dialog.
	u64 max_count = 0;
	for(u32 sh=0; sh<shdr_count; ++sh)
	{
		const u64 sh_size = (ElfType64 ? (*shdr_arr_64)[sh].sh_size : (*shdr_arr_32)[sh].sh_size) / 4;
		max_count += sh_size;
	}

	wxArrayLong max;
	max.Add(max_count);
	MTProgressDialog& prog_dial = *new MTProgressDialog(NULL, wxDefaultSize, "Saving", "Loading...", max, 1);
	max.Clear();

	wxFile fd(ctrl.GetPath(), wxFile::write);

	for(u32 sh=0, vsize=0; sh<shdr_count; ++sh)
	{
		// sh_size counts 4-byte instructions, sh_addr is a byte address.
		const u64 sh_size = (ElfType64 ? (*shdr_arr_64)[sh].sh_size : (*shdr_arr_32)[sh].sh_size) / 4;
		const u64 sh_addr = (ElfType64 ? (*shdr_arr_64)[sh].sh_addr : (*shdr_arr_32)[sh].sh_addr);

		const wxString name = sh < name_arr.GetCount() ? name_arr[sh] : "Unknown";

		fd.Write(wxString::Format("Start of section header %s[%d] (instructions count: %d)\n", name, sh, sh_size));
		prog_dial.Update(0, vsize, wxString::Format("Disasm %s section", name));

		if(Memory.IsGoodAddr(sh_addr))
		{
			// BUGFIX: the original loop `for(u64 addr=sh_addr; addr<sh_addr+sh_size; addr, vsize++)`
			// never advanced `addr` (the comma expression `addr` is a no-op),
			// so it spun forever; it also compared a byte address against an
			// instruction count. Iterate per instruction word instead.
			for(u64 i=0; i<sh_size; ++i, vsize++)
			{
				disasm->dump_pc = sh_addr + i * 4;
				decoder->Decode(Memory.Read32(disasm->dump_pc));
				fd.Write("\t");
				fd.Write(disasm->last_opcode);
			}
		}
		fd.Write(wxString::Format("End of section header %s[%d]\n\n", name, sh));
	}

	prog_dial.Close();
	Emu.Stop();
	/*
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	const uint cores_count =
		(si.dwNumberOfProcessors < 1 || si.dwNumberOfProcessors > 8 ? 2 : si.dwNumberOfProcessors); 

	wxArrayLong max;
	max.Clear();

	u64 max_count = 0;

	if(ElfType64)
	{
		for(uint sh=0; sh<l_elf64->shdr_arr.GetCount(); ++sh)
		{
			max_count += l_elf64->shdr_arr[sh].sh_size / 4;
		}
	}
	else
	{
		for(uint sh=0; sh<l_elf32->shdr_arr.GetCount(); ++sh)
		{
			max_count += l_elf32->shdr_arr[sh].sh_size / 4;
		}
	}

	for(uint c=0; c<cores_count; ++c) max.Add(max_count / cores_count);
	for(uint c=0; c<max_count % cores_count; ++c) max[c]++;

	MTProgressDialog& prog_dial = *new MTProgressDialog(this, wxDefaultSize, "Dumping...", "Loading", max, cores_count);

	DumperThread* dump = new DumperThread[cores_count];
	wxArrayString** arr = new wxArrayString*[cores_count];

	bool* threads_done = new bool[cores_count];

	for(uint i=0; i<cores_count; ++i)
	{
		arr[i] = new wxArrayString[ElfType64 ? l_elf64->shdr_arr.GetCount() : l_elf32->shdr_arr.GetCount()];
		dump[i].Set(i, cores_count, &threads_done[i], prog_dial, arr);
		dump[i].Start();
	}

	WaitDumperThread& wait_dump = 
		*new WaitDumperThread(threads_done, cores_count, ctrl.GetPath(), prog_dial, arr);
	wait_dump.Start();
	*/
}
Esempio n. 8
0
// Retrieval main loop: reads "id<TAB>sentence" queries from stdin, decodes
// each query with the SMT decoder, scores the resulting hypergraph against
// every document in the index and writes the K best documents per query.
int main(int argc, char** argv) {
	po::variables_map cfg;
	if (!init_params(argc,argv,&cfg)) return 1;
	// setup decoder
	Decoder* decoder = setupDecoder(cfg);
	if (!decoder) {
		cerr << "error while loading decoder with" << cfg["decoder_config"].as<string>() << "!\n";
		return 1;
	}
	TrainingObserver observer;
	// get reference to decoder weights
	vector<weight_t>& decoder_weights = decoder->CurrentWeightVector();
	WeightVector w;
	// the SMT weights (to be optimized)
	if (cfg.count("weights")) {
		Weights::InitFromFile(cfg["weights"].as<string>(), &decoder_weights);
		Weights::InitSparseVector(decoder_weights, &w);
	} else {
		cerr << "starting with EMPTY weights!\n";
	}
	// load the document index and attach a Viterbi scorer to it
	ReadFile idx_in(cfg["index"].as<string>());
	const Index::Index idx(*idx_in);
	cerr << idx << endl;

	ViterbiScorer vs(&idx);

	// (an unused `SparseVector<double> rankweights` declared here was removed:
	// it was never loaded or read)

	int J = cfg["jobs"].as<int>();
	omp_set_num_threads(J);
	int K = cfg["K"].as<int>();
	TIMER::timestamp_t t0, t1;
	double time;
	string id, sentence;
	while(cin >> id) {

		cin.ignore(1, '\t');
		getline(cin, sentence);
		if (sentence.empty() || id.empty()) continue;

		cerr << "\nQ="<<id<<endl;
		decoder->Decode(sentence, &observer); // decode with decoder_weights
		Hypergraph hg = observer.GetCurrentForest();
		int len = -1;
		// Viterbi score of the forest, used below as the reference value V.
		prob_t vit = Viterbi<PathLengthTraversal>(hg, &len);

		t0 = TIMER::get_timestamp();

		// Score every document and track how many fall below/at/above V.
		vector<prob_t> scores(idx.NumberOfDocuments());
		vs.Score(hg, &scores);
		Scores kbest(K);
		unsigned below_lb=0;
		unsigned lb=0;
		unsigned above_lb=0;
		prob_t max = prob_t::Zero();
		prob_t min = prob_t(100000000);	// sentinel assumed larger than any real score
		for (int d=0;d<idx.NumberOfDocuments();++d) {
			if (scores[d] < vit) below_lb++;
			else if (scores[d] == vit) lb++;
			else above_lb++;
			if (scores[d]>max) max = scores[d];
			if (scores[d]<min) min = scores[d];
			kbest.update( CLIR::Score(idx.GetDocID(d), scores[d]));
		}
		CLIR::writeResult(cout, id, kbest.k_largest(), "0");

		t1 = TIMER::get_timestamp();
		time = (t1-t0) / 1000000.0L;	// timestamps are in microseconds
		cerr << time << "s\n";
		cerr << "<V: " << below_lb << " =V: " << lb << " >V: " << above_lb << endl;
		cerr << "max score: " << max << " min score: " << min << " viterbi score: " << vit << endl;
	}


	delete decoder;
	return 0;	// explicit (previously implicit) success return
}
Esempio n. 9
0
// Reads "id<TAB>sentence" pairs, decodes each into a hypergraph and then
// emits either random samples ("--sample") or k-best/k-worst derivations
// ("--kbest") under both the model weights (w) and the relevance/hope
// weights (w_hope), with each score rescaled into the [min,max] range of
// its respective weight vector via vscale().
int main(int argc, char** argv) {
	po::variables_map cfg;
	if (!init_params(argc,argv,&cfg)) return 1;

	// Optional fixed RNG seed for reproducible sampling.
	if (cfg.count("random_seed"))
		rng.reset(new MT19937(cfg["random_seed"].as<uint32_t>()));
	else
		rng.reset(new MT19937);


	// setup decoder
	Decoder* decoder = setupDecoder(cfg);
	if (!decoder) {
		cerr << "error while loading decoder with" << cfg["decoder_config"].as<string>() << "!\n";
		return 1;
	}
	TrainingObserver observer;
	// get reference to decoder weights
	vector<weight_t>& decoder_weights = decoder->CurrentWeightVector();
	// setup weights
	WeightVector w, w_hope, w_fear;
	// the SMT weights (to be optimized)
	Weights::InitFromFile(cfg["weights"].as<string>(), &decoder_weights);
	Weights::InitSparseVector(decoder_weights, &w);
	loadWeights(cfg["rweights"].as<string>(), w_hope);
	// Negated vectors: reweighting with -w turns Viterbi max into min.
	WeightVector w_inv = w*-1;
	WeightVector w_hope_inv = w_hope*-1;

	//cerr << "W    " << w << endl;
	//cerr << "WINV " << w_inv << endl;
	//cerr << "R    " << w_hope << endl;
	//cerr << "RINV " << w_hope_inv << endl;

	const string input = decoder->GetConf()["input"].as<string>();
	//cerr << "Reading input from " << ((input == "-") ? "STDIN" : input.c_str()) << endl << endl;
	ReadFile in_read(input);
	istream *in = in_read.stream();
	assert(*in);
	string id, sentence;
	std::vector<HypergraphSampler::Hypothesis> samples;

	while(*in >> id) {

		in->ignore(1, '\t');
		getline(*in, sentence);
		if (sentence.empty() || id.empty()) continue;

		//decoder->SetId(id);
		decoder->Decode(sentence, &observer); // decode with decoder_weights
		Hypergraph hg = observer.GetCurrentForest();

		// Score extremes used as the vscale() normalization range.
		// get max model score
		double max_tscore = ViterbiFeatures(hg).dot(w);
		// get min model score
		hg.Reweight(w_inv);
		double min_tscore = -ViterbiFeatures(hg).dot(w_inv);
		// get max rel score
		hg.Reweight(w_hope);
		double max_rscore = ViterbiFeatures(hg).dot(w_hope);
		// get min rel_score
		hg.Reweight(w_hope_inv);
		double min_rscore = -ViterbiFeatures(hg).dot(w_hope_inv);

		//cerr << max_tscore << " " << min_tscore << " " << max_rscore << " " << min_rscore << endl;

		if (cfg.count("sample")) {

			// Draw N random derivations and print scaled model/rel scores.
			HypergraphSampler::sample_hypotheses(hg, cfg["sample"].as<int>(), &(*rng), &samples);
			for (unsigned s=0;s<samples.size();++s) {
				const HypergraphSampler::Hypothesis& h = samples[s];
				cout << id << "\t" << "S\t" << vscale(h.fmap.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(h.fmap.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(h.words) << endl;
			}

		} else if (cfg.count("kbest")) {
			typedef KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,KBest::FilterUnique> K;
			// get kbest model score derivations
			hg.Reweight(w);
			K kbest2(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest2.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KBT\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

			// get kworst model score derivations
			hg.Reweight(w_inv);
			K kbest3(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest3.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KWT\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

			// get kbest rel score derivations
			hg.Reweight(w_hope);
			K kbest4(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest4.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KBR\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

			// get kworst rel score derivations (labelled KWR below)
			hg.Reweight(w_hope_inv);
			K kbest(hg,cfg["kbest"].as<int>());
			for (int i = 0; i < cfg["kbest"].as<int>(); ++i) {
			      typename K::Derivation *d = kbest.LazyKthBest(hg.nodes_.size() - 1, i);
			      if (!d) break;
			      cout << id << "\t" << "KWR\t" << vscale(d->feature_values.dot(w), min_tscore, max_tscore) <<
						"\t" <<  vscale(d->feature_values.dot(w_hope), min_rscore, max_rscore) <<
						"\t" << TD::GetString(d->yield) << endl;
			}

		}


	}

	delete decoder;
	return 0;

}
Esempio n. 10
0
// HGMIRA-style online tuning loop: for each input sentence, decode to a
// hypergraph, pick a hope and a fear derivation under several selectable
// strategies/optimizers (PA single-step, SGD, PA + cutting plane, cutting
// plane MIRA), update the SMT weight vector w, and finally write the last
// and averaged weight vectors to files named after a random node id.
int main(int argc, char** argv) {
	po::variables_map cfg;
	if (!init_params(argc,argv,&cfg)) return 1;

	// Optional fixed RNG seed (used for the output file name below).
	if (cfg.count("random_seed"))
		rng.reset(new MT19937(cfg["random_seed"].as<uint32_t>()));
	else
		rng.reset(new MT19937);

	// set variables
	lr = cfg["learningrate"].as<double>();
	hope_select = cfg["hope"].as<int>();
	fear_select = cfg["fear"].as<int>();
	optimizer = cfg["optimizer"].as<int>();
	freeze = cfg.count("freeze");
	if (freeze) {
		// Features listed under --freeze are excluded from updates.
		const vector<string>& ffstrs = cfg["freeze"].as<vector<string> >();
		stringstream ffss;
		ffss << "frozen features: ";
		for (vector<string>::const_iterator ffit=ffstrs.begin();ffit!=ffstrs.end();++ffit) {
			frozen_features.push_back(FD::Convert(*ffit));
			ffss << *ffit << " ";
		}
		cerr << ffss.str() << endl;
	}
	scaling = cfg["scaling"].as<int>();
	scalingfactor = cfg["scalingfactor"].as<double>();
	cerr << "scaling="<< scaling << " scalingfactor=" << scalingfactor << endl;

	// setup decoder
	Decoder* decoder = setupDecoder(cfg);
	if (!decoder) {
		cerr << "error while loading decoder with" << cfg["decoder_config"].as<string>() << "!\n";
		return 1;
	}
	TrainingObserver observer;
	// get reference to decoder weights
	vector<weight_t>& decoder_weights = decoder->CurrentWeightVector();
	// the SMT weights (to be optimized)
	if (cfg.count("weights")) {
		Weights::InitFromFile(cfg["weights"].as<string>(), &decoder_weights);
		Weights::InitSparseVector(decoder_weights, &w);
	} else {
		cerr << "starting with EMPTY weights!\n";
	}
	// the weight vector that gives the oracle
	loadRelevanceWeights(cfg["rweights"].as<string>(), relw);
	negrelw -= relw;
	relw_scaled = relw;
	// initial scaling
	if (scaling != 0) scaleRelevanceWeights(scalingfactor);

	// output some vector stats
	cerr << "W_REL=" << relw << endl;
	cerr << "W_REL_SCALED=" << relw_scaled << endl;
	cerr << "|W_REL|=" << relw_scaled.size() << endl;
	cerr << "|W_SMT|=" << w.size() << endl;

	cerr << "hope selection: " << hope_select << endl;
	const string input = decoder->GetConf()["input"].as<string>();
	cerr << "Reading input from " << ((input == "-") ? "STDIN" : input.c_str()) << endl;
	ReadFile in_read(input);
	istream *in = in_read.stream();
	assert(*in);
	string id, sentence;
	int cur_sent = 0;
	unsigned lc = 0; // line count

	double objective=0;
	double tot_loss = 0;
	WeightVector avg_w = w;
	//SparseVector<double> tot;
	//SparseVector<double> oldw = w;
	//tot.clear();
	//tot += w;

	while(*in >> id) {

		in->ignore(1, '\t');
		getline(*in, sentence);
		if (sentence.empty() || id.empty()) continue;

		cerr << "\nID="<<id << endl;
		decoder->SetId(cur_sent);
		decoder->Decode(sentence, &observer); // decode with decoder_weights
		cur_sent = observer.GetCurrentSent();
		Hypergraph hg = observer.GetCurrentForest();

		vector<boost::shared_ptr<HypothesisInfo> > S;
		MAX_REL = std::numeric_limits<double>::lowest();
		MIN_REL = std::numeric_limits<double>::max();

		// get viterbi
		boost::shared_ptr<HypothesisInfo> viterbi = MakeHypothesisInfo(hg);

		// get the true oracle (sets max_rel)
		hg.Reweight(relw);
		boost::shared_ptr<HypothesisInfo> oracle = MakeHypothesisInfo(hg);
		oracle->oracle = oracle;
		oracle->computeCost();

		// get the worst derivation (to get min_rel)
		hg.Reweight(negrelw);
		boost::shared_ptr<HypothesisInfo> worst = MakeHypothesisInfo(hg);
		worst->oracle = oracle;
		worst->computeCost();

		if (hope_select == 1) { // hope
			hg.Reweight(w + relw_scaled);
			S.push_back(MakeHypothesisInfo(hg));
			S[0]->oracle = oracle;
			S[0]->computeCost();
		} else { // true oracle
			S.push_back(oracle);
		}
		// S contains now ONE (hope/oracle) hypothesis
		S[0]->computeLoss();
		boost::shared_ptr<HypothesisInfo> good = S[0];

		viterbi->oracle = oracle;
		viterbi->computeCost();
		viterbi->computeLoss();

		cerr << "min_rel=" << MIN_REL << " max_rel=" << MAX_REL << endl;
		cerr << "S[0]=" << S[0] << endl;

		boost::shared_ptr<HypothesisInfo> fear;

		if (optimizer == 4) { // PA update (single dual coordinate step)
			cerr << "PA MIRA (single dual coordinate step)\n";

			hg.Reweight(w - relw_scaled);
			fear = MakeHypothesisInfo(hg);
			fear->oracle = oracle;
			fear->computeCost();
			fear->computeLoss();
			cerr << "LOSS: " << fear->loss;
			if (fear->loss > 0.0) {
				// Step size is loss over squared feature-difference norm,
				// clipped at the learning rate (PA-I style update).
				double diffsqnorm = (good->features - fear->features).l2norm_sq();
				double delta;
				if (diffsqnorm > 0) {
					delta = fear->loss / (diffsqnorm);
					if (delta > lr) delta = lr;
					w += good->features * delta;
					w -= fear->features * delta;
				}
			}

		} else if (optimizer == 1) {// sgd - nonadapted step size
			cerr << "SGD\n";

			if (fear_select == 1) {
				hg.Reweight(w - relw_scaled);
				fear = MakeHypothesisInfo(hg);
			} else if (fear_select == 2) {
				fear = worst;
			} else if (fear_select == 3) {
				fear = viterbi;
			}
			w += good->features * lr;
			w -= fear->features * lr;

		} else if (optimizer == 2) { // PA MIRA with selection from  cutting plane
			cerr << "PA MIRA with Selection from Cutting Plane\n";

			hg.Reweight(w - relw_scaled);
			fear = MakeHypothesisInfo(hg);
			fear->oracle = oracle;
			fear->computeCost();
			fear->computeLoss();
			if (fear->loss < 0) {
				cerr << "FEAR LOSS < 0! THIS SHOULD NOT HAPPEN!\n";
				abort();
			}
			if (fear->loss > good->loss + SMO_EPS) {
				S.push_back(fear);
				OptimizeSet(S, 1); // only one iteration with a set of two constraints
			} else { cerr << "constraint not violated. fear loss:" << fear->loss << "\n"; }

		} else if (optimizer == 3) { // Cutting Plane MIRA
			cerr << "Cutting Plane MIRA\n";

			unsigned cp_iter=0; // Cutting Plane Iteration
			bool again = true;
			while (again && cp_iter<CP_ITER) {
				again = false;
				cerr << "CuttingPlane: " << cp_iter << endl;
				// find a fear derivation
				hg.Reweight(w - relw_scaled);
				fear = MakeHypothesisInfo(hg);
				fear->oracle = oracle;
				fear->computeCost();
				fear->computeLoss();
				if (fear->loss < 0) {
					cerr << "FEAR LOSS < 0! THIS SHOULD NOT HAPPEN!\n";
					//abort();
				}
				// find max loss hypothesis
				double max_loss_in_set = (*std::max_element(S.begin(), S.end(), lossComp))->loss;
				if (fear->loss > max_loss_in_set + SMO_EPS) {
					cerr << "Adding new fear " << fear << " to S\n";
					S.push_back(fear);
					OptimizeSet(S);
					again = true;
				} else { cerr << "constraint not violated. fear loss:" << fear->loss << "\n"; }
				cp_iter++;
				// update losses
				//for(unsigned i=0;i<S.size();i++) S[i]->computeLoss();
			}
		}

		cerr << "|W|=" << w.size() << endl;
		tot_loss += relscale(viterbi->rel);
		//print objective after this sentence
		//double w_change = (w - oldw).l2norm_sq();
		//double temp_objective = 0.5 * w_change;// + max_step_size * max_fear;
		for(size_t u=0;u!=S.size();u++) {	// size_t (was int): avoid signed/unsigned mismatch
			cerr << "alpha=" << S[u]->alpha << " loss=" << S[u]->loss << endl;
			//temp_objective += S[u]->alpha * S[u]->loss;
		}
		//objective += temp_objective;
		//cerr << "SENT OBJ: " << temp_objective << " NEW OBJ: " << objective << endl;

		//tot += w;
		++lc;
		// Running average of the weight vector over all sentences so far.
		avg_w *= lc;
		avg_w = (w + avg_w) / (lc+1);

		// set decoder weights for next sentence
		decoder_weights.clear();
		w.init_vector(&decoder_weights);
		// rescale relevance weights to balance with new model after the update
		if (scaling == 2) {
			scaleRelevanceWeights(scalingfactor);
			cerr << "W_REL_SCALED=" << relw_scaled << endl;
		}

		// viterbi 2 for debugging
		//hg.Reweight(w);
		//boost::shared_ptr<HypothesisInfo> viterbi2 = MakeHypothesisInfo(hg);
		//viterbi2->oracle = oracle;
		//viterbi2->computeCost();
		//viterbi2->computeLoss();
		//fear->computeLoss();
		//viterbi->computeLoss();
		//good->computeLoss();
		cerr << "FEAR : " << fear << " \n" << TD::GetString(fear->hyp) << endl;
		cerr << "BEST : " << viterbi << " \n" << TD::GetString(viterbi->hyp) << endl;
		//cerr << "BEST2: " << viterbi2 << " \n" << TD::GetString(viterbi2->hyp) << endl;
		cerr << "HOPE : " << good << " \n" << TD::GetString(good->hyp) << endl;

		cout << id << " ||| " << TD::GetString(fear->hyp) << " ||| " << TD::GetString(viterbi->hyp) << " ||| " << TD::GetString(good->hyp) << endl;

		S.clear();
		fear.reset();
		viterbi.reset();
		//viterbi2.reset();
		good.reset();
		worst.reset();
		oracle.reset();

	}

    //cerr << "FINAL OBJECTIVE: "<< objective << endl;
    cerr << "Translated " << lc << " sentences\n";
    // NOTE(review): the next line was corrupted in the source
    // (`..."PASS="******"]\n"` is not valid C++). Reconstructed as the
    // average accumulated metric, tot_loss / lc -- confirm against history.
    cerr << " [AVG METRIC LAST PASS=" << (tot_loss / lc) << "]\n";
    //tot_loss = 0;

	decoder_weights.clear();
	w.init_vector(&decoder_weights);
	//Weights::ShowLargestFeatures(decoder_weights);
	// write weights
	int node_id = rng->next() * 100000;
	cerr << " Writing model to " << node_id << endl;
	ostringstream os;
	os << cfg["weights_output"].as<string>() << "/last." << node_id;
	string msg = "HGMIRA tuned weights ||| " + boost::lexical_cast<std::string>(node_id) + " ||| " + boost::lexical_cast<std::string>(lc);
	Weights::WriteToFile(os.str(), decoder_weights, true, &msg);

	//SparseVector<double> x = tot;
	//x /= lc+1;
	ostringstream sa;
	string msga = "HGMIRA tuned weights AVERAGED ||| " + boost::lexical_cast<std::string>(node_id) + " ||| " + boost::lexical_cast<std::string>(lc);
	sa << cfg["weights_output"].as<string>() << "/avg." << node_id;
	avg_w.init_vector(&decoder_weights);
	Weights::WriteToFile(sa.str(), decoder_weights, true, &msga);


	delete decoder;
	cerr << "\ndone.\n";
	return 0;

}