void PredictionGenerationVerbose(const vector<string>& Hypotheses, const vector<BasecallerRead>& hypothesesReads,
                                 const vector<float>& phase_params, const ion::FlowOrder& flow_order,
                                 const int& start_flow, const int& prefix_size)
{
  printf("Calculating predictions for %d hypotheses starting at flow %d:\n", (int)Hypotheses.size(), start_flow);
  for (unsigned int iHyp=0; iHyp<Hypotheses.size(); ++iHyp) {
    for (unsigned int iBase=0; iBase<Hypotheses[iHyp].length(); ++iBase)
      printf("%c", Hypotheses[iHyp][iBase]);
    printf("\n");
  }
  printf("Solved read prefix: ");
  for (int iBase=0; iBase<prefix_size; ++iBase)
    printf("%c", hypothesesReads[0].sequence[iBase]);
  printf("\n");
  printf("Extended Hypotheses reads to:\n");
  for (unsigned int iHyp=0; iHyp<hypothesesReads.size(); ++iHyp) {
    for (unsigned int iBase=0; iBase<hypothesesReads[iHyp].sequence.size(); ++iBase)
      printf("%c", hypothesesReads[iHyp].sequence[iBase]);
    printf("\n");
  }
  printf("Phasing Parameters, cf: %f ie: %f dr: %f \n Predictions: \n",
         phase_params[0], phase_params[1], phase_params[2]);

  // The padding below keeps every printed value in a width-5 column;
  // an extra space is emitted wherever a negative measurement adds a sign character
  cout << "Flow Order  : ";
  for (int i_flow=0; i_flow<flow_order.num_flows(); i_flow++) {
    cout << flow_order.nuc_at(i_flow) << "    ";
    if (hypothesesReads[0].normalized_measurements[i_flow] < 0)
      cout << " ";
  }
  cout << endl << "Flow Index  : ";
  for (int i_flow=0; i_flow<flow_order.num_flows(); i_flow++) {
    cout << i_flow << " ";
    if (i_flow<10)        cout << "   ";
    else if (i_flow<100)  cout << "  ";
    else if (i_flow<1000) cout << " ";
    if (hypothesesReads[0].normalized_measurements[i_flow] < 0)
      cout << " ";
  }
  cout << endl << "Measured    : ";
  for (unsigned int i_flow=0; i_flow<hypothesesReads[0].normalized_measurements.size(); ++i_flow) {
    printf("%.2f", hypothesesReads[0].normalized_measurements[i_flow]);
    if (hypothesesReads[0].normalized_measurements[i_flow] < 10)
      cout << " ";
  }
  cout << endl;
  for (unsigned int i_Hyp=0; i_Hyp<hypothesesReads.size(); ++i_Hyp) {
    cout << "Prediction " << i_Hyp << ": ";
    for (unsigned int i_flow=0; i_flow<hypothesesReads[i_Hyp].prediction.size(); ++i_flow) {
      printf("%.2f", hypothesesReads[i_Hyp].prediction[i_flow]);
      if (hypothesesReads[i_Hyp].prediction[i_flow] < 10)
        cout << " ";
      if (hypothesesReads[0].normalized_measurements[i_flow] < 0)
        cout << " ";
    }
    cout << endl;
  }
  cout << " ------------------- " << endl;
}
void GetPrefixFlow(Alignment *rai, const string& prefix_bases, const ion::FlowOrder& flow_order)
{
  rai->prefix_flow = 0;
  unsigned int base_idx = 0;
  while (base_idx < prefix_bases.length() and rai->prefix_flow < flow_order.num_flows()) {
    // Advance to the flow that incorporates this prefix base;
    // consecutive identical bases (a homopolymer) stay on the same flow
    while (rai->prefix_flow < flow_order.num_flows() and flow_order.nuc_at(rai->prefix_flow) != prefix_bases.at(base_idx))
      rai->prefix_flow++;
    base_idx++;
  }
}
void CreateFlowIndex(Alignment *rai, const ion::FlowOrder& flow_order)
{
  // Initialize every entry with the out-of-range sentinel num_flows()
  rai->flow_index.assign(rai->read_bases.length(), flow_order.num_flows());

  int flow = rai->start_flow;
  unsigned int base_idx = 0;
  while (base_idx < rai->read_bases.length() and flow < flow_order.num_flows()) {
    // Advance to the flow that incorporates this base
    while (flow < flow_order.num_flows() and flow_order.nuc_at(flow) != rai->read_bases[base_idx])
      flow++;
    rai->flow_index[base_idx] = flow;
    base_idx++;
  }

  if (base_idx != rai->read_bases.length()) {
    cerr << "WARNING in ExtendedReadInfo::CreateFlowIndex: There are more bases in the read than fit into the flow order." << endl;
    exit(1);
  }
}
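// ----------------------------------------------------------------------------
// Sketch: the base-to-flow mapping used by GetPrefixFlow() and CreateFlowIndex()
// above, demonstrated standalone. A plain std::string stands in for
// ion::FlowOrder, and ToyFlowIndex() is a hypothetical helper for illustration
// only, not part of the codebase. For each base we advance to the next flow
// whose nucleotide matches; consecutive identical bases (a homopolymer) map to
// the same flow.
#include <iostream>
#include <string>
#include <vector>

static std::vector<int> ToyFlowIndex(const std::string& flow_order, const std::string& bases)
{
  // Initialize with the out-of-range sentinel, mirroring flow_index.assign() above
  std::vector<int> flow_index(bases.size(), (int)flow_order.size());
  int flow = 0;
  for (size_t base_idx = 0; base_idx < bases.size() and flow < (int)flow_order.size(); ++base_idx) {
    while (flow < (int)flow_order.size() and flow_order[flow] != bases[base_idx])
      flow++;
    flow_index[base_idx] = flow;
  }
  return flow_index;
}

int main()
{
  // Flow order "TACGTACG", read "TTAG": both Ts incorporate at flow 0,
  // A at flow 1, G at flow 3.
  for (int f : ToyFlowIndex("TACGTACG", "TTAG"))
    std::cout << f << " ";                       // prints: 0 0 1 3
  std::cout << std::endl;
  return 0;
}
// ----------------------------------------------------------------------------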
int GetMasterReadPrefix(TreephaserLite& treephaser, const ion::FlowOrder& flow_order, const int& start_flow,
                        const string& called_bases, BasecallerRead& master_read)
{
  // Solve beginning of maybe clipped read
  int until_flow = min((start_flow+20), flow_order.num_flows());
  treephaser.Solve(master_read, until_flow, 0);

  // StartFlow clipped? Get solved HP length at startFlow.
  unsigned int base = 0;
  int flow = 0;
  unsigned int HPlength = 0;
  while (base < master_read.sequence.size()) {
    while (flow < flow_order.num_flows() and flow_order.nuc_at(flow) != master_read.sequence[base]) {
      flow++;
    }
    if (flow > start_flow or flow == flow_order.num_flows())
      break;
    if (flow == start_flow)
      HPlength++;
    base++;
  }
  //if (global_context.DEBUG>2)
  //  printf("Solved %d bases until (not incl.) flow %d. HP of height %d at flow %d.\n", base, flow, HPlength, start_flow);

  // Get HP size at the start of the read as called in Hypotheses[0]
  unsigned int count = 1;
  while (count < called_bases.length() and called_bases.at(count) == called_bases.at(0))
    count++;
  //if (global_context.DEBUG>2)
  //  printf("Hypothesis starts with an HP of length %d\n", count);

  // Adjust the length of the prefix and erase extra solved bases
  if (HPlength > count)
    base -= count;
  else
    base -= HPlength;
  master_read.sequence.erase(master_read.sequence.begin()+base, master_read.sequence.end());

  // Get flow of last prefix base
  int prefix_flow = 0;
  for (unsigned int i_base = 0; i_base < master_read.sequence.size(); i_base++) {
    while (prefix_flow < flow_order.num_flows() and flow_order.nuc_at(prefix_flow) != master_read.sequence[i_base])
      prefix_flow++;
  }
  return prefix_flow;
}
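// ----------------------------------------------------------------------------
// Sketch: the homopolymer bookkeeping inside GetMasterReadPrefix(), standalone.
// ToyHPLengthAtFlow() is a hypothetical helper for illustration only; it walks
// a solved sequence in flow space and counts how many bases incorporate at
// exactly start_flow, which is what decides how many solved bases get erased.
#include <string>

static unsigned int ToyHPLengthAtFlow(const std::string& flow_order, const std::string& sequence, int start_flow)
{
  unsigned int hp_length = 0;
  int flow = 0;
  for (size_t base = 0; base < sequence.size(); ++base) {
    while (flow < (int)flow_order.size() and flow_order[flow] != sequence[base])
      flow++;
    if (flow > start_flow or flow == (int)flow_order.size())
      break;                 // walked past the flow of interest
    if (flow == start_flow)
      hp_length++;           // this base incorporates at start_flow
  }
  return hp_length;          // e.g. ToyHPLengthAtFlow("TACG", "TAA", 1) == 2
}
// ----------------------------------------------------------------------------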
void OrderedDatasetWriter::Open(const string& base_directory, BarcodeDatasets& datasets, int num_regions,
                                const ion::FlowOrder& flow_order, const string& key, const string& basecaller_name,
                                const string& basecaller_version, const string& basecaller_command_line,
                                const string& production_date, const string& platform_unit, bool save_filtered_reads)
{
  num_regions_ = num_regions;
  num_regions_written_ = 0;
  region_ready_.assign(num_regions_+1, false);
  region_dropbox_.clear();
  region_dropbox_.resize(num_regions_);

  qv_histogram_.assign(50, 0);

  num_datasets_ = datasets.num_datasets();
  num_barcodes_ = datasets.num_barcodes();
  num_read_groups_ = datasets.num_read_groups();
  num_reads_.resize(num_datasets_, 0);
  bam_filename_.resize(num_datasets_);
  save_filtered_reads_ = save_filtered_reads;

  read_group_name_.resize(num_read_groups_);
  read_group_dataset_.assign(num_read_groups_, -1);
  read_group_num_Q20_bases_.assign(num_read_groups_, 0);
  read_group_num_barcode_errors_.resize(num_read_groups_);
  for (int rg = 0; rg < num_read_groups_; ++rg) {
    read_group_name_[rg] = datasets.read_group_name(rg);
    read_group_num_barcode_errors_[rg].assign(3, 0);
  }

  // New filtering and trimming accounting (per read group)
  read_group_stats_.resize(num_read_groups_);

  bam_writer_.resize(num_datasets_, NULL);

  for (int ds = 0; ds < num_datasets_; ++ds) {

    // Set up BAM header
    bam_filename_[ds] = base_directory + "/" + datasets.dataset(ds)["basecaller_bam"].asString();

    SamHeader sam_header;
    sam_header.Version = "1.4";
    sam_header.SortOrder = "unsorted";

    SamProgram sam_program("bc");
    sam_program.Name = basecaller_name;
    sam_program.Version = basecaller_version;
    sam_program.CommandLine = basecaller_command_line;
    sam_header.Programs.Add(sam_program);

    for (Json::Value::iterator rg = datasets.dataset(ds)["read_groups"].begin(); rg != datasets.dataset(ds)["read_groups"].end(); ++rg) {
      string read_group_name = (*rg).asString();
      Json::Value& read_group_json = datasets.read_groups()[read_group_name];

      read_group_dataset_[datasets.read_group_name_to_id(read_group_name)] = ds;

      SamReadGroup read_group (read_group_name);
      read_group.FlowOrder = flow_order.full_nucs();

      read_group.KeySequence  = key;
      read_group.KeySequence += read_group_json.get("barcode_sequence","").asString();
      read_group.KeySequence += read_group_json.get("barcode_adapter","").asString();

      read_group.ProductionDate = production_date;
      read_group.Sample = read_group_json.get("sample","").asString();
      read_group.Library = read_group_json.get("library","").asString();
      read_group.Description = read_group_json.get("description","").asString();
      read_group.PlatformUnit = read_group_json.get("platform_unit","").asString();
      read_group.SequencingCenter = datasets.json().get("sequencing_center","").asString();
      read_group.SequencingTechnology = "IONTORRENT";

      sam_header.ReadGroups.Add(read_group);
    }

    // Open Bam for writing
    RefVector empty_reference_vector;
    bam_writer_[ds] = new BamWriter();
    bam_writer_[ds]->SetCompressionMode(BamWriter::Compressed);
    //bam_writer_[ds]->SetCompressionMode(BamWriter::Uncompressed);
    bam_writer_[ds]->Open(bam_filename_[ds], sam_header, empty_reference_vector);
  }
}
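// ----------------------------------------------------------------------------
// Sketch: the BamTools calls Open() relies on, reduced to a minimal standalone
// function. This assumes the BamTools API as used above (SamHeader/SamProgram/
// SamReadGroup, and BamWriter::Open with an empty reference vector for an
// unmapped BAM); WriteUnmappedBamHeader() and every value in it are made up
// for illustration, not part of the codebase.
#include <string>
#include <api/BamWriter.h>
#include <api/SamHeader.h>
#include <api/SamProgram.h>
#include <api/SamReadGroup.h>
using namespace BamTools;

bool WriteUnmappedBamHeader(const std::string& filename)
{
  SamHeader header;
  header.Version   = "1.4";
  header.SortOrder = "unsorted";

  SamProgram program("bc");                        // program record, as above
  program.Name = "BaseCaller";
  header.Programs.Add(program);

  SamReadGroup read_group("example-read-group");   // hypothetical read group id
  read_group.FlowOrder            = "TACGTACGTCTGAGCATCGATCGATGTACAGC";
  read_group.SequencingTechnology = "IONTORRENT";
  header.ReadGroups.Add(read_group);

  BamWriter writer;
  writer.SetCompressionMode(BamWriter::Compressed);
  RefVector no_references;                         // unmapped reads carry no reference dictionary
  return writer.Open(filename, header, no_references);
}
// ----------------------------------------------------------------------------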
void IncrementFlows(const ion::FlowOrder& flow_order, const char& nuc, vector<int>& flows)
{
  for (unsigned int idx = 1; idx < flows.size(); idx++)
    while (flows[idx] < flow_order.num_flows() and flow_order.nuc_at(flows[idx]) != nuc)
      flows[idx]++;
}
void IncrementFlow(const ion::FlowOrder& flow_order, const char& nuc, int& flow)
{
  while (flow < flow_order.num_flows() and flow_order.nuc_at(flow) != nuc)
    flow++;
}
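// ----------------------------------------------------------------------------
// Sketch: the IncrementFlow() semantics above, standalone. A plain std::string
// stands in for ion::FlowOrder and ToyIncrementFlow() is a hypothetical helper
// for illustration only. Note that IncrementFlows() applies the same advance to
// every entry except flows[0], which its loop (starting at idx 1) leaves alone.
#include <iostream>
#include <string>

static void ToyIncrementFlow(const std::string& flow_order, char nuc, int& flow)
{
  // Advance to the first flow at or after `flow` that incorporates `nuc`
  while (flow < (int)flow_order.size() and flow_order[flow] != nuc)
    flow++;
}

int main()
{
  const std::string flow_order = "TACGTACG";
  int flow = 0;
  ToyIncrementFlow(flow_order, 'G', flow);
  std::cout << flow << std::endl;   // prints 3: the first 'G' flow at or after flow 0
  return 0;
}
// ----------------------------------------------------------------------------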
void PhaseEstimator::DoPhaseEstimation(RawWells *wells, Mask *mask, const ion::FlowOrder& flow_order,
                                       const vector<KeySequence>& keys, int region_size_x, int region_size_y,
                                       bool use_single_core)
{
  flow_order_.SetFlowOrder(flow_order.str(), min(flow_order.num_flows(), 120));
  keys_ = keys;
  chip_size_x_ = mask->W();
  chip_size_y_ = mask->H();
  region_size_x_ = region_size_x;
  region_size_y_ = region_size_y;

  printf("Phase estimation mode = %s\n", phasing_estimator_.c_str());

  if (phasing_estimator_ == "override") {
    // Nothing to do!

  } else if (phasing_estimator_ == "spatial-refiner") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();
    SpatialRefiner(wells, mask, num_workers);

  } else if (phasing_estimator_ == "spatial-refiner-2") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();

    train_subset_count_ = 2;
    train_subset_cf_.resize(train_subset_count_);
    train_subset_ie_.resize(train_subset_count_);
    train_subset_dr_.resize(train_subset_count_);
    train_subset_regions_x_.resize(train_subset_count_);
    train_subset_regions_y_.resize(train_subset_count_);

    for (train_subset_ = 0; train_subset_ < train_subset_count_; ++train_subset_) {
      SpatialRefiner(wells, mask, num_workers);
      train_subset_cf_[train_subset_] = result_cf_;
      train_subset_ie_[train_subset_] = result_ie_;
      train_subset_dr_[train_subset_] = result_dr_;
      train_subset_regions_x_[train_subset_] = result_regions_x_;
      train_subset_regions_y_[train_subset_] = result_regions_y_;
    }

  } else
    ION_ABORT("Requested phase estimator is not recognized");

  // Compute mean cf, ie, dr
  average_cf_ = 0;
  average_ie_ = 0;
  average_dr_ = 0;
  int count = 0;
  for (int r = 0; r < result_regions_x_*result_regions_y_; r++) {
    if (result_cf_[r] || result_ie_[r] || result_dr_[r]) {
      average_cf_ += result_cf_[r];
      average_ie_ += result_ie_[r];
      average_dr_ += result_dr_[r];
      count++;
    }
  }
  if (count > 0) {
    average_cf_ /= count;
    average_ie_ /= count;
    average_dr_ /= count;
  }
}
void OrderedDatasetWriter::Open(const string& base_directory, BarcodeDatasets& datasets, int read_class_idx,
                                int num_regions, const ion::FlowOrder& flow_order, const string& key,
                                const vector<string>& bead_adapters, int num_bamwriter_threads,
                                const Json::Value& basecaller_json, vector<string>& comments,
                                MolecularTagTrimmer& tag_trimmer, bool trim_barcodes)
{
  num_regions_ = num_regions;
  num_regions_written_ = 0;
  region_ready_.assign(num_regions_+1, false);
  region_dropbox_.clear();
  region_dropbox_.resize(num_regions_);

  qv_histogram_.assign(50, 0);

  num_datasets_ = datasets.num_datasets();
  num_barcodes_ = datasets.num_barcodes();
  num_read_groups_ = datasets.num_read_groups();
  num_reads_.resize(num_datasets_, 0);
  bam_filename_.resize(num_datasets_);

  // A negative read group index indicates untrimmed/unfiltered bam files (w. library key) and we save all reads
  if (read_class_idx < 0) {
    save_filtered_reads_ = true;
    read_class_idx = 0;
  } else
    save_filtered_reads_ = false;

  read_group_name_.resize(num_read_groups_);
  read_group_dataset_.assign(num_read_groups_, -1);
  read_group_num_Q20_bases_.assign(num_read_groups_, 0);
  read_group_barcode_filt_zero_err_.assign(num_read_groups_, 0);
  read_group_barcode_adapter_rejected_.assign(num_read_groups_, 0);
  read_group_num_barcode_errors_.resize(num_read_groups_);
  read_group_barcode_distance_hist_.resize(num_read_groups_);
  read_group_barcode_bias_.resize(num_read_groups_);

  for (int rg = 0; rg < num_read_groups_; ++rg) {
    read_group_name_[rg] = datasets.read_group_name(rg);
    read_group_num_barcode_errors_[rg].assign(3, 0);
    read_group_barcode_bias_[rg].assign(datasets.GetBCmaxFlows(), 0.0);
    read_group_barcode_distance_hist_[rg].assign(5, 0);
  }

  // New filtering and trimming accounting (per read group)
  read_group_stats_.resize(num_read_groups_);
  for (int rg = 0; rg < num_read_groups_; rg++)
    read_group_stats_[rg].SetBeadAdapters(bead_adapters);
  combined_stats_.SetBeadAdapters(bead_adapters);

  bam_writer_.resize(num_datasets_, NULL);
  sam_header_.resize(num_datasets_);
  num_bamwriter_threads_ = num_bamwriter_threads;

  for (int ds = 0; ds < num_datasets_; ++ds) {

    // Set up BAM header
    bam_filename_[ds] = base_directory + "/" + datasets.dataset(ds)["basecaller_bam"].asString();

    SamHeader& sam_header = sam_header_[ds];
    sam_header.Version = "1.4";
    sam_header.SortOrder = "unsorted";

    SamProgram sam_program("bc");
    sam_program.Name = "BaseCaller";
    sam_program.Version = basecaller_json["BaseCaller"]["version"].asString() + "/" +
                          basecaller_json["BaseCaller"]["git_hash"].asString();
    sam_program.CommandLine = basecaller_json["BaseCaller"]["command_line"].asString();
    sam_header.Programs.Add(sam_program);

    for (Json::Value::iterator rg = datasets.dataset(ds)["read_groups"].begin(); rg != datasets.dataset(ds)["read_groups"].end(); ++rg) {
      string read_group_name = (*rg).asString();
      Json::Value& read_group_json = datasets.read_groups()[read_group_name];

      read_group_dataset_[datasets.read_group_name_to_id(read_group_name)] = ds;

      SamReadGroup read_group (read_group_name);
      read_group.FlowOrder = flow_order.full_nucs();

      read_group.KeySequence = key;
      if (trim_barcodes) {
        // We only add the barcode info to the key sequence if we hard clipped it
        read_group.KeySequence += read_group_json.get("barcode_sequence","").asString();
        read_group.KeySequence += read_group_json.get("barcode_adapter","").asString();
      }

      read_group.ProductionDate = basecaller_json["BaseCaller"]["start_time"].asString();
      read_group.Sample = read_group_json.get("sample","").asString();
      read_group.Library = read_group_json.get("library","").asString();
      read_group.Description = read_group_json.get("description","").asString();
      read_group.PlatformUnit = read_group_json.get("platform_unit","").asString();
      read_group.SequencingCenter = datasets.json().get("sequencing_center","").asString();
      read_group.SequencingTechnology = "IONTORRENT";

      // Add custom tags: Structure of tags per read group XXX
      if (datasets.IsLibraryDataset()) {
        MolTag my_tags = tag_trimmer.GetReadGroupTags(read_group_name);
        AddCustomReadGroupTag(read_group, "zt", my_tags.prefix_mol_tag);
        AddCustomReadGroupTag(read_group, "yt", my_tags.suffix_mol_tag);
      }

      sam_header.ReadGroups.Add(read_group);
    }

    for (size_t i = 0; i < comments.size(); ++i)
      sam_header.Comments.push_back(comments[i]);
  }
}
void PhaseEstimator::DoPhaseEstimation(RawWells *wells, Mask *mask, const ion::FlowOrder& flow_order,
                                       const vector<KeySequence>& keys, bool use_single_core)
{
  // We only load / process what is necessary
  flow_order_.SetFlowOrder(flow_order.str(), min(flow_order.num_flows(), phasing_end_flow_+20));
  keys_ = keys;

  // Do we have enough flows to do phase estimation?
  // Check and, if necessary, adjust the flow interval used for estimation.
  if (not have_phase_estimates_) {

    if (flow_order_.num_flows() < 50) {
      phasing_estimator_ = "override";
      cout << "PhaseEstimator WARNING: Not enough flows to estimate phase; using default values." << endl;

    } else {
      // Make sure we have at least 30 flows to estimate over
      if (phasing_end_flow_ - phasing_start_flow_ < 30) {
        phasing_end_flow_   = min(phasing_start_flow_+30, flow_order_.num_flows());
        phasing_start_flow_ = phasing_end_flow_ - 30; // We are guaranteed to have at least 50 flows
        cout << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
        cerr << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
      }
      // Check boundaries of estimation window and adjust if necessary;
      // try to keep the estimation window size if possible, but don't start before flow 20
      if (phasing_end_flow_ > flow_order_.num_flows()) {
        phasing_start_flow_ = max(20, (phasing_start_flow_ - phasing_end_flow_ + flow_order_.num_flows()));
        phasing_end_flow_   = flow_order_.num_flows();
        cout << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
        cerr << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
      }
    }
  }

  // ------------------------------------

  if (phasing_estimator_ == "override") {
    if (not have_phase_estimates_)
      SetPhaseParameters(init_cf_, init_ie_, init_dr_);

  } else if (phasing_estimator_ == "spatial-refiner") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();
    SpatialRefiner(wells, mask, num_workers);

  } else if (phasing_estimator_ == "spatial-refiner-2") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();

    train_subset_count_ = 2;
    train_subset_cf_.resize(train_subset_count_);
    train_subset_ie_.resize(train_subset_count_);
    train_subset_dr_.resize(train_subset_count_);
    train_subset_regions_x_.resize(train_subset_count_);
    train_subset_regions_y_.resize(train_subset_count_);

    for (train_subset_ = 0; train_subset_ < train_subset_count_; ++train_subset_) {
      SpatialRefiner(wells, mask, num_workers);
      train_subset_cf_[train_subset_] = result_cf_;
      train_subset_ie_[train_subset_] = result_ie_;
      train_subset_dr_[train_subset_] = result_dr_;
      train_subset_regions_x_[train_subset_] = result_regions_x_;
      train_subset_regions_y_[train_subset_] = result_regions_y_;
    }

  } else
    ION_ABORT("Requested phase estimator is not recognized");

  // Compute mean cf, ie, dr
  average_cf_ = 0;
  average_ie_ = 0;
  average_dr_ = 0;
  int count = 0;
  for (int r = 0; r < result_regions_x_*result_regions_y_; r++) {
    if (result_cf_.at(r) || result_ie_.at(r) || result_dr_.at(r)) {
      average_cf_ += result_cf_[r];
      average_ie_ += result_ie_[r];
      average_dr_ += result_dr_[r];
      count++;
    }
  }
  if (count > 0) {
    average_cf_ /= count;
    average_ie_ /= count;
    average_dr_ /= count;
  }

  have_phase_estimates_ = true;
}
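// ----------------------------------------------------------------------------
// Sketch: the per-region averaging step that closes both DoPhaseEstimation()
// overloads, standalone with toy data for illustration only. Regions where
// cf, ie and dr are all zero are treated as "no estimate" and excluded from
// the mean.
#include <iostream>
#include <vector>

int main()
{
  std::vector<float> cf = {0.010f, 0.0f, 0.020f};  // region 1 produced no estimate
  std::vector<float> ie = {0.005f, 0.0f, 0.007f};
  std::vector<float> dr = {0.001f, 0.0f, 0.002f};

  float average_cf = 0, average_ie = 0, average_dr = 0;
  int count = 0;
  for (size_t r = 0; r < cf.size(); ++r) {
    if (cf[r] || ie[r] || dr[r]) {   // skip regions with no estimate at all
      average_cf += cf[r];
      average_ie += ie[r];
      average_dr += dr[r];
      count++;
    }
  }
  if (count > 0) {
    average_cf /= count;
    average_ie /= count;
    average_dr /= count;
  }
  std::cout << average_cf << " " << average_ie << " " << average_dr << std::endl;  // 0.015 0.006 0.0015
  return 0;
}
// ----------------------------------------------------------------------------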
void CalculateHypDistances(const vector<float>& NormalizedMeasurements,
                           const float& cf, const float& ie, const float& droop,
                           const ion::FlowOrder& flow_order, const vector<string>& Hypotheses,
                           const int& startFlow,
                           vector<float>& DistanceObserved, vector<float>& DistanceHypotheses,
                           vector<vector<float> >& predictions, vector<vector<float> >& normalizedMeasurements,
                           int applyNormalization, int verbose)
{
  // Create return data structures
  // Distance of normalized observations to different hypotheses: d(obs,h1), ... , d(obs,hN)
  DistanceObserved.assign(Hypotheses.size(), 0);
  // Distance of hypotheses to first hypothesis: d(h1,h2), ... , d(h1, hN)
  DistanceHypotheses.assign(Hypotheses.size()-1, 0);
  predictions.resize(Hypotheses.size());
  normalizedMeasurements.resize(Hypotheses.size());

  // Loading key normalized values into a read and performing adaptive normalization
  BasecallerRead read;
  read.key_normalizer = 1;
  read.raw_measurements = NormalizedMeasurements;
  read.normalized_measurements = NormalizedMeasurements;
  read.sequence.clear();
  read.sequence.reserve(2*flow_order.num_flows());
  read.prediction.assign(flow_order.num_flows(), 0);
  read.additive_correction.assign(flow_order.num_flows(), 0);
  read.multiplicative_correction.assign(flow_order.num_flows(), 1.0);

  int steps, window_size = 50;
  DPTreephaser dpTreephaser(flow_order);
  dpTreephaser.SetModelParameters(cf, ie, droop);

  // Solve beginning of maybe clipped read
  if (startFlow > 0)
    dpTreephaser.Solve(read, (startFlow+20), 0);

  // StartFlow clipped? Get solved HP length at startFlow
  unsigned int base = 0;
  int flow = 0;
  int HPlength = 0;
  while (base < read.sequence.size()) {
    while (flow < flow_order.num_flows() and flow_order.nuc_at(flow) != read.sequence[base])
      flow++;
    if (flow > startFlow or flow == flow_order.num_flows())
      break;
    if (flow == startFlow)
      HPlength++;
    base++;
  }
  if (verbose > 0)
    Rprintf("Solved %d bases until (not incl.) flow %d. HP of height %d at flow %d.\n", (int)base, flow, HPlength, startFlow);

  // Get HP size at the start of the reference, i.e., Hypotheses[0]
  unsigned int count = 1;
  while (count < Hypotheses[0].length() and Hypotheses[0][count] == Hypotheses[0][0])
    count++;
  if (verbose > 0)
    Rprintf("Hypothesis starts with an HP of length %d\n", (int)count);

  // Adjust the length of the prefix and erase extra solved bases
  if (HPlength > (int)count)
    base -= count;
  else
    base -= HPlength;
  read.sequence.erase(read.sequence.begin()+base, read.sequence.end());
  unsigned int prefix_size = read.sequence.size();

  // Creating predictions for the individual hypotheses
  vector<BasecallerRead> hypothesesReads(Hypotheses.size());
  int max_last_flow = 0;

  for (unsigned int r=0; r<hypothesesReads.size(); ++r) {

    hypothesesReads[r] = read;

    // Add hypothesis sequence to prefix
    for (base=0; base<Hypotheses[r].length() and base<(2*(unsigned int)flow_order.num_flows()-prefix_size); base++)
      hypothesesReads[r].sequence.push_back(Hypotheses[r][base]);

    // Get last main incorporating flow
    int last_incorporating_flow = 0;
    base = 0;
    flow = 0;
    while (base<hypothesesReads[r].sequence.size() and flow<flow_order.num_flows()) {
      while (flow < flow_order.num_flows() and flow_order.nuc_at(flow) != hypothesesReads[r].sequence[base])
        flow++;
      last_incorporating_flow = flow;
      if (last_incorporating_flow > max_last_flow)
        max_last_flow = last_incorporating_flow;
      base++;
    }

    // Simulate sequence
    dpTreephaser.Simulate(hypothesesReads[r], flow_order.num_flows());

    // Adaptively normalize each hypothesis
    if (applyNormalization > 0) {
      steps = last_incorporating_flow / window_size;
      dpTreephaser.WindowedNormalize(hypothesesReads[r], steps, window_size);
    }

    // Solver simulates beginning of the read and then fills in the remaining clipped bases
    dpTreephaser.Solve(hypothesesReads[r], flow_order.num_flows(), last_incorporating_flow);

    // Store predictions and adaptively normalized measurements
    predictions[r] = hypothesesReads[r].prediction;
    normalizedMeasurements[r] = hypothesesReads[r].normalized_measurements;
  }

  // --- Calculating distances ---
  // Include only flow values in the distance where the predictions differ by more than "threshold";
  // do not include flows after the last main incorporating flow of the latest hypothesis
  float threshold = 0.05;

  for (int flow=0; flow<(max_last_flow+1); ++flow) {
    bool includeFlow = false;
    for (unsigned int hyp=1; hyp<hypothesesReads.size(); ++hyp)
      if (fabs(hypothesesReads[hyp].prediction[flow] - hypothesesReads[0].prediction[flow]) > threshold)
        includeFlow = true;

    if (includeFlow) {
      for (unsigned int hyp=0; hyp<hypothesesReads.size(); ++hyp) {
        float residual = hypothesesReads[hyp].normalized_measurements[flow] - hypothesesReads[hyp].prediction[flow];
        DistanceObserved[hyp] += residual * residual;
        if (hyp > 0) {
          residual = hypothesesReads[0].prediction[flow] - hypothesesReads[hyp].prediction[flow];
          DistanceHypotheses[hyp-1] += residual * residual;
        }
      }
    }
  }

  // --- verbose ---
  if (verbose > 0) {
    Rprintf("Calculating distances between %d hypotheses starting at flow %d:\n", (int)Hypotheses.size(), startFlow);
    for (unsigned int i=0; i<Hypotheses.size(); ++i) {
      for (unsigned int j=0; j<Hypotheses[i].length(); ++j)
        Rprintf("%c", Hypotheses[i][j]);
      Rprintf("\n");
    }
    Rprintf("Solved read prefix: ");
    for (unsigned int j=0; j<prefix_size; ++j)
      Rprintf("%c", read.sequence[j]);
    Rprintf("\n");
    Rprintf("Extended Hypotheses reads to:\n");
    for (unsigned int i=0; i<hypothesesReads.size(); ++i) {
      for (unsigned int j=0; j<hypothesesReads[i].sequence.size(); ++j)
        Rprintf("%c", hypothesesReads[i].sequence[j]);
      Rprintf("\n");
    }
    Rprintf("Calculated Distances d2(obs, H_i), d2(H_i, H_0):\n");
    Rprintf("%f, 0\n", DistanceObserved[0]);
    for (unsigned int i=1; i<Hypotheses.size(); ++i)
      Rprintf("%f, %f\n", DistanceObserved[i], DistanceHypotheses[i-1]);
  }
}
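// ----------------------------------------------------------------------------
// Sketch: the distance accumulation at the heart of CalculateHypDistances(),
// standalone with toy numbers for illustration only. A flow contributes only
// if some alternative hypothesis deviates from the reference prediction by
// more than `threshold`; on included flows each hypothesis accumulates its
// squared residual against the measurement.
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
  // predictions[hyp][flow]; hypothesis 0 is the reference
  std::vector<std::vector<float> > predictions = { {1.0f, 1.0f, 2.0f},
                                                   {1.0f, 1.0f, 1.0f} };
  std::vector<float> measured = {1.1f, 0.9f, 1.6f};
  const float threshold = 0.05f;

  std::vector<float> distance_observed(predictions.size(), 0.0f);
  for (size_t flow = 0; flow < measured.size(); ++flow) {
    bool include_flow = false;
    for (size_t hyp = 1; hyp < predictions.size(); ++hyp)
      if (std::fabs(predictions[hyp][flow] - predictions[0][flow]) > threshold)
        include_flow = true;
    if (include_flow) {
      for (size_t hyp = 0; hyp < predictions.size(); ++hyp) {
        float residual = measured[flow] - predictions[hyp][flow];
        distance_observed[hyp] += residual * residual;
      }
    }
  }
  // Only flow 2 differs between hypotheses (2.0 vs 1.0):
  // d(obs,H0) = (1.6-2.0)^2 = 0.16, d(obs,H1) = (1.6-1.0)^2 = 0.36
  std::cout << distance_observed[0] << " " << distance_observed[1] << std::endl;
  return 0;
}
// ----------------------------------------------------------------------------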