void InputStation::GetTimeSeriesData(time_t time, string type, int *nRow, float **data)
{
  Measurement *m = m_measurement[type];
  *nRow = m->NumberOfSites();
  //cout << type << "\t" << *nRow << endl;
  *data = m->GetSiteDataByTime(time);
}
double do_rand_gen(int size_input, mpz_t *vec, mpz_t prime)
{
  Measurement m;
  m.begin_with_init();
  crypto->get_random_vec_priv(size_input, vec, prime);
  m.end();
  return m.get_ru_elapsed_time();
}
double do_decrypt(int size_input, mpz_t *plain, mpz_t *cipher, int prime_size, mpz_t prime)
{
  Measurement m;
  m.begin_with_init();
  for (int i = 0; i < size_input; i++)
    crypto->elgamal_dec(plain[i], cipher[2*i], cipher[2*i+1]);
  m.end();
  return m.get_ru_elapsed_time();
}
Measurement::Measurement(QObject *parent) : QObject(parent)
{
  m_callback = vtkCallbackCommand::New();
  m_callback->SetClientData(this);
  m_callback->SetCallback([](vtkObject *caller, unsigned long eid, void *clientdata, void *calldata) {
    Measurement *self = static_cast<Measurement*>(clientdata);
    self->update();
  });
}
// ======== cost of ciphertext operations ========
double do_encrypt(int size_input, mpz_t *plain, mpz_t *cipher, int prime_size, mpz_t prime)
{
  Measurement m;
  crypto->get_random_vec_priv(size_input, plain, prime);
  m.begin_with_init();
  for (int i = 0; i < size_input; i++)
    crypto->elgamal_enc(cipher[2*i], cipher[2*i+1], plain[i]);
  m.end();
  return m.get_ru_elapsed_time();
}
double do_regular_ciphermul(int size_input, mpz_t *plain, mpz_t *cipher, int prime_size, mpz_t prime)
{
  mpz_t out1, out2;
  alloc_init_scalar(out1);
  alloc_init_scalar(out2);

  Measurement m;
  m.begin_with_init();
  crypto->dot_product_regular_enc(size_input, cipher, plain, out1, out2);
  m.end();

  clear_scalar(out1);
  clear_scalar(out2);
  return m.get_ru_elapsed_time();
}
void Entity::init(int id, Measurement &meas)
{
  occluded_dead_age_ = 10;
  id_ = id;
  age_ = 0;
  tracker_.set_measurement(meas.position());
}
double do_div(int size_input, mpz_t *vec1, mpz_t *vec2, mpz_t *vec3, int operand_size1, int operand_size2, mpz_t prime)
{
  Measurement m;
  crypto->get_random_vec_priv(size_input, vec1, operand_size1);
  crypto->get_random_vec_priv(size_input, vec2, operand_size2);
  m.begin_with_init();
  for (int i = 0; i < size_input; i++) {
    // vec3[i] = vec1[i] / vec2[i] mod prime, via the modular inverse of vec2[i]
    mpz_invert(vec3[i], vec2[i], prime);
    mpz_mul(vec3[i], vec3[i], vec1[i]);
    mpz_mod(vec3[i], vec3[i], prime);
  }
  m.end();
  return m.get_ru_elapsed_time();
}
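// Aside: the division above relies on the identity a / b == a * b^{-1} (mod p).
// A minimal, self-contained GMP sketch of that identity with small hypothetical
// constants (illustrative only; not part of the benchmark above):
#include <gmp.h>

void modular_division_example()
{
  mpz_t a, b, inv, p, q;
  mpz_inits(a, b, inv, p, q, NULL);
  mpz_set_ui(a, 10);
  mpz_set_ui(b, 4);
  mpz_set_ui(p, 13);
  if (mpz_invert(inv, b, p)) {  // inv = 4^{-1} mod 13 = 10
    mpz_mul(q, a, inv);         // q = 10 * 10 = 100
    mpz_mod(q, q, p);           // q = 100 mod 13 = 9, and indeed 9 * 4 == 36 == 10 (mod 13)
    gmp_printf("10 / 4 mod 13 = %Zd\n", q);
  }
  mpz_clears(a, b, inv, p, q, NULL);
}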
void G2ODriver::AddEdge(int edgeId, Vertex* point, Vertex* keyFrame, const Measurement& meas)
{
  Edge* e = nullptr;

  switch ( meas.GetType() )
  {
    case Measurement::LEFT:   // Left-only measurements
      e = BuildMonoEdge( {meas.GetKeypoints()[0].pt.x, meas.GetKeypoints()[0].pt.y} );
      break;
    case Measurement::STEREO: // Stereo measurements (x1, y1, x2); as the cameras are rectified, the left and right measurements share the same y-axis
      e = BuildStereoEdge( {meas.GetKeypoints()[0].pt.x, meas.GetKeypoints()[0].pt.y, meas.GetKeypoints()[1].pt.x} );
      break;
    case Measurement::RIGHT:  // Right-only measurements
      e = BuildMonoEdgeRight( {meas.GetKeypoints()[0].pt.x, meas.GetKeypoints()[0].pt.y} );
      break;
    default:
      // TODO throw an exception (tfischer)
      assert( false );
      break;
  }

  e->vertices()[0] = point;
  e->vertices()[1] = keyFrame;

  g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
  e->setRobustKernel(rk);
  rk->setDelta(robust_cost_function_delta_);

  e->setId( edgeId );
  optimizer_.addEdge( e );
}
void MultiSolver::solveWithPlanes(SolverInput *input, int currentIdx)
{
  if ((size_t)currentIdx == input->measurements.size()) {
    inputList.push_back(MultiSolverInput(input)); // end condition
    return;
  }

  Measurement *measurement = input->measurements[currentIdx];
  measurement->resetIterator();
  while (measurement->nextVBeacon()) {
    // check that the measured distance is long enough to have reflected off the corresponding wall
    if (!measurement->isValidDistance())
      continue;

    // check optimization
    if (condition.cutBranch1) {
      if (checkCutBranch1(input, currentIdx)) {
        continue;
      }
    }

    // optimization 2 is not added
#if 0
    if (condition.cutBranch2) {
      if (checkBranchCutoffCylinder(input, currentIdx)) {
        if (condition.gatherData)
          input->setInvalidButGather(1);
        else
          continue;
      }
    }
#endif

    solveWithPlanes(input, currentIdx + 1);
  }
}
// ======== cost of plaintext operations ========
double do_field_mult(int size_input, mpz_t *vec1, mpz_t *vec2, mpz_t *vec3, mpz_t prime)
{
  Measurement m;
  crypto->get_random_vec_priv(size_input, vec1, prime);
  crypto->get_random_vec_priv(size_input, vec2, prime);
  m.begin_with_init();
  // time one field-multiplication pass and one field-addition pass over the vectors
  for (int i = 0; i < size_input; i++) {
    mpz_mul(vec3[i], vec1[i], vec2[i]);
    mpz_mod(vec3[i], vec3[i], prime);
  }
  for (int i = 0; i < size_input; i++) {
    mpz_add(vec3[i], vec1[i], vec2[i]);
    mpz_mod(vec3[i], vec3[i], prime);
  }
  m.end();
  return m.get_ru_elapsed_time();
}
Measurement* getMeasurement(bhep::hit& hit)
{
  EVector hit_pos(2, 0);
  EVector meas_pos(3, 0);
  meas_pos[0] = hit.x()[0];
  meas_pos[1] = hit.x()[1];
  meas_pos[2] = hit.x()[2];
  hit_pos[0] = meas_pos[0];
  hit_pos[1] = meas_pos[1];

  // covariance and measurement type are hardwired for now
  EMatrix cov(2, 2, 0);
  cov[0][0] = 1.;
  cov[1][1] = 1.;
  string meastype = "xy";

  Measurement* me = new Measurement();
  me->set_name(meastype);
  me->set_hv(HyperVector(hit_pos, cov));
  me->set_name("volume", "Detector");
  me->set_position(meas_pos);

  // Add the hit energy deposit as a key to the Measurement.
  const dict::Key Edep = "E_dep";
  const dict::Key EdepVal = hit.data("E_dep");
  me->set_name(Edep, EdepVal);

  return me;
}
double CTRCalibration::ComputeSingleShapeError(CTR* robot, MechanicsBasedKinematics* kinematics, const Measurement& meas, double& max_error_current, bool solveKin)
{
  double rotation[3] = {0};
  double translation[3] = {0};

  double relativeConf[5];
  memcpy(relativeConf, meas.GetConfiguration().data(), sizeof(double) * 5);
  relativeConf[0] *= M_PI / 180.0;
  relativeConf[1] *= M_PI / 180.0;
  relativeConf[3] *= M_PI / 180.0;

  ::std::vector<::Eigen::Vector3d> positionsAlongRobotExp = meas.GetShapeEig();
  ::std::vector<::Eigen::Vector3d> positionsAlongRobotModel;
  ::Eigen::Vector3d error;

  MechanicsBasedKinematics::RelativeToAbsolute(robot, relativeConf, rotation, translation);
  ::std::vector<double> robot_length_parameter = meas.GetArcLength();

  if (solveKin)
    if (!kinematics->ComputeKinematics(rotation, translation))
      return -1.0;

  kinematics->GetRobotShape(robot_length_parameter, positionsAlongRobotModel);

  double sum = 0;
  for (size_t i = 0; i < positionsAlongRobotModel.size(); ++i) {
    error = positionsAlongRobotExp[i] - positionsAlongRobotModel[i];
    if (error.norm() > max_error_current)
      max_error_current = error.norm();
    sum += error.norm() * error.norm();
  }
  return sum / positionsAlongRobotModel.size();

  //error = positionsAlongRobotExp[positionsAlongRobotExp.size() - 1] - positionsAlongRobotModel[positionsAlongRobotModel.size() - 1];
  //max_error_current = error.norm();
  //return max_error_current;
}
void BasicDistributions()
{
  Measurement *basics = new Measurement("basics", "_FSQJets3_2015C_data_13TeV_LowPU");
  basics->IncludeFunction(pt);
  basics->IncludeFunction(cor);
  basics->IncludeFunction(unc);
  basics->IncludeFunction(eta);
  basics->IncludeFunction(rap);
  basics->IncludeFunction(phi);
  basics->IncludeObject(incl);
  basics->IncludeSample(merged_pt35_eta2d1);
  basics->ReadDataset(data);
  basics->CalculateResult(pt_spectrum);
  basics->WriteToFile("./basics_", 1 /* 1 - only results, 2 - results and observables */);
}
void VectorMeas::writeXML(XMLWriter& w)
{
  // Vector
  w.tagStart(w.buildTagName(SWE_PREFIX, ELT_VECTOR));

  // definition attribute
  char defUri[80];
  if (this->Definition != NULL) {
    buildDefUrl(this->Definition, defUri);
    w.tagField(w.buildTagName(NULL, ATT_DEFINITION), defUri);
  }

  // ref frame attribute
  if (this->RefFrame != NULL) {
    buildDefUrl(this->RefFrame, defUri);
    w.tagField(w.buildTagName(NULL, ATT_REFRAME), defUri);
  }
  w.tagEnd(true, false);

  // label
  if (this->Label != NULL)
    w.writeNode(w.buildTagName(SWE_PREFIX, ELT_LABEL), this->Label);

  // coordinates
  for (int i = 0; i < numCoords; i++) {
    Measurement* m = coords[i];
    w.tagStart(w.buildTagName(SWE_PREFIX, ELT_COORDINATE));
    w.tagField(w.buildTagName(NULL, ATT_NAME), m->getName());
    w.tagEnd(true, false);
    m->writeXML(w);
    w.tagClose();
  }

  w.tagClose();
}
bool write(const std::string& filename, const Measurement& msr)
{
  // save image
  {
    std::string filename_image = filename + ".rgbd";
    std::ofstream f_out;
    f_out.open(filename_image.c_str(), std::ios::binary);
    if (f_out.is_open()) {
      tue::serialization::OutputArchive a_out(f_out);
      rgbd::serialize(*msr.image(), a_out);
    } else {
      std::cout << "Could not save to " << filename_image << std::endl;
    }
  }

  // save mask
  {
    std::string filename_mask = filename + ".mask";
    std::ofstream f_out;
    f_out.open(filename_mask.c_str(), std::ios::binary);
    if (f_out.is_open()) {
      tue::serialization::OutputArchive a_out(f_out);
      ed::serialize(msr.imageMask(), a_out);
    } else {
      std::cout << "Could not save to " << filename_mask << std::endl;
    }
  }

  return true;
}
void RtSss::update(Subject* pSubject)
{
  Measurement* meas = static_cast<Measurement*>(pSubject);

  // MEG
  if (!meas->isSingleChannel() && m_bReceiveData) {
    RealTimeMultiSampleArrayNew* pRTMSANew = static_cast<RealTimeMultiSampleArrayNew*>(pSubject);

    if (pRTMSANew->getID() == MSR_ID::MEGMNERTCLIENT_OUTPUT) {
      // Check if buffer initialized
      if (!m_pRtSssBuffer) {
        m_pRtSssBuffer = CircularMatrixBuffer<double>::SPtr(new CircularMatrixBuffer<double>(64, pRTMSANew->getNumChannels(), pRTMSANew->getMultiArraySize()));
        Buffer::SPtr t_buf = m_pRtSssBuffer.staticCast<Buffer>(); // unix fix
        setAcceptorMeasurementBuffer(pRTMSANew->getID(), t_buf);
      }

      // Fiff information
      if (!m_pFiffInfo)
        m_pFiffInfo = pRTMSANew->getFiffInfo();

      MatrixXd t_mat(pRTMSANew->getNumChannels(), pRTMSANew->getMultiArraySize());

      // ToDo: Cast to specific Buffer
      for (unsigned char i = 0; i < pRTMSANew->getMultiArraySize(); ++i)
        t_mat.col(i) = pRTMSANew->getMultiSampleArray()[i];

      getAcceptorMeasurementBuffer(pRTMSANew->getID()).staticCast<CircularMatrixBuffer<double> >()->push(&t_mat);
    }
  }
}
void VectorMeas::addCoordinate(const char* axisID, const char* uom, const char* label, const char* type)
{
  Measurement* m = new Measurement();
  m->setName(axisID);
  m->setAxisID(axisID);
  m->setType((type != NULL) ? type : QUANTITY);
  m->setLabel(label);
  m->setUom(uom);
  this->coords[numCoords++] = m;
}
void PileUpCalculation()
{
  Measurement *pu = new Measurement("pu", "_FSQJets3_2015C_data_13TeV_LowPU");
  pu->IncludeObject(empty);
  pu->IncludeFunction(npv_distrib);
  pu->IncludeSample(plane);
  pu->ReadDataset(data);
  pu->WriteToFile("./pu", 2);
}
static void measurementUpdate(Filter& filter, const Measurement& m)
{
  Scalar weightSum = 0;
  for (auto& p : filter.particles) {
    //~ std::cout << "p.state: " << p.state.transpose() << std::endl;
    p.weight = math::normalProbabilityUnscaled(m.measurement(p.state), m.z, m.R);
    //~ std::cout << "p.weight: " << p.weight << std::endl;
    weightSum += p.weight;
  }

  Scalar scalingFactor = 1.0 / weightSum;
  filter.maxWeight = 0;
  filter.state.setZero();
  for (auto& p : filter.particles) {
    p.weight *= scalingFactor;
    if (p.weight > filter.maxWeight) {
      filter.maxWeight = p.weight;
    }
    filter.state += p.weight * p.state;
  }

  resample(filter);
}
/**
 * For measurement based scaling, we average the scale factors across the different marker pairs used.
 * For each marker pair, the scale factor is computed by dividing the average distance between the pair
 * in the experimental marker data by the distance between the pair on the model.
 */
double ModelScaler::computeMeasurementScaleFactor(const SimTK::State& s, const Model& aModel, const MarkerData& aMarkerData, const Measurement& aMeasurement) const
{
  double scaleFactor = 0;
  cout << "Measurement '" << aMeasurement.getName() << "'" << endl;

  if (aMeasurement.getNumMarkerPairs() == 0)
    return SimTK::NaN;

  for (int i = 0; i < aMeasurement.getNumMarkerPairs(); i++) {
    const MarkerPair& pair = aMeasurement.getMarkerPair(i);
    string name1, name2;
    pair.getMarkerNames(name1, name2);

    double modelLength = takeModelMeasurement(s, aModel, name1, name2, aMeasurement.getName());
    double experimentalLength = takeExperimentalMarkerMeasurement(aMarkerData, name1, name2, aMeasurement.getName());
    if (SimTK::isNaN(modelLength) || SimTK::isNaN(experimentalLength))
      return SimTK::NaN;

    cout << "\tpair " << i << " (" << name1 << ", " << name2 << "): model = " << modelLength << ", experimental = " << experimentalLength << endl;
    scaleFactor += experimentalLength / modelLength;
  }

  scaleFactor /= aMeasurement.getNumMarkerPairs();
  cout << "\toverall scale factor = " << scaleFactor << endl;
  return scaleFactor;
}
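// Aside: a minimal, self-contained sketch of the averaging described in the
// doc comment above, with hypothetical marker-pair lengths (illustrative only;
// the real lengths come from takeModelMeasurement / takeExperimentalMarkerMeasurement):
#include <iostream>
#include <utility>
#include <vector>

void scale_factor_example()
{
  // {experimental length, model length} for each marker pair
  std::vector<std::pair<double, double>> pairs = { {0.42, 0.40}, {0.30, 0.32} };

  double scaleFactor = 0;
  for (const auto& p : pairs)
    scaleFactor += p.first / p.second;  // per-pair ratios: 1.05, then 0.9375
  scaleFactor /= pairs.size();          // average = 0.99375

  std::cout << "overall scale factor = " << scaleFactor << std::endl;
}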
int main(int argc, char** argv)
{
  Timing t;
  int i;

  // TODO: debug should be enabled with a command-line option
  // FILELog::ReportingLevel() = FILELog::FromString("DEBUG4");
  // FILELog::ReportingLevel() = FILELog::FromString("DEBUG1");
  FILELog::ReportingLevel() = FILELog::FromString("INFO");
  FILE_LOG(logDEBUG4) << "starting";
  t.Start();
  try {
    Picoscope6000 *pico = new Picoscope6000();
    Measurement   *meas = new Measurement(pico);
    Channel       *ch[4];

    meas->SetTimebaseInPs(400);
    meas->EnableChannels(true, false, false, false);
    for (i = 0; i < PICOSCOPE_N_CHANNELS; i++) {
      ch[i] = meas->GetChannel(i);
      FILE_LOG(logDEBUG4) << "main - Channel " << (char)('A' + i) << " has index " << ch[i]->GetIndex();
    }

    Args x;
    x.parse_options(argc, argv, meas);
    if (x.IsJustHelp()) {
      return 0;
    }
    if (x.GetFilename() == NULL) {
      // TODO: maybe we want to use just a text file
      throw("You have to provide some filename using '--name <filename>'.\n");
    }

    // meas->SetTimebaseInPs(10000); // TODO: fixme
    for (i = 0; i < PICOSCOPE_N_CHANNELS; i++) {
      ch[i]->SetVoltage(x.GetVoltage());
    }
    // a->SetVoltage(U_100mV);
    // a[0]->SetVoltage(x.GetVoltage());
    // meas->SetLength(GIGA(1));
    meas->SetLength(x.GetLength());
    if (x.GetNTraces() > 1) {
      meas->SetNTraces(x.GetNTraces());
      // TODO: fix trigger
      FILE_LOG(logDEBUG4) << "main - checking for triggered events";
      if (x.IsTriggered()) {
        // find the first enabled channel
        for (i = 0; i < PICOSCOPE_N_CHANNELS && !(ch[i]->IsEnabled()); i++)
          ;
        FILE_LOG(logDEBUG4) << "main - will trigger on channel " << (char)('A' + i);
        meas->SetTrigger(x.GetTrigger(ch[i]));
      }
      meas->AllocateMemoryRapidBlock(MEGA(50));
    } else {
      meas->AllocateMemoryBlock(MEGA(50));
    }
    // std::cerr << "test w5\n";

    // it only makes sense to measure if we decided to use some positive number of samples
    if (x.GetLength() > 0) {
      FILE *f = NULL;
      FILE *fb[4] = {NULL, NULL, NULL, NULL}, *ft[4] = {NULL, NULL, NULL, NULL};
      struct tm *current;
      time_t now;

      time(&now);
      current = localtime(&now);

      for (i = 0; i < PICOSCOPE_N_CHANNELS; i++) {
        if (ch[i]->IsEnabled()) {
          if (x.IsTextOutput()) {
            ft[i] = fopen(x.GetFilenameText(i), "wt");
            if (ft[i] == NULL) {
              throw("Unable to open text file.\n"); // TODO: write filename
            }
          }
          if (x.IsBinaryOutput()) {
            fb[i] = fopen(x.GetFilenameBinary(i), "wb");
            if (fb[i] == NULL) {
              throw("Unable to open binary file.\n"); // TODO: write filename
            }
          }
        }
      }

      /************************************************************/
      double tmp_dbl;
      short  tmp_short;

      pico->Open();
      meas->InitializeSignalGenerator();
      meas->RunBlock();

      /* metadata */
      f = fopen(x.GetFilenameMeta(), "wt");
      if (f == NULL) {
        throw("Unable to open file with metadata.\n");
      }
      fprintf(f, "command: ");
      for (i = 0; i < argc; i++) {
        fprintf(f, " %s", argv[i]);
      }
      fprintf(f, "\n");
      fprintf(f, "timestamp: %d-%02d-%02d %02d:%02d:%02d\n\n",
              current->tm_year + 1900, current->tm_mon + 1, current->tm_mday,
              current->tm_hour, current->tm_min, current->tm_sec);
      fprintf(f, "channels: ");
      for (i = 0; i < PICOSCOPE_N_CHANNELS; i++) {
        if (ch[i]->IsEnabled()) {
          fprintf(f, "%c", 'A' + i);
        }
      }
      fprintf(f, "\n");
      fprintf(f, "length: %ld\n", x.GetLength());
      fprintf(f, "samples: %ld\n", x.GetNTraces());
      // fprintf(f, "unit_x: %.1lf ns | %.1lf ns\n", meas->GetTimebaseInNs(), meas->GetReportedTimebaseInNs());
      tmp_dbl = meas->GetTimebaseInNs();
      fprintf(f, "unit_x: %.1lf ns\n", tmp_dbl);
      fprintf(f, "range_x: %.1lf ns\n", x.GetLength() * tmp_dbl);
      tmp_dbl = x.GetVoltageDouble();
      fprintf(f, "unit_y: %.10le V\n", tmp_dbl * 3.0757874015748e-5); // 1/(127*256) actually, but this might have to be fixed for series 4000
      fprintf(f, "range_y: %g V\n", tmp_dbl);
      if (x.GetNTraces() > 1) {
        if (x.IsTriggered()) {
          fprintf(f, "trigger_ch: %c\n", (char)(meas->GetTrigger()->GetChannel()->GetIndex() + 'A'));
          // fprintf(f, "trigger_xfrac: %g\n", meas->GetTrigger()->GetXFraction());
          // fprintf(f, "trigger_yfrac: %g\n", meas->GetTrigger()->GetYFraction());
          fprintf(f, "trigger_dx: %d (%g %% of %ld)\n", meas->GetLengthBeforeTrigger(),
                  meas->GetLengthBeforeTrigger() * 100.0 / x.GetLength(), x.GetLength());
          tmp_short = meas->GetTrigger()->GetThreshold();
          tmp_dbl   = meas->GetTrigger()->GetYFraction();
          fprintf(f, "trigger_dy: %g V (%d)\n", meas->GetTrigger()->GetThresholdInVolts() /*tmp_dbl*x.GetVoltageDouble()*/, tmp_short);
        }
      }
      // fprintf(f, "unit_x: %.1lf ns | %.1lf ns\n", meas->GetTimebaseInNs(), meas->GetReportedTimebaseInNs());
      fprintf(f, "out_bin: %s\n", x.IsBinaryOutput() ? "yes" : "no");
      fprintf(f, "out_dat: %s\n", x.IsTextOutput() ? "yes" : "no");

      // triggered (TODO: we could also ask for a single triggered event)
      if (x.GetNTraces() > 1) {
        unsigned int run = 0;
        for (run = 0; run < x.GetNRepeats() && !_kbhit(); run++) {
          //FILE_LOG(logINFO) << "Running experiment nr. " << run+1;
          if (run > 0) {
            cerr << "\nRepeat #" << run + 1 << endl;
            meas->RunBlock();
          }
          while (meas->GetNextDataBulk() > 0) {
            for (i = 0; i < PICOSCOPE_N_CHANNELS; i++) {
              if (ch[i]->IsEnabled()) {
                if (x.IsTextOutput()) {
                  meas->WriteDataTxt(ft[i], i); // zero for channel A
                }
                if (x.IsBinaryOutput()) {
                  meas->WriteDataBin(fb[i], i); // zero for channel A
                }
              }
            }
          }
        }
        if (run > 1) {
          fprintf(f, "repeats: %u\n", run);
        }
        // tmp_dbl = meas->GetRatePerSecond();
        // fprintf(f, "\nrate: \n");
        // if (fabs(tmp_dbl) > 1e6) {
        //   fprintf(f, "%.3f MS/s\n", tmp_dbl*1e-6);
        // } else if (fabs(tmp_dbl) > 1e3) {
        //   fprintf(f, "%.3f kS/s\n", tmp_dbl*1e-3);
        // } else {
        //   fprintf(f, "%f S/s\n", tmp_dbl);
        // }
      } else {
        unsigned int run = 0;
        for (run = 0; run < x.GetNRepeats() && !_kbhit(); run++) {
          if (run > 0) {
            cerr << "\nRepeat #" << run + 1 << endl;
            meas->RunBlock();
          }
          while (meas->GetNextData() > 0) {
            for (i = 0; i < PICOSCOPE_N_CHANNELS; i++) {
              if (ch[i]->IsEnabled()) {
                if (x.IsTextOutput()) {
                  meas->WriteDataTxt(ft[i], i); // zero for channel A
                }
                if (x.IsBinaryOutput()) {
                  meas->WriteDataBin(fb[i], i); // zero for channel A
                }
              }
            }
          }
        }
        if (run > 1) {
          fprintf(f, "repeats: %u\n", run);
        }
      }
      fclose(f);
      for (i = 0; i < PICOSCOPE_N_CHANNELS; i++) {
        if (ft[i] != NULL) {
          fclose(ft[i]);
        }
        if (fb[i] != NULL) {
          fclose(fb[i]);
        }
      }
      // apparently this doesn't work for some weird reason
      // meas->RunBlock(); meas->GetNextData();
      // meas->RunBlock(); meas->GetNextData();
      // meas->RunBlock(); meas->GetNextData();
      // meas->RunBlock(); meas->GetNextData();
      pico->Close();
      t.Stop();
      cerr << "Timing: " << t.GetSecondsDouble() << "s\n";
    }

    delete pico; pico = NULL;
    delete meas; meas = NULL;
  }
  catch (Picoscope::PicoscopeException& ex) {
    cerr << "Some picoscope exception:" << endl
         << "Error number " << ex.GetErrorNumber() << ": " << ex.GetErrorMessage() << endl
         << '(' << ex.GetVerboseErrorMessage() << ')' << endl;
    try {
      // pico.Close();
    } catch (...) {}
  }
  catch (Picoscope::PicoscopeUserException& ex) {
    cerr << "Some exception:" << endl << ex.GetErrorMessage() << endl;
  }
  // catch any exceptions
  catch (const char* s) {
    cerr << "Some exception has occurred:\n" << s << endl;
  }
  catch (std::bad_alloc& ba) {
    std::cerr << "Exception: bad_alloc: " << ba.what() << endl;
  }
  catch (const std::exception& exc) {
    // catch anything thrown within the try block that derives from std::exception
    std::cerr << "Exception: " << exc.what() << endl;
  }
  catch (...) {
    cerr << "Some exception has occurred" << endl;
  }
  return 0;
}
void MergingWeightsCalculation()
{
  Measurement *weight = new Measurement("weight", "_FSQJets3_2015C_data_13TeV_LowPU");
  weight->IncludeObject(all_incl);
  weight->IncludeObject(all_mn);
  weight->IncludeFunction(drap);
  weight->IncludeSample(central_pt35);
  weight->IncludeSample(central_no_fwd_pt35_eta2d1);
  weight->IncludeSample(fwd_pt35_eta2d1);
  weight->IncludeSample(merged_pt35_eta2d1);
  weight->ReadDataset(data);
  weight->CalculateResult(forward_incl_pt35_eta2d1_weight);
  weight->CalculateResult(forward_mn_pt35_eta2d1_weight);
  weight->WriteToFile("./weight", 2 /* 1 - only results, 2 - results and observables */);
}
void EfficiencyCalculation()
{
  Measurement *eff = new Measurement("eff", "_Min_Bias_2015C_data_13TeV_LowPU");
  //Measurement *eff = new Measurement("eff", "_Min_Bias_2016H_data_13TeV_LowPU");
  eff->IncludeObject(all_incl);
  eff->IncludeFunction(pt_sublead);
  eff->IncludeFunction(pt_sublead_fwd3);
  eff->IncludeFunction(pt_sublead_fwd1d3);
  eff->IncludeSample(central_trg);
  //eff->IncludeSample(forward2_trg);
  eff->IncludeSample(forward3_trg);
  eff->IncludeSample(unbiased_central);
  //eff->IncludeSample(unbiased_forward2);
  eff->IncludeSample(unbiased_forward3);
  eff->ReadDataset(min_bias_2015);
  eff->CalculateResult(eff_pt_sublead_cntr_trg);
  eff->CalculateResult(eff_pt_sublead_fwd3_cntr_trg);
  eff->CalculateResult(eff_pt_sublead_fwd1d3_cntr_trg);
  //eff->CalculateResult(eff_pt_sublead_forward2_trg);
  eff->CalculateResult(eff_pt_sublead_forward3_trg);
  eff->WriteToFile("./eff", 2 /* 1 - only results, 2 - results and observables */);
}
void Ekf::correct(const Measurement &measurement)
{
  if (getDebug()) {
    *debugStream_ << "---------------------- Ekf::correct ----------------------\n";
    *debugStream_ << "Measurement is:\n";
    *debugStream_ << measurement.measurement_ << "\n";
    *debugStream_ << "Measurement covariance is:\n";
    *debugStream_ << measurement.covariance_ << "\n";
  }

  // We don't want to update everything, so we need to build matrices that only update
  // the measured parts of our state vector

  // First, determine how many state vector values we're updating
  std::vector<size_t> updateIndices;
  for (size_t i = 0; i < measurement.updateVector_.size(); ++i) {
    if (measurement.updateVector_[i]) {
      // Handle nan and inf values in measurements
      if (std::isnan(measurement.measurement_(i)) || std::isnan(measurement.covariance_(i, i))) {
        if (getDebug()) {
          *debugStream_ << "Value at index " << i << " was nan. Excluding from update.\n";
        }
      } else if (std::isinf(measurement.measurement_(i)) || std::isinf(measurement.covariance_(i, i))) {
        if (getDebug()) {
          *debugStream_ << "Value at index " << i << " was inf. Excluding from update.\n";
        }
      } else {
        updateIndices.push_back(i);
      }
    }
  }

  if (getDebug()) {
    *debugStream_ << "Update indices are:\n";
    *debugStream_ << updateIndices << "\n";
  }

  size_t updateSize = updateIndices.size();

  // Now set up the relevant matrices
  Eigen::VectorXd stateSubset(updateSize);                              // x (in most literature)
  Eigen::VectorXd measurementSubset(updateSize);                        // z
  Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize);  // R
  Eigen::MatrixXd stateToMeasurementSubset(updateSize, state_.rows());  // H
  Eigen::MatrixXd kalmanGainSubset(state_.rows(), updateSize);          // K
  Eigen::VectorXd innovationSubset(updateSize);                         // z - Hx

  stateSubset.setZero();
  measurementSubset.setZero();
  measurementCovarianceSubset.setZero();
  stateToMeasurementSubset.setZero();
  kalmanGainSubset.setZero();
  innovationSubset.setZero();

  // Now build the sub-matrices from the full-sized matrices
  for (size_t i = 0; i < updateSize; ++i) {
    measurementSubset(i) = measurement.measurement_(updateIndices[i]);
    stateSubset(i) = state_(updateIndices[i]);

    for (size_t j = 0; j < updateSize; ++j) {
      measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]);
    }

    // Handle negative (read: bad) covariances in the measurement. Rather
    // than exclude the measurement or make up a covariance, just take
    // the absolute value.
    if (measurementCovarianceSubset(i, i) < 0) {
      if (getDebug()) {
        *debugStream_ << "WARNING: Negative covariance for index " << i
                      << " of measurement (value is " << measurementCovarianceSubset(i, i)
                      << "). Using absolute value...\n";
      }
      measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i));
    }

    // If the measurement variance for a given variable is very
    // near 0 (as in e-50 or so) and the variance for that
    // variable in the covariance matrix is also near zero, then
    // the Kalman gain computation will blow up. Really, no
    // measurement can be completely without error, so add a small
    // amount in that case.
    if (measurementCovarianceSubset(i, i) < 1e-6) {
      measurementCovarianceSubset(i, i) = 1e-6;

      if (getDebug()) {
        *debugStream_ << "WARNING: measurement had very small error covariance for index " << i
                      << ". Adding some noise to maintain filter stability.\n";
      }
    }
  }

  // The state-to-measurement function, h, will now be a measurement_size x full_state_size
  // matrix, with ones in the (i, i) locations of the values to be updated
  for (size_t i = 0; i < updateSize; ++i) {
    stateToMeasurementSubset(i, updateIndices[i]) = 1;
  }

  if (getDebug()) {
    *debugStream_ << "Current state subset is:\n";
    *debugStream_ << stateSubset << "\n";
    *debugStream_ << "Measurement subset is:\n";
    *debugStream_ << measurementSubset << "\n";
    *debugStream_ << "Measurement covariance subset is:\n";
    *debugStream_ << measurementCovarianceSubset << "\n";
    *debugStream_ << "State-to-measurement subset is:\n";
    *debugStream_ << stateToMeasurementSubset << "\n";
  }

  // (1) Compute the Kalman gain: K = (PH') / (HPH' + R)
  kalmanGainSubset = estimateErrorCovariance_ * stateToMeasurementSubset.transpose()
      * (stateToMeasurementSubset * estimateErrorCovariance_ * stateToMeasurementSubset.transpose() + measurementCovarianceSubset).inverse();

  // (2) Apply the gain to the difference between the state and measurement: x = x + K(z - Hx)
  innovationSubset = (measurementSubset - stateSubset);

  // Wrap angles in the innovation
  for (size_t i = 0; i < updateSize; ++i) {
    if (updateIndices[i] == StateMemberRoll || updateIndices[i] == StateMemberPitch || updateIndices[i] == StateMemberYaw) {
      if (innovationSubset(i) < -pi_) {
        innovationSubset(i) += tau_;
      } else if (innovationSubset(i) > pi_) {
        innovationSubset(i) -= tau_;
      }
    }
  }

  state_ = state_ + kalmanGainSubset * innovationSubset;

  // (3) Update the estimate error covariance using the Joseph form: (I - KH)P(I - KH)' + KRK'
  Eigen::MatrixXd gainResidual = identity_ - kalmanGainSubset * stateToMeasurementSubset;
  estimateErrorCovariance_ = gainResidual * estimateErrorCovariance_ * gainResidual.transpose()
      + kalmanGainSubset * measurementCovarianceSubset * kalmanGainSubset.transpose();

  // Handle wrapping of angles
  wrapStateAngles();

  if (getDebug()) {
    *debugStream_ << "Kalman gain subset is:\n";
    *debugStream_ << kalmanGainSubset << "\n";
    *debugStream_ << "Innovation:\n";
    *debugStream_ << innovationSubset << "\n\n";
    *debugStream_ << "Corrected full state is:\n";
    *debugStream_ << state_ << "\n";
    *debugStream_ << "Corrected full estimate error covariance is:\n";
    *debugStream_ << estimateErrorCovariance_ << "\n";
    *debugStream_ << "Last measurement time is:\n";
    *debugStream_ << std::setprecision(20) << lastMeasurementTime_ << "\n";
    *debugStream_ << "\n---------------------- /Ekf::correct ----------------------\n";
  }
}
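// Aside: the numbered steps above, stripped of the subsetting and debug
// machinery, reduce to the textbook EKF update equations. A hypothetical
// 2-state, 1-measurement Eigen sketch (illustrative only):
#include <Eigen/Dense>

void ekf_update_example()
{
  Eigen::Vector2d x(0.0, 1.0);                      // state
  Eigen::Matrix2d P = Eigen::Matrix2d::Identity();  // estimate error covariance
  Eigen::RowVector2d H;                             // state-to-measurement matrix
  H << 1.0, 0.0;
  double z = 0.5, R = 0.1;                          // measurement and its variance

  // (1) Kalman gain: K = PH' / (HPH' + R)
  Eigen::Matrix<double, 1, 1> hph = H * P * H.transpose();
  double s = hph(0, 0) + R;                         // innovation covariance (scalar here)
  Eigen::Vector2d K = P * H.transpose() / s;

  // (2) State update: x = x + K(z - Hx)
  Eigen::Matrix<double, 1, 1> hx = H * x;
  x += K * (z - hx(0, 0));

  // (3) Joseph-form covariance update: P = (I - KH)P(I - KH)' + KRK'
  Eigen::Matrix2d G = Eigen::Matrix2d::Identity() - K * H;
  P = G * P * G.transpose() + K * R * K.transpose();
}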
Entity::Entity()
{
  Measurement m;
  m.set_position(Eigen::Vector2d(-1, -1));
  this->init(-1, m);
}
void FilterBase::processMeasurement(const Measurement &measurement)
{
  if (debug_) {
    *debugStream_ << "------ FilterBase::processMeasurement ------\n";
  }

  double delta = 0.0;

  // If we've had a previous reading, then go through the predict/update
  // cycle. Otherwise, set our state and covariance to whatever we get
  // from this measurement.
  if (initialized_) {
    // Determine how much time has passed since our last measurement
    delta = measurement.time_ - lastMeasurementTime_;

    if (debug_) {
      *debugStream_ << "Filter is already initialized. Carrying out predict/correct loop...\n";
      *debugStream_ << "Measurement time is " << std::setprecision(20) << measurement.time_
                    << ", last measurement time is " << lastMeasurementTime_ << ", delta is " << delta << "\n";
    }

    // Only want to carry out a prediction if it's
    // forward in time. Otherwise, just correct.
    if (delta > 0) {
      validateDelta(delta);
      predict(delta);

      // Return this to the user
      predictedState_ = state_;
    }

    correct(measurement);
  } else {
    if (debug_) {
      *debugStream_ << "First measurement. Initializing filter.\n";
    }

    // Initialize the filter, but only with the values we're using
    size_t measurementLength = measurement.updateVector_.size();
    for (size_t i = 0; i < measurementLength; ++i) {
      state_[i] = (measurement.updateVector_[i] ? measurement.measurement_[i] : state_[i]);
    }

    // Same for covariance
    for (size_t i = 0; i < measurementLength; ++i) {
      for (size_t j = 0; j < measurementLength; ++j) {
        estimateErrorCovariance_(i, j) = (measurement.updateVector_[i] && measurement.updateVector_[j] ?
                                          measurement.covariance_(i, j) :
                                          estimateErrorCovariance_(i, j));
      }
    }

    initialized_ = true;
  }

  if (delta >= 0.0) {
    // Update the last measurement and update time.
    // The measurement time is based on the time stamp of the
    // measurement, whereas the update time is based on this
    // node's current ROS time. The update time is used to
    // determine if we have a sensor timeout, whereas the
    // measurement time is used to calculate time deltas for
    // prediction and correction.
    lastMeasurementTime_ = measurement.time_;
  }

  if (debug_) {
    *debugStream_ << "\n----- /FilterBase::processMeasurement ------\n";
  }
}
void Ukf::correct(const Measurement &measurement)
{
  FB_DEBUG("---------------------- Ukf::correct ----------------------\n" <<
           "State is:\n" << state_ <<
           "\nMeasurement is:\n" << measurement.measurement_ <<
           "\nMeasurement covariance is:\n" << measurement.covariance_ << "\n");

  // In our implementation, it may be that after we call predict once, we call correct
  // several times in succession (multiple measurements with different time stamps). In
  // that event, the sigma points need to be updated to reflect the current state.
  if (!uncorrected_) {
    // Take the square root of a small fraction of the estimateErrorCovariance_ using LL' decomposition
    weightedCovarSqrt_ = ((STATE_SIZE + lambda_) * estimateErrorCovariance_).llt().matrixL();

    // Compute sigma points

    // First sigma point is the current state
    sigmaPoints_[0] = state_;

    // Next STATE_SIZE sigma points are state + weightedCovarSqrt_[ith column]
    // STATE_SIZE sigma points after that are state - weightedCovarSqrt_[ith column]
    for (size_t sigmaInd = 0; sigmaInd < STATE_SIZE; ++sigmaInd) {
      sigmaPoints_[sigmaInd + 1] = state_ + weightedCovarSqrt_.col(sigmaInd);
      sigmaPoints_[sigmaInd + 1 + STATE_SIZE] = state_ - weightedCovarSqrt_.col(sigmaInd);
    }
  }

  // We don't want to update everything, so we need to build matrices that only update
  // the measured parts of our state vector

  // First, determine how many state vector values we're updating
  std::vector<size_t> updateIndices;
  for (size_t i = 0; i < measurement.updateVector_.size(); ++i) {
    if (measurement.updateVector_[i]) {
      // Handle nan and inf values in measurements
      if (std::isnan(measurement.measurement_(i))) {
        FB_DEBUG("Value at index " << i << " was nan. Excluding from update.\n");
      } else if (std::isinf(measurement.measurement_(i))) {
        FB_DEBUG("Value at index " << i << " was inf. Excluding from update.\n");
      } else {
        updateIndices.push_back(i);
      }
    }
  }

  FB_DEBUG("Update indices are:\n" << updateIndices << "\n");

  size_t updateSize = updateIndices.size();

  // Now set up the relevant matrices
  Eigen::VectorXd stateSubset(updateSize);                              // x (in most literature)
  Eigen::VectorXd measurementSubset(updateSize);                        // z
  Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize);  // R
  Eigen::MatrixXd stateToMeasurementSubset(updateSize, STATE_SIZE);     // H
  Eigen::MatrixXd kalmanGainSubset(STATE_SIZE, updateSize);             // K
  Eigen::VectorXd innovationSubset(updateSize);                         // z - Hx
  Eigen::VectorXd predictedMeasurement(updateSize);
  Eigen::VectorXd sigmaDiff(updateSize);
  Eigen::MatrixXd predictedMeasCovar(updateSize, updateSize);
  Eigen::MatrixXd crossCovar(STATE_SIZE, updateSize);

  std::vector<Eigen::VectorXd> sigmaPointMeasurements(sigmaPoints_.size(), Eigen::VectorXd(updateSize));

  stateSubset.setZero();
  measurementSubset.setZero();
  measurementCovarianceSubset.setZero();
  stateToMeasurementSubset.setZero();
  kalmanGainSubset.setZero();
  innovationSubset.setZero();
  predictedMeasurement.setZero();
  predictedMeasCovar.setZero();
  crossCovar.setZero();

  // Now build the sub-matrices from the full-sized matrices
  for (size_t i = 0; i < updateSize; ++i) {
    measurementSubset(i) = measurement.measurement_(updateIndices[i]);
    stateSubset(i) = state_(updateIndices[i]);

    for (size_t j = 0; j < updateSize; ++j) {
      measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]);
    }

    // Handle negative (read: bad) covariances in the measurement. Rather
    // than exclude the measurement or make up a covariance, just take
    // the absolute value.
    if (measurementCovarianceSubset(i, i) < 0) {
      FB_DEBUG("WARNING: Negative covariance for index " << i <<
               " of measurement (value is " << measurementCovarianceSubset(i, i) <<
               "). Using absolute value...\n");
      measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i));
    }

    // If the measurement variance for a given variable is very
    // near 0 (as in e-50 or so) and the variance for that
    // variable in the covariance matrix is also near zero, then
    // the Kalman gain computation will blow up. Really, no
    // measurement can be completely without error, so add a small
    // amount in that case.
    if (measurementCovarianceSubset(i, i) < 1e-9) {
      measurementCovarianceSubset(i, i) = 1e-9;
      FB_DEBUG("WARNING: measurement had very small error covariance for index " << updateIndices[i] <<
               ". Adding some noise to maintain filter stability.\n");
    }
  }

  // The state-to-measurement function, h, will now be a measurement_size x full_state_size
  // matrix, with ones in the (i, i) locations of the values to be updated
  for (size_t i = 0; i < updateSize; ++i) {
    stateToMeasurementSubset(i, updateIndices[i]) = 1;
  }

  FB_DEBUG("Current state subset is:\n" << stateSubset <<
           "\nMeasurement subset is:\n" << measurementSubset <<
           "\nMeasurement covariance subset is:\n" << measurementCovarianceSubset <<
           "\nState-to-measurement subset is:\n" << stateToMeasurementSubset << "\n");

  // (1) Generate sigma points, use them to generate a predicted measurement
  for (size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd) {
    sigmaPointMeasurements[sigmaInd] = stateToMeasurementSubset * sigmaPoints_[sigmaInd];
    predictedMeasurement += stateWeights_[sigmaInd] * sigmaPointMeasurements[sigmaInd];
  }

  // (2) Use the sigma point measurements and predicted measurement to compute a predicted
  // measurement covariance matrix P_zz and a state/measurement cross-covariance matrix P_xz.
  for (size_t sigmaInd = 0; sigmaInd < sigmaPoints_.size(); ++sigmaInd) {
    sigmaDiff = sigmaPointMeasurements[sigmaInd] - predictedMeasurement;
    predictedMeasCovar += covarWeights_[sigmaInd] * (sigmaDiff * sigmaDiff.transpose());
    crossCovar += covarWeights_[sigmaInd] * ((sigmaPoints_[sigmaInd] - state_) * sigmaDiff.transpose());
  }

  // (3) Compute the Kalman gain, making sure to use the actual measurement covariance: K = P_xz * (P_zz + R)^-1
  Eigen::MatrixXd invInnovCov = (predictedMeasCovar + measurementCovarianceSubset).inverse();
  kalmanGainSubset = crossCovar * invInnovCov;

  // (4) Apply the gain to the difference between the actual and predicted measurements: x = x + K(z - z_hat)
  innovationSubset = (measurementSubset - predictedMeasurement);

  // (5) Check the Mahalanobis distance of the innovation
  if (checkMahalanobisThreshold(innovationSubset, invInnovCov, measurement.mahalanobisThresh_)) {
    // Wrap angles in the innovation
    for (size_t i = 0; i < updateSize; ++i) {
      if (updateIndices[i] == StateMemberRoll ||
          updateIndices[i] == StateMemberPitch ||
          updateIndices[i] == StateMemberYaw) {
        while (innovationSubset(i) < -PI) {
          innovationSubset(i) += TAU;
        }
        while (innovationSubset(i) > PI) {
          innovationSubset(i) -= TAU;
        }
      }
    }

    state_ = state_ + kalmanGainSubset * innovationSubset;

    // (6) Compute the new estimate error covariance: P = P - (K * P_zz * K')
    estimateErrorCovariance_ = estimateErrorCovariance_.eval() - (kalmanGainSubset * predictedMeasCovar * kalmanGainSubset.transpose());

    wrapStateAngles();

    // Mark that we need to re-compute sigma points for successive corrections
    uncorrected_ = false;

    FB_DEBUG("Predicted measurement covariance is:\n" << predictedMeasCovar <<
             "\nCross covariance is:\n" << crossCovar <<
             "\nKalman gain subset is:\n" << kalmanGainSubset <<
             "\nInnovation:\n" << innovationSubset <<
             "\nCorrected full state is:\n" << state_ <<
             "\nCorrected full estimate error covariance is:\n" << estimateErrorCovariance_ <<
             "\n\n---------------------- /Ukf::correct ----------------------\n");
  }
}
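// Aside: the sigma-point layout used above is one point at the mean, then the
// mean +/- each column of the weighted covariance square root. A hypothetical
// 2-state Eigen sketch (illustrative only; lambda normally comes from the
// alpha/kappa scaling parameters rather than being a fixed constant):
#include <Eigen/Dense>
#include <vector>

void sigma_point_example()
{
  const int n = 2;            // state size
  const double lambda = 1.0;  // hypothetical scaling term
  Eigen::Vector2d state(1.0, 2.0);
  Eigen::Matrix2d P = 0.5 * Eigen::Matrix2d::Identity();

  // LL' square root of (n + lambda) * P, as in Ukf::correct above
  Eigen::Matrix2d S = ((n + lambda) * P).llt().matrixL();

  std::vector<Eigen::Vector2d> sigmaPoints(2 * n + 1);
  sigmaPoints[0] = state;
  for (int i = 0; i < n; ++i) {
    sigmaPoints[i + 1]     = state + S.col(i);
    sigmaPoints[i + 1 + n] = state - S.col(i);
  }
}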
//--------------------------------------------------------------------
RP::Trajectory* KFSvcManager::CreateTrajectory(std::vector<const Hit*> hits, std::vector<double> hitErrors)
//--------------------------------------------------------------------
{
  log4cpp::Category& klog = log4cpp::Category::getRoot();
  klog << log4cpp::Priority::DEBUG << " In CreateTrajectory --> ";
  klog << log4cpp::Priority::DEBUG << "Cov Matrix --> ";

  EVector m(3, 0);
  fCov = EMatrix(3, 3, 0);
  for (size_t i = 0; i < 3; ++i) {
    fCov[i][i] = hitErrors[i];
  }
  klog << log4cpp::Priority::DEBUG << "Cov Matrix --> " << fCov;

  // destroy measurements in fMeas container
  //stc_tools::destroy(fMeas);
  RP::measurement_vector fMeas;

  auto size = hits.size();
  klog << log4cpp::Priority::DEBUG << " size of hits --> " << size;
  klog << log4cpp::Priority::DEBUG << " fill measurement vector --> " << size;

  for (auto hit : hits) {
    m[0] = hit->XYZ().X();
    m[1] = hit->XYZ().Y();
    m[2] = hit->XYZ().Z();
    klog << log4cpp::Priority::DEBUG << "+++hit x = " << m[0] << " y = " << m[1] << " z = " << m[2];

    klog << log4cpp::Priority::DEBUG << "Create a measurement ";
    Measurement* meas = new Measurement();
    string type = RP::xyz;
    meas->set_name(type);                        // the measurement type
    meas->set_hv(HyperVector(m, fCov, type));    // the HyperVector
    meas->set_deposited_energy(hit->Edep());     // the deposited energy
    meas->set_name(RP::setup_volume, "mother");  // the volume

    // the position of the measurement
    klog << log4cpp::Priority::DEBUG << "Create measurement plane ";
    meas->set_position_hv(HyperVector(m, EMatrix(3, 3, 0), RP::xyz));

    // Add the measurement to the vector
    fMeas.push_back(meas);
  }

  // add the measurements to the trajectory
  klog << log4cpp::Priority::INFO << " Measurement vector created ";
  Trajectory* trj = new Trajectory();
  trj->add_constituents(fMeas);
  klog << log4cpp::Priority::INFO << " Trajectory --> " << *trj;
  return trj;
}
void Ekf::correct(const Measurement &measurement)
{
  FB_DEBUG("---------------------- Ekf::correct ----------------------\n" <<
           "State is:\n" << state_ << "\n"
           "Topic is:\n" << measurement.topicName_ << "\n"
           "Measurement is:\n" << measurement.measurement_ << "\n"
           "Measurement topic name is:\n" << measurement.topicName_ << "\n\n"
           "Measurement covariance is:\n" << measurement.covariance_ << "\n");

  // We don't want to update everything, so we need to build matrices that only update
  // the measured parts of our state vector. Throughout prediction and correction, we
  // attempt to maximize efficiency in Eigen.

  // First, determine how many state vector values we're updating
  std::vector<size_t> updateIndices;
  for (size_t i = 0; i < measurement.updateVector_.size(); ++i) {
    if (measurement.updateVector_[i]) {
      // Handle nan and inf values in measurements
      if (std::isnan(measurement.measurement_(i))) {
        FB_DEBUG("Value at index " << i << " was nan. Excluding from update.\n");
      } else if (std::isinf(measurement.measurement_(i))) {
        FB_DEBUG("Value at index " << i << " was inf. Excluding from update.\n");
      } else {
        updateIndices.push_back(i);
      }
    }
  }

  FB_DEBUG("Update indices are:\n" << updateIndices << "\n");

  size_t updateSize = updateIndices.size();

  // Now set up the relevant matrices
  Eigen::VectorXd stateSubset(updateSize);                              // x (in most literature)
  Eigen::VectorXd measurementSubset(updateSize);                        // z
  Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize);  // R
  Eigen::MatrixXd stateToMeasurementSubset(updateSize, state_.rows());  // H
  Eigen::MatrixXd kalmanGainSubset(state_.rows(), updateSize);          // K
  Eigen::VectorXd innovationSubset(updateSize);                         // z - Hx

  stateSubset.setZero();
  measurementSubset.setZero();
  measurementCovarianceSubset.setZero();
  stateToMeasurementSubset.setZero();
  kalmanGainSubset.setZero();
  innovationSubset.setZero();

  // Now build the sub-matrices from the full-sized matrices
  for (size_t i = 0; i < updateSize; ++i) {
    measurementSubset(i) = measurement.measurement_(updateIndices[i]);
    stateSubset(i) = state_(updateIndices[i]);

    for (size_t j = 0; j < updateSize; ++j) {
      measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]);
    }

    // Handle negative (read: bad) covariances in the measurement. Rather
    // than exclude the measurement or make up a covariance, just take
    // the absolute value.
    if (measurementCovarianceSubset(i, i) < 0) {
      FB_DEBUG("WARNING: Negative covariance for index " << i <<
               " of measurement (value is " << measurementCovarianceSubset(i, i) <<
               "). Using absolute value...\n");
      measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i));
    }

    // If the measurement variance for a given variable is very
    // near 0 (as in e-50 or so) and the variance for that
    // variable in the covariance matrix is also near zero, then
    // the Kalman gain computation will blow up. Really, no
    // measurement can be completely without error, so add a small
    // amount in that case.
    if (measurementCovarianceSubset(i, i) < 1e-9) {
      FB_DEBUG("WARNING: measurement had very small error covariance for index " << updateIndices[i] <<
               ". Adding some noise to maintain filter stability.\n");
      measurementCovarianceSubset(i, i) = 1e-9;
    }
  }

  // The state-to-measurement function, h, will now be a measurement_size x full_state_size
  // matrix, with ones in the (i, i) locations of the values to be updated
  for (size_t i = 0; i < updateSize; ++i) {
    stateToMeasurementSubset(i, updateIndices[i]) = 1;
  }

  FB_DEBUG("Current state subset is:\n" << stateSubset <<
           "\nMeasurement subset is:\n" << measurementSubset <<
           "\nMeasurement covariance subset is:\n" << measurementCovarianceSubset <<
           "\nState-to-measurement subset is:\n" << stateToMeasurementSubset << "\n");

  // (1) Compute the Kalman gain: K = (PH') / (HPH' + R)
  Eigen::MatrixXd pht = estimateErrorCovariance_ * stateToMeasurementSubset.transpose();
  Eigen::MatrixXd hphrInv = (stateToMeasurementSubset * pht + measurementCovarianceSubset).inverse();
  kalmanGainSubset.noalias() = pht * hphrInv;

  innovationSubset = (measurementSubset - stateSubset);

  // Wrap angles in the innovation
  for (size_t i = 0; i < updateSize; ++i) {
    if (updateIndices[i] == StateMemberRoll ||
        updateIndices[i] == StateMemberPitch ||
        updateIndices[i] == StateMemberYaw) {
      while (innovationSubset(i) < -PI) {
        innovationSubset(i) += TAU;
      }
      while (innovationSubset(i) > PI) {
        innovationSubset(i) -= TAU;
      }
    }
  }

  // (2) Check Mahalanobis distance between mapped measurement and state.
  if (checkMahalanobisThreshold(innovationSubset, hphrInv, measurement.mahalanobisThresh_)) {
    // (3) Apply the gain to the difference between the state and measurement: x = x + K(z - Hx)
    state_.noalias() += kalmanGainSubset * innovationSubset;

    // (4) Update the estimate error covariance using the Joseph form: (I - KH)P(I - KH)' + KRK'
    Eigen::MatrixXd gainResidual = identity_;
    gainResidual.noalias() -= kalmanGainSubset * stateToMeasurementSubset;
    estimateErrorCovariance_ = gainResidual * estimateErrorCovariance_ * gainResidual.transpose();
    estimateErrorCovariance_.noalias() += kalmanGainSubset * measurementCovarianceSubset * kalmanGainSubset.transpose();

    // Handle wrapping of angles
    wrapStateAngles();

    FB_DEBUG("Kalman gain subset is:\n" << kalmanGainSubset <<
             "\nInnovation is:\n" << innovationSubset <<
             "\nCorrected full state is:\n" << state_ <<
             "\nCorrected full estimate error covariance is:\n" << estimateErrorCovariance_ <<
             "\n\n---------------------- /Ekf::correct ----------------------\n");
  }
}