void run() {
    createAccumulator();
    accumulator()->process(Value(numeric_limits<long long>::max()), false);
    accumulator()->process(Value(numeric_limits<long long>::max()), false);
    accumulator()->process(Value(1.0), false);
    checkSum();
}
void run() {
    createAccumulator();
    accumulator()->process(Value(numeric_limits<double>::quiet_NaN()), false);
    // NaN is unequal to itself.
    ASSERT_NOT_EQUALS(accumulator()->getValue(false).getDouble(),
                      accumulator()->getValue(false).getDouble());
}
void checkAvg(const Value& a, const Value& b) {
    createAccumulator();
    accumulator()->process(a, false);
    accumulator()->process(b, false);
    assertBinaryEqual(expectedResult(),
                      fromDocument(accumulator()->getValue(true).getDocument()));
}
// Returns the summed score (n log n) over the rels of merged i and j;
// basically a union of rels1 and rels2.
// For speed, counts self-self/self-other rels just like any other;
// we correct for them separately.
static Score scoreUnion(const Rels& rels1, const Rels& rels2) {
    Score score = 0;
    Idx size1 = rels1.size();
    Idx size2 = rels2.size();
    if (size1 <= size2 / SMALL_BIG_RATIO) {
        score = scoreUnionSmallBig(rels1, rels2);
    } else if (size2 <= size1 / SMALL_BIG_RATIO) {
        score = scoreUnionSmallBig(rels2, rels1);
    } else {
        ScoreAccumulator accumulator(score);
        applyToRelsUnion<ScoreAccumulator>(rels1, rels2, accumulator);
    }
    // Disabled consistency check: re-score the small/big fast path by brute
    // force and compare. Flip the leading `false` to re-enable it.
    if (false && AssertEnabled &&
        (size1 <= size2 / SMALL_BIG_RATIO || size2 <= size1 / SMALL_BIG_RATIO)) {
        Score scoreBrute = 0;
        ScoreAccumulator accumulator(scoreBrute);
        applyToRelsUnion<ScoreAccumulator>(rels1, rels2, accumulator);
        AssertMsg(scoreEqual(score, scoreBrute),
                  score << " " << scoreBrute << " " << (score - scoreBrute));
    }
    return score;
}
void run() {
    createAccumulator();
    accumulator()->process(Value(5), false);
    accumulator()->process(Value(99), false);
    accumulator()->process(Value(0.2), false);
    checkSum();
}
void run() {
    createAccumulator();
    accumulator()->process(Value(1), false);
    accumulator()->process(Value(2LL), false);
    accumulator()->process(Value(4.0), false);
    // With toBeMerged == true the accumulator returns its partial state
    // (running sum and count) rather than the finished average.
    assertBinaryEqual(BSON("subTotal" << 7.0 << "count" << 3LL),
                      fromDocument(accumulator()->getValue(true).getDocument()));
}
void run() {
    createAccumulator();
    accumulator()->process(Value(numeric_limits<long long>::max()), false);
    accumulator()->process(Value(numeric_limits<long long>::max()), false);
    ASSERT_EQUALS(((double)numeric_limits<long long>::max() +
                   numeric_limits<long long>::max()) / 2.0,
                  accumulator()->getValue(false).getDouble());
}
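The cast in the expected value above is load-bearing: adding two long long maxima in integer arithmetic is signed overflow (undefined behavior), while doing the sum in double merely rounds. A standalone illustration of the same arithmetic, in plain standard C++ independent of the test harness:

#include <cassert>
#include <limits>

int main() {
    const long long m = std::numeric_limits<long long>::max();
    // (m + m) as long long would be signed overflow: undefined behavior.
    // Promoting the left operand to double makes the right one follow, and
    // 2^63 - 1 rounds to exactly 2^63 in double, so the average is 2^63 too.
    const double avg = ((double)m + m) / 2.0;
    assert(avg == (double)m);
    return 0;
}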
lr::lr(const pointcloud_t& data) {
    // std::for_each returns (a copy of) the functor it was given, so these
    // assignments rely on accumulator(...) being implicitly convertible to
    // double: the accumulated sums of x, y, x*x and x*y respectively.
    double X  = std::for_each(data.begin(), data.end(), accumulator(x));
    double Y  = std::for_each(data.begin(), data.end(), accumulator(y));
    double X2 = std::for_each(data.begin(), data.end(), accumulator(x2));
    double XY = std::for_each(data.begin(), data.end(), accumulator(xy));
    int n = data.size();

    // Ordinary least squares for y = a*x + b.
    _a = (n * XY - X * Y) / (n * X2 - X * X);
    _b = (Y - _a * X) / n;
}
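A minimal sketch of the functor shape the constructor above appears to assume. The selector tags x, y, x2, xy come from the call sites, but the helper itself is not shown, so everything below is an assumption rather than the project's real accumulator:

#include <algorithm>
#include <vector>

// Hypothetical point type and summing functor; not the original helper.
struct point_t { double x, y; };
typedef std::vector<point_t> pointcloud_t;

enum selector { x, y, x2, xy };

struct accumulator {
    explicit accumulator(selector s) : sel(s), sum(0.0) {}
    void operator()(const point_t& p) {
        switch (sel) {
            case x:  sum += p.x;       break;
            case y:  sum += p.y;       break;
            case x2: sum += p.x * p.x; break;
            case xy: sum += p.x * p.y; break;
        }
    }
    // std::for_each returns a copy of the functor; this conversion lets the
    // caller assign that copy straight to a double, as the constructor does.
    operator double() const { return sum; }
    selector sel;
    double sum;
};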
static void processLine(char *s)
{
    struct Object *o;

    /* accumulator() acts as a growing-buffer allocator here: make room
     * for one more object. */
    movie = (struct Object *)accumulator(movie, sizeof(struct Object) * (numObjects + 1), 0);
    o = &movie[numObjects];

    if (*s == 's') {
        /* s x y z radius r g b */
        if (7 == sscanf(s + 1, "%f%f%f%f%f%f%f",
                        &o->u.sphere.x, &o->u.sphere.y, &o->u.sphere.z,
                        &o->u.sphere.radius,
                        &o->u.sphere.r, &o->u.sphere.g, &o->u.sphere.b)) {
            o->type = OBJ_SPHERE;
            numObjects++;
        } else {
            fprintf(stderr, "couldn't parse sphere line: <<%s>>\n", s);
        }
    } else if (*s == 'l') {
        /* l x1 y1 z1 x2 y2 z2 r g b */
        if (9 == sscanf(s + 1, "%f%f%f%f%f%f%f%f%f",
                        &o->u.line.x1, &o->u.line.y1, &o->u.line.z1,
                        &o->u.line.x2, &o->u.line.y2, &o->u.line.z2,
                        &o->u.line.r, &o->u.line.g, &o->u.line.b)) {
            o->type = OBJ_LINE;
            numObjects++;
        } else {
            fprintf(stderr, "couldn't parse line line: <<%s>>\n", s);
        }
    } else if (*s == 'f') {
        o->type = OBJ_FRAME;
        o->u.frame.s = copy_string(s + 1);
        numObjects++;
        numFrames++;
        frames = (int *)accumulator(frames, sizeof(int) * numFrames, 0);
        frames[numFrames - 1] = startOfLastFrame;
        startOfLastFrame = numObjects;
        if (followLastFrame) {
            currentFrame = numFrames - 1;
            repaint();
        }
    }
}
int HoughTester::DoYourJob(Magick::Image& image) {
    std::cout << "\"noise\";\"R\";\"FI\";\"value\"" << std::endl;
    for (int level = 0; level < NumberOfNoiseLevels + 2; level++) {
        noise.Percent = level * 100 / (NumberOfNoiseLevels + 1);
        for (int pass = 0; pass < NumberOfTriesPerLevel; pass++) {
            Magick::Image copy = image;
            noise(copy);
            bw(copy);
            accumulator(copy);
            blur(accumulator.Accumulator);
            HoughResult max = accumulator.Maximum();
            std::cout << noise.Percent << ";" << max.R << ";"
                      << max.Fi << ";" << max.Value << std::endl;
            // Noise level 0 is deterministic, so one pass is enough.
            if (level == 0)
                break;
        }
    }
    return 0; // was missing: the function is declared int but returned nothing
}
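To make the accumulator(copy) / accumulator.Maximum() calls above concrete, here is a sketch of what a line-detecting Hough accumulator does. This is the textbook technique, not the HoughTester's actual implementation; all names below are hypothetical:

#include <cmath>
#include <vector>

struct HoughResultSketch { int r, fi, value; };

struct HoughAccumSketch {
    int numR, numFi;
    double maxR;            // largest |r| the grid can represent
    std::vector<int> votes; // numR x numFi vote grid

    HoughAccumSketch(int nR, int nFi, double maxRadius)
        : numR(nR), numFi(nFi), maxR(maxRadius), votes(nR * nFi, 0) {}

    // Each edge pixel votes for every line (r, fi) passing through it:
    // r = x*cos(fi) + y*sin(fi).
    void vote(double x, double y) {
        const double pi = 3.14159265358979323846;
        for (int f = 0; f < numFi; ++f) {
            double fi = f * pi / numFi;
            double r = x * std::cos(fi) + y * std::sin(fi);
            int ri = (int)((r / maxR + 1.0) * 0.5 * (numR - 1) + 0.5);
            if (ri >= 0 && ri < numR)
                ++votes[ri * numFi + f];
        }
    }

    // The strongest line is the cell with the most votes.
    HoughResultSketch maximum() const {
        HoughResultSketch best = {0, 0, 0};
        for (int ri = 0; ri < numR; ++ri)
            for (int f = 0; f < numFi; ++f)
                if (votes[ri * numFi + f] > best.value)
                    best = HoughResultSketch{ri, f, votes[ri * numFi + f]};
        return best;
    }
};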
void Pothos::BufferChunk::append(const BufferChunk &other) {
    // this is a null buffer, take the input buffer's type
    if (not *this) {
        // the other buffer is within bounds, copy the reference
        if (other.getEnd() <= other.getBuffer().getEnd()) {
            *this = other;
            return;
        }
        // otherwise make a new buffer and copy in the contents
        *this = Pothos::BufferChunk(other.dtype, other.elements());
        std::memcpy((void *)this->address, (const void *)other.address, this->length);
    }
    // otherwise allocate and copy the two buffers together
    else {
        Pothos::BufferChunk accumulator(this->length + other.length);
        accumulator.dtype = this->dtype;
        std::memcpy((void *)accumulator.address, (const void *)this->address, this->length);
        std::memcpy((char *)accumulator.address + this->length, (const void *)other.address, other.length);
        *this = accumulator;
    }
}
//inline
void ParserCKYBest::follow_unary_chain(Cell& cell, const Edge* edge, bool isroot) const {
    static int start_symbol = SymbolTable::instance_nt().get(LorgConstants::tree_root_name);

    // Worklist of edges whose unary chains still need to be followed.
    std::vector<const Edge*> accumulator(1, edge);

    Edge candidate;
    candidate.set_right_child(NULL);

    do {
        const Edge* current_edge = accumulator.back();
        accumulator.pop_back();
        candidate.set_left_child(current_edge);

        const std::vector<const Rule*>& rules = isroot ?
            unary_rhs_2_rules_toponly[current_edge->get_lhs()] :
            unary_rhs_2_rules_notop[current_edge->get_lhs()];

        std::vector<const Rule*>::const_iterator rule_end = rules.end();
        for (std::vector<const Rule*>::const_iterator rule_itr = rules.begin();
             rule_itr != rule_end; ++rule_itr) {
            // std::cout << *(*rule_itr) << std::endl;
            if (isroot || (*rule_itr)->get_lhs() != start_symbol)
                candidate.set_lhs((*rule_itr)->get_lhs());
            candidate.set_probability(current_edge->get_probability() +
                                      (*rule_itr)->get_probability());
            const Edge* new_edge = cell.process_candidate(candidate);
            if (new_edge && rules_for_unary_exist(new_edge->get_lhs())) {
                accumulator.push_back(new_edge);
            }
        }
    } while (!accumulator.empty());
}
int main(int argc, char **argv)
{
    cargo_t cargo;
    int ret = 0;
    int *integers = NULL;
    size_t integer_count = 0;
    accumulator_f accumulator = max_ints;
    int sum_flag = 0;

    printf("cargo version v%s\n", cargo_get_version());

    if (cargo_init(&cargo, 0, "%s", argv[0])) {
        fprintf(stderr, "Failed to init command line parsing\n");
        return -1;
    }

    cargo_set_description(cargo, "Process some integers.");

    ret |= cargo_add_option(cargo, 0, "integers", "An integer for the accumulator",
                            "[i]+", &integers, &integer_count);
    ret |= cargo_add_option(cargo, 0, "--sum -s",
                            "Sum the integers (default: find the max)", "b", &sum_flag);
    assert(ret == 0);

    if (cargo_parse(cargo, 0, 1, argc, argv))
        return -1;

    if (sum_flag)
        accumulator = sum_ints;

    printf("%d\n", accumulator(integers, integer_count));

    cargo_destroy(&cargo);
    free(integers);
    return 0;
}
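The snippet above assumes an accumulator_f function-pointer type and the two accumulators it switches between; neither is shown, so this is one plausible sketch of what they could look like, matching the call accumulator(integers, integer_count):

#include <stddef.h>

/* Hypothetical definitions matching the call sites above. */
typedef int (*accumulator_f)(const int *values, size_t count);

static int sum_ints(const int *values, size_t count)
{
    int sum = 0;
    for (size_t i = 0; i < count; i++)
        sum += values[i];
    return sum;
}

static int max_ints(const int *values, size_t count)
{
    int best = count ? values[0] : 0; /* guard the empty case */
    for (size_t i = 1; i < count; i++)
        if (values[i] > best)
            best = values[i];
    return best;
}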
void PageSerializer::serializeFrame(Frame* frame) {
    Document* document = frame->document();
    URL url = document->url();
    if (!url.isValid() || url.isBlankURL()) {
        // For blank frames we generate a fake URL so they can be referenced
        // by their containing frame.
        url = urlForBlankFrame(frame);
    }

    if (m_resourceURLs.contains(url)) {
        // FIXME: We could have 2 frames with the same URL but which were
        // dynamically changed and now have different content. So we should
        // serialize both and somehow rename the frame src in the containing
        // frame. Arg!
        return;
    }

    Vector<Node*> nodes;
    SerializerMarkupAccumulator accumulator(*this, *document, &nodes);
    TextEncoding textEncoding(document->charset());
    CString data;
    if (!textEncoding.isValid()) {
        // FIXME: iframes used as images trigger this. We should deal with them correctly.
        return;
    }

    String text = accumulator.serializeNodes(*document->documentElement(), 0, IncludeNode);
    CString frameHTML = textEncoding.encode(text, EntitiesForUnencodables);
    m_resources->append(Resource(url, document->suggestedMIMEType(),
                                 SharedBuffer::create(frameHTML.data(), frameHTML.length())));
    m_resourceURLs.add(url);

    for (Vector<Node*>::iterator iter = nodes.begin(); iter != nodes.end(); ++iter) {
        Node* node = *iter;
        if (!is<Element>(*node))
            continue;

        Element& element = downcast<Element>(*node);
        // We have to process inline style as it might contain some resources
        // (typically background images).
        if (is<StyledElement>(element))
            retrieveResourcesForProperties(downcast<StyledElement>(element).inlineStyle(), document);

        if (is<HTMLImageElement>(element)) {
            HTMLImageElement& imageElement = downcast<HTMLImageElement>(element);
            URL url = document->completeURL(imageElement.fastGetAttribute(HTMLNames::srcAttr));
            CachedImage* cachedImage = imageElement.cachedImage();
            addImageToResources(cachedImage, imageElement.renderer(), url);
        } else if (is<HTMLLinkElement>(element)) {
            HTMLLinkElement& linkElement = downcast<HTMLLinkElement>(element);
            if (CSSStyleSheet* sheet = linkElement.sheet()) {
                URL url = document->completeURL(linkElement.getAttribute(HTMLNames::hrefAttr));
                serializeCSSStyleSheet(sheet, url);
                ASSERT(m_resourceURLs.contains(url));
            }
        } else if (is<HTMLStyleElement>(element)) {
            if (CSSStyleSheet* sheet = downcast<HTMLStyleElement>(element).sheet())
                serializeCSSStyleSheet(sheet, URL());
        }
    }

    for (Frame* childFrame = frame->tree().firstChild(); childFrame;
         childFrame = childFrame->tree().nextSibling())
        serializeFrame(childFrame);
}
String createMarkup(const Node* node, EChildrenOnly childrenOnly,
                    Vector<RawPtr<Node> >* nodes, EAbsoluteURLs shouldResolveURLs,
                    Vector<QualifiedName>* tagNamesToSkip) {
    if (!node)
        return "";

    MarkupAccumulator accumulator(nodes, shouldResolveURLs);
    return accumulator.serializeNodes(const_cast<Node&>(*node), childrenOnly, tagNamesToSkip);
}
String XMLSerializer::serializeToString(Node* node, ExceptionState& exceptionState) {
    if (!node) {
        exceptionState.throwTypeError("Invalid node value.");
        return String();
    }

    MarkupAccumulator accumulator(0, DoNotResolveURLs, nullptr, ForcedXML);
    return accumulator.serializeNodes(*node, IncludeNode);
}
String createMarkup(const Node* node, EChildrenOnly childrenOnly, EAbsoluteURLs shouldResolveURLs) {
    if (!node)
        return "";

    MarkupAccumulator accumulator(shouldResolveURLs);
    return serializeNodes<EditingStrategy>(accumulator, const_cast<Node&>(*node), childrenOnly);
}
const FUNCTIONSDLL_API double simpleMonteCarlo(
    const VanillaOption& vanillaOption, const double spot,
    const Parameters& volatility, const Parameters& interestRate,
    const size_t numberOfPath, Statistics& gatherer) {
    const double maturity = vanillaOption.getMaturity();

    const int seed = 100;
    boost::random::mt19937 gen(seed);
    boost::random::uniform_01<> dist;
    boost::random::variate_generator<boost::random::mt19937&, boost::random::uniform_01<> > rand(gen, dist);

    // Only used by the commented-out stepwise path below.
    const size_t numberOfSteps = 100;
    const double dt = maturity / numberOfSteps;

    boost::accumulators::accumulator_set<double,
        boost::accumulators::stats<boost::accumulators::tag::mean> > accumulator;

    const double variance = volatility.IntegralSquare(0.0, maturity);
    const double rootVariance = std::sqrt(variance);
    const double itoCorrection = -0.5 * variance;
    const double movedSpot = spot * std::exp(interestRate.Integral(0.0, maturity) + itoCorrection);
    double thisSpot = 0.0;
    const double discountFactor = std::exp(-interestRate.Integral(0.0, maturity));

    // create paths
    for (size_t i = 0; i < numberOfPath; ++i) {
        // by one step
        // Note: rand() draws uniform(0,1) here; the lognormal spot model
        // would normally call for a standard normal draw at this point.
        const double randomness = rand();
        thisSpot = movedSpot * std::exp(rootVariance * randomness);
        double thisPayoff = vanillaOption.getPayoff(thisSpot);
        //double nextSpot = spot;
        //for (size_t step = 0; step < numberOfSteps; ++step)
        //{
        //    nextSpot += nextSpot * (interestRate * dt - 0.5 * volatility * volatility * std::sqrt(dt) * dist(generator));
        //}
        gatherer.dumpOneResult(thisPayoff * discountFactor);
        accumulator(thisPayoff);
    }

    // assume interestRate is fixed.
    const double price = boost::accumulators::mean(accumulator) * discountFactor;
    return price;
}
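For reference, the Boost.Accumulators pattern used above in isolation: an accumulator_set is fed samples through operator() and queried through an extractor such as mean. A minimal self-contained sketch, independent of the option-pricing code:

#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/stats.hpp>
#include <boost/accumulators/statistics/mean.hpp>
#include <cassert>

namespace ba = boost::accumulators;

int main() {
    // Feed samples in through operator(), pull statistics out via extractors.
    ba::accumulator_set<double, ba::stats<ba::tag::mean> > acc;
    acc(1.0);
    acc(2.0);
    acc(6.0);
    assert(ba::mean(acc) == 3.0);
    return 0;
}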
int main() {
    auto const n = 1000000000;
    auto const delta = 1.0 / n;
    auto const startTime = std::chrono::steady_clock::now();

    tbb::task_scheduler_init tbb_initializer;
    partialSum accumulator(delta);
    tbb::parallel_reduce(tbb::blocked_range<long>(0, n), accumulator, tbb::auto_partitioner());
    auto const pi = 4.0 * delta * accumulator.getSum();

    auto const elapseTime = std::chrono::steady_clock::now() - startTime;
    out("TBB Implicit", pi, n, elapseTime,
        tbb::task_scheduler_init::default_num_threads(),
        tbb::task_scheduler_init::default_num_threads());
    return 0;
}
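The partialSum reduction body is not shown above. A hypothetical sketch of what it could look like, consistent with the pi = 4 * delta * getSum() formula (TBB's imperative parallel_reduce requires exactly this shape: a splitting constructor, operator() over a sub-range, and join() to merge partials):

#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>

class partialSum {
    double sum;
    double delta;
public:
    explicit partialSum(double d) : sum(0.0), delta(d) {}
    // Splitting constructor: a fresh zero partial for a stolen sub-range.
    partialSum(partialSum& other, tbb::split) : sum(0.0), delta(other.delta) {}
    void operator()(const tbb::blocked_range<long>& range) {
        double local = sum;
        for (long i = range.begin(); i != range.end(); ++i) {
            double const x = (i + 0.5) * delta; // midpoint rule
            local += 1.0 / (1.0 + x * x);       // integrand of pi/4
        }
        sum = local;
    }
    void join(const partialSum& other) { sum += other.sum; }
    double getSum() const { return sum; }
};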
auto static_for(TFBody&& body) {
    auto step = [body = FWD(body)](auto self, auto state, auto&& x, auto&&... xs) {
        auto next_state = body(state, x);

        constexpr auto last_iteration = bool_v<(sizeof...(xs) == 0)>;
        constexpr auto must_break = bool_v<(
            std::is_same<decltype(next_state.next_action()),
                         impl::action::a_break>{})>;

        return static_if(bool_v<(must_break || last_iteration)>)
            .then([next_state](auto&&) {
                return next_state.accumulator();
            })
            .else_([next_state, state, &xs...](auto&& xself) {
                return xself(next_state, xs...);
            })(self);
    };

    return [step = std::move(step)](auto accumulator) {
        return [step, accumulator](auto&&... xs) {
            return static_if(bool_v<(sizeof...(xs) == 0)>)
                .then([accumulator](auto&&) { return accumulator; })
                .else_([accumulator](auto&& xstep, auto&&... ys) {
                    auto initial_state = impl::make_state(
                        sz_v<0>, accumulator, impl::action::a_continue{});
                    return y_combinator(xstep)(initial_state, FWD(ys)...);
                })(step, FWD(xs)...);
        };
    };
}
/** Score due to one segment */
void LexicalReorderingFeatureFunction::doSingleUpdate(
    const TranslationOption* option, const TargetGap& gap, FVector& scores) {
    vector<float> accumulator(m_mosesLexReorder->GetNumScoreComponents(), 0);
    // The previous state of the (new) current hypo.
    LRStateHandle prevState = m_prevStates[gap.segment.GetStartPos()];
    // Evaluate the score of inserting this hypo, and get the prev state
    // for the next hypo.
    prevState.reset(prevState->Expand(*option, accumulator));
    addScore(accumulator, scores);
    // If there's a hypo on the right, then evaluate it.
    if (gap.rightHypo) {
        prevState.reset(prevState->Expand(gap.rightHypo->GetTranslationOption(), accumulator));
        addScore(accumulator, scores);
    }
}
void Node::evaluateChildBounds() const {
    if (_childBoundsChanged) {
        ASSERT_MESSAGE(!_childBoundsMutex, "re-entering bounds evaluation");
        _childBoundsMutex = true;

        _childBounds = AABB();
        // Instantiate an AABB accumulator
        AABBAccumulateWalker accumulator(_childBounds);

        // greebo: traverse the children of this node
        traverse(accumulator);

        _childBoundsMutex = false;
        _childBoundsChanged = false;
    }
}
/** Score due to two segments. The left and right refer to the target positions. */
void LexicalReorderingFeatureFunction::doContiguousPairedUpdate(
    const TranslationOption* leftOption, const TranslationOption* rightOption,
    const TargetGap& gap, FVector& scores) {
    vector<float> accumulator(m_mosesLexReorder->GetNumScoreComponents(), 0);
    // The previous state of the (new) current hypo.
    LRStateHandle prevState(m_prevStates[gap.segment.GetStartPos()]);

    // Evaluate the hypos in the gap.
    prevState.reset(prevState->Expand(*leftOption, accumulator));
    addScore(accumulator, scores);
    prevState.reset(prevState->Expand(*rightOption, accumulator));
    addScore(accumulator, scores);

    // If there's a hypo on the right, then evaluate it.
    if (gap.rightHypo) {
        prevState.reset(prevState->Expand(gap.rightHypo->GetTranslationOption(), accumulator));
        addScore(accumulator, scores);
    }
}
static char *assembleCommandLine(int argc, char **argv)
{
    int len = 0;
    int len1;
    char *arg;
    char *s = NULL;

    while (argc-- > 0) {
        arg = *argv++;
        len1 = len + strlen(arg);
        /* Request two extra bytes: the separating space written below and
         * the final NUL both land past len1, which the original request
         * did not cover. */
        s = (char *)accumulator(s, len1 + 2, 0);
        while (len < len1) {
            s[len++] = *arg++;
        }
        s[len++] = ' ';
    }
    s[len] = '\0';
    return s;
}
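Both this snippet and processLine above lean on accumulator() as a growing-buffer helper. Its implementation is not shown; this is one plausible realloc-style sketch, and since the third argument is 0 at every call site, its meaning here (a zero-the-buffer flag) is purely a guess:

#include <stdlib.h>
#include <string.h>

/* Hypothetical growing-buffer helper; the real one may pool memory or
 * round allocation sizes up. */
static void *accumulator(void *buf, size_t size, int clear)
{
    void *p = realloc(buf, size);
    if (p == NULL) {
        free(buf);
        return NULL;
    }
    if (clear)
        memset(p, 0, size);
    return p;
}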
/** \brief Compute occlusion from the current view direction to the
 * given sample region.
 *
 * \param sampleRegion - parallelogram region over which to sample the map
 * \param sampleOpts - set of sampling options
 * \param numSamples - number of samples to take for the region.
 * \param outSamps[0] - Return parameter; amount of occlusion over the
 *                      sample region in the viewing direction for this map.
 */
void sample(const Sq3DSamplePllgram& sampleRegion,
            const CqShadowSampleOptions& sampleOpts,
            const TqInt numSamples, TqFloat* outSamps) {
    // filter weights
    CqConstFilter filterWeights;
    // Use constant depth approximation for the surface for maximum
    // sampling speed. We use the depth from the camera to the centre
    // of the sample region.
    CqConstDepthApprox depthFunc((m_currToLight * sampleRegion.c).z());
    // Determine rough filter support. This results in a texture-aligned
    // box, so doesn't do proper anisotropic filtering. For occlusion this
    // isn't visible anyway because of the large amount of averaging. We
    // also want the filter setup to be as fast as possible.
    // CqVector3D side1 = m_currToRasterVec*sampleRegion.s1;
    // CqVector3D side2 = m_currToRasterVec*sampleRegion.s2;
    // CqVector3D center = m_currToRaster*sampleRegion.c;
    // TqFloat sWidthOn2 = max(side1.x(), side2.x())*m_pixels.width()/2;
    // TqFloat tWidthOn2 = max(side1.y(), side2.y())*m_pixels.height()/2;
    // TODO: Fix the above calculation so that the width is actually
    // taken into account properly.
    CqVector3D center = m_currToRaster * sampleRegion.c;
    TqFloat sWidthOn2 = 0.5 * (sampleOpts.sBlur() * m_pixels.width());
    TqFloat tWidthOn2 = 0.5 * (sampleOpts.tBlur() * m_pixels.height());
    SqFilterSupport support(
        lround(center.x() - sWidthOn2), lround(center.x() + sWidthOn2) + 1,
        lround(center.y() - tWidthOn2), lround(center.y() + tWidthOn2) + 1);
    // percentage closer accumulator
    CqPcfAccum<CqConstFilter, CqConstDepthApprox> accumulator(
        filterWeights, depthFunc, sampleOpts.startChannel(),
        sampleOpts.biasLow(), sampleOpts.biasHigh(), outSamps);
    // accumulate occlusion over the filter support.
    filterTextureNowrapStochastic(accumulator, m_pixels, support, numSamples);
}
//**************************
// This method is called to compute the solution
ERMsg CLoganModel::OnExecuteAnnual() {
    _ASSERTE(m_weather.GetNbYear() >= 2);

    ERMsg msg;
    CAnnualStatVector stat(model.GetNbYear(), CTRef((short)model.GetYear(0))); // save result to disk

    for (int y = 0; y < (int)model.GetNbYear(); y++) {
        // compute Cold Tolerance
        CMPBColdTolerance coldTolerance;
        coldTolerance.ComputeAnnual(weather);
        const CMPBCTResultVector& CT = coldTolerance.GetResult();

        //***********************************
        // fill the accumulator
        CAccumulator accumulator(m_n);
        // NOTE: this loop variable shadows the outer `y`.
        for (int y = 0; y < weather.GetNbYear(); y++) {
            const CWeatherYear& weatherYear = weather[y];
            CTPeriod p = weatherYear.GetGrowingSeason();
            CAccumulatorData data;

            // Th = 42 F, Aug 1 to end of effective growing season
            if (p.End().GetMonth() >= AUGUST) {
                p.Begin().SetJDay(FIRST_DAY);
                p.Begin().m_month = AUGUST;
                data.m_DDHatch = weatherYear.GetDD(5.56, p);
                // Th = 42 F, whole year
                data.m_DDGen = weatherYear.GetDD(5.56);
            }

            data.m_lowestMinimum = weatherYear.GetStat(STAT_TMIN, LOWEST);
            data.m_meanMaxAugust = weatherYear[AUGUST].GetStat(STAT_TMAX, MEAN);
            data.m_totalPrecip = weatherYear.GetStat(STAT_PRCP, SUM);

            // water deficit was in mm
            CThornthwaitePET TPET(weatherYear, 0, CThornthwaitePET::POTENTIEL_STANDARD);
            data.m_waterDeficit = TPET.GetWaterDeficit(weatherYear) / 25.4; // water deficit, in inches

            data.m_precAMJ = 0;
            data.m_precAMJ += weatherYear[APRIL].GetStat(STAT_PRCP, SUM);
            data.m_precAMJ += weatherYear[MAY].GetStat(STAT_PRCP, SUM);
            data.m_precAMJ += weatherYear[JUNE].GetStat(STAT_PRCP, SUM);
            data.m_stabilityFlag = GetStabilityFlag(weatherYear);

            _ASSERTE(CT[y].m_year == weatherYear.GetYear());
            // Skip the first year: no cold resistance
            data.m_S = (y > 0) ? CT[y].m_Psurv : 1;

            accumulator.push_back(data);
        }
        accumulator.ComputeMeanP_Y1_Y2();

        //***********************************
        if (m_runLength <= 0 || m_runLength > weather.GetNbYear())
            m_runLength = weather.GetNbYear();

        // skip the first year if m_runLength == weather.GetNbYear()-1
        int s0 = max(1, m_runLength - 1);
        m_firstYear = weather.GetFirstYear() + s0; // keep the first year in memory

        for (int i = 0; i < NB_OUTPUT; i++) {
            // the code must be changed if we want to use the run-length notion
            for (int y = s0; y < weather.GetNbYear(); y++) {
                m_F[i].push_back(GetProbability(accumulator, i, y, m_runLength));
            }
        }
    }

    SetOutput(stat);
    return msg;
}
String XMLSerializer::serializeToString(Node* root) {
    ASSERT(root);
    MarkupAccumulator accumulator(0, DoNotResolveURLs, nullptr, ForcedXML);
    return accumulator.serializeNodes(*root, IncludeNode);
}
int main(void) {
    // Initialize GLFW
    GLFWwindow* window;
    glfwSetErrorCallback(error_callback);
    if (!glfwInit())
        exit(EXIT_FAILURE);
    window = glfwCreateWindow(640, 480, "Simple example", NULL, NULL);
    if (!window) {
        glfwTerminate();
        exit(EXIT_FAILURE);
    }
    glfwMakeContextCurrent(window);
    glfwSetKeyCallback(window, key_callback);

#ifdef WIN32
    // Initialize GLEW
    glewExperimental = GL_TRUE;
    GLenum glewInitStatus = glewInit();
    if (glewInitStatus != GLEW_OK) {
        TRACE("Glew Init Error: " << glewGetErrorString(glewInitStatus));
        glfwDestroyWindow(window);
        glfwTerminate();
        exit(EXIT_FAILURE);
    }
#endif

    Rocket::Core::Context* context = nullptr;

    glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LESS);

    g_game.Initialize();

    // Set up transformation matrices
    glm::mat4 Projection;
    glm::mat4 View;
    glm::mat4 ViewProjection;

    // Main loop: fixed-timestep simulation driven by a time accumulator
    std::chrono::duration<double> t(0.0);
    std::chrono::duration<double> dt(0.01);
    std::chrono::duration<double> accumulator(0.0);
    std::chrono::time_point<std::chrono::system_clock> currentTime, newTime;
    currentTime = std::chrono::system_clock::now();
    while (!glfwWindowShouldClose(window)) {
        newTime = std::chrono::system_clock::now();
        std::chrono::duration<double> elapsed_seconds = newTime - currentTime;
        currentTime = newTime;
        accumulator += elapsed_seconds;

        // Simulation: consume the accumulated wall-clock time in fixed steps
        while (accumulator >= dt) {
            g_game.SimulationStep((float)dt.count());
            accumulator -= dt;
            t += dt;
        }

        // Render
        {
            // If user has resized window, update viewport and projection
            int width, height;
            glfwGetFramebufferSize(window, &width, &height);

            // Draw
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            g_game.Render(width, height);
            glfwSwapBuffers(window);
            glfwPollEvents();
        }
    }

    // Main loop has exited, clean up
    glfwDestroyWindow(window);
    glfwTerminate();
    exit(EXIT_SUCCESS);
}
snapshot_type snapshot() const { return accumulator().snapshot(); }
void SensorDataRead() {
    int j;

    //ReadPulses_RNAV(); // Reading sensor pulses
    Delta_Angle[0] = t_delta_theta_2p5ms[0]; // Gyros
    Delta_Angle[1] = t_delta_theta_2p5ms[1];
    Delta_Angle[2] = t_delta_theta_2p5ms[2];
    Delta_Vel[0] = t_delta_velocity_2p5ms[0]; // Accelerometers
    Delta_Vel[1] = t_delta_velocity_2p5ms[1];
    Delta_Vel[2] = t_delta_velocity_2p5ms[2];

    put_model(); // Sensor data modeling

    /********* Ideal sensor equations ***********/
    pure_Delta_Angle[0] = earth_rate * cos(latm) / 400 + (GYRX_BIAS * cdr / 3600 / 400);
    pure_Delta_Angle[1] = 0.0 + (GYRY_BIAS * cdr / 3600 / 400);
    pure_Delta_Angle[2] = -earth_rate * sin(latm) / 400 + (GYRZ_BIAS * cdr / 3600 / 400);

    pure_Delta_Vel[0] = (ACCX_BIAS * 1e-6 * 10 / 400);
    pure_Delta_Vel[1] = (ACCY_BIAS * 1e-6 * 10 / 400);
    pure_Delta_Vel[2] = -(pure_g_ecef_mag / 400) + (ACCZ_BIAS * 1e-6 * 10 / 400);
    // END OF GYRO MODEL

    if (qcnt == 0) { // sample 1
        p_alp1[0] = pure_Delta_Angle[0];
        p_alp1[1] = pure_Delta_Angle[1];
        p_alp1[2] = pure_Delta_Angle[2];
    } else if (qcnt == 1) { // sample 2
        p_alp2[0] = pure_Delta_Angle[0];
        p_alp2[1] = pure_Delta_Angle[1];
        p_alp2[2] = pure_Delta_Angle[2];
    } else if (qcnt == 2) { // sample 3
        p_alp3[0] = pure_Delta_Angle[0];
        p_alp3[1] = pure_Delta_Angle[1];
        p_alp3[2] = pure_Delta_Angle[2];
    } else if (qcnt == 3) { // sample 4
        p_alp4[0] = pure_Delta_Angle[0];
        p_alp4[1] = pure_Delta_Angle[1];
        p_alp4[2] = pure_Delta_Angle[2];
    }

    for (j = 0; j < 3; j++) {
        p_velo[j] = p_velo[j] + pure_Delta_Vel[j];
        p_Ang[j] = p_Ang[j] + pure_Delta_Angle[j];
    }

    velcnt++;
    if (velcnt == 8) { // every 8th 2.5 ms sample, i.e. a 20 ms boundary
        velcnt = 0;
        for (j = 0; j < 3; j++) {
            p_velo_20ms[j] = p_velo[j];
        }
        accumulator();
    }
}