Estimate operator / (const Estimate &lhs, i32 rhs)
{
    if (rhs == 0) throw DomainException("division by int");
    LongFloat r(lhs.m_Value / rhs);
    return Estimate(r, lhs.m_Error / ErrorEstimate(double(rhs)) +
                       RoundingError(r, r.AdditionRoundingError()));
}
Estimate operator * (const Estimate &lhs, const Estimate &rhs)
{
    LongFloat r(lhs.m_Value * rhs.m_Value);
    ErrorEstimate e(lhs.m_Error * rhs.m_Error +
                    lhs.m_Error * ErrorEstimate(rhs.m_Value) +
                    rhs.m_Error * ErrorEstimate(lhs.m_Value));
    return Estimate(r, e + RoundingError(r, r.MultiplicationRoundingError()));
}
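The error passed to the Estimate constructor above is the standard product bound. Writing the true operands as the stored values plus errors, $x = \tilde x + \delta_x$ and $y = \tilde y + \delta_y$ with $|\delta_x| \le e_x$, $|\delta_y| \le e_y$,
\[
|xy - \tilde x \tilde y| = |\tilde x\,\delta_y + \tilde y\,\delta_x + \delta_x \delta_y|
  \le |\tilde x|\,e_y + |\tilde y|\,e_x + e_x e_y ,
\]
which is exactly the three-term sum accumulated in e; the additional MultiplicationRoundingError term accounts for the rounding of the computed product r itself.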
template<typename Distribution>
double HMM<Distribution>::Estimate(const arma::mat& dataSeq,
                                   arma::mat& stateProb) const
{
  // We don't need to save these.
  arma::mat forwardProb, backwardProb;
  arma::vec scales;

  return Estimate(dataSeq, stateProb, forwardProb, backwardProb, scales);
}
Estimate recip(const Estimate &arg)
{
    if (!arg.IsNonZero()) throw PrecisionException("recip");
    LongFloat r(arg.m_Value.recip());
    ErrorEstimate e(arg.m_Value, ErrorEstimate::Down);
    ErrorEstimate re(RoundingError(r, r.DivisionRoundingError()));
    // multiplication in denominator would have the wrong rounding mode
    return Estimate(r, (arg.m_Error / (e - arg.m_Error) / e) + re);
}
Estimate operator / (const Estimate &lhs, const Estimate &rhs)
{
    if (!rhs.IsNonZero()) throw PrecisionException("division"); // this also assures e - rhs.m_Error > 0
    LongFloat r(lhs.m_Value / rhs.m_Value);
    ErrorEstimate e(rhs.m_Value, ErrorEstimate::Down);
    ErrorEstimate n(ErrorEstimate(lhs.m_Value) * rhs.m_Error +
                    ErrorEstimate(rhs.m_Value, ErrorEstimate::Up) * lhs.m_Error);
    // multiplication in denominator would have the wrong rounding mode
    return Estimate(r, n / (e - rhs.m_Error) / e +
                       RoundingError(r, r.DivisionRoundingError()));
}
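Both recip and operator / above bound the error of a quotient through the reciprocal. With e = ErrorEstimate(rhs.m_Value, Down) a downward-rounded bound on the stored denominator magnitude, the true denominator satisfies $|y| \ge e - e_y$, so
\[
\left|\frac{1}{y} - \frac{1}{\tilde y}\right| = \frac{|\tilde y - y|}{|y|\,|\tilde y|} \le \frac{e_y}{(e - e_y)\,e},
\qquad
\left|\frac{x}{y} - \frac{\tilde x}{\tilde y}\right| \le \frac{|\tilde x|\,e_y + |\tilde y|\,e_x}{(e - e_y)\,e},
\]
the second numerator being exactly the term n built in operator /. The denominator is applied as two successive divisions rather than one multiplication because, as the source comments note, the multiplication would use the wrong rounding mode for a quantity that ends up in a denominator.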
int main(void)
{
    float lambda = 0.95, gamma = 100;
    float t_final[m];

    initializeData();
    Estimate(lambda, gamma, t_final);

    printf("\n Theta: a0, b0 \n");
    printArray(t_final, m);

    return 0;
}
TestPage::TestPage(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::TestPage)
{
    ui->setupUi(this);
    ui->lineEditBid->setEnabled(false); // cannot calc until update clicked and data fetched

    connect(ui->pushButtonBTCExplorer, SIGNAL(clicked()), this, SLOT(SummonBTCExplorer()));
    connect(ui->pushButtonLTCExplorer, SIGNAL(clicked()), this, SLOT(SummonLTCExplorer()));
    connect(ui->pushButtonDASHExplorer, SIGNAL(clicked()), this, SLOT(SummonDASHExplorer()));
    connect(ui->pushButtonBTC, SIGNAL(clicked()), this, SLOT(SummonBTCWallet()));
    connect(ui->pushButtonLTC, SIGNAL(clicked()), this, SLOT(SummonLTCWallet()));
    connect(ui->pushButtonDASH, SIGNAL(clicked()), this, SLOT(SummonDASHWallet()));
    connect(ui->pushButtonRefresh, SIGNAL(clicked()), this, SLOT(GetBids()));
    connect(ui->lineEditBid, SIGNAL(returnPressed()), this, SLOT(Estimate()));

    theme = GetArg("-theme", "");
    QString themestring = QString::fromUtf8(theme.c_str());
    if (themestring.contains("orange")) {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #ffa405");
    }
    else if (themestring.contains("dark")) {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #ffa405");
    }
    else if (themestring.contains("green")) {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #45f806");
    }
    else if (themestring.contains("blue")) {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #088af8");
    }
    else if (themestring.contains("pink")) {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #fb04db");
    }
    else if (themestring.contains("purple")) {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #cb03d2");
    }
    else if (themestring.contains("turq")) {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #0ab4dc");
    }
    // fallback on default
    else {
        ui->pushButtonRefresh->setStyleSheet("border: 2px solid #ffa405");
    }
}
void AStar::Execute(const Graph &Graph, const string &VetexId)
{
    const auto& Vertexes = Graph.GetVertexes();
    Vertex* pVertexStart = Vertexes.find(VetexId)->second;
    vector< Vertex* > Q;

    // Initialize all vertices.
    for (auto& it : Vertexes)
    {
        Vertex* pV = it.second;
        pV->PathfindingData.Cost = 0;
        pV->PathfindingData.pParent = nullptr;
        pV->PathfindingData.Heuristic = 0x0FFFFFFF;
        pV->PathfindingData.Flag = false;
    }

    // Initialize the start vertex.
    pVertexStart->PathfindingData.pParent = 0;
    pVertexStart->PathfindingData.Cost = 0;
    pVertexStart->PathfindingData.Heuristic = Estimate(pVertexStart, m_pVTarget);

    // Put the start vertex into the open list.
    Q.push_back(pVertexStart);
    pVertexStart->PathfindingData.Flag = true;

    for (; Q.size() > 0;)
    {
        // Extract the vertex with the smallest path estimate.
        auto v = ExtractMin(Q);
        v->PathfindingData.Flag = false;
        if (v == m_pVTarget)
        {
            return;
        }

        // Relax all outgoing edges.
        const auto& EO = v->GetEdgesOut();
        for (auto& it : EO)
        {
            Edge* pEdge = it.second;
            Vertex* pVEnd = pEdge->GetEndVertex();
            bool bRet = Relax(v, pVEnd, pEdge->GetWeight());
            // If the relaxation succeeded, add the vertex to the open list.
            if (bRet && pVEnd->PathfindingData.Flag == false)
            {
                Q.push_back(pVEnd);
                pVEnd->PathfindingData.Flag = true;
            }
        }
    }
}
template<typename Distribution>
void HMM<Distribution>::Smooth(const arma::mat& dataSeq,
                               arma::mat& smoothSeq) const
{
  // First run the forward algorithm.
  arma::mat stateProb;
  Estimate(dataSeq, stateProb);

  // Compute expected emissions.
  // Will not work for distributions without a Mean() function.
  smoothSeq.zeros(dimensionality, dataSeq.n_cols);
  for (size_t i = 0; i < emission.size(); i++)
    smoothSeq += emission[i].Mean() * stateProb.row(i);
}
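In terms of the forward-backward output, Smooth() computes the expected observation at each time step: with $\gamma_t(i) = \text{stateProb}(i, t)$ the posterior probability of state $i$ at time $t$,
\[
\hat{x}_t = \sum_i \gamma_t(i)\,\mu_i ,
\]
where $\mu_i$ is emission[i].Mean(). Each rank-one product emission[i].Mean() * stateProb.row(i) contributes that state's mean to every column, weighted by the state's posterior probability at that time step.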
bool AStar::Relax(Vertex* v1, Vertex* v2, int Weight)
{
    // This is the heuristic function:
    int G = v1->PathfindingData.Cost + Weight;   // actual path cost from v1 to v2
    int H = Estimate(v2, m_pVTarget);            // estimated cost from v2 to the target node
    int nHeuristic = G + H;                      // actual + estimate = heuristic value

    // If reaching the target through this path is shorter than the previously
    // computed value, update v2.
    if (nHeuristic < v2->PathfindingData.Heuristic)
    {
        v2->PathfindingData.Cost = G;
        v2->PathfindingData.pParent = v1;
        v2->PathfindingData.Heuristic = nHeuristic;
        return true;
    }
    return false;
}
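The Estimate() heuristic used by Execute() and Relax() is not part of this snippet. A minimal sketch of a typical admissible choice, assuming each Vertex exposes integer grid coordinates X and Y (hypothetical fields, not taken from the code above):

#include <cstdlib>  // std::abs

// Hypothetical heuristic sketch: Manhattan distance between two vertices.
// Assumes Vertex has integer members X and Y; replace with whatever spatial
// data the real Vertex type actually carries.
int AStar::Estimate(const Vertex* from, const Vertex* to)
{
    return std::abs(from->X - to->X) + std::abs(from->Y - to->Y);
}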
/*!
    Estimates the shift of the current image relative to the background image.

    \param [in] current - the current image.
    \param [in] region - a region of the background where the algorithm starts to search for the current image.
                         The estimated shift is taken relative to this region.
    \param [in] maxShift - a maximal distance which characterizes the maximal possible shift of the region.
    \param [in] hiddenAreaPenalty - a parameter used to restrict searching for the shift at the border of the background image.
    \param [in] regionAreaMin - a parameter used to set the minimal area of the region used for shift estimation. By default it is equal to 25.
    \return a result of shift estimation.
*/
bool Estimate(const View & current, const Rect & region, int maxShift,
    double hiddenAreaPenalty = 0, ptrdiff_t regionAreaMin = REGION_CORRELATION_AREA_MIN)
{
    return Estimate(current, region, Point(maxShift, maxShift), hiddenAreaPenalty, regionAreaMin);
}
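A minimal usage sketch for this overload, assuming it is a member of a shift-detector style class that has had its background image set beforehand; the ShiftDetector name, SetBackground(), and the surrounding variables are assumptions for illustration and are not taken from the snippet:

// Hypothetical usage: every name except Estimate(current, region, maxShift)
// is an assumption, not taken from the snippet above.
ShiftDetector detector;
detector.SetBackground(background);   // assumed setup call

Rect region(100, 100, 300, 300);      // region of the background to search around
int maxShift = 10;                    // allow at most 10 px of shift in each direction

if (detector.Estimate(currentFrame, region, maxShift))
{
    // Shift estimation succeeded; the detector now holds the estimated shift
    // (the retrieval accessor is not shown in the snippet).
}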
template<typename Distribution>
void HMM<Distribution>::Train(const std::vector<arma::mat>& dataSeq)
{
  // We should allow a guess at the transition and emission matrices.
  double loglik = 0;
  double oldLoglik = 0;

  // Maximum iterations?
  size_t iterations = 1000;

  // Find length of all sequences and ensure they are the correct size.
  size_t totalLength = 0;
  for (size_t seq = 0; seq < dataSeq.size(); seq++)
  {
    totalLength += dataSeq[seq].n_cols;

    if (dataSeq[seq].n_rows != dimensionality)
      Log::Fatal << "HMM::Train(): data sequence " << seq << " has "
          << "dimensionality " << dataSeq[seq].n_rows << " (expected "
          << dimensionality << " dimensions)." << std::endl;
  }

  // These are used later for training of each distribution.  We initialize it
  // all now so we don't have to do any allocation later on.
  std::vector<arma::vec> emissionProb(transition.n_cols,
      arma::vec(totalLength));
  arma::mat emissionList(dimensionality, totalLength);

  // This should be the Baum-Welch algorithm (EM for HMM estimation).  This
  // follows the procedure outlined in Elliot, Aggoun, and Moore's book "Hidden
  // Markov Models: Estimation and Control", pp. 36-40.
  for (size_t iter = 0; iter < iterations; iter++)
  {
    // Clear new transition matrix and emission probabilities.
    arma::mat newTransition(transition.n_rows, transition.n_cols);
    newTransition.zeros();

    // Reset log likelihood.
    loglik = 0;

    // Sum over time.
    size_t sumTime = 0;

    // Loop over each sequence.
    for (size_t seq = 0; seq < dataSeq.size(); seq++)
    {
      arma::mat stateProb;
      arma::mat forward;
      arma::mat backward;
      arma::vec scales;

      // Add the log-likelihood of this sequence.  This is the E-step.
      loglik += Estimate(dataSeq[seq], stateProb, forward, backward, scales);

      // Now re-estimate the parameters.  This is the M-step.
      //   T_ij = sum_d ((1 / P(seq[d])) sum_t (f(i, t) T_ij E_i(seq[d][t]) b(i, t + 1)))
      //   E_ij = sum_d ((1 / P(seq[d])) sum_{t | seq[d][t] = j} f(i, t) b(i, t))
      // We store the new estimates in a different matrix.
      for (size_t t = 0; t < dataSeq[seq].n_cols; t++)
      {
        for (size_t j = 0; j < transition.n_cols; j++)
        {
          if (t < dataSeq[seq].n_cols - 1)
          {
            // Estimate of T_ij (probability of transition from state j to
            // state i).  We postpone multiplication of the old T_ij until
            // later.
            for (size_t i = 0; i < transition.n_rows; i++)
              newTransition(i, j) += forward(j, t) * backward(i, t + 1) *
                  emission[i].Probability(dataSeq[seq].unsafe_col(t + 1)) /
                  scales[t + 1];
          }

          // Add to list of emission observations, for Distribution::Estimate().
          emissionList.col(sumTime) = dataSeq[seq].col(t);
          emissionProb[j][sumTime] = stateProb(j, t);
        }
        sumTime++;
      }
    }

    // Assign the new transition matrix.  We use %= (element-wise
    // multiplication) because every element of the new transition matrix must
    // still be multiplied by the old elements (this is the multiplication we
    // earlier postponed).
    transition %= newTransition;

    // Now we normalize the transition matrix.
    for (size_t i = 0; i < transition.n_cols; i++)
      transition.col(i) /= accu(transition.col(i));

    // Now estimate emission probabilities.
    for (size_t state = 0; state < transition.n_cols; state++)
      emission[state].Estimate(emissionList, emissionProb[state]);

    Log::Debug << "Iteration " << iter << ": log-likelihood " << loglik
        << std::endl;

    if (std::abs(oldLoglik - loglik) < tolerance)
    {
      Log::Debug << "Converged after " << iter << " iterations." << std::endl;
      break;
    }

    oldLoglik = loglik;
  }
}
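A minimal usage sketch for the Baum-Welch trainer above. Only Train(const std::vector<arma::mat>&) is taken from the snippet; the header path, namespaces, and the HMM constructor vary between mlpack versions and should be treated as assumptions here:

#include <mlpack/methods/hmm/hmm.hpp>

void TrainExample()
{
  using namespace mlpack;

  // Two observation sequences: rows are dimensions, columns are time steps.
  std::vector<arma::mat> dataSeq(2);
  dataSeq[0] = arma::randn(1, 500);
  dataSeq[1] = arma::randn(1, 300);

  // Assumed constructor: 3 hidden states with 1-dimensional Gaussian emissions.
  hmm::HMM<distribution::GaussianDistribution> model(3,
      distribution::GaussianDistribution(1));

  // Unsupervised Baum-Welch training, as implemented in Train() above.
  model.Train(dataSeq);
}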
Estimate operator - (const Estimate &lhs, const Estimate &rhs)
{
    LongFloat s(lhs.m_Value - rhs.m_Value);
    return Estimate(s, lhs.m_Error + rhs.m_Error +
                       RoundingError(s, s.AdditionRoundingError()));
}
Estimate Estimate::operator << (i32 howmuch) const
{
    LongFloat v(m_Value << howmuch);
    return Estimate(v, (m_Error << howmuch) +
                       RoundingError(v, v.AdditionRoundingError()));
}
Estimate operator * (const Estimate &lhs, i32 rhs)
{
    LongFloat r(lhs.m_Value * rhs);
    return Estimate(r, lhs.m_Error * ErrorEstimate(double(rhs)) +
                       RoundingError(r, r.AdditionRoundingError()));
}
Estimate Estimate::weak_round() const { return Estimate(m_Value.round()); }
//------------------------------------------------------------------------------
Solver::SolverState SequentialEstimator::AdvanceState()
{
   switch (currentState)
   {
      case INITIALIZING:
         #ifdef WALK_STATE_MACHINE
            MessageInterface::ShowMessage("Executing the INITIALIZING state\n");
         #endif
         CompleteInitialization();
         break;

      case PROPAGATING:
         #ifdef WALK_STATE_MACHINE
            MessageInterface::ShowMessage("Executing the PROPAGATING state\n");
         #endif
         FindTimeStep();
         break;

      case CALCULATING:
         #ifdef WALK_STATE_MACHINE
            MessageInterface::ShowMessage("Executing the CALCULATING state\n");
         #endif
         CalculateData();
         break;

      case LOCATING:
         #ifdef WALK_STATE_MACHINE
            MessageInterface::ShowMessage("Executing the LOCATING state\n");
         #endif
         ProcessEvent();
         break;

      case ESTIMATING:
         #ifdef WALK_STATE_MACHINE
            MessageInterface::ShowMessage("Executing the ESTIMATING state\n");
         #endif
         Estimate();
         break;

      case CHECKINGRUN:
         #ifdef WALK_STATE_MACHINE
            MessageInterface::ShowMessage("Executing the CHECKINGRUN state\n");
         #endif
         CheckCompletion();
         break;

      case FINISHED:
         #ifdef WALK_STATE_MACHINE
            MessageInterface::ShowMessage("Executing the FINISHED state\n");
         #endif
         RunComplete();
         break;

      default:
         throw EstimatorException("Unknown state encountered in the " +
               instanceName + " sequential estimator.");
   }

   return currentState;
}
rVector<Goal> Pathfinder::AStar(Tile* _agentTile, bool _avoidEntities,
                                unsigned char _allowedCost, bool _squadPathFinding)
{
    // uint64_t T0, T1; // ticks
    // uint64_t calcTimeLimit = 0;
    // float ms = 500;
    // m_TimeLimit = (uint64_t)(m_ClockFreq*ms / (1000));
    // T0 = SDL_GetPerformanceCounter();

    int counter = 0;
    Terrain* terrain = Terrain::GetInstance();
    rVector<Goal> pathList;
    Entity agent = terrain->WhoIsOnTile(_agentTile);

    // If first run, set current = start.
    if (m_ClosedList.size() == 0)
    {
        m_Current = m_Start;
        m_Nodes[m_Start->Y][m_Start->X].Fcost = 0;
        m_Nodes[m_Start->Y][m_Start->X].TilePointer = m_Current;
        m_Nodes[m_Start->Y][m_Start->X].Parent = m_Current;

        DirtyTile dt;
        dt.x = m_Start->Y;
        dt.y = m_Start->X;
        m_Dirty.push_back(dt);

        m_OpenQ.push(m_Nodes[m_Start->Y][m_Start->X]);

        // #if GENERATE_AI_MAP == 1
        //     if (terrain->IsUnpathableTerrain(m_Goal))
        //     {
        //         DirtyTile dt;
        //         dt.x = m_Goal->X;
        //         dt.y = m_Goal->Y;
        //         m_BadTiles.push_back(dt);
        //         return pathList;
        //     }
        // #else
        if (!UnblockGoal())
        {
            return pathList;
        }
        //#endif
    }

    m_Finished = false;
    while (true)
    {
        counter++;

        // #pragma omp parallel num_threads(8)
        // {
        //     #pragma omp critical
        //     printf("ID: %d, threads: %d, cpus: %d\n", omp_get_thread_num(), omp_get_num_threads(), omp_get_num_procs());
        //#pragma omp for
        for (int i = -1; i < 2; i++)
        {
            for (int j = -1; j < 2; j++)
            {
                if (i == 0 && j == 0)
                    continue;

                int x, y;
                x = (*m_Current).X + j;
                y = (*m_Current).Y + i;

                if (x < 0 || y < 0 || x > m_GridWidth - 1 || y > m_GridHeight - 1)
                {
                    continue;
                }

                if (m_Nodes[x][y].Closed)
                    continue;

                Tile* t = Terrain::GetInstance()->GetTile(x, y);

                // Unwalkable.
                if (Terrain::GetInstance()->IsUnpathableTerrain(t))
                    continue;

                if (_avoidEntities)
                {
                    if (Terrain::GetInstance()->IsOccupiedByNonEvasiveUnitExclude(t, agent))
                        continue;
                }

                int diagonal = i * j;
                int gCost = 0;
                // Use integers instead of float, multiply 100. 141 == approx sqrt(2).
                gCost = (diagonal == 0) ? COST_FACTOR_STRAIGHT : COST_FACTOR_DIAGONAL;

                if (t->Cost < _allowedCost)
                    gCost += ((_allowedCost - t->Cost) * COST_FACTOR_STRAIGHT) * 4;

                m_Nodes[x][y].Fcost = gCost + Estimate(t, m_Goal);
                m_Nodes[x][y].TilePointer = t;
                m_Nodes[x][y].Parent = m_Current;
                m_Nodes[x][y].Closed = true;

                //#if AI_DEBUG == 1
                //    Terrain::GetInstance()->GetTile(x, y)->DebugInfo = 1;
                //#endif

                // #pragma omp critical
                // {
                m_OpenQ.push(m_Nodes[x][y]);
                DirtyTile dt;
                dt.x = x;
                dt.y = y;
                m_Dirty.push_back(dt);
                // }
            }
        }
        // }

        Node n = m_OpenQ.top();
        Tile* t = n.TilePointer;
        m_ClosedList.push_back(n);
        m_Current = n.TilePointer;
        //terrain->SetTerrain(t->x, t->y, 'C');
        m_OpenQ.pop();

        // T1 = SDL_GetPerformanceCounter();
        // calcTimeLimit += (T1 - T0) /* *1000 / m_clockFreq */;

        if (t == m_Goal /*|| calcTimeLimit > m_TimeLimit*/ ||
            (m_ClosedList.size() > MAX_CLOSED_LIST_SIZE_SQUAD && _squadPathFinding) ||
            (m_ClosedList.size() > MAX_CLOSED_LIST_SIZE_AGENT && !_squadPathFinding) ||
            m_OpenQ.empty())
        {
            // #if AI_DEBUG == 1
            //     if (calcTimeLimit > m_TimeLimit)
            //         printf("Time limit break %.4fms\n", static_cast<double>((T1 - T0) * 1000) / (m_ClockFreq));
            // #endif
            // if (calcTimeLimit > m_TimeLimit)
            // {
            //     ConstructPath(m_Goal, m_ClosedList, pathList);
            //     break;
            // }
            // else
            m_Finished = true;

            // if (m_ClosedList.size() > MAX_CLOSED_LIST_SIZE)
            //     printf("closed list too big\n");

            //#if GENERATE_AI_MAP == 0
            // if (m_OpenQ.empty())
            //     printf("NO PATH FOUND\n");
            ConstructPath(m_Goal, m_ClosedList, pathList, _avoidEntities, _allowedCost);
            // #else
            // if (m_ClosedList.size() > MAX_CLOSED_LIST_SIZE_SQUAD || m_OpenQ.empty())
            // {
            //     //printf("A* Early exit, reached max size on closed list. %d\n", MAX_CLOSED_LIST_SIZE);
            //     DirtyTile dt;
            //     dt.x = m_Goal->X;
            //     dt.y = m_Goal->Y;
            //     m_BadTiles.push_back(dt);
            // }
            // #endif

            break;
        }
    }

    // Cleanup.
    if (m_Finished)
        CleanUpLists();

    return pathList;
}
//------------------------------------------------------------------------------
Solver::SolverState BatchEstimator::AdvanceState()
{
   switch (currentState)
   {
      case INITIALIZING:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "INITIALIZING\n");
         #endif
         // ReportProgress();
         CompleteInitialization();
         break;

      case PROPAGATING:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "PROPAGATING\n");
         #endif
         // ReportProgress();
         FindTimeStep();
         break;

      case CALCULATING:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "CALCULATING\n");
         #endif
         // ReportProgress();
         CalculateData();
         break;

      case LOCATING:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "LOCATING\n");
         #endif
         // ReportProgress();
         ProcessEvent();
         break;

      case ACCUMULATING:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "ACCUMULATING\n");
         #endif
         // ReportProgress();
         Accumulate();
         break;

      case ESTIMATING:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "ESTIMATING\n");
         #endif
         // ReportProgress();
         Estimate();
         break;

      case CHECKINGRUN:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "CHECKINGRUN\n");
         #endif
         // ReportProgress();
         CheckCompletion();
         break;

      case FINISHED:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "FINISHED\n");
         #endif
         RunComplete();
         // ReportProgress();
         break;

      default:
         #ifdef DEBUG_STATE_MACHINE
            MessageInterface::ShowMessage("Entered Estimator state machine: "
                  "Bad state for an estimator.\n");
         #endif
         /* throw EstimatorException("Solver state not supported for the simulator") */;
   }

   return currentState;
}
// operations

Estimate operator - (const Estimate &arg)
{
    return Estimate(-arg.m_Value, arg.m_Error);
}