// Does p point anywhere inside this nmethod: its header, dependency links,
// instructions, relocation/locs info, scope descriptors, or PC descriptors?
bool nmethod::encompasses(void* p) {
  // Check each region in turn, preserving the original short-circuit order.
  if (includes(p, this, this + 1))      return true;
  if (includes(p, deps(), depsEnd()))   return true;
  if (includes(p, insts(), instsEnd())) return true;
  if (includes(p, locs(), locsEnd()))   return true;
  if (scopes->includes((ScopeDesc*) p)) return true;
  return includes(p, pcs(), pcsEnd());
}
void Project::includeDeps(const Fields& columns) { for (Fields f = flds; ! nil(f); ++f) { gcstring deps(*f + "_deps"); if (columns.member(deps) && ! flds.member(deps)) flds.append(deps); } }
// Accumulate a fractional alignment count for a jump to position `istrich`
// from position `k`.  The table index `pos` is derived from the global
// PredictionInAlignments mode, then `value` is added into alProb[deps][pos]
// and, when nonzero, `valuePredicted` into alProbPredicted[deps][pos].
// Both table updates are serialized with `lock`; missing rows are created
// on demand with an index range of +/-MAX_SENTENCE_LENGTH or +/-sentLength
// depending on bit 0 of CompareAlDeps.
template<class CLS, class MAPPERCLASSTOSTRING>
void HMMTables<CLS, MAPPERCLASSTOSTRING>::addAlCount(int istrich, int k,
    int sentLength, int J, CLS w1, CLS w2, int j, double value,
    double valuePredicted)
{
  int pos=istrich-k;
  switch (PredictionInAlignments) {
  case 0:
    // Plain jump width (same as the initializer above).
    pos=istrich-k;
    break;
  case 1:
    // Absolute source position.
    pos=k;
    break;
  case 2:
    // Relative position scaled by sentence lengths, rounded away from zero
    // by adding/subtracting J/2 before the integer division.
    pos=(k*J-j*sentLength);
    if (pos>0) pos+=J/2;
    else pos-=J/2;
    pos/=J;
    break;
  default:
    abort();  // unknown prediction mode is a programming error
  }
  AlDeps<CLS> deps(AlDeps<CLS>(sentLength, istrich, j, w1, w2));
  {
    // Update the observed-count table under the lock.
    lock.lock();
    typename map<AlDeps<CLS>,FlexArray<double> >::iterator p= alProb.find(deps);
    if (p==alProb.end() ) {
      // Row does not exist yet: create it with the appropriate index range.
      if ( (CompareAlDeps&1)==0)
        p =alProb.insert(make_pair(deps,FlexArray<double> (-MAX_SENTENCE_LENGTH,MAX_SENTENCE_LENGTH,0.0))).first;
      else
        p=alProb.insert(make_pair(deps,FlexArray<double> (-sentLength,sentLength,0.0))).first;
    }
    p->second[pos]+=value;
    lock.unlock();
  }
  if (valuePredicted) {
    // Same update for the predicted-count table, only when there is
    // something to add.
    lock.lock();
    typename map<AlDeps<CLS>,FlexArray<double> >::iterator p= alProbPredicted.find(deps);
    if (p==alProbPredicted.end() ) {
      if ( (CompareAlDeps&1)==0)
        p =alProbPredicted.insert(make_pair(deps,FlexArray<double> (-MAX_SENTENCE_LENGTH,MAX_SENTENCE_LENGTH,0.0))).first;
      else
        p=alProbPredicted.insert(make_pair(deps,FlexArray<double> (-sentLength,sentLength,0.0))).first;
    }
    p->second[pos]+=valuePredicted;
    lock.unlock();
  }
}
// Insert (or refresh) the graph node keyed by the item's name: store the
// item, reset its processed flag, and rebuild its trimmed edge list from
// the item's dependency names.
graph::graphnode& graph::_insert(item_wrapper it_p) {
  graphnode& node = nodes[it_p->name()];
  node.p = it_p;
  node.processed = 0;
  // NOTE(review): mixed access — name() via operator-> but dependencies()
  // via operator. on the wrapper itself; preserved as-is, confirm intended.
  std::vector<std::string> dep_names(it_p.dependencies());
  node.pointsto.clear();
  for (auto& dep : dep_names) {
    trim(dep);
    node.pointsto.push_back(dep);
  }
  return node;
}
void nmethod::unlink() { removeFromCodeTable(); zoneLink.remove(); remove_me_from_inline_cache(); if (diLink.notEmpty()) { assert(diLink.next->next == &diLink, "should only be a pair on a diLink"); diLink.next->asDIDesc()->unlink_me(); } for (nmln* d = deps(), *dend = depsEnd(); d < dend; d++) { d->remove(); } MachineCache::flush_instruction_cache_for_debugging(); }
// Test fixture setup: records the expected key-property names for each
// provider class under test, sets up the three network provider agents
// (asserting each caused RefuseUnload to be called), and injects two fake
// network interfaces into the global network provider: "eth0" with all
// optional properties set and "eth1" with none.
void setUp(void)
{
    // Key property of SCX_EthernetPortStatistics.
    m_keyNamesEPS.push_back(L"InstanceID");
    // Key properties of SCX_IPProtocolEndpoint.
    m_keyNamesIPPE.push_back(L"Name");
    m_keyNamesIPPE.push_back(L"SystemCreationClassName");
    m_keyNamesIPPE.push_back(L"SystemName");
    m_keyNamesIPPE.push_back(L"CreationClassName");
    // Key properties of SCX_LANEndpoint.
    m_keyNamesLANE.push_back(L"Name");
    m_keyNamesLANE.push_back(L"SystemCreationClassName");
    m_keyNamesLANE.push_back(L"SystemName");
    m_keyNamesLANE.push_back(L"CreationClassName");
    std::wstring errMsg;
    TestableContext context;
    // Each SetUpAgent call must trigger RefuseUnload on the context.
    SetUpAgent<mi::SCX_EthernetPortStatistics_Class_Provider>(context, CALL_LOCATION(errMsg));
    CPPUNIT_ASSERT_EQUAL_MESSAGE(ERROR_MESSAGE, true, context.WasRefuseUnloadCalled() );
    SetUpAgent<mi::SCX_IPProtocolEndpoint_Class_Provider>(context, CALL_LOCATION(errMsg));
    CPPUNIT_ASSERT_EQUAL_MESSAGE(ERROR_MESSAGE, true, context.WasRefuseUnloadCalled() );
    SetUpAgent<mi::SCX_LANEndpoint_Class_Provider>(context, CALL_LOCATION(errMsg));
    CPPUNIT_ASSERT_EQUAL_MESSAGE(ERROR_MESSAGE, true, context.WasRefuseUnloadCalled() );
    // Build the injected interface instances.
    vector< SCXCoreLib::SCXHandle<SCXSystemLib::NetworkInterfaceInstance> > originalInstances;
    const unsigned allProperties = static_cast<unsigned> (-1);
    const unsigned noOptionalProperties = 0;
    // "eth0": every optional property populated.
    originalInstances.push_back( SCXCoreLib::SCXHandle<SCXSystemLib::NetworkInterfaceInstance>(
        new NetworkInterfaceInstance(NetworkInterfaceInfo(
            L"eth0", allProperties,
            // L"0a123C4Defa6",
            L"192.168.0.34", L"255.255.255.0", L"192.168.0.255",
            10000, 20000, 100, 200, 1, 2, 3, true, true,
            SCXCoreLib::SCXHandle<NetworkInterfaceDependencies>(0)))));
    // "eth1": no optional properties.
    originalInstances.push_back(SCXCoreLib::SCXHandle<SCXSystemLib::NetworkInterfaceInstance>(
        new NetworkInterfaceInstance(NetworkInterfaceInfo(
            L"eth1", noOptionalProperties,
            // L"001122334455",
            L"192.168.1.35", L"255.255.255.0", L"192.168.1.255",
            20000, 40000, 200, 400, 2, 4, 6, false, false,
            SCXCoreLib::SCXHandle<NetworkInterfaceDependencies>(0)))));
    // Hand the fake instances to the provider via injected dependencies.
    SCXHandle<InjectedNetworkProviderDependencies> deps(new InjectedNetworkProviderDependencies());
    deps->SetInstances(originalInstances);
    SCXCore::g_NetworkProvider.UpdateDependencies(deps);
}
// Scaled infinity norm of the current equality/inequality multipliers:
// max(||y_c||_inf, ||y_d||_inf) / max(1, ||grad f||_inf), cached against
// the current iterate's x, y_c and y_d.
Number CGPenaltyCq::curr_scaled_y_Amax()
{
  DBG_START_METH("CGPenaltyCq::curr_scaled_y_Amax()", dbg_verbosity);
  Number scaled_amax;
  SmartPtr<const Vector> x = ip_data_->curr()->x();
  SmartPtr<const Vector> y_c = ip_data_->curr()->y_c();
  SmartPtr<const Vector> y_d = ip_data_->curr()->y_d();
  // Cache key: the three vectors defining the quantity.
  std::vector<const TaggedObject*> cache_deps(3);
  cache_deps[0] = GetRawPtr(x);
  cache_deps[1] = GetRawPtr(y_c);
  cache_deps[2] = GetRawPtr(y_d);
  if (!curr_scaled_y_Amax_cache_.GetCachedResult(scaled_amax, cache_deps)) {
    scaled_amax = Max(y_c->Amax(), y_d->Amax());
    scaled_amax /= Max(1., ip_cq_->curr_grad_f()->Amax());
    curr_scaled_y_Amax_cache_.AddCachedResult(scaled_amax, cache_deps);
  }
  return scaled_amax;
}
void ConsoleBatch::setupDeskew(std::set<PageId> allPages) { IntrusivePtr<deskew::Filter> deskew = m_ptrStages->deskewFilter(); CommandLine const& cli = CommandLine::get(); for (std::set<PageId>::iterator i=allPages.begin(); i!=allPages.end(); i++) { PageId page = *i; // DESKEW FILTER OrthogonalRotation rotation; if (cli.hasDeskewAngle() || cli.hasDeskew()) { double angle = 0.0; if (cli.hasDeskewAngle()) angle = cli.getDeskewAngle(); deskew::Dependencies deps(QPolygonF(), rotation); deskew::Params params(angle, deps, MODE_MANUAL); deskew->getSettings()->setPageParams(page, params); } } }
// Computes factor * wr_d + CD_x0 (just factor * wr_d when CD_x0 is not
// given).  The result is cached against the two vector operands and the
// scalar factor.
SmartPtr<const Vector> AugRestoSystemSolver::D_x_plus_wr_d(
   const SmartPtr<const Vector>& CD_x0,
   Number factor,
   const Vector& wr_d)
{
  DBG_START_METH("AugRestoSystemSolver::D_x_plus_wr_d",dbg_verbosity);
  SmartPtr<Vector> retVec;
  // Cache key: wr_d and CD_x0; NULL stands in for an absent CD_x0 so the
  // two cases cannot collide.
  std::vector<const TaggedObject*> deps(2);
  deps[0] = &wr_d;
  if (IsValid(CD_x0)) {
    deps[1] = GetRawPtr(CD_x0);
  }
  else {
    deps[1] = NULL;
  }
  std::vector<Number> scalar_deps(1);
  scalar_deps[0] = factor;
  if (!d_x_plus_wr_d_cache_.GetCachedResult(retVec, deps, scalar_deps)) {
    DBG_PRINT((1,"Not found in cache\n"));
    retVec = wr_d.MakeNew();
    Number fact;
    SmartPtr<const Vector> v;
    if (IsValid(CD_x0)) {
      fact = 1.;
      v = CD_x0;
    }
    else {
      // No CD_x0: add wr_d again with weight 0 so the single
      // AddTwoVectors call below covers both cases.
      fact = 0.;
      v = &wr_d;
    }
    retVec->AddTwoVectors(factor, wr_d, fact, *v, 0.);
    d_x_plus_wr_d_cache_.AddCachedResult(retVec, deps, scalar_deps);
  }
  DBG_PRINT_VECTOR(2, "retVec", *retVec);
  return ConstPtr(retVec);
}
// Computes -Omega_d + D_d, i.e.
//   -Pd_L * sigma_tilde_n_d_inv + neg_Pd_U * sigma_tilde_p_d_inv + D_d,
// where each term is included only when its operand is present.  Returns a
// NULL smart pointer when there is nothing to compute.  The result is
// cached against all five operands.
SmartPtr<const Vector> AugRestoSystemSolver::Neg_Omega_d_plus_D_d(
   const Matrix& Pd_L,
   const SmartPtr<const Vector>& sigma_tilde_n_d_inv,
   const Matrix& neg_Pd_U,
   const SmartPtr<const Vector>& sigma_tilde_p_d_inv,
   const Vector* D_d,
   const Vector& any_vec_in_d)
{
  DBG_START_METH("AugRestoSystemSolver::Neg_Omega_d_plus_D_d",dbg_verbosity);
  SmartPtr<Vector> retVec;
  if (IsValid(sigma_tilde_n_d_inv) || IsValid(sigma_tilde_p_d_inv) || D_d) {
    std::vector<const TaggedObject*> deps(5);
    std::vector<Number> scalar_deps;
    deps[0] = &Pd_L;
    deps[1] = GetRawPtr(sigma_tilde_n_d_inv);
    deps[2] = &neg_Pd_U;
    deps[3] = GetRawPtr(sigma_tilde_p_d_inv);
    deps[4] = D_d;
    if (!neg_omega_d_plus_D_d_cache_.
        GetCachedResult(retVec, deps, scalar_deps)) {
      DBG_PRINT((1,"Not found in cache\n"));
      retVec = any_vec_in_d.MakeNew();
      retVec->Set(0.0);
      if (IsValid(sigma_tilde_n_d_inv)) {
        Pd_L.MultVector(-1.0, *sigma_tilde_n_d_inv, 1.0, *retVec);
      }
      if (IsValid(sigma_tilde_p_d_inv)) {
        neg_Pd_U.MultVector(1.0, *sigma_tilde_p_d_inv, 1.0, *retVec);
      }
      if (D_d) {
        // BUG FIX: this used to be retVec->Copy(*D_d), which overwrote the
        // -Omega_d contributions accumulated above instead of adding D_d
        // to them, contradicting the function's "plus D_d" contract.
        retVec->Axpy(1.0, *D_d);
      }
      neg_omega_d_plus_D_d_cache_.
      AddCachedResult(retVec, deps, scalar_deps);
    }
  }
  return ConstPtr(retVec);
}
// Elementwise inverse of (sigma_n_c + delta_x).  Returns a NULL smart
// pointer when sigma_n_c is absent and delta_x is zero; otherwise the
// result is cached against sigma_n_c and delta_x.
SmartPtr<const Vector> AugRestoSystemSolver::Sigma_tilde_n_c_inv(
   const SmartPtr<const Vector>& sigma_n_c,
   Number delta_x,
   const Vector& any_vec_in_c)
{
  DBG_START_METH("AugRestoSystemSolver::Sigma_tilde_n_c_inv",dbg_verbosity);
  SmartPtr<Vector> result;
  const bool have_sigma = IsValid(sigma_n_c);
  if (have_sigma || delta_x != 0.0) {
    std::vector<const TaggedObject*> tag_deps(1);
    std::vector<Number> num_deps(1);
    tag_deps[0] = GetRawPtr(sigma_n_c);
    num_deps[0] = delta_x;
    if (!sigma_tilde_n_c_inv_cache_.GetCachedResult(result, tag_deps, num_deps)) {
      DBG_PRINT((1,"Not found in cache\n"));
      result = any_vec_in_c.MakeNew();
      if (!have_sigma) {
        // No sigma part: constant vector 1/delta_x.
        result->Set(1./delta_x);
      }
      else if (delta_x != 0.) {
        result->Copy(*sigma_n_c);
        result->AddScalar(delta_x);
        result->ElementWiseReciprocal();
      }
      else {
        // Given a "homogenous vector" implementation (such as in
        // DenseVector) this form is more efficient than Copy+Reciprocal.
        result->Set(1.);
        result->ElementWiseDivide(*sigma_n_c);
      }
      sigma_tilde_n_c_inv_cache_.AddCachedResult(result, tag_deps, num_deps);
    }
  }
  return ConstPtr(result);
}
// Assembles the reduced right-hand side for the d block:
//   rhs_d - pd_L*(sigma_tilde_n_d_inv .* rhs_n_d)
//         - neg_pd_U*(sigma_tilde_p_d_inv .* rhs_p_d),
// skipping either correction term when its sigma vector is absent.  The
// result is cached against all seven operands.
SmartPtr<const Vector> AugRestoSystemSolver::Rhs_dR(const Vector& rhs_d,
    const SmartPtr<const Vector>& sigma_tilde_n_d_inv, const Vector& rhs_n_d,
    const Matrix& pd_L, const SmartPtr<const Vector>& sigma_tilde_p_d_inv,
    const Vector& rhs_p_d, const Matrix& neg_pd_U)
{
  DBG_START_METH("AugRestoSystemSolver::Rhs_dR",dbg_verbosity);
  SmartPtr<Vector> result;
  std::vector<const TaggedObject*> tag_deps(7);
  std::vector<Number> num_deps;
  tag_deps[0] = &rhs_d;
  tag_deps[1] = GetRawPtr(sigma_tilde_n_d_inv);
  tag_deps[2] = &rhs_n_d;
  tag_deps[3] = &pd_L;
  tag_deps[4] = GetRawPtr(sigma_tilde_p_d_inv);
  tag_deps[5] = &rhs_p_d;
  tag_deps[6] = &neg_pd_U;
  if (!rhs_dR_cache_.GetCachedResult(result, tag_deps, num_deps)) {
    DBG_PRINT((1,"Not found in cache\n"));
    result = rhs_d.MakeNew();
    result->Copy(rhs_d);
    if (IsValid(sigma_tilde_n_d_inv)) {
      SmartPtr<Vector> scaled_n = sigma_tilde_n_d_inv->MakeNew();
      scaled_n->Copy(*sigma_tilde_n_d_inv);
      scaled_n->ElementWiseMultiply(rhs_n_d);
      pd_L.MultVector(-1.0, *scaled_n, 1.0, *result);
    }
    if (IsValid(sigma_tilde_p_d_inv)) {
      SmartPtr<Vector> scaled_p = sigma_tilde_p_d_inv->MakeNew();
      scaled_p->Copy(*sigma_tilde_p_d_inv);
      scaled_p->ElementWiseMultiply(rhs_p_d);
      neg_pd_U.MultVector(-1.0, *scaled_p, 1.0, *result);
    }
    rhs_dR_cache_.AddCachedResult(result, tag_deps, num_deps);
  }
  return ConstPtr(result);
}
// Elementwise inverse of (sigma_p_d + delta_x).  Returns a NULL smart
// pointer when sigma_p_d is absent and delta_x is zero; otherwise the
// result is cached against sigma_p_d and delta_x.
SmartPtr<const Vector> AugRestoSystemSolver::Sigma_tilde_p_d_inv(
   const SmartPtr<const Vector>& sigma_p_d,
   Number delta_x,
   const Vector& any_vec_in_p_d)
{
  DBG_START_METH("AugRestoSystemSolver::Sigma_tilde_p_d_inv",dbg_verbosity);
  SmartPtr<Vector> inv_vec;
  const bool have_sigma = IsValid(sigma_p_d);
  if (!have_sigma && delta_x == 0.) {
    // Nothing to compute: NULL result.
    return ConstPtr(inv_vec);
  }
  std::vector<const TaggedObject*> tag_deps(1);
  std::vector<Number> num_deps(1);
  tag_deps[0] = GetRawPtr(sigma_p_d);
  num_deps[0] = delta_x;
  if (!sigma_tilde_p_d_inv_cache_.GetCachedResult(inv_vec, tag_deps, num_deps)) {
    DBG_PRINT((1,"Not found in cache\n"));
    inv_vec = any_vec_in_p_d.MakeNew();
    if (!have_sigma) {
      // No sigma part: constant vector 1/delta_x.
      inv_vec->Set(1./delta_x);
    }
    else if (delta_x != 0.) {
      inv_vec->Copy(*sigma_p_d);
      inv_vec->AddScalar(delta_x);
      inv_vec->ElementWiseReciprocal();
    }
    else {
      // More efficient for homogeneous-vector implementations.
      inv_vec->Set(1.);
      inv_vec->ElementWiseDivide(*sigma_p_d);
    }
    sigma_tilde_p_d_inv_cache_.AddCachedResult(inv_vec, tag_deps, num_deps);
  }
  return ConstPtr(inv_vec);
}
// Print this nmethod's dependents.  For each dependency link, walk its
// circular nmln list; for each neighbor, find the start of the enclosing
// dZone block and, if the oop stored there is a real heap object, print
// the object found by scanning backwards from it.  Complains when a link's
// list contains no such object.
void nmethod::printDeps() {
  ResourceMark m;     // in case methods get printed from gdb
  printIndent();
  lprintf("dependents:\n");
  Indent ++;
  for (nmln* n = deps(); n < depsEnd(); n ++) {
    printIndent();
    n->print();
    nmln* n1;
    // Walk the circular list starting just after n.
    for (n1= n->next; n1 != n; n1= n1->next) {
      oop *q= (oop*)(Memory->code->dZone->findStartOfBlock(n1));
      oop mapP= *q;
      if (Memory->really_contains(mapP)) {
        lprintf(" ");
        oop p= Memory->spaceFor(mapP)->find_oop_backwards(mapP);
        p->print();
        lprintf("\n");
        break;
      }
    }
    // The walk wrapped all the way around without finding anything.
    if (n1 == n) lprintf(" ??not linked to any map??\n");
  }
  Indent --;
}
// Merge the dependencies in dl into this nmethod's dependency region.
// First removes from dl any link already present in this nmethod's deps,
// then moves the existing deps into dl, and finally either copies the
// combined list back in place (when the count is unchanged) or allocates
// a larger deps region in the code zone, relocates the links, frees the
// old region, and updates the back link.
void nmethod::addDeps(dependencyList *dl) {
  nmln *d;
  // Drop duplicates: any link in dl whose circular list already passes
  // through this nmethod's deps region is removed from that list.
  for (d= dl->start(); d < dl->end(); d++) {
    for (nmln *p= d->next; p != d; p= p->next)
      if (p >= deps() && p < depsEnd()) {
        // nmethod already on this list
        p->remove();
        break;
      }
  }
  // Move the current deps over into dl.
  for (nmln *nd= deps(); nd < depsEnd(); nd++)
    if (nd->notEmpty()) {
      dl->add(nd);
      // could make this a shade more efficient by adding
      // a function to dependencyList
      nd->remove();
    }
  fint ndeps= dl->length();
  if (ndeps == depsEnd() - deps()) {
    // no need to allocate new region, just copy
    copy_words((int32*)dl->start(), (int32*)deps(), depsLen / sizeof(nmln*));
    for (d= deps(); d < depsEnd(); d++) d->relocate();
    return;
  }
  assert(ndeps > depsEnd() - deps(), "deps didn't grow!");
  // now make dl the method's deps
  fint newDepsLen= ndeps * sizeof(nmln);
  // The region is prefixed by a back pointer to the owning nmethod.
  char *dAddr= Memory->code->allocateDeps(newDepsLen + sizeof(nmethod*));
  nmln *newDeps= (nmln*) (dAddr + sizeof(nmethod*));
  copy_words((int32*)dl->start(), (int32*)newDeps, newDepsLen / sizeof(nmln*));
  for (d= newDeps; d < newDeps + ndeps; d++) d->relocate();
  // Release the old region (including its back-link prefix) and switch
  // this nmethod over to the new one.
  Memory->code->deallocateDeps((char*)dBackLinkAddr(), depsLen + sizeof(nmethod*));
  depsAddr= newDeps;
  depsLen= newDepsLen;
  *dBackLinkAddr()= this;
}
// Solve the primal-dual KKT system once via the augmented-system solver.
// Builds the condensed augmented right-hand side from rhs and the slacks,
// detects (via a tag cache over the 13 defining objects) whether the
// system matrix changed since the last call, and then either reuses the
// current perturbation values (unchanged system) or enters a retry loop
// that perturbs the system until the factorization succeeds with the
// correct inertia.  Afterwards the bound-multiplier components are
// recovered and res is updated via res.AddOneVector(alpha, *sol, beta).
// Returns false on solver failure.
bool PDFullSpaceSolver::SolveOnce(bool resolve_with_better_quality,
                                  bool pretend_singular,
                                  const SymMatrix& W,
                                  const Matrix& J_c,
                                  const Matrix& J_d,
                                  const Matrix& Px_L,
                                  const Matrix& Px_U,
                                  const Matrix& Pd_L,
                                  const Matrix& Pd_U,
                                  const Vector& z_L,
                                  const Vector& z_U,
                                  const Vector& v_L,
                                  const Vector& v_U,
                                  const Vector& slack_x_L,
                                  const Vector& slack_x_U,
                                  const Vector& slack_s_L,
                                  const Vector& slack_s_U,
                                  const Vector& sigma_x,
                                  const Vector& sigma_s,
                                  Number alpha,
                                  Number beta,
                                  const IteratesVector& rhs,
                                  IteratesVector& res)
{
  // TO DO LIST:
  //
  // 1. decide for reasonable return codes (e.g. fatal error, too
  //    ill-conditioned...)
  // 2. Make constants parameters that can be set from the outside
  // 3. Get Information out of Ipopt structures
  // 4. add heuristic for structurally singular problems
  // 5. see if it makes sense to distinguish delta_x and delta_s,
  //    or delta_c and delta_d
  // 6. increase pivot tolerance if number of get evals so too small
  DBG_START_METH("PDFullSpaceSolver::SolveOnce",dbg_verbosity);

  IpData().TimingStats().PDSystemSolverSolveOnce().Start();

  // Compute the right hand side for the augmented system formulation
  SmartPtr<Vector> augRhs_x = rhs.x()->MakeNewCopy();
  Px_L.AddMSinvZ(1.0, slack_x_L, *rhs.z_L(), *augRhs_x);
  Px_U.AddMSinvZ(-1.0, slack_x_U, *rhs.z_U(), *augRhs_x);

  SmartPtr<Vector> augRhs_s = rhs.s()->MakeNewCopy();
  Pd_L.AddMSinvZ(1.0, slack_s_L, *rhs.v_L(), *augRhs_s);
  Pd_U.AddMSinvZ(-1.0, slack_s_U, *rhs.v_U(), *augRhs_s);

  // Get space into which we can put the solution of the augmented system
  SmartPtr<IteratesVector> sol = res.MakeNewIteratesVector(true);

  // Now check whether any data has changed
  std::vector<const TaggedObject*> deps(13);
  deps[0] = &W;
  deps[1] = &J_c;
  deps[2] = &J_d;
  deps[3] = &z_L;
  deps[4] = &z_U;
  deps[5] = &v_L;
  deps[6] = &v_U;
  deps[7] = &slack_x_L;
  deps[8] = &slack_x_U;
  deps[9] = &slack_s_L;
  deps[10] = &slack_s_U;
  deps[11] = &sigma_x;
  deps[12] = &sigma_s;
  // NOTE(review): `dummy` is never initialized; the cache appears to be
  // used only for its tag comparison, not for the stored value — confirm.
  void* dummy;
  bool uptodate = dummy_cache_.GetCachedResult(dummy, deps);
  if (!uptodate) {
    dummy_cache_.AddCachedResult(dummy, deps);
    augsys_improved_ = false;
  }
  // improve_current_solution can only be true, if that system has
  // been solved before
  DBG_ASSERT((!resolve_with_better_quality && !pretend_singular) || uptodate);

  ESymSolverStatus retval;
  if (uptodate && !pretend_singular) {
    // Get the perturbation values
    Number delta_x;
    Number delta_s;
    Number delta_c;
    Number delta_d;
    perturbHandler_->CurrentPerturbation(delta_x, delta_s, delta_c, delta_d);

    // No need to go through the pain of finding the appropriate
    // values for the deltas, because the matrix hasn't changed since
    // the last call.  So, just call the Solve Method
    //
    // Note: resolve_with_better_quality is true, then the Solve
    // method has already asked the augSysSolver to increase the
    // quality at the end solve, and we are now getting the solution
    // with that better quality
    retval = augSysSolver_->Solve(&W, 1.0, &sigma_x, delta_x,
                                  &sigma_s, delta_s, &J_c, NULL,
                                  delta_c, &J_d, NULL, delta_d,
                                  *augRhs_x, *augRhs_s, *rhs.y_c(), *rhs.y_d(),
                                  *sol->x_NonConst(), *sol->s_NonConst(),
                                  *sol->y_c_NonConst(), *sol->y_d_NonConst(),
                                  false, 0);
    if (retval!=SYMSOLVER_SUCCESS) {
      IpData().TimingStats().PDSystemSolverSolveOnce().End();
      return false;
    }
  }
  else {
    const Index numberOfEVals=rhs.y_c()->Dim()+rhs.y_d()->Dim();
    // counter for the number of trial evaluations
    // (ToDo is not at the correct place)
    Index count = 0;

    // Get the very first perturbation values from the perturbation
    // Handler
    Number delta_x;
    Number delta_s;
    Number delta_c;
    Number delta_d;
    perturbHandler_->ConsiderNewSystem(delta_x, delta_s, delta_c, delta_d);

    retval = SYMSOLVER_SINGULAR;
    bool fail = false;

    // Retry until the factorization succeeds (or a perturbation handler
    // gives up, causing an early return).
    while (retval!= SYMSOLVER_SUCCESS && !fail) {
      if (pretend_singular) {
        // Skip the first factorization and act as if it was singular.
        retval = SYMSOLVER_SINGULAR;
        pretend_singular = false;
      }
      else {
        count++;
        Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                       "Solving system with delta_x=%e delta_s=%e\n delta_c=%e delta_d=%e\n",
                       delta_x, delta_s, delta_c, delta_d);
        // When the negative-curvature test is active, inertia checking is
        // delegated to the heuristic below instead of the solver.
        bool check_inertia = true;
        if (neg_curv_test_tol_ > 0.) {
          check_inertia = false;
        }
        retval = augSysSolver_->Solve(&W, 1.0, &sigma_x, delta_x,
                                      &sigma_s, delta_s, &J_c, NULL,
                                      delta_c, &J_d, NULL, delta_d,
                                      *augRhs_x, *augRhs_s, *rhs.y_c(), *rhs.y_d(),
                                      *sol->x_NonConst(), *sol->s_NonConst(),
                                      *sol->y_c_NonConst(), *sol->y_d_NonConst(),
                                      check_inertia, numberOfEVals);
      }
      // NOTE(review): unlike the other failure paths, this return skips
      // IpData().TimingStats().PDSystemSolverSolveOnce().End() — confirm
      // whether that is intended.
      if (retval==SYMSOLVER_FATAL_ERROR) return false;
      if (retval==SYMSOLVER_SINGULAR &&
          (rhs.y_c()->Dim()+rhs.y_d()->Dim() > 0) ) {

        // Get new perturbation factors from the perturbation
        // handlers for the singular case
        bool pert_return = perturbHandler_->PerturbForSingularity(delta_x, delta_s,
                           delta_c, delta_d);
        if (!pert_return) {
          Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                         "PerturbForSingularity can't be done\n");
          IpData().TimingStats().PDSystemSolverSolveOnce().End();
          return false;
        }
      }
      else if (retval==SYMSOLVER_WRONG_INERTIA &&
               augSysSolver_->NumberOfNegEVals() < numberOfEVals) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Number of negative eigenvalues too small!\n");
        // If the number of negative eigenvalues is too small, then
        // we first try to remedy this by asking for better quality
        // solution (e.g. increasing pivot tolerance), and if that
        // doesn't help, we assume that the system is singular
        bool assume_singular = true;
        if (!augsys_improved_) {
          Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                         "Asking augmented system solver to improve quality of its solutions.\n");
          augsys_improved_ = augSysSolver_->IncreaseQuality();
          if (augsys_improved_) {
            IpData().Append_info_string("q");
            assume_singular = false;
          }
          else {
            Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                           "Quality could not be improved\n");
          }
        }
        if (assume_singular) {
          bool pert_return =
            perturbHandler_->PerturbForSingularity(delta_x, delta_s,
                                                   delta_c, delta_d);
          if (!pert_return) {
            Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                           "PerturbForSingularity can't be done for assume singular.\n");
            IpData().TimingStats().PDSystemSolverSolveOnce().End();
            return false;
          }
          IpData().Append_info_string("a");
        }
      }
      else if (retval==SYMSOLVER_WRONG_INERTIA ||
               retval==SYMSOLVER_SINGULAR) {
        // Get new perturbation factors from the perturbation
        // handlers for the case of wrong inertia
        bool pert_return = perturbHandler_->PerturbForWrongInertia(delta_x, delta_s,
                           delta_c, delta_d);
        if (!pert_return) {
          Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                         "PerturbForWrongInertia can't be done for wrong interia or singular.\n");
          IpData().TimingStats().PDSystemSolverSolveOnce().End();
          return false;
        }
      }
      else if (neg_curv_test_tol_ > 0.) {
        DBG_ASSERT(augSysSolver_->ProvidesInertia());
        // we now check if the inertia is possible wrong
        Index neg_values = augSysSolver_->NumberOfNegEVals();
        if (neg_values != numberOfEVals) {
          // check if we have a direction of sufficient positive curvature
          // xWx accumulates x^T (W + Sigma_x [+ delta_x I]) x plus the
          // corresponding s terms.
          SmartPtr<Vector> x_tmp = sol->x()->MakeNew();
          W.MultVector(1., *sol->x(), 0., *x_tmp);
          Number xWx = x_tmp->Dot(*sol->x());
          x_tmp->Copy(*sol->x());
          x_tmp->ElementWiseMultiply(sigma_x);
          xWx += x_tmp->Dot(*sol->x());
          SmartPtr<Vector> s_tmp = sol->s()->MakeNewCopy();
          s_tmp->ElementWiseMultiply(sigma_s);
          xWx += s_tmp->Dot(*sol->s());
          if (neg_curv_test_reg_) {
            // Also account for the current regularization terms.
            x_tmp->Copy(*sol->x());
            x_tmp->Scal(delta_x);
            xWx += x_tmp->Dot(*sol->x());

            s_tmp->Copy(*sol->s());
            s_tmp->Scal(delta_s);
            xWx += s_tmp->Dot(*sol->s());
          }
          Number xs_nrmsq = pow(sol->x()->Nrm2(),2) + pow(sol->s()->Nrm2(),2);
          Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                         "In inertia heuristic: xWx = %e xx = %e\n",
                         xWx, xs_nrmsq);
          if (xWx < neg_curv_test_tol_*xs_nrmsq) {
            // Curvature too small: perturb and redo the factorization.
            Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                           " -> Redo with modified matrix.\n");
            bool pert_return = perturbHandler_->PerturbForWrongInertia(delta_x, delta_s,
                               delta_c, delta_d);
            if (!pert_return) {
              Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                             "PerturbForWrongInertia can't be done for inertia heuristic.\n");
              IpData().TimingStats().PDSystemSolverSolveOnce().End();
              return false;
            }
            retval = SYMSOLVER_WRONG_INERTIA;
          }
        }
      }
    } // while (retval!=SYMSOLVER_SUCCESS && !fail) {

    // Some output
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Number of trial factorizations performed: %d\n",
                   count);
    Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                   "Perturbation parameters: delta_x=%e delta_s=%e\n delta_c=%e delta_d=%e\n",
                   delta_x, delta_s, delta_c, delta_d);
    // Set the perturbation values in the Data object
    IpData().setPDPert(delta_x, delta_s, delta_c, delta_d);
  }

  // Compute the remaining sol Vectors
  Px_L.SinvBlrmZMTdBr(-1., slack_x_L, *rhs.z_L(), z_L, *sol->x(), *sol->z_L_NonConst());
  Px_U.SinvBlrmZMTdBr(1., slack_x_U, *rhs.z_U(), z_U, *sol->x(), *sol->z_U_NonConst());
  Pd_L.SinvBlrmZMTdBr(-1., slack_s_L, *rhs.v_L(), v_L, *sol->s(), *sol->v_L_NonConst());
  Pd_U.SinvBlrmZMTdBr(1., slack_s_U, *rhs.v_U(), v_U, *sol->s(), *sol->v_U_NonConst());

  // Finally let's assemble the res result vectors
  res.AddOneVector(alpha, *sol, beta);

  IpData().TimingStats().PDSystemSolverSolveOnce().End();

  return true;
}
// Re-run the Lua dependency-check function and rebuild m_Dependencies from
// the table it returns.  The returned table maps a dependency name to a
// sub-table with "desc" and "problem" fields.  Lua stack discipline is
// exact: every value pushed for inspection is popped before moving on.
void CBaseLuaDepScreen::Refresh() {
  NLua::CLuaFunc func(m_iLuaCheckRef, LUA_REGISTRYINDEX);
  if (func) {
    // Call the checker, expecting one return value (a table).
    func(1);
    func.PopData();
    luaL_checktype(NLua::LuaState, -1, LUA_TTABLE);
    NLua::CLuaTable deps(-1);
    m_Dependencies.clear();
    if (deps) {
      std::string key;
      while (deps.Next(key)) {
        // Push deps[key] and read its desc/problem fields.
        deps[key].GetTable();
        luaL_checktype(NLua::LuaState, -1, LUA_TTABLE);
        NLua::CLuaTable tab(-1);
        if (tab) {
          const char *d, *p;
          tab["desc"] >> d;
          tab["problem"] >> p;
          m_Dependencies.push_back(SDepInfo(key, d, p));
        }
        lua_pop(NLua::LuaState, 1); // deps[key]
      }
    }
    lua_pop(NLua::LuaState, 1); // Func ret
#if 0
    // Older array-based variant, kept for reference (disabled).
    int tab = lua_gettop(NLua::LuaState);
    int count = luaL_getn(NLua::LuaState, tab);

    m_Dependencies.clear();
    for (int i=1; i<=count; i++) {
      lua_rawgeti(NLua::LuaState, tab, i);
      luaL_checktype(NLua::LuaState, -1, LUA_TTABLE);
      NLua::CLuaTable tab(-1);
      if (tab) {
        const char *n, *d, *p;
        tab["name"] >> n;
        tab["desc"] >> d;
        tab["problem"] >> p;
        m_Dependencies.push_back(SDepInfo(n, d, p));
      }
    }
    lua_pop(NLua::LuaState, 1);
#endif
  }
  // Refresh the UI list from the rebuilt dependency vector.
  CoreUpdateList();
}
static UniValue getblocktemplate(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() > 1) throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to explicitly select between the default 'template' request or a 'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, 9, and 145:\n" " https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki#getblocktemplate_changes\n" " https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n" "\nArguments:\n" "1. template_request (json object, optional) A json object in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of strings\n" " \"support\" (string) client side supported feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', 'serverlist', 'workid'\n" " ,...\n" " ],\n" " \"rules\":[ (array, optional) A list of strings\n" " \"support\" (string) client side supported softfork deployment\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"version\" : n, (numeric) The preferred block version\n" " \"rules\" : [ \"rulename\", ... 
], (array of strings) specific block rules that are to be enforced\n" " \"vbavailable\" : { (json object) set of pending, supported versionbit (BIP 9) softfork deployments\n" " \"rulename\" : bitnumber (numeric) identifies the bit number as indicating acceptance and readiness for the named softfork rule\n" " ,...\n" " },\n" " \"vbrequired\" : n, (numeric) bit mask of versionbits the server requires set in submissions\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n" " \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded in little-endian hexadecimal (including witness data)\n" " \"depends\" : [ (array) array of numbers \n" " n (numeric) transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in value between transaction inputs and outputs (in satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n" " \"sigops\" : n, (numeric) total SigOps cost, as counted for purposes of block limits; if key is not present, sigop cost is unknown and clients MUST NOT assume it is zero\n" " \"weight\" : n, (numeric) total transaction weight, as counted for purposes of block limits\n" " }\n" " ,...\n" " ],\n" " \"coinbaseaux\" : { (json object) data that should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable input to coinbase 
transaction, including the generation award and transaction fees (in satoshis)\n" " \"coinbasetxn\" : { ... }, (json object) information for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum timestamp appropriate for next block time in seconds since epoch (Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of ways the block template may be changed \n" " \"value\" (string) A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops in blocks\n" " \"sizelimit\" : n, (numeric) limit of block size\n" " \"weightlimit\" : n, (numeric) limit of block weight\n" " \"curtime\" : ttt, (numeric) current timestamp in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed target of next block\n" " \"height\" : n (numeric) The height of the next block\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "{\"rules\": [\"segwit\"]}") + HelpExampleRpc("getblocktemplate", "{\"rules\": [\"segwit\"]}") ); LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set<std::string> setClientRules; int64_t nMaxVersionPreVB = -1; if (!request.params[0].isNull()) { const UniValue& oparam = request.params[0].get_obj(); const UniValue& modeval = find_value(oparam, "mode"); if (modeval.isStr()) strMode = modeval.get_str(); else if (modeval.isNull()) { /* Do nothing */ } else throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue& dataval = find_value(oparam, "data"); if (!dataval.isStr()) throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); uint256 
hash = block.GetHash(); const CBlockIndex* pindex = LookupBlockIndex(hash); if (pindex) { if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) return "duplicate"; if (pindex->nStatus & BLOCK_FAILED_MASK) return "duplicate-invalid"; return "duplicate-inconclusive"; } CBlockIndex* const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) return "inconclusive-not-best-prevblk"; CValidationState state; TestBlockValidity(state, Params(), block, pindexPrev, false, true); return BIP22ValidationResult(state); } const UniValue& aClientRules = find_value(oparam, "rules"); if (aClientRules.isArray()) { for (unsigned int i = 0; i < aClientRules.size(); ++i) { const UniValue& v = aClientRules[i]; setClientRules.insert(v.get_str()); } } else { // NOTE: It is important that this NOT be read if versionbits is supported const UniValue& uvMaxVersion = find_value(oparam, "maxversion"); if (uvMaxVersion.isNum()) { nMaxVersionPreVB = uvMaxVersion.get_int64(); } } } if (strMode != "template") throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); if (g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Bitcoin is not connected!"); if (IsInitialBlockDownload()) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Bitcoin is downloading blocks..."); static unsigned int nTransactionsUpdatedLast; if (!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has passed and there are more transactions uint256 hashWatchedChain; std::chrono::steady_clock::time_point checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: <hashBestChain><nTransactionsUpdatedLast> std::string lpstr = lpval.get_str(); hashWatchedChain = ParseHashV(lpstr.substr(0, 64), "longpollid"); 
nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, but this makes testing easier hashWatchedChain = chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = std::chrono::steady_clock::now() + std::chrono::minutes(1); WAIT_LOCK(g_best_block_mutex, lock); while (g_best_block == hashWatchedChain && IsRPCRunning()) { if (g_best_block_cv.wait_until(lock, checktxtime) == std::cv_status::timeout) { // Timeout: Check transactions for update if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) break; checktxtime += std::chrono::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); // TODO: Maybe recheck connections/IBD and (if something wrong) send an expires-immediately template to stop miners? } const struct VBDeploymentInfo& segwit_info = VersionBitsDeploymentInfo[Consensus::DEPLOYMENT_SEGWIT]; // If the caller is indicating segwit support, then allow CreateNewBlock() // to select witness transactions, after segwit activates (otherwise // don't). bool fSupportsSegwit = setClientRules.find(segwit_info.name) != setClientRules.end(); // Update block static CBlockIndex* pindexPrev; static int64_t nStart; static std::unique_ptr<CBlockTemplate> pblocktemplate; // Cache whether the last invocation was with segwit support, to avoid returning // a segwit-block to a non-segwit caller. 
static bool fLastTemplateSupportsSegwit = true; if (pindexPrev != chainActive.Tip() || (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5) || fLastTemplateSupportsSegwit != fSupportsSegwit) { // Clear pindexPrev so future calls make a new block, despite any failures from here on pindexPrev = nullptr; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = mempool.GetTransactionsUpdated(); CBlockIndex* pindexPrevNew = chainActive.Tip(); nStart = GetTime(); fLastTemplateSupportsSegwit = fSupportsSegwit; // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(Params()).CreateNewBlock(scriptDummy, fSupportsSegwit); if (!pblocktemplate) throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } assert(pindexPrev); CBlock* pblock = &pblocktemplate->block; // pointer for convenience const Consensus::Params& consensusParams = Params().GetConsensus(); // Update nTime UpdateTime(pblock, consensusParams, pindexPrev); pblock->nNonce = 0; // NOTE: If at some point we support pre-segwit miners post-segwit-activation, this needs to take segwit support into consideration const bool fPreSegWit = (ThresholdState::ACTIVE != VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache)); UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); UniValue transactions(UniValue::VARR); std::map<uint256, int64_t> setTxIndex; int i = 0; for (const auto& it : pblock->vtx) { const CTransaction& tx = *it; uint256 txHash = tx.GetHash(); setTxIndex[txHash] = i++; if (tx.IsCoinBase()) continue; UniValue entry(UniValue::VOBJ); entry.pushKV("data", EncodeHexTx(tx)); entry.pushKV("txid", txHash.GetHex()); entry.pushKV("hash", tx.GetWitnessHash().GetHex()); UniValue deps(UniValue::VARR); for (const CTxIn &in : tx.vin) { if (setTxIndex.count(in.prevout.hash)) 
deps.push_back(setTxIndex[in.prevout.hash]); } entry.pushKV("depends", deps); int index_in_template = i - 1; entry.pushKV("fee", pblocktemplate->vTxFees[index_in_template]); int64_t nTxSigOps = pblocktemplate->vTxSigOpsCost[index_in_template]; if (fPreSegWit) { assert(nTxSigOps % WITNESS_SCALE_FACTOR == 0); nTxSigOps /= WITNESS_SCALE_FACTOR; } entry.pushKV("sigops", nTxSigOps); entry.pushKV("weight", GetTransactionWeight(tx)); transactions.push_back(entry); } UniValue aux(UniValue::VOBJ); aux.pushKV("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end())); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.pushKV("capabilities", aCaps); UniValue aRules(UniValue::VARR); UniValue vbavailable(UniValue::VOBJ); for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache); switch (state) { case ThresholdState::DEFINED: case ThresholdState::FAILED: // Not exposed to GBT at all break; case ThresholdState::LOCKED_IN: // Ensure bit is set in block version pblock->nVersion |= VersionBitsMask(consensusParams, pos); // FALL THROUGH to get vbavailable set... 
case ThresholdState::STARTED: { const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.pushKV(gbt_vb_name(pos), consensusParams.vDeployments[pos].bit); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { if (!vbinfo.gbt_force) { // If the client doesn't support this, don't indicate it in the [default] version pblock->nVersion &= ~VersionBitsMask(consensusParams, pos); } } break; } case ThresholdState::ACTIVE: { // Add to rules only const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; aRules.push_back(gbt_vb_name(pos)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { // Not supported by the client; make sure it's safe to proceed if (!vbinfo.gbt_force) { // If we do anything other than throw an exception here, be sure version/force isn't sent to old clients throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Support for '%s' rule requires explicit client support", vbinfo.name)); } } break; } } } result.pushKV("version", pblock->nVersion); result.pushKV("rules", aRules); result.pushKV("vbavailable", vbavailable); result.pushKV("vbrequired", int(0)); if (nMaxVersionPreVB >= 2) { // If VB is supported by the client, nMaxVersionPreVB is -1, so we won't get here // Because BIP 34 changed how the generation transaction is serialized, we can only use version/force back to v2 blocks // This is safe to do [otherwise-]unconditionally only because we are throwing an exception above if a non-force deployment gets activated // Note that this can probably also be removed entirely after the first BIP9 non-force deployment (ie, probably segwit) gets activated aMutable.push_back("version/force"); } result.pushKV("previousblockhash", pblock->hashPrevBlock.GetHex()); result.pushKV("transactions", transactions); result.pushKV("coinbaseaux", aux); result.pushKV("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue); result.pushKV("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + 
i64tostr(nTransactionsUpdatedLast)); result.pushKV("target", hashTarget.GetHex()); result.pushKV("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1); result.pushKV("mutable", aMutable); result.pushKV("noncerange", "00000000ffffffff"); int64_t nSigOpLimit = MAX_BLOCK_SIGOPS_COST; int64_t nSizeLimit = MAX_BLOCK_SERIALIZED_SIZE; if (fPreSegWit) { assert(nSigOpLimit % WITNESS_SCALE_FACTOR == 0); nSigOpLimit /= WITNESS_SCALE_FACTOR; assert(nSizeLimit % WITNESS_SCALE_FACTOR == 0); nSizeLimit /= WITNESS_SCALE_FACTOR; } result.pushKV("sigoplimit", nSigOpLimit); result.pushKV("sizelimit", nSizeLimit); if (!fPreSegWit) { result.pushKV("weightlimit", (int64_t)MAX_BLOCK_WEIGHT); } result.pushKV("curtime", pblock->GetBlockTime()); result.pushKV("bits", strprintf("%08x", pblock->nBits)); result.pushKV("height", (int64_t)(pindexPrev->nHeight+1)); if (!pblocktemplate->vchCoinbaseCommitment.empty() && fSupportsSegwit) { result.pushKV("default_witness_commitment", HexStr(pblocktemplate->vchCoinbaseCommitment.begin(), pblocktemplate->vchCoinbaseCommitment.end())); } return result; }
// Self-check this nmethod's internal consistency: header layout, code-zone
// membership, linked-list integrity, code-table agreement, frame invariants,
// and the scope/pc/relocation descriptors.  Prints a diagnostic for every
// failed check and returns true only if all checks passed.
bool nmethod::verify() {
  bool r = true;
  ResourceMark rm;
  // Generic ncode-level checks first (shared with other code objects).
  r &= OopNCode::verify2("nmethod");
  // The instruction bytes must start immediately after the nmethod header.
  if (insts() != (char*)(this + 1)) {
    error1("nmethod at 0x%lx has incorrect insts pointer", this);
    r = false;
  }
  // The nmethod must live inside the code zone.
  if (!Memory->code->contains(this)) {
    error1("nmethod at 0x%lx not in zone", this);
    r = false;
  }
  if (!zoneLink.verify_list_integrity()) {
    lprintf("\tof zoneLink of nmethod 0x%lx\n", this);
    r = false;
  }
  // Every code-table entry for this nmethod must point back at it.
  {
    FOR_MY_CODETABLE_ENTRIES(e)
      if (e->nm != this) {
        error1("bad code table link for nmethod %#lx\n", this);
        r = false;
      }
  }
  // Frame size must be a multiple of the platform word alignment.
  bool isAligned = (frame_size & (frame_word_alignment-1)) == 0;
  if (!isAligned) {
    lprintf("nmethod at %#lx: frame size is not multiple of %d words\n",
            (long unsigned)this, frame_word_alignment);
    r = false;
  }
  // If linked into a code table, looking up our own key must find us.
  // Debug-mode nmethods live in a separate debug table.
  if (codeTableLink != NULL) {
    nmethod *tableResult = isDebug()
        ? Memory->code->debugTable->lookup(key)
        : Memory->code->table->lookup(key);
    if (tableResult != this) {
      error1("nmethod at %#lx: code table lookup failed", this);
      r = false;
    }
  }
  if (!key.verify()) {
    lprintf("\tof key of nmethod 0x%lx\n", this);
    r = false;
  }
  // Each code-table entry's key must itself verify.
  {
    FOR_MY_CODETABLE_ENTRIES(e)
      if (!e->key.verify()) {
        lprintf("\tof code table key %#lx of nmethod 0x%lx\n",
                (long unsigned)&e->key, (long unsigned)this);
        r = false;
      }
  }
  if (!linkedSends.verify_list_integrity()) {
    lprintf("\tof linkedSends of nmethod 0x%lx\n", this);
    r = false;
  }
  if (!diLink.verify_list_integrity()) {
    lprintf("\tof diLink of nmethod 0x%lx\n", this);
    r = false;
  }
  r &= scopes->verify();
  // Each PC descriptor must be consistent with this nmethod.
  for (PcDesc* p = pcs(); p < pcsEnd(); p++) {
    if (! p->verify(this)) {
      lprintf("\t\tin nmethod at %#lx (pcs)\n", this);
      r = false;
    }
  }
  // more checks in ncode::verify called above

  // The isDI flag must agree with reality: set iff the diLink list is
  // non-empty or any location descriptor is a DI descriptor.
  bool shouldBeDI = diLink.notEmpty();
  for (addrDesc* l = locs(); l < locsEnd(); l++) {
    if (l->isDIDesc()) { shouldBeDI = true; }
  }
  if (shouldBeDI && !isDI()) {
    error1("nmethod %#lx should be marked isDI", this);
    r = false;
  } else if (!shouldBeDI && isDI()) {
    error1("nmethod %#lx should not be marked isDI", this);
    r = false;
  }
  // NOTE(review): dependency-list integrity is only checked when the
  // receiver map is not a block map -- the reason is not visible here;
  // confirm against the deps handling for block nmethods.
  if (! key.receiverMap()->is_block() ) {
    for (nmln* d = deps(); d < depsEnd(); d++) {
      if (! d->verify_list_integrity()) {
        lprintf("\tin nmethod at %#lx (deps)\n", this);
        r = false;
      }
    }
  }
  // A resting nmethod must not have a frame chain installed.
  if (frame_chain != NoFrameChain) {
    error1("nmethod %#lx has non-zero frame chain value", this);
    r = false;
  }
  // An address inside the last word of the instructions must map back
  // to this nmethod.
  if (findNMethod( instsEnd() - oopSize) != this) {
    error1("findNMethod did not find this nmethod (%#lx)", this);
    r = false;
  }
  return r;
}
// Fill in a freshly allocated nmethod from compiler `c`: record lengths and
// offsets, copy the generated instructions, relocation entries, dependencies
// and scope descriptors into this nmethod's own areas, fix them up for their
// new addresses, and flush the instruction cache.  `generateDebugCode` marks
// this as a debug-mode compile.
// NOTE(review): iLen/ilLen/dLen/dAddr/sAddr/depsStart are presumably set up
// by the allocation path that precedes this constructor -- not visible here.
nmethod::nmethod(AbstractCompiler* c, bool generateDebugCode) {
  CHECK_VTBL_VALUE;
  _instsLen = roundTo(iLen, oopSize);
  _locsLen = ilLen;
  depsLen = dLen;
  // backpointer is just before deps
  depsAddr = (nmln*) ((char*)dAddr + sizeof(nmethod*));
  *dBackLinkAddr() = this;
  // Copy the nmethodScopes scopeDescs generated by the ScopeDescRecorder
  // to the allocation area.
  c->scopeDescRecorder()->copyTo((VtblPtr_t*)sAddr, (int32)this);
  this->scopes = (nmethodScopes*) sAddr;
  oldCount = 0;
  flags.clear();
  flags.isDebug = generateDebugCode;
  setCompiler(c->name());
  flags.isUncommonRecompiled = currentProcess->isUncommon();
  // Offsets (within the generated code) of the verified entry point,
  // the DI check, and the frame-creation sequence, as reported by the compiler.
  verifiedOffset      = c->verifiedOffset();
  diCheckOffset       = c->diCheckOffset();
  frameCreationOffset = c->frameCreationOffset();
  rememberLink.init();
  codeTableLink= NULL;
  diLink.init(c->diLink);
  if (diLink.notEmpty()) flags.isDI = true;
  // Recompilation level must be in range; clamp to 0 with a warning if not.
  flags.level = c->level();
  if (flags.level >= MaxRecompilationLevels) { // added = zzzz
    warning1("setting invalid nmethod level %ld", flags.level); // fix this
    flags.level = 0;
  }
  flags.version = c->version();
  if (c->nmName() == nm_nic && ((FCompiler*)c)->isImpure) makeImpureNIC();
  key.set_from(c->L->key);
  check_store();
  clear_frame_chain();
  assert(c->frameSize() >= 0, "frame size cannot be negative");
  frame_size = c->frameSize();
  _incoming_arg_count = c->incoming_arg_count();
  get_platform_specific_data(c);
  // Copy instructions, relocation info, and dependencies out of the
  // compiler's assembler buffers into this nmethod's areas.
  Assembler* instsA = c->instructions();
  copy_bytes( instsA->instsStart, insts(), instsLen());
  copy_words((int32*)instsA->locsStart, (int32*)locs(), ilLen/4);
  copy_words((int32*)depsStart, (int32*)deps(), depsLen/4);
  // First pass: shift every relocation entry by the distance the code moved
  // from the assembler buffer to its final location.
  addrDesc *l, *lend;
  for (l = locs(), lend = locsEnd(); l < lend; l++) {
    l->initialShift(this, (char*)insts() - (char*)instsA->instsStart, 0);
  }
  // Second pass: per-kind fixups -- remembered-set check for oops,
  // dependency init for send/DI descriptors.
  char* bound = Memory->new_gen->boundary();
  for (l = locs(), lend = locsEnd(); l < lend; l++) {
    if (l->isOop())
      OopNCode::check_store(oop(l->referent(this)), bound); // cfront garbage
    else if (l->isSendDesc()) {
      l->asSendDesc(this)->dependency()->init();
    } else if (l->isDIDesc()) {
      l->asDIDesc(this)->dependency()->init();
      flags.isDI = true;
    }
  }
  // Dependencies were copied by value; relink them at their new addresses.
  for (nmln* d = deps(), *dend = depsEnd(); d < dend; d++) {
    d->relocate();
  }
  // The CPU may have stale instruction bytes cached for this range.
  MachineCache::flush_instruction_cache_range(insts(), instsEnd());
  MachineCache::flush_instruction_cache_for_debugging();
  // Debugging aid: break when a specific nmethod address is created.
  if (this == (nmethod*)catchThisOne) warning("caught nmethod");
}
// Build a Tomasulo simulator around `memory`: the register files, the rename
// register file, the common data bus, the instruction factory, and one
// functional-unit group per FunctionalUnitType.
Tomasulo::Tomasulo(MemoryPtr memory, bool verbose)
    : verbose(verbose),
      instructionFactory(nullptr),
      halted(false),
      stallIssue(false),
      clockCounter(0),
      pc(0),
      memory(memory),
      registerFile(nullptr),
      renameRegisterFile(nullptr),
      commonDataBus(nullptr),
      functionalUnits()
{
  assert(memory != nullptr);

  registerFile = RegisterFilePtr(
      new RegisterFile(GPR_REGISTERS, FPR_REGISTERS)
  );
  renameRegisterFile = RenameRegisterFilePtr(new RenameRegisterFile);
  commonDataBus = CommonDataBusPtr(
      new CommonDataBus(registerFile, renameRegisterFile)
  );
  instructionFactory = InstructionFactoryPtr(
      new InstructionFactory(pc, memory, registerFile)
  );

  // Shared state handed to every reservation station.
  ReservationStationDependencies deps(
      registerFile, renameRegisterFile, memory, pc, stallIssue, commonDataBus
  );

  // Local helper: construct one functional-unit group and register it under
  // its type.  The bool flag is forwarded verbatim to FunctionalUnit; its
  // meaning is not visible from this translation unit.
  auto addUnit = [&](FunctionalUnitType type, bool flag,
                     auto cycles, auto stations, auto units) {
    functionalUnits[type] = FunctionalUnitPtr(
        new FunctionalUnit(type, flag, cycles, stations, units, deps)
    );
  };

  addUnit(FunctionalUnitType::Integer,       false, INTEGER_CYCLES, INTEGER_STATIONS, INTEGER_UNITS);
  addUnit(FunctionalUnitType::Trap,          true,  TRAP_CYCLES,    TRAP_STATIONS,    TRAP_UNITS);
  addUnit(FunctionalUnitType::Branch,        true,  BRANCH_CYCLES,  BRANCH_STATIONS,  BRANCH_UNITS);
  addUnit(FunctionalUnitType::Memory,        true,  MEMORY_CYCLES,  MEMORY_STATIONS,  MEMORY_UNITS);
  addUnit(FunctionalUnitType::FloatingPoint, false, FLOAT_CYCLES,   FLOAT_STATIONS,   FLOAT_UNITS);
}
// Relocate the dependency list: shift each dependency link by `delta`,
// then point depsAddr at the new storage location.
void nmethod::moveDeps(nmln* newDeps, int32 delta) {
  nmln* cur  = deps();
  nmln* stop = depsEnd();
  while (cur < stop) {
    cur->shift(delta);
    ++cur;
  }
  depsAddr = newDeps;
}
void CodeBlocksImporter::GenerateFromProject(GenericWorkspacePtr genericWorkspace, GenericProjectDataType& genericProjectData) { wxXmlDocument codeBlocksProject; if(codeBlocksProject.Load(genericProjectData[wxT("projectFullPath")])) { wxXmlNode* root = codeBlocksProject.GetRoot(); if(root) { wxXmlNode* rootChild = root->GetChildren(); while(rootChild) { if(rootChild->GetName() == wxT("Project")) { wxString globalCompilerOptions = wxT(""), globalIncludePath = wxT(""), globalLinkerOptions = wxT(""), globalLibPath = wxT(""), globalLibraries = wxT(""); wxFileName projectInfo(genericProjectData[wxT("projectFullPath")]); GenericProjectPtr genericProject = std::make_shared<GenericProject>(); genericProject->path = projectInfo.GetPath(); wxStringTokenizer deps(genericProjectData[wxT("projectDeps")], wxT(";")); while(deps.HasMoreTokens()) { wxString projectNameDep = deps.GetNextToken().Trim().Trim(false); genericProject->deps.Add(projectNameDep); } genericWorkspace->projects.push_back(genericProject); wxXmlNode* projectChild = rootChild->GetChildren(); while(projectChild) { if(projectChild->GetName() == wxT("Option") && projectChild->HasAttribute(wxT("title"))) { genericProject->name = projectChild->GetAttribute(wxT("title")); } if(projectChild->GetName() == wxT("Build")) { wxXmlNode* buildChild = projectChild->GetChildren(); while(buildChild) { if(buildChild->GetName() == wxT("Target") && buildChild->HasAttribute(wxT("title"))) { GenericProjectCfgPtr genericProjectCfg = std::make_shared<GenericProjectCfg>(); genericProjectCfg->name = buildChild->GetAttribute(wxT("title")); genericProject->cfgs.push_back(genericProjectCfg); wxXmlNode* targetChild = buildChild->GetChildren(); while(targetChild) { if(targetChild->GetName() == wxT("Option") && targetChild->HasAttribute(wxT("output"))) { wxString output = targetChild->GetAttribute(wxT("output")); genericProjectCfg->outputFilename = output; } if(targetChild->GetName() == wxT("Option") && 
targetChild->HasAttribute(wxT("working_dir"))) { wxString working_dir = targetChild->GetAttribute(wxT("working_dir")); genericProjectCfg->workingDirectory = working_dir; } if(targetChild->GetName() == wxT("Option") && targetChild->HasAttribute(wxT("type"))) { wxString projectType = targetChild->GetAttribute(wxT("type")); if(projectType == wxT("2")) { genericProject->cfgType = GenericCfgType::STATIC_LIBRARY; } else if(projectType == wxT("3")) { genericProject->cfgType = GenericCfgType::DYNAMIC_LIBRARY; } else { genericProject->cfgType = GenericCfgType::EXECUTABLE; } genericProjectCfg->type = genericProject->cfgType; } if(targetChild->GetName() == wxT("Compiler")) { wxString compilerOptions = wxT(""), includePath = wxT(""); wxXmlNode* compilerChild = targetChild->GetChildren(); while(compilerChild) { if(compilerChild->GetName() == wxT("Add") && compilerChild->HasAttribute(wxT("option"))) { compilerOptions += compilerChild->GetAttribute(wxT("option")) + wxT(" "); } if(compilerChild->GetName() == wxT("Add") && compilerChild->HasAttribute(wxT("directory"))) { includePath += compilerChild->GetAttribute(wxT("directory")) + wxT(";"); } compilerChild = compilerChild->GetNext(); } if(includePath.Contains(wxT("#"))) includePath.Replace(wxT("#"), wxT("")); genericProjectCfg->cCompilerOptions = compilerOptions; genericProjectCfg->cppCompilerOptions = compilerOptions; genericProjectCfg->includePath = includePath; } if(targetChild->GetName() == wxT("Linker")) { wxString linkerOptions = wxT(""), libPath = wxT(""), libraries = wxT(""); wxXmlNode* linkerChild = targetChild->GetChildren(); while(linkerChild) { if(linkerChild->GetName() == wxT("Add") && linkerChild->HasAttribute(wxT("option"))) { linkerOptions += linkerChild->GetAttribute(wxT("option")) + wxT(" "); } if(linkerChild->GetName() == wxT("Add") && linkerChild->HasAttribute(wxT("directory"))) { libPath += linkerChild->GetAttribute(wxT("directory")) + wxT(";"); } if(linkerChild->GetName() == wxT("Add") && 
linkerChild->HasAttribute(wxT("library"))) { libraries += linkerChild->GetAttribute(wxT("library")) + wxT(";"); } linkerChild = linkerChild->GetNext(); } if(libPath.Contains(wxT("#"))) libPath.Replace(wxT("#"), wxT("")); genericProjectCfg->linkerOptions = linkerOptions; genericProjectCfg->libPath = libPath; genericProjectCfg->libraries = libraries; } targetChild = targetChild->GetNext(); } } buildChild = buildChild->GetNext(); } } if(projectChild->GetName() == wxT("Compiler")) { wxXmlNode* compilerChild = projectChild->GetChildren(); while(compilerChild) { if(compilerChild->GetName() == wxT("Add") && compilerChild->HasAttribute(wxT("option"))) { globalCompilerOptions += compilerChild->GetAttribute(wxT("option")) + wxT(" "); } if(compilerChild->GetName() == wxT("Add") && compilerChild->HasAttribute(wxT("directory"))) { globalIncludePath += compilerChild->GetAttribute(wxT("directory")) + wxT(";"); } compilerChild = compilerChild->GetNext(); } if(globalIncludePath.Contains(wxT("#"))) globalIncludePath.Replace(wxT("#"), wxT("")); } if(projectChild->GetName() == wxT("Linker")) { wxXmlNode* linkerChild = projectChild->GetChildren(); while(linkerChild) { if(linkerChild->GetName() == wxT("Add") && linkerChild->HasAttribute(wxT("option"))) { globalLinkerOptions += linkerChild->GetAttribute(wxT("option")) + wxT(" "); } if(linkerChild->GetName() == wxT("Add") && linkerChild->HasAttribute(wxT("directory"))) { globalLibPath += linkerChild->GetAttribute(wxT("directory")) + wxT(";"); } if(linkerChild->GetName() == wxT("Add") && linkerChild->HasAttribute(wxT("library"))) { globalLibraries += linkerChild->GetAttribute(wxT("library")) + wxT(";"); } linkerChild = linkerChild->GetNext(); } if(globalLibPath.Contains(wxT("#"))) globalLibPath.Replace(wxT("#"), wxT("")); } if(projectChild->GetName() == wxT("Unit") && projectChild->HasAttribute(wxT("filename"))) { wxString vpath = wxT(""); wxXmlNode* unitChild = projectChild->GetChildren(); while(unitChild) { if(unitChild->GetName() == 
wxT("Option") && unitChild->HasAttribute(wxT("virtualFolder"))) { vpath = unitChild->GetAttribute(wxT("virtualFolder")); } unitChild = unitChild->GetNext(); } wxString projectFilename = projectChild->GetAttribute(wxT("filename")); GenericProjectFilePtr genericProjectFile = std::make_shared<GenericProjectFile>(); genericProjectFile->name = projectFilename; genericProjectFile->vpath = vpath; genericProject->files.push_back(genericProjectFile); } projectChild = projectChild->GetNext(); } for(GenericProjectCfgPtr genericProjectCfg : genericProject->cfgs) { genericProjectCfg->cCompilerOptions += globalCompilerOptions; genericProjectCfg->cppCompilerOptions += globalCompilerOptions; genericProjectCfg->includePath += globalIncludePath; genericProjectCfg->linkerOptions += globalLinkerOptions; genericProjectCfg->libPath += globalLibPath; genericProjectCfg->libraries += globalLibraries; } } rootChild = rootChild->GetNext(); } } } }
UniValue getblocktemplate(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() > 1) throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to explicitly select between the default 'template' request or a 'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, and 9:\n" " https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki#getblocktemplate_changes\n" "\nArguments:\n" "1. template_request (json object, optional) A json object in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of strings\n" " \"support\" (string) client side supported feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', 'serverlist', 'workid'\n" " ,...\n" " ],\n" " \"rules\":[ (array, optional) A list of strings\n" " \"support\" (string) client side supported softfork deployment\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"capabilities\" : [ \"capability\", ... ], (array of strings) specific client side supported features\n" " \"version\" : n, (numeric) The preferred block version\n" " \"rules\" : [ \"rulename\", ... 
], (array of strings) specific block rules that are to be enforced\n" " \"vbavailable\" : { (json object) set of pending, supported versionbit (BIP 9) softfork deployments\n" " \"rulename\" : bitnumber (numeric) identifies the bit number as indicating acceptance and readiness for the named softfork rule\n" " ,...\n" " },\n" " \"vbrequired\" : n, (numeric) bit mask of versionbits the server requires set in submissions\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n" " \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n" " \"hash\" : \"xxxx\", (string) hash/id encoded in little-endian hexadecimal\n" " \"depends\" : [ (array) array of numbers \n" " n (numeric) transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in value between transaction inputs and outputs (in duffs); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n" " \"sigops\" : n, (numeric) total number of SigOps, as counted for purposes of block limits; if key is not present, sigop count is unknown and clients MUST NOT assume there aren't any\n" " \"required\" : true|false (boolean) if provided and true, this transaction must be in the final block\n" " }\n" " ,...\n" " ],\n" " \"coinbaseaux\" : { (json object) data that should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable input to coinbase transaction, including the generation award and transaction fees (in duffs)\n" " 
\"coinbasetxn\" : { ... }, (json object) information for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum timestamp appropriate for next block time in seconds since epoch (Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of ways the block template may be changed \n" " \"value\" (string) A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops in blocks\n" " \"sizelimit\" : n, (numeric) limit of block size\n" " \"curtime\" : ttt, (numeric) current timestamp in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed target of next block\n" " \"previousbits\" : \"xxxxxxxx\", (string) compressed target of current highest block\n" " \"height\" : n (numeric) The height of the next block\n" " \"masternode\" : [ (array) required masternode payments that must be included in the next block\n" " {\n" " \"payee\" : \"xxxx\", (string) payee address\n" " \"script\" : \"xxxx\", (string) payee scriptPubKey\n" " \"amount\": n (numeric) required amount to pay\n" " }\n" " },\n" " \"masternode_payments_started\" : true|false, (boolean) true, if masternode payments started\n" " \"masternode_payments_enforced\" : true|false, (boolean) true, if masternode payments are enforced\n" " \"superblock\" : [ (array) required superblock payees that must be included in the next block\n" " {\n" " \"payee\" : \"xxxx\", (string) payee address\n" " \"script\" : \"xxxx\", (string) payee scriptPubKey\n" " \"amount\": n (numeric) required amount to pay\n" " }\n" " ,...\n" " ],\n" " \"superblocks_started\" : true|false, (boolean) true, if superblock payments started\n" " \"superblocks_enabled\" : true|false, (boolean) true, if superblock payments are enabled\n" " \"coinbase_payload\" : \"xxxxxxxx\" (string) coinbase transaction 
payload data encoded in hexadecimal\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "") + HelpExampleRpc("getblocktemplate", "") ); LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set<std::string> setClientRules; int64_t nMaxVersionPreVB = -1; if (request.params.size() > 0) { const UniValue& oparam = request.params[0].get_obj(); const UniValue& modeval = find_value(oparam, "mode"); if (modeval.isStr()) strMode = modeval.get_str(); else if (modeval.isNull()) { /* Do nothing */ } else throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue& dataval = find_value(oparam, "data"); if (!dataval.isStr()) throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); uint256 hash = block.GetHash(); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) return "duplicate"; if (pindex->nStatus & BLOCK_FAILED_MASK) return "duplicate-invalid"; return "duplicate-inconclusive"; } CBlockIndex* const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) return "inconclusive-not-best-prevblk"; CValidationState state; TestBlockValidity(state, Params(), block, pindexPrev, false, true); return BIP22ValidationResult(state); } const UniValue& aClientRules = find_value(oparam, "rules"); if (aClientRules.isArray()) { for (unsigned int i = 0; i < aClientRules.size(); ++i) { const UniValue& v = aClientRules[i]; setClientRules.insert(v.get_str()); } } else { // NOTE: It is important that this NOT be read if versionbits is supported const UniValue& uvMaxVersion = find_value(oparam, "maxversion"); if 
(uvMaxVersion.isNum()) { nMaxVersionPreVB = uvMaxVersion.get_int64(); } } } if (strMode != "template") throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); if (Params().MiningRequiresPeers()) { if (g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Dash Core is not connected!"); if (IsInitialBlockDownload()) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Dash Core is downloading blocks..."); } // when enforcement is on we need information about a masternode payee or otherwise our block is going to be orphaned by the network std::vector<CTxOut> voutMasternodePayments; if (sporkManager.IsSporkActive(SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT) && !masternodeSync.IsWinnersListSynced() && !mnpayments.GetBlockTxOuts(chainActive.Height() + 1, 0, voutMasternodePayments)) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Dash Core is downloading masternode winners..."); // next bock is a superblock and we need governance info to correctly construct it if (sporkManager.IsSporkActive(SPORK_9_SUPERBLOCKS_ENABLED) && !masternodeSync.IsSynced() && CSuperblock::IsValidBlockHeight(chainActive.Height() + 1)) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Dash Core is syncing with network..."); static unsigned int nTransactionsUpdatedLast; if (!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has passed and there are more transactions uint256 hashWatchedChain; boost::system_time checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: <hashBestChain><nTransactionsUpdatedLast> std::string lpstr = lpval.get_str(); hashWatchedChain.SetHex(lpstr.substr(0, 64)); nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, but this makes testing easier hashWatchedChain = 
chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = boost::get_system_time() + boost::posix_time::minutes(1); boost::unique_lock<boost::mutex> lock(csBestBlock); while (chainActive.Tip()->GetBlockHash() == hashWatchedChain && IsRPCRunning()) { if (!cvBlockChange.timed_wait(lock, checktxtime)) { // Timeout: Check transactions for update if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) break; checktxtime += boost::posix_time::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); // TODO: Maybe recheck connections/IBD and (if something wrong) send an expires-immediately template to stop miners? } // Update block static CBlockIndex* pindexPrev; static int64_t nStart; static std::unique_ptr<CBlockTemplate> pblocktemplate; if (pindexPrev != chainActive.Tip() || (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5)) { // Clear pindexPrev so future calls make a new block, despite any failures from here on pindexPrev = nullptr; // Store the chainActive.Tip() used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = mempool.GetTransactionsUpdated(); CBlockIndex* pindexPrevNew = chainActive.Tip(); nStart = GetTime(); // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(Params()).CreateNewBlock(scriptDummy); if (!pblocktemplate) throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } CBlock* pblock = &pblocktemplate->block; // pointer for convenience const Consensus::Params& consensusParams = Params().GetConsensus(); // Update nTime UpdateTime(pblock, consensusParams, pindexPrev); pblock->nNonce = 0; UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); 
UniValue transactions(UniValue::VARR); std::map<uint256, int64_t> setTxIndex; int i = 0; for (const auto& it : pblock->vtx) { const CTransaction& tx = *it; uint256 txHash = tx.GetHash(); setTxIndex[txHash] = i++; if (tx.IsCoinBase()) continue; UniValue entry(UniValue::VOBJ); entry.push_back(Pair("data", EncodeHexTx(tx))); entry.push_back(Pair("hash", txHash.GetHex())); UniValue deps(UniValue::VARR); BOOST_FOREACH (const CTxIn &in, tx.vin) { if (setTxIndex.count(in.prevout.hash)) deps.push_back(setTxIndex[in.prevout.hash]); } entry.push_back(Pair("depends", deps)); int index_in_template = i - 1; entry.push_back(Pair("fee", pblocktemplate->vTxFees[index_in_template])); entry.push_back(Pair("sigops", pblocktemplate->vTxSigOps[index_in_template])); transactions.push_back(entry); } UniValue aux(UniValue::VOBJ); aux.push_back(Pair("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end()))); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.push_back(Pair("capabilities", aCaps)); UniValue aRules(UniValue::VARR); UniValue vbavailable(UniValue::VOBJ); for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache); switch (state) { case THRESHOLD_DEFINED: case THRESHOLD_FAILED: // Not exposed to GBT at all break; case THRESHOLD_LOCKED_IN: // Ensure bit is set in block version pblock->nVersion |= VersionBitsMask(consensusParams, pos); // FALL THROUGH to get vbavailable set... 
case THRESHOLD_STARTED: { const struct BIP9DeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.push_back(Pair(gbt_vb_name(pos), consensusParams.vDeployments[pos].bit)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { if (!vbinfo.gbt_force) { // If the client doesn't support this, don't indicate it in the [default] version pblock->nVersion &= ~VersionBitsMask(consensusParams, pos); } } break; } case THRESHOLD_ACTIVE: { // Add to rules only const struct BIP9DeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; aRules.push_back(gbt_vb_name(pos)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { // Not supported by the client; make sure it's safe to proceed if (!vbinfo.gbt_force) { // If we do anything other than throw an exception here, be sure version/force isn't sent to old clients throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Support for '%s' rule requires explicit client support", vbinfo.name)); } } break; } } } result.push_back(Pair("version", pblock->nVersion)); result.push_back(Pair("rules", aRules)); result.push_back(Pair("vbavailable", vbavailable)); result.push_back(Pair("vbrequired", int(0))); if (nMaxVersionPreVB >= 2) { // If VB is supported by the client, nMaxVersionPreVB is -1, so we won't get here // Because BIP 34 changed how the generation transaction is serialized, we can only use version/force back to v2 blocks // This is safe to do [otherwise-]unconditionally only because we are throwing an exception above if a non-force deployment gets activated // Note that this can probably also be removed entirely after the first BIP9 non-force deployment (ie, probably segwit) gets activated aMutable.push_back("version/force"); } result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); result.push_back(Pair("transactions", transactions)); result.push_back(Pair("coinbaseaux", aux)); result.push_back(Pair("coinbasevalue", (int64_t)pblock->vtx[0]->GetValueOut())); 
result.push_back(Pair("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast))); result.push_back(Pair("target", hashTarget.GetHex())); result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1)); result.push_back(Pair("mutable", aMutable)); result.push_back(Pair("noncerange", "00000000ffffffff")); result.push_back(Pair("sigoplimit", (int64_t)MaxBlockSigOps(fDIP0001ActiveAtTip))); result.push_back(Pair("sizelimit", (int64_t)MaxBlockSize(fDIP0001ActiveAtTip))); result.push_back(Pair("curtime", pblock->GetBlockTime())); result.push_back(Pair("bits", strprintf("%08x", pblock->nBits))); result.push_back(Pair("previousbits", strprintf("%08x", pblocktemplate->nPrevBits))); result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1))); UniValue masternodeObj(UniValue::VARR); for (const auto& txout : pblocktemplate->voutMasternodePayments) { CTxDestination address1; ExtractDestination(txout.scriptPubKey, address1); CBitcoinAddress address2(address1); UniValue obj(UniValue::VOBJ); obj.push_back(Pair("payee", address2.ToString().c_str())); obj.push_back(Pair("script", HexStr(txout.scriptPubKey))); obj.push_back(Pair("amount", txout.nValue)); masternodeObj.push_back(obj); } result.push_back(Pair("masternode", masternodeObj)); result.push_back(Pair("masternode_payments_started", pindexPrev->nHeight + 1 > consensusParams.nMasternodePaymentsStartBlock)); result.push_back(Pair("masternode_payments_enforced", deterministicMNManager->IsDeterministicMNsSporkActive() || sporkManager.IsSporkActive(SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT))); UniValue superblockObjArray(UniValue::VARR); if(pblocktemplate->voutSuperblockPayments.size()) { for (const auto& txout : pblocktemplate->voutSuperblockPayments) { UniValue entry(UniValue::VOBJ); CTxDestination address1; ExtractDestination(txout.scriptPubKey, address1); CBitcoinAddress address2(address1); entry.push_back(Pair("payee", address2.ToString().c_str())); 
entry.push_back(Pair("script", HexStr(txout.scriptPubKey))); entry.push_back(Pair("amount", txout.nValue)); superblockObjArray.push_back(entry); } } result.push_back(Pair("superblock", superblockObjArray)); result.push_back(Pair("superblocks_started", pindexPrev->nHeight + 1 > consensusParams.nSuperblockStartBlock)); result.push_back(Pair("superblocks_enabled", sporkManager.IsSporkActive(SPORK_9_SUPERBLOCKS_ENABLED))); result.push_back(Pair("coinbase_payload", HexStr(pblock->vtx[0]->vExtraPayload))); return result; }
boost::optional<BSONObj> DocumentSourceGroup::findRelevantInputSort() const {
    // Streaming $group does not yet handle nullish values correctly, so the
    // optimization is unconditionally switched off. See SERVER-23318.
    if (true) {
        return boost::none;
    }

    // 'pSource' can be absent during explain, or when $group acts as the merge
    // point of a sharded pipeline; no input sort can be inspected then.
    if (!pSource) {
        return boost::none;
    }

    // The set of sort patterns the input stream is known to satisfy; we must
    // check whether our group pattern is compatible with one of them.
    BSONObjSet candidateSorts = pSource->getOutputSorts();

    // We only take advantage of a sorted input stream when the _id given to
    // $group is built purely from FieldPaths and constants. Walk the _id
    // sub-expressions, bailing out on anything else, and collect the
    // referenced field paths as we go. Streaming based off a text score is
    // unsupported, hence kNoMetadata.
    DepsTracker tracker(DepsTracker::MetadataAvailable::kNoMetadata);
    for (auto&& idExpr : _idExpressions) {
        if (dynamic_cast<ExpressionConstant*>(idExpr.get())) {
            continue;
        }
        if (auto objExpr = dynamic_cast<ExpressionObject*>(idExpr.get())) {
            // A nested object is acceptable only if it contains no operators.
            if (!containsOnlyFieldPathsAndConstants(objExpr)) {
                return boost::none;
            }
        } else if (!dynamic_cast<ExpressionFieldPath*>(idExpr.get())) {
            return boost::none;
        }
        idExpr->addDependencies(&tracker);
    }

    // Needing the entire document (likely via $$ROOT) rules out streaming.
    if (tracker.needWholeDocument) {
        return boost::none;
    }

    // A constant _id groups everything into one output document, so we should
    // stream, and any input sort will do; signal that with an empty pattern.
    if (tracker.fields.empty()) {
        return BSONObj();
    }

    // A sort order of, e.g., {a: 1, b: 1, c: 1} allows a non-blocking group
    // for every permutation of group-by (a, b, c): documents agreeing on
    // (a, b, c) are guaranteed consecutive in the input stream regardless of
    // the order of fields in our _id. So an exact field-set match suffices.
    for (auto&& sortPattern : candidateSorts) {
        std::set<std::string> sortKeys;
        sortPattern.getFieldNames(sortKeys);
        if (sortKeys == tracker.fields) {
            return sortPattern;
        }
    }

    return boost::none;
}
//======================>>> makefileMaker::findRule <<<============================ void makefileMaker::findRule(char* name) { //finds the rules to build object file pointed by name vOS vos; // To execute g++ char command[500]; // command line char hvbuff[256]; cmdw = ((videApp*)theApp)->GetMsgWindow(); strcpy(command,cc); for (int ix = 0 ; incDirs.list[ix] != 0 ; ++ix) { char *incdir = incDirs.list[ix]; char *hvp = strstr(incdir,"$(HOMEV)/"); if (hvp != 0) { // Have $(HOMEV) in the path, so we must replace the // $(HOMEV) with the real homeV path. strcpy(hvbuff,homeV); strcat(hvbuff, hvp+strlen("$(HOMEV)") ); incdir = hvbuff; } strcat(command," -I"); strcat(command,incdir); if (strlen(command) > 480) break; } strcat(command," -MM "); if (strcmp(SrcDir,".") != 0) { strcat(command, SrcDir); strcat(command, "/"); } strcat(command, name); static char makedep[] = "makedep.vtm"; static char makeerr[] = "makeerr.vtm"; char currlin[256]; vos.vDeleteFile(makedep); // be sure these are gone... vos.vDeleteFile(makeerr); if (vos.vRunProcess(command, makedep, makeerr, 1, 1) != 0) { CANTMAKE: cmdw->AddLine(command); vos.vDeleteFile(makedep); ifstream em(makeerr); if (em) { while(em.getline(currlin,256,'\n')) cmdw->AddLine(currlin); cmdw->AddLine("Build of Makefile failed - usually in wrong directory or missing files..."); cmdw->AddLine(" Open a file in the source directory, then try this again."); cmdw->AddLine("--------------"); em.close(); } vos.vDeleteFile(makeerr); return; } ifstream deps(makedep); if (!deps) goto CANTMAKE; while ( deps.getline(currlin, 256, '\n')) { rules.insert(-1,currlin); } deps.close(); vos.vDeleteFile(makedep); vos.vDeleteFile(makeerr); }
//! \brief Evaluates the cable-element integrand at an interior point.
//! \param elmInt Local integral object (element matrices/vectors); cast
//!               internally to ElmMats
//! \param[in] fe Finite element data at the current integration point
//!               (basis values N, derivatives dNdX/d2NdX2, geometry G, detJxW)
//! \param[in] X Cartesian coordinates of the current integration point
//! \return True on success, false if the local axes could not be evaluated
//!
//! \details Computes the axial strain and curvature of the cable together
//! with their first and second variations w.r.t. the nodal displacements,
//! and assembles internal force, material/geometric stiffness, mass and
//! gravity contributions into \a elmInt. The repeated expressions of the
//! form 0.5*(k-l)*(l-i)*(i-k) evaluate the Levi-Civita permutation symbol
//! for indices in {1,2,3}.
bool ElasticCable::evalInt (LocalIntegral& elmInt, const FiniteElement& fe, const Vec3& X) const
{
  size_t a, aa, b, bb;            // node / DOF counters (1- or 0-based per loop)
  unsigned char i, j, k, l, o;    // spatial direction indices (1..3)
  const size_t nen = fe.N.size(); // number of element nodes

  // Set up reference configuration
  Vec3 dX(fe.G.getColumn(1));     // first parametric derivative (tangent)
  Vec3 ddX(fe.G.getColumn(2));    // second parametric derivative
#if INT_DEBUG > 1
  std::cout <<"ElasticCable: X = "<< X <<" dX = "<< dX <<" ddX = "<< ddX <<"\n";
#endif

  // Compute current configuration
  ElmMats& elMat = static_cast<ElmMats&>(elmInt);
  const Vector& eV = elMat.vec.front(); // Current displacement vector
  Vec3 x(X);     // current position
  Vec3 dx(dX);   // current tangent
  Vec3 ddx(ddX); // current second derivative
  for (i = 0; i < 3; i++)
  {
    // Interpolate nodal displacements (component i, stride 3 — assumes
    // xyz-interleaved DOF ordering in eV; consistent with usage below)
    x[i] += eV.dot(fe.N,i,3);
    dx[i] += eV.dot(fe.dNdX,i,3);
    ddx[i] += eV.dot(fe.d2NdX2,i,3);
  }
#if INT_DEBUG > 1
  std::cout <<"ElasticCable: x = "<< x <<" dx = "<< dx <<" ddx = "<< ddx <<"\n";
#endif

  // Compute local coordinate systems of the reference and current configuration
  Vec3 B_unit, N_unit;
  double B_len, N_len;
  if (!evalLocalAxes(dX,ddX,B_unit,N_unit,B_len,N_len))
    return false;
#if INT_DEBUG > 1
  std::cout <<"ElasticCable: B_unit = "<< B_unit <<" N_unit = "<< N_unit <<"\n";
#endif

  Vec3 b_unit, n_unit;
  double b_len, n_len;
  if (!evalLocalAxes(dx,ddx,b_unit,n_unit,b_len,n_len))
    return false;

  Vec3 bin = b_unit * b_len;      // un-normalized binormal of current config
  double b_len2 = b_len * b_len;
  Vec3 n = n_unit * n_len;        // un-normalized normal of current config
  double n_len2 = n_len * n_len;
#if INT_DEBUG > 1
  std::cout <<"ElasticCable: b = "<< bin <<" b_unit = "<< b_unit
            <<"\n n = "<< n <<" n_unit = "<< n_unit << std::endl;
#endif

  // Calculate derivative of b_unit
  // db     : variation of the (un-normalized) binormal per node
  // db_unit: variation of the unit binormal per node
  std::vector<Matrix> db(nen,Matrix(3,3)), db_unit(nen,Matrix(3,3));
  std::vector<Vec3> db_normal(nen);

  for (i = 1; i <= 3; i++)
    for (k = 1; k <= 3; k++)
      for (l = 1; l <= 3; l++)
      {
        // Levi-Civita symbols for the index triples (k,l,i) and (k,i,l)
        double eps_kli = 0.5*(k-l)*(l-i)*(i-k);
        double eps_kil = 0.5*(k-i)*(i-l)*(l-k);
        for (a = 1; a <= nen; a++)
          db[a-1](k,i) += (eps_kil*fe.dNdX(a,1)*ddx[l-1] +
                           eps_kli*dx[l-1]*fe.d2NdX2(a,1,1));
      }

  // Component of the variation along b_unit ...
  for (i = 1; i <= 3; i++)
    for (a = 0; a < nen; a++)
      for (k = 1; k <= 3; k++)
        db_normal[a][i-1] += b_unit[k-1]*db[a](k,i);

  // ... removed and rescaled to obtain the variation of the *unit* binormal
  for (i = 1; i <= 3; i++)
    for (k = 1; k <= 3; k++)
      for (a = 0; a < nen; a++)
        db_unit[a](k,i) += (db[a](k,i) - b_unit[k-1]*db_normal[a][i-1])/b_len;
#if INT_DEBUG > 2
  std::cout <<"ElasticCable: db_unit:\n";
  for (a = 0; a < nen; a++)
    std::cout <<"node "<< a+1 << db_unit[a];
#endif

  // Calculate second derivative of b_unit
  std::vector< std::vector<Matrix3D> > ddb(nen), ddb_unit(nen);
  std::vector< std::vector<Matrix> > ddb_normal(nen);
  for (a = 0; a < nen; a++)
  {
    ddb[a].resize(nen,Matrix3D(3,3,3));
    ddb_unit[a].resize(nen,Matrix3D(3,3,3));
    ddb_normal[a].resize(nen,Matrix(3,3));
  }

  for (i = 1; i <= 3; i++)
    for (j = 1; j <= 3; j++)
      for (k = 1; k <= 3; k++)
      {
        double eps_kij = 0.5*(k-i)*(i-j)*(j-k);
        double eps_kji = 0.5*(k-j)*(j-i)*(i-k);
        for (a = 1; a <= nen; a++)
          for (b = 1; b <= nen; b++)
            ddb[a-1][b-1](k,i,j) = (eps_kji*fe.d2NdX2(a,1,1)*fe.dNdX(b,1) +
                                    eps_kij*fe.d2NdX2(b,1,1)*fe.dNdX(a,1));
      }
#if INT_DEBUG > 3
  std::cout <<"ElasticCable: ddb:\n";
  for (a = 0; a < nen; a++)
    for (b = 0; b < nen; b++)
      std::cout <<"nodes "<< a+1 <<","<< b+1 << ddb[a][b];
#endif

  for (i = 1; i <= 3; i++)
    for (j = 1; j <= 3; j++)
      for (a = 0; a < nen; a++)
        for (b = 0; b < nen; b++)
          for (k = 1; k <= 3; k++)
            ddb_normal[a][b](i,j) += (ddb[a][b](k,i,j)*bin[k-1] +
                                      db[a](k,i)*db[b](k,j) -
                                      bin[k-1]*db[a](k,i)*bin[k-1]*db[b](k,j) / b_len2) / b_len;
#if INT_DEBUG > 3
  std::cout <<"ElasticCable: ddb_normal:\n";
  for (a = 0; a < nen; a++)
    for (b = 0; b < nen; b++)
      std::cout <<"nodes "<< a+1 <<","<< b+1 << ddb_normal[a][b];
#endif

  for (i = 1; i <= 3; i++)
    for (j = 1; j <= 3; j++)
      for (a = 0; a < nen; a++)
        for (b = 0; b < nen; b++)
          for (k = 1; k <= 3; k++)
            ddb_unit[a][b](k,i,j) = (ddb[a][b](k,i,j)/b_len -
                                     db[a](k,i)*db_normal[b][j-1]/b_len2 -
                                     db[b](k,j)*db_normal[a][i-1]/b_len2 -
                                     bin[k-1]*(ddb_normal[a][b](i,j) -
                                               db_normal[a][i-1]*
                                               db_normal[b][j-1]*2.0 / b_len) / b_len2);
#if INT_DEBUG > 2
  std::cout <<"ElasticCable: ddb_unit:\n";
  for (a = 0; a < nen; a++)
    for (b = 0; b < nen; b++)
      std::cout <<"nodes "<< a+1 <<","<< b+1 << ddb_unit[a][b];
#endif

  // Calculate derivative of n_unit
  // (same normalize-and-project pattern as for b_unit above)
  std::vector<Matrix> dn(nen,Matrix(3,3)), dn_unit(nen,Matrix(3,3));
  std::vector<Vec3> dn_normal(nen);

  for (i = 1; i <= 3; i++)
    for (k = 1; k <= 3; k++)
      for (l = 1; l <= 3; l++)
      {
        double eps_kli = 0.5*(k-l)*(l-i)*(i-k);
        for (a = 0; a < nen; a++)
        {
          dn[a](k,i) += eps_kli*b_unit[l-1]*fe.dNdX(1+a,1);
          for (o = 1; o <= 3; o++)
          {
            double eps_kol = 0.5*(k-o)*(o-l)*(l-k);
            dn[a](k,i) += eps_kol*db_unit[a](o,i)*dx[l-1];
          }
        }
      }

  for (i = 1; i <= 3; i++)
    for (a = 0; a < nen; a++)
      for (k = 1; k <= 3; k++)
        dn_normal[a][i-1] += n_unit[k-1]*dn[a](k,i);

  for (i = 1; i <= 3; i++)
    for (k = 1; k <= 3; k++)
      for (a = 0; a < nen; a++)
        dn_unit[a](k,i) += (dn[a](k,i) - n_unit[k-1]*dn_normal[a][i-1])/n_len;
#if INT_DEBUG > 2
  std::cout <<"\nElasticCable: dn_unit:\n";
  for (a = 0; a < nen; a++)
    std::cout <<"node "<< a+1 << dn_unit[a];
#endif

  // Calculate second derivative of n_unit
  std::vector< std::vector<Matrix3D> > ddn(nen), ddn_unit(nen);
  std::vector< std::vector<Matrix> > ddn_normal(nen);
  for (a = 0; a < nen; a++)
  {
    ddn[a].resize(nen,Matrix3D(3,3,3));
    ddn_unit[a].resize(nen,Matrix3D(3,3,3));
    ddn_normal[a].resize(nen,Matrix(3,3));
  }

  for (i = 1; i <= 3; i++)
    for (j = 1; j <= 3; j++)
      for (a = 0; a < nen; a++)
        for (b = 0; b < nen; b++)
          for (k = 1; k <= 3; k++)
            for (o = 1; o <= 3; o++)
            {
              double eps_koj = 0.5*(k-o)*(o-j)*(j-k);
              double eps_koi = 0.5*(k-o)*(o-i)*(i-k);
              ddn[a][b](k,i,j) += (eps_koj*db_unit[a](o,i)*fe.dNdX(1+b,1) +
                                   eps_koi*db_unit[b](o,j)*fe.dNdX(1+a,1));
              for (l = 1; l <= 3; l++)
              {
                double eps_kol = 0.5*(k-o)*(o-l)*(l-k);
                ddn[a][b](k,i,j) += eps_kol*ddb_unit[a][b](o,i,j)*dx[l-1];
              }
            }

  for (i = 1; i <= 3; i++)
    for (j = 1; j <= 3; j++)
      for (a = 0; a < nen; a++)
        for (b = 0; b < nen; b++)
          for (k = 1; k <= 3; k++)
            ddn_normal[a][b](i,j) += (ddn[a][b](k,i,j)*n[k-1] +
                                      dn[a](k,i)*dn[b](k,j) -
                                      n[k-1]*dn[a](k,i)*
                                      n[k-1]*dn[b](k,j)/n_len2) / n_len;

  for (i = 1; i <= 3; i++)
    for (j = 1; j <= 3; j++)
      for (a = 0; a < nen; a++)
        for (b = 0; b < nen; b++)
          for (k = 1; k <= 3; k++)
            ddn_unit[a][b](k,i,j) = (ddn[a][b](k,i,j)/n_len -
                                     dn[a](k,i)*dn_normal[b][j-1]/n_len2 -
                                     dn[b](k,j)*dn_normal[a][i-1]/n_len2 -
                                     n[k-1]*(ddn_normal[a][b](i,j) -
                                             dn_normal[a][i-1]*
                                             dn_normal[b][j-1]*2.0 / n_len) / n_len2);
#if INT_DEBUG > 2
  std::cout <<"ElasticCable: ddn_unit:\n";
  for (a = 0; a < nen; a++)
    for (b = 0; b < nen; b++)
      std::cout <<"nodes "<< a+1 <<","<< b+1 << ddn_unit[a][b];
#endif

  // Axial strain (Green-Lagrange type: half the difference of the squared
  // tangent lengths of current and reference configuration)
  double eps = 0.5*(dx*dx - dX*dX);

  // Derivative of the axial strain
  Vector deps(3*nen);
  for (a = aa = 1; a <= nen; a++)
    for (i = 1; i <= 3; i++, aa++)
      deps(aa) = fe.dNdX(a,1)*dx[i-1];

  // Second derivative of the axial strain (constant, block-diagonal)
  Matrix ddeps(3*nen,3*nen);
  for (a = 1; a <= nen; a++)
    for (b = 1; b <= nen; b++)
      for (i = 1; i <= 3; i++)
        ddeps(3*(a-1)+i,3*(b-1)+i) = fe.dNdX(a,1)*fe.dNdX(b,1);

  // Curvature (projection of the second derivative onto the unit normal,
  // measured relative to the reference configuration)
  double kappa = (ddx*n_unit - ddX*N_unit);

  // Derivative of the curvature
  Vector dkappa(3*nen);
  for (a = aa = 1; a <= nen; a++)
    for (i = 1; i <= 3; i++, aa++)
    {
      dkappa(aa) = fe.d2NdX2(a,1,1)*n_unit[i-1];
      for (k = 1; k <= 3; k++)
        dkappa(aa) += ddx[k-1]*dn_unit[a-1](k,i);
    }

  // Second derivative of the curvature
  Matrix ddkappa(3*nen,3*nen);
  for (a = 0, aa = 1; a < nen; a++)
    for (i = 1; i <= 3; i++, aa++)
      for (b = 0, bb = 1; b < nen; b++)
        for (j = 1; j <= 3; j++, bb++)
        {
          ddkappa(aa,bb) = (fe.d2NdX2(1+a,1,1)*dn_unit[b](i,j) +
                            fe.d2NdX2(1+b,1,1)*dn_unit[a](j,i));
          for (k = 1; k <= 3; k++)
            ddkappa(aa,bb) += ddx[k-1]*ddn_unit[a][b](k,i,j);
        }
#if INT_DEBUG > 1
  std::cout <<"ElasticCable: eps = "<< eps <<" kappa = "<< kappa
            <<"\ndeps:"<< deps <<"dkappa:"<< dkappa
            <<"ddeps:"<< ddeps <<"ddkappa:"<< ddkappa;
#endif

  // Norm of initial contravariant basis (G^1)
  double normG1contr2 = 1.0 / (dX.x*dX.x + dX.y*dX.y + dX.z*dX.z);
  double normG1contr4JW = normG1contr2 * normG1contr2 * fe.detJxW;

  double EAxJW = EA * normG1contr4JW; // volume-weighted axial stiffness
  double EIxJW = EI * normG1contr4JW; // volume-weighted bending stiffness

  if (iS)
  {
    // Integrate the internal forces (note the negative sign here)
    elMat.b[iS-1].add(deps,-eps*EAxJW);
    elMat.b[iS-1].add(dkappa,-kappa*EIxJW);
  }

  if (eKm)
  {
    // Integrate the material stiffness matrix
    elMat.A[eKm-1].outer_product(deps,deps*EAxJW,true);
    elMat.A[eKm-1].outer_product(dkappa,dkappa*EIxJW,true);
  }

  if (eKg)
  {
    // Integrate the geometric stiffness matrix
    elMat.A[eKg-1].add(ddeps,eps*EAxJW);
    elMat.A[eKg-1].add(ddkappa,kappa*EIxJW);
  }

  if (lineMass > 0.0)
  {
    double dMass = lineMass*fe.detJxW;
    if (eM)
    {
      // Integrate the mass matrix (lumped per direction, block-diagonal)
      Matrix& M = elMat.A[eM-1];
      for (a = 1; a <= nen; a++)
        for (b = 1; b <= nen; b++)
          for (i = 1; i <= 3; i++)
            M(3*(a-1)+i,3*(b-1)+i) += fe.N(a)*fe.N(b)*dMass;
    }
    if (eS && !gravity.isZero())
    {
      // Integrate the external (gravitation) forces
      Vector& S = elMat.b[eS-1];
      for (a = 1; a <= nen; a++)
        for (i = 1; i <= 3; i++)
          S(3*(a-1)+i) += fe.N(a)*gravity[i-1]*dMass;
    }
  }

  return true;
}
void CCodeGenerator::operator()(File* file) {
    // Outputs code for the given file in a single C translation unit, with no
    // external dependencies.
    if (env_->errors()) {
        return;
    }
    file_ = file;
    out_ << "#include <Object.h>\n";
    out_ << "#include <Primitives.h>\n";
    out_ << "#include <String.h>\n\n";
    out_ << "#define nil 0\n\n";
    out_ << "VoidPtr Boot_calloc(Int size);\n";
    out_ << "void Boot_free(VoidPtr memory);\n";
/*
    for (Constant::Itr cons = env_->constants(); cons; ++cons) {
        if (cons->type()->is_value() && !cons->type()->is_primitive()) {
            assert(!"Not supported");
        }
        operator()(cons->type());
        out_ << " " << cons->label() << " = 0;\n";
    }
*/

    // Output a typedef for the pointer to an object, if the type is a
    // reference type.  This needs to come first, because reference types
    // can refer to eachother circularly.
/*
    for (int i = 0; i < file->dependencies(); i++) {
        TreeNode* dep = file->dependency(i);
        if (Class* cls = dynamic_cast<Class*>(dep)) {
            class_decl(cls);
        }
    }
*/

    // Now output the data layout of the class as a C type definition..
    // for (int i = 0; i < file->dependencies(); i++) {
    //     Feature* feat = file->dependency(i);
    //     if (Class* cls = dynamic_cast<Class*>(feat)) {
    //         //class_def(cls);
    //     }
    // }

    // Output declarations for any functions used by this file.
    DepScanner::Ptr deps(new DepScanner(env_));
    deps->operator()(file);
    for (TreeNode::Itr i = deps->dependencies(); i; ++i) {
        if (i->location().file == file) {
            // Defined in this translation unit; no forward declaration needed.
        } else if (Function* func = dynamic_cast<Function*>(i.pointer())) {
            func_sig(func);
            out_ << ";\n";
        } else if (Constant* con = dynamic_cast<Constant*>(i.pointer())) {
            out_ << "extern ";
            operator()(con->type()); // Constant type
            out_ << " " << con->label() << ";\n";
        }
    }
    // Emit one static String object per string literal used by this file.
    for (String::Itr i = deps->strings(); i; ++i) {
        // Escape the contents so that quotes, backslashes and control
        // characters cannot break the generated C string literal (the
        // previous version emitted the raw bytes verbatim).
        // NOTE(review): assumes i->string() yields a std::string-compatible
        // value -- it already supports .length() and stream output above.
        const std::string raw = i->string();
        std::string escaped;
        escaped.reserve(raw.size());
        for (size_t ci = 0; ci < raw.size(); ++ci) {
            const char ch = raw[ci];
            switch (ch) {
            case '"':  escaped += "\\\""; break;
            case '\\': escaped += "\\\\"; break;
            case '\n': escaped += "\\n";  break;
            case '\t': escaped += "\\t";  break;
            case '\r': escaped += "\\r";  break;
            default:   escaped += ch;     break;
            }
        }
        out_ << "static struct String lit" << (void*)i.pointer() << " = { ";
        out_ << "String__vtable, "; // Vtable
        out_ << "1, "; // Refcount
        out_ << (int)i->string().length() << ", "; // Length of the raw bytes
        out_ << "\"" << escaped << "\" };\n";
    }
    out_ << "\n";

    // Finally, generate code for every feature defined in this file.
    for (Feature::Itr f = file->features(); f; ++f) {
        f(this);
    }
    file_ = 0;
    out_->flush();
}
// Repositions this cache stage within 'container'. The pipeline is split into a
// non-correlated prefix (stages referencing no externally-defined variables) and a
// correlated suffix; the cache stage is re-inserted at the split point, or abandoned
// when no non-correlated prefix exists. Returns the iterator from which optimization
// should resume (container->end() once the reshuffle is done).
Pipeline::SourceContainer::iterator DocumentSourceSequentialDocumentCache::doOptimizeAt(
    Pipeline::SourceContainer::iterator itr, Pipeline::SourceContainer* container) {
    // The DocumentSourceSequentialDocumentCache should always be the last stage in the pipeline
    // pre-optimization. By the time optimization reaches this point, all preceding stages are in
    // the final positions which they would have occupied if no cache stage was present.
    invariant(_hasOptimizedPos || std::next(itr) == container->end());
    invariant((*itr).get() == this);

    // If we have already optimized our position, stay where we are.
    if (_hasOptimizedPos) {
        return std::next(itr);
    }

    // Mark this stage as having optimized itself.
    _hasOptimizedPos = true;

    // If the cache is the only stage in the pipeline, return immediately.
    if (itr == container->begin()) {
        return container->end();
    }

    // Pop the cache stage off the back of the pipeline. Note: the erase invalidates 'itr';
    // the stage itself stays alive in 'cacheStage' until it is re-inserted below.
    auto cacheStage = std::move(*itr);
    container->erase(itr);

    // Get all variable IDs defined in this scope.
    auto varIDs = pExpCtx->variablesParseState.getDefinedVariableIDs();

    auto prefixSplit = container->begin();

    // In the context of this optimization, we are only interested in figuring out
    // which external variables are referenced in the pipeline. We are not attempting
    // to enforce that any referenced metadata are in fact available, this is done
    // elsewhere. So without knowledge of what metadata is in fact available, here
    // we "lie" and say that all metadata is available to avoid tripping any
    // assertions.
    DepsTracker deps(DepsTracker::kAllMetadataAvailable);

    // Iterate through the pipeline stages until we find one which references an external variable.
    for (; prefixSplit != container->end(); ++prefixSplit) {
        (*prefixSplit)->getDependencies(&deps);

        if (deps.hasVariableReferenceTo(varIDs)) {
            break;
        }
    }

    // The 'prefixSplit' iterator is now pointing to the first stage of the correlated suffix. If
    // the split point is the first stage, then the entire pipeline is correlated and we should not
    // attempt to perform any caching. Abandon the cache and return.
    if (prefixSplit == container->begin()) {
        _cache->abandon();
        return container->end();
    }

    // If the cache has been populated and is serving results, remove the non-correlated prefix.
    if (_cache->isServing()) {
        container->erase(container->begin(), prefixSplit);
    }

    // Re-insert the cache stage just ahead of the correlated suffix.
    container->insert(prefixSplit, std::move(cacheStage));

    return container->end();
}