int main() { const float PIF = 3.141592653589793238; for (float x=-2; x<2.2; x+=0.5) for (float y=-2.6; y<2.6; y+=0.5) { auto in = octantIndex(x,y); float oo = 4.f*std::atan2(y,x)/PIF; if (oo<0) oo = 8+oo; printf("%f %f %f %d %d %d\n",x,y, std::atan2(y,x), int(oo), in,octant(in)); } std::mt19937 eng; std::uniform_real_distribution<float> rgen(-5.,5.); //std::cout << rgen(eng) << std::endl; for (int i=0; i!=100000; ++i) { float x1=rgen(eng); float y1=rgen(eng); float x2=rgen(eng); float y2=rgen(eng); float p1 = std::atan2(y1,x1); float p2 = std::atan2(y2,x2); float dp = std::abs(p2-p1); if (dp>PIF) dp = (2.f*PIF)-dp; if (dp<(PIF/4.f) && !sameQuadrant( x1,y1, x2,y2) ) printf("%f %f %f\n", p1*180./PIF, p2*180./PIF, dp*180./PIF); auto o1 = octant(octantIndex(x1,y1)); auto o2 = octant(octantIndex(x2,y2)); if (dp<(PIF/4.f) && !sameQuadrant(o1,o2) ) printf("%f %f %f\n", p1*180./PIF, p2*180./PIF, dp*180./PIF); } return 0; }
void run_app() { std::mt19937 rng(time(NULL)); std::uniform_int_distribution<int> rgen(100, 400); sf::RenderWindow window(sf::VideoMode(WINDOW_X, WINDOW_Y), "Classic Games"); float step_size = 0.005f; double frame_time = 0.0, elapsed_time = 0.0, delta_time = 0.0, accumulator_time = 0.0, current_time = 0.0; while (window.isOpen()) { EventDispatcher::getInstance()->stackFrameEvents(&window); EventDispatcher::getInstance()->dispatchEvents(); elapsed_time = elap_time(); delta_time = elapsed_time - current_time; current_time = elapsed_time; if (delta_time > 0.02f) delta_time = 0.02f; accumulator_time += delta_time; while ((accumulator_time - step_size) >= step_size) { accumulator_time -= step_size; // Update(step_size); } window.clear(sf::Color::Black); PanelContainer::getInstance()->renderPanels(&window); window.display(); } }
// Assigns a uniformly distributed random weight in [min, max] to the
// edge_weight2 property of every edge of g, drawing from the caller's engine.
void set_lambdas(Graph &g, int min, int max, T &gen) {
    boost::uniform_int<> weight_range(min, max);
    boost::variate_generator<T &, boost::uniform_int<> > draw(gen, weight_range);
    boost::randomize_property<edge_weight2_t>(g, draw);
}
// Constructs an RBF-network factor type: fixes the kernel width from
// log_beta and fills the weight vector with small random initial values,
// one (alpha_n, c_n) parameter group per product-state index.
NonlinearRBFFactorType::NonlinearRBFFactorType(const std::string& name,
	const std::vector<unsigned int>& card, unsigned int data_size,
	unsigned int rbf_basis_count, double log_beta)
	: FactorType(name, card, data_size),
	  rbfnet(rbf_basis_count, data_size),
	  rbf_basis_count(rbf_basis_count) {
	InitializeProdCard();
	assert(rbf_basis_count > 0);
	assert((boost::math::isnan)(log_beta) == false);
	rbfnet.FixBeta(log_beta);

	size_t wdim = prod_card * rbfnet.ParameterDimension();

	// Time-seeded uniform sampler on [0,1] used only for initialization.
	boost::mt19937 engine(static_cast<const boost::uint32_t>(std::time(0)) + 1);
	boost::uniform_real<double> unit01;  // range [0,1]
	boost::variate_generator<boost::mt19937, boost::uniform_real<double> > sample(engine, unit01);

	// FIXME: better initialization concepts
	w.resize(wdim);
	std::fill(w.begin(), w.end(), 0.0);

	size_t offset = 0;
	for (unsigned int ri = 0; ri < prod_card; ++ri) {
		// Initialize alpha_n in [-0.5, 0.5)
		for (unsigned int k = 0; k < rbf_basis_count; ++k)
			w[offset + k] = sample() - 0.5;
		// Initialize c_n in [-0.5, 0.5)
		for (unsigned int k = 0; k < (data_size * rbf_basis_count); ++k)
			w[offset + rbf_basis_count + k] = sample() - 0.5;
		offset += rbfnet.ParameterDimension();
	}
}
// Re-initializes the weight vector from labeled training data. For each
// product-state index ri it: (1) collects every factor of this type whose
// observed discrete labeling maps (via ComputeAbsoluteIndex) to ri,
// (2) sets all alpha_n weights for that state to 1.0, and (3) seeds each
// basis center c_n with a randomly selected data vector from those factors,
// perturbed by a tiny uniform jitter (randu()*1.0e-8).
// Preconditions (asserted): every observation is a discrete labeling, each
// state index ri has at least one matching factor, each factor's data vector
// has length data_size, and the final write offset equals w.size().
// NOTE(review): the RNG is time-seeded, so this initialization is not
// reproducible across runs — confirm that is intended.
void NonlinearRBFFactorType::InitializeUsingTrainingData(const std::vector< ParameterEstimationMethod::labeled_instance_type>& training_data) { boost::mt19937 rgen(static_cast<const boost::uint32_t>(std::time(0))+1); boost::uniform_real<double> rdestu; // range [0,1] boost::variate_generator<boost::mt19937, boost::uniform_real<double> > randu(rgen, rdestu); size_t wbase = 0; for (unsigned int ri = 0; ri < prod_card; ++ri) { // 1. Collect all factors that are labeled with the corresponding // ground truth label std::vector<Factor*> m_factors; for (unsigned int n = 0; n < training_data.size(); ++n) { const FactorGraph* fg = training_data[n].first; const FactorGraphObservation* obs = training_data[n].second; assert(obs->Type() == FactorGraphObservation::DiscreteLabelingType); const std::vector<Factor*>& factors = fg->Factors(); for (unsigned int fi = 0; fi < factors.size(); ++fi) { if (factors[fi]->Type()->Name() != Name()) continue; unsigned int ei_obs = factors[fi]->ComputeAbsoluteIndex(obs->State()); if (ei_obs != ri) continue; m_factors.push_back(factors[fi]); } } // Need to be sure there is at least one observation assert(m_factors.size() >= 1); std::cout << m_factors.size() << " samples for statepair " << ri << std::endl; // Initialize alpha_n for (unsigned int wi = 0; wi < rbf_basis_count; ++wi) w[wbase + wi] = 1.0; // Initialize c_n as sample from the training set size_t wbi_base = 0; for (unsigned int bi = 0; bi < rbf_basis_count; ++bi) { unsigned int mi = static_cast<unsigned int>( randu() * static_cast<double>(m_factors.size())); assert(mi < m_factors.size()); const std::vector<double>& H = m_factors[mi]->Data(); // Copy selected training instance, perturbed assert(H.size() == data_size); for (unsigned int wi = 0; wi < data_size; ++wi) { w[wbase + rbf_basis_count + wbi_base + wi] = H[wi] + randu()*1.0e-8; } wbi_base += data_size; } wbase += rbfnet.ParameterDimension(); } assert(wbase == w.size()); }
// Generates a 32-character unique hexadecimal ID: a random boost UUID in its
// canonical string form with the dash separators stripped.
std::string MyUtil::makeUniqueID() {
    boost::uuids::random_generator rgen;
    boost::uuids::uuid u = rgen();

    std::stringstream ss;
    ss << u;  // canonical form: 8-4-4-4-12 hex digits separated by '-'
    const std::string raw = ss.str();

    // Strip separators by value instead of by hard-coded positions. The old
    // erase(8,1)/erase(12,1)/erase(16,1)/erase(20,1) chain produced the same
    // result but silently depended on the exact canonical layout and on each
    // earlier erase shifting the later indices.
    std::string id;
    id.reserve(raw.size());
    for (std::string::size_type i = 0; i < raw.size(); ++i)
        if (raw[i] != '-')
            id += raw[i];
    return id;
}
// Benchmark helper: inserts up to NumElem entries into `store`, keyed by
// uniform random ints in [0, NumElem]; on duplicate keys std::map::insert
// keeps the first-inserted loop index as the value. Returns `store` by value
// (a copy), which is presumably what the benchmark measures.
// NOTE(review): `rgen` is seeded from `seed` but never used — every draw
// comes from `gen`, which is seeded with the constant NumElem, so the result
// is identical for every `seed`. `dis(rgen)` was probably intended; confirm
// before changing, since the benchmark may rely on deterministic contents.
// NOTE(review): the insert result `it` is unused and could be dropped.
std::map<int,size_t> getMap(int seed, std::map<int,size_t>& store) { BENCH(benchMap); enum {NumElem = 1023}; std::mt19937 rgen(seed); std::mt19937 gen(NumElem); std::uniform_int_distribution<> dis(0, NumElem); for(size_t i = 0; i < NumElem; ++i) { int v = dis(gen); auto it(store.insert(std::make_pair(v,i))); } return store; }
// Emits one simulated CPPM frame: random-walks every stored pulse width
// (kept inside [1000, 2000], scaled by 1E-6 to seconds, i.e. microseconds),
// writes the 5 ms sync gap, then each pulse as a 300 us marker followed by
// the remainder of that pulse's length.
void CPPMSimulationDataGenerator::Pulse() {
    // Jitter each pulse width; when the step would leave [1000, 2000],
    // apply it with the opposite sign instead.
    std::transform(pulses.begin(), pulses.end(), pulses.begin(), [&](double width) {
        double step = 10*rdist(rgen)*(rgen()&1?1:-1);
        if (width + step > 2000 || width + step < 1000)
            return width - step;
        return width + step;
    });

    // Frame sync gap: 5 ms, then toggle the line.
    mCPPMSimulationData.Advance(mClockGenerator.AdvanceByTimeS(.005));
    mCPPMSimulationData.Transition();

    // Each pulse: 300 us marker, then the rest of the pulse length.
    std::for_each(pulses.begin(), pulses.end(), [&](double pulseLen) {
        mCPPMSimulationData.Advance(mClockGenerator.AdvanceByTimeS(.0003));
        mCPPMSimulationData.Transition();
        mCPPMSimulationData.Advance(mClockGenerator.AdvanceByTimeS((pulseLen * 1E-6) - 0.0003));
        mCPPMSimulationData.Transition();
    });

    // Trailing 300 us marker closing the final pulse.
    mCPPMSimulationData.Advance(mClockGenerator.AdvanceByTimeS(.0003));
    mCPPMSimulationData.Transition();
}
// Fills the simulated PWM channel up to the requested sample number: each
// iteration emits one 50 Hz frame whose high-pulse width random-walks
// inside [1000, 2000] (scaled by 1E-6 to seconds, i.e. microseconds).
U32 PWMSimulationDataGenerator::GenerateSimulationData(U64 largest_sample_requested, U32 sample_rate, SimulationChannelDescriptor **simulation_channel) {
    U64 adjusted_largest_sample_requested =
        AnalyzerHelpers::AdjustSimulationTargetSample(largest_sample_requested, sample_rate, mSimulationSampleRateHz);

    while (mPWMSimulationData.GetCurrentSampleNumber() < adjusted_largest_sample_requested) {
        // Random-walk the pulse width; when the step would leave
        // [1000, 2000], apply it with the opposite sign instead.
        double by = 10*rdist(rgen)*(rgen()&1?1:-1);
        if (pulseLen + by > 2000 || pulseLen + by < 1000)
            pulseLen -= by;
        else
            pulseLen += by;

        // One frame: advance a 50 Hz period, raise the line, hold for the
        // pulse width, then drop it.
        mPWMSimulationData.Advance(mSimulationSampleRateHz / 50);
        mPWMSimulationData.TransitionIfNeeded(BIT_HIGH);
        mPWMSimulationData.Advance(mClockGenerator.AdvanceByTimeS(pulseLen * 1E-6));
        mPWMSimulationData.TransitionIfNeeded(BIT_LOW);
    }

    *simulation_channel = &mPWMSimulationData;
    return 1;
}
// Demo routine: fills a vector with 10 random ints in [0, 100], prints them,
// sorts them, prints the sorted sequence, and reports the extremes.
void SorterVector() {
    std::random_device seed_source;
    std::mt19937 engine(seed_source());  // mersenne_twister
    std::uniform_int_distribution<int> dist(0, 100);

    std::vector<int> values;
    for (int count = 0; count < 10; ++count)
        values.push_back(dist(engine));

    std::cout << "The random numbers are: " << std::endl;
    std::copy(values.begin(), values.end(), std::ostream_iterator<int>(std::cout, "\n"));

    std::sort(values.begin(), values.end());

    std::cout << "Vector sorted is: " << std::endl;
    std::copy(values.begin(), values.end(), std::ostream_iterator<int>(std::cout, "\n"));

    std::cout << "The maximum value is " << *std::max_element(values.begin(), values.end()) << std::endl;
    std::cout << "The minimum value is " << *std::min_element(values.begin(), values.end()) << std::endl;
}
// Emits problem 13 ("find the general plane equation through 3 points")
// into plist: student part (three random points A, B, C with pairwise
// distinct per-axis coordinates) and teacher part (the defining determinant
// and the resulting plane equation). The Cyrillic format strings are
// consumed by the downstream renderer and must stay byte-identical.
quest13::Print(TList* plist) {
    int i, j, k;
    int a[3], b[3], c[3];
    int plane[4], vert[4][10], vert_t[4][10];
    char* buf = new char[256];
    char* buf1 = new char[256];

    // Stable per-task key so the same task can be regenerated from its key.
    if( keygen == 0 ) {
        keygen = random( 1000 ) + 1;
    }
    srand( keygen );

    // Three points, distinct per coordinate axis.
    for( i = 0; i < 3; i ++ ) {
        a[i] = rgen( keygen, 1, amin, amax );
        b[i] = rgen( keygen, 1, amin, amax );
        while ( b[i] == a[i] )
            b[i] = rgen( keygen, 1, amin, amax );
        c[i] = rgen( keygen, 1, amin, amax );
        while ( c[i] == b[i] || c[i] == a[i] )
            c[i] = rgen( keygen, 1, amin, amax );
    }

    if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) {
        sprintf( buf, "String(\"# Тема - %s \")", selecttask->name );
        plist->Add( strdup(buf) );
    } else {
        sprintf( buf, "String(#)" );
        plist->Add( strdup(buf) );
    }
    sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad );
    plist->Add( strdup(buf) );
    if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) {
        sprintf( buf, "String(Найти общее уравнение плоскости зная 3 точки.)" );
        plist->Add( strdup(buf) );
    }

    // Point lines: A!(x,y,z), B!(x,y,z), C!(x,y,z).
    sprintf( buf, "A!(%d", a[0] );
    for ( i = 1; i < 3; i ++ ) { sprintf( buf1, ",%d", a[i] ); strcat( buf, buf1 ); }
    strcat( buf, ")" );
    plist->Add( strdup(buf) );
    sprintf( buf, "B!(%d", b[0] );
    for ( i = 1; i < 3; i ++ ) { sprintf( buf1, ",%d", b[i] ); strcat( buf, buf1 ); }
    strcat( buf, ")" );
    plist->Add( strdup(buf) );
    sprintf( buf, "C!(%d", c[0] );
    for ( i = 1; i < 3; i ++ ) { sprintf( buf1, ",%d", c[i] ); strcat( buf, buf1 ); }
    strcat( buf, ")" );
    plist->Add( strdup(buf) );

    sprintf( buf, "String(@Часть преподавателя )" );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(\"Тема - %s \")", selecttask->name );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(ВАРИАНТ %i, решение задачи %i, ключ %i)", nvar, nzad, keygen );
    plist->Add( strdup(buf) );

    // Row 0 = point B, rows 1-2 = direction vectors A-B and C-B.
    for ( i = 0; i < 3; i ++ ) {
        vert[0][i] = b[i];
        vert[1][i] = a[i] - b[i];
        vert[2][i] = c[i] - b[i];
    }
    // Normal components via signed 2x2 cofactors of the direction rows.
    for ( i = 0; i < 3; i ++ ) {
        k = 0;
        for ( j = 0; j < 3; j ++ ) {
            if ( j != i ) {
                vert_t[0][k] = vert[1][j];
                vert_t[1][k] = vert[2][j];
                k ++;
            }
        }
        plane[i] = pow(-1,i) * determ( 2, vert_t );
    }
    plane[3] = - determ ( 3, vert );

    sprintf( buf, "String(Искомая плоскость: )" );
    plist->Add( strdup(buf) );
    // The defining 3x3 determinant; last row holds symbolic x/y/z terms.
    sprintf( buf, "|Matrix(3,3" );
    for ( i = 0; i < 3; i ++ )
        for ( j = 0; j < 3; j ++ ) {
            switch ( i ) {
            case 0:
                sprintf( buf1, ",%d", a[j] - b[j] );
                break;
            case 1:
                sprintf( buf1, ",%d", c[j] - b[j] );
                break;
            case 2:
                if ( j == 0 )
                    if ( b[j] ) sprintf( buf1, ",x%+d", - b[j] );
                    else sprintf( buf1, ",x" );
                else if ( j == 1 )
                    if ( b[j] ) sprintf( buf1, ",y%+d", - b[j] );
                    else sprintf( buf1, ",y" );
                else if ( b[j] ) sprintf( buf1, ",z%+d", - b[j] );
                else sprintf( buf1, ",z" );
            }
            strcat( buf, buf1 );
        }
    strcat( buf, ")|=0" );
    plist->Add( strdup(buf) );

    // Pretty-print A*x+B*y+C*z+D=0, omitting zero terms and unit factors.
    strcpy ( buf, "" );
    if ( plane[0] ) {
        if ( plane[0] == 1 ) sprintf( buf1, "x" );
        else if ( plane[0] == -1 ) sprintf( buf1, "-x" );
        else sprintf( buf1, "%d*x", plane[0] );
        strcat ( buf, buf1 );
    }
    if ( plane[1] ) {
        if ( plane[1] == 1 ) sprintf( buf1, "+y" );
        else if ( plane[1] == -1 ) sprintf( buf1, "-y" );
        else sprintf( buf1, "%+d*y", plane[1] );
        strcat ( buf, buf1 );
    }
    if ( plane[2] ) {
        if ( plane[2] == 1 ) sprintf( buf1, "+z" );
        else if ( plane[2] == -1 ) sprintf( buf1, "-z" );
        else sprintf( buf1, "%+d*z", plane[2] );
        strcat ( buf, buf1 );
    }
    if ( plane[3] ) {
        sprintf( buf1, "%+d", plane[3] );
        strcat( buf, buf1 );
    }
    if ( plane[0] || plane[1] || plane[2] || plane[3] ) {
        strcat( buf, "=0" );
        plist->Add( strdup(buf) );
    }

    keygen = 0;
    // BUG FIX: buf/buf1 were allocated with new[] but released with plain
    // delete (undefined behavior); the array form must be used.
    delete [] buf;
    delete [] buf1;
    return 0;
}
// Builds the best binary split over discrete attributes. Two paths:
//  - measure->needs == Generator: asks the measure for the best binarization
//    of each candidate attribute directly from the example generator;
//  - otherwise: works from the domain contingency, exhaustively trying every
//    two-way partition of each attribute's values (capped at 16 values) and
//    scoring each partition with the measure. Partitions leaving fewer than
//    minSubset examples on either side are skipped.
// Ties on equal quality are broken uniformly at random via
// rgen.randbool(++wins) (reservoir-style selection), so results can vary
// between runs with equal-quality attributes.
// NOTE(review): in the first candidate loop, `ci` is read ((ci!=ce) && *ci)
// but never advanced, so every attribute sees candidates.front() — confirm
// against the upstream source whether *(ci++) was intended.
// NOTE(review): one returnNothing call omits the `quality` argument
// (returnNothing(descriptions, subsetSizes, spentAttribute)) — presumably a
// distinct overload; verify.
// NOTE: this chunk is truncated — the function body does not close within
// the visible source, so the code below is kept byte-identical rather than
// restructured.
PClassifier TTreeSplitConstructor_ExhaustiveBinary::operator()( PStringList &descriptions, PDiscDistribution &subsetSizes, float &quality, int &spentAttribute, PExampleGenerator gen, const int &weightID , PDomainContingency dcont, PDistribution apriorClass, const vector<bool> &candidates, PClassifier ) { checkProperty(measure); measure->checkClassTypeExc(gen->domain->classVar->varType); PIntList bestMapping; int wins, bestAttr; PVariable bvar; if (measure->needs==TMeasureAttribute::Generator) { bool cse = candidates.size()==0; bool haveCandidates = false; vector<bool> myCandidates; myCandidates.reserve(gen->domain->attributes->size()); vector<bool>::const_iterator ci(candidates.begin()), ce(candidates.end()); TVarList::const_iterator vi, ve(gen->domain->attributes->end()); for(vi = gen->domain->attributes->begin(); vi != ve; vi++) { bool co = (*vi)->varType == TValue::INTVAR && (!cse || (ci!=ce) && *ci); myCandidates.push_back(co); haveCandidates = haveCandidates || co; } if (!haveCandidates) return returnNothing(descriptions, subsetSizes, quality, spentAttribute); PDistribution thisSubsets; float thisQuality; wins = 0; int thisAttr = 0; int N = gen->numberOfExamples(); TSimpleRandomGenerator rgen(N); ci = myCandidates.begin(); for(vi = gen->domain->attributes->begin(); vi != ve; ci++, vi++, thisAttr++) { if (*ci) { thisSubsets = NULL; PIntList thisMapping = /*throughCont ? 
measure->bestBinarization(thisSubsets, thisQuality, *dci, dcont->classes, apriorClass, minSubset) : */measure->bestBinarization(thisSubsets, thisQuality, *vi, gen, apriorClass, weightID, minSubset); if (thisMapping && ( (!wins || (thisQuality>quality)) && ((wins=1)==1) || (thisQuality==quality) && rgen.randbool(++wins))) { bestAttr = thisAttr; quality = thisQuality; subsetSizes = thisSubsets; bestMapping = thisMapping; } } /*if (thoughCont) dci++; */ } if (!wins) return returnNothing(descriptions, subsetSizes, quality, spentAttribute); if (quality<worstAcceptable) return returnNothing(descriptions, subsetSizes, spentAttribute); if (subsetSizes && subsetSizes->variable) bvar = subsetSizes->variable; else { TEnumVariable *evar = mlnew TEnumVariable(""); evar->addValue("0"); evar->addValue("1"); bvar = evar; } } else { bool cse = candidates.size()==0; if (!cse && noCandidates(candidates)) return returnNothing(descriptions, subsetSizes, quality, spentAttribute); if (!dcont || dcont->classIsOuter) { dcont = PDomainContingency(mlnew TDomainContingency(gen, weightID)); // raiseWarningWho("TreeSplitConstructor_ExhaustiveBinary", "this class is not optimized for 'candidates' list and can be very slow"); } int N = gen ? 
gen->numberOfExamples() : -1; if (N<0) N = dcont->classes->cases; TSimpleRandomGenerator rgen(N); PDistribution classDistribution = dcont->classes; vector<bool>::const_iterator ci(candidates.begin()), ce(candidates.end()); TDiscDistribution *dis0, *dis1; TContDistribution *con0, *con1; int thisAttr = 0; bestAttr = -1; wins = 0; quality = 0.0; float leftExamples, rightExamples; TDomainContingency::iterator dci(dcont->begin()), dce(dcont->end()); for(; (cse || (ci!=ce)) && (dci!=dce); dci++, thisAttr++) { // We consider the attribute only if it is a candidate, discrete and has at least two values if ((cse || *(ci++)) && ((*dci)->outerVariable->varType==TValue::INTVAR) && ((*dci)->discrete->size()>=2)) { const TDistributionVector &distr = *(*dci)->discrete; if (distr.size()>16) raiseError("'%s' has more than 16 values, cannot exhaustively binarize", gen->domain->attributes->at(thisAttr)->get_name().c_str()); // If the attribute is binary, we check subsetSizes and assess the quality if they are OK if (distr.size()==2) { if ((distr.front()->abs<minSubset) || (distr.back()->abs<minSubset)) continue; // next attribute else { float thisMeas = measure->call(thisAttr, dcont, apriorClass); if ( ((!wins || (thisMeas>quality)) && ((wins=1)==1)) || ((thisMeas==quality) && rgen.randbool(++wins))) { bestAttr = thisAttr; quality = thisMeas; leftExamples = distr.front()->abs; rightExamples = distr.back()->abs; bestMapping = mlnew TIntList(2, 0); bestMapping->at(1) = 1; } continue; } } vector<int> valueIndices; int ind = 0; for(TDistributionVector::const_iterator dvi(distr.begin()), dve(distr.end()); (dvi!=dve); dvi++, ind++) if ((*dvi)->abs>0) valueIndices.push_back(ind); if (valueIndices.size()<2) continue; PContingency cont = prepareBinaryCheat(classDistribution, *dci, bvar, dis0, dis1, con0, con1); // A real job: go through all splits int binWins = 0; float binQuality = -1.0; float binLeftExamples = -1.0, binRightExamples = -1.0; // Selection: each element correspons to a value 
of the original attribute and is 1, if the value goes right // The first value always goes left (and has no corresponding bit in selection. TBoolCount selection(valueIndices.size()-1), bestSelection(0); // First for discrete classes if (dis0) { do { *dis0 = CAST_TO_DISCDISTRIBUTION(distr[valueIndices[0]]); *dis1 *= 0; vector<int>::const_iterator ii(valueIndices.begin()); ii++; for(TBoolCount::const_iterator bi(selection.begin()), be(selection.end()); bi!=be; bi++, ii++) *(*bi ? dis1 : dis0) += distr[*ii]; cont->outerDistribution->setint(0, dis0->abs); cont->outerDistribution->setint(1, dis1->abs); if ((dis0->abs < minSubset) || (dis1->abs < minSubset)) continue; // cannot split like that, to few examples in one of the branches float thisMeas = measure->operator()(cont, classDistribution, apriorClass); if ( ((!binWins) || (thisMeas>binQuality)) && ((binWins=1) ==1) || (thisMeas==binQuality) && rgen.randbool(++binWins)) { bestSelection = selection; binQuality = thisMeas; binLeftExamples = dis0->abs; binRightExamples = dis1->abs; } } while (selection.next()); } // And then exactly the same for continuous classes else { do { *con0 = CAST_TO_CONTDISTRIBUTION(distr[0]); *con1 = TContDistribution(); vector<int>::const_iterator ii(valueIndices.begin()); for(TBoolCount::const_iterator bi(selection.begin()), be(selection.end()); bi!=be; bi++, ii++) *(*bi ? 
con1 : con0) += distr[*ii]; if ((con0->abs<minSubset) || (con1->abs<minSubset)) continue; // cannot split like that, to few examples in one of the branches float thisMeas = measure->operator()(cont, classDistribution, apriorClass); if ( ((!binWins) || (thisMeas>binQuality)) && ((binWins=1) ==1) || (thisMeas==binQuality) && rgen.randbool(++binWins)) { bestSelection = selection; binQuality = thisMeas; binLeftExamples = con0->abs; binRightExamples = con1->abs; } } while (selection.next()); } if ( binWins && ( (!wins || (binQuality>quality)) && ((wins=1)==1) || (binQuality==quality) && rgen.randbool(++wins))) { bestAttr = thisAttr; quality = binQuality; leftExamples = binLeftExamples; rightExamples = binRightExamples; bestMapping = mlnew TIntList(distr.size(), -1); vector<int>::const_iterator ii = valueIndices.begin(); bestMapping->at(*(ii++)) = 0; ITERATE(TBoolCount, bi, bestSelection) bestMapping->at(*(ii++)) = *bi ? 1 : 0; } } } if (!wins) return returnNothing(descriptions, subsetSizes, quality, spentAttribute); subsetSizes = mlnew TDiscDistribution(); subsetSizes->addint(0, leftExamples); subsetSizes->addint(1, rightExamples); } PVariable attribute = gen->domain->attributes->at(bestAttr); if (attribute->noOfValues() == 2) { spentAttribute = bestAttr; descriptions = mlnew TStringList(attribute.AS(TEnumVariable)->values.getReference()); TClassifierFromVarFD *cfv = mlnew TClassifierFromVarFD(attribute, gen->domain, bestAttr, subsetSizes); cfv->transformUnknowns = false; return cfv; } string s0, s1; int ns0 = 0, ns1 = 0; TValue ev; attribute->firstValue(ev); PITERATE(TIntList, mi, bestMapping) { string str; attribute->val2str(ev, str); if (*mi==1) { s1 += string(ns1 ? ", " : "") + str; ns1++; } else if (*mi==0) { s0 += string(ns0 ? ", " : "") + str; ns0++; } attribute->nextValue(ev); }
// Multiple-choice variant of problem 13: emits the three random points and
// five candidate plane equations whose coefficients are shifted by
// n - (Right_Numb - 1); the shift is zero exactly for the correct variant
// Right_Numb. Fills the test descriptor `t` with the answer bookkeeping.
// Cyrillic format strings are consumed downstream and stay byte-identical.
quest13::Print(TList* plist, class test &t) {
    int i, j, k;
    int a[3], b[3], c[3];
    int plane[4], vert[4][10], vert_t[4][10];
    int n, Right_Numb;
    char* buf = new char[256];
    char* buf1 = new char[256];

    // Stable per-task key so the same task can be regenerated from its key.
    if( keygen == 0 ) {
        keygen = random( 1000 ) + 1;
    }
    Right_Numb = random( 5 ) + 1;  // index (1..5) of the correct answer
    srand( keygen );

    // Three points, distinct per coordinate axis.
    for( i = 0; i < 3; i ++ ) {
        a[i] = rgen( keygen, 1, amin, amax );
        b[i] = rgen( keygen, 1, amin, amax );
        while ( b[i] == a[i] )
            b[i] = rgen( keygen, 1, amin, amax );
        c[i] = rgen( keygen, 1, amin, amax );
        while ( c[i] == b[i] || c[i] == a[i] )
            c[i] = rgen( keygen, 1, amin, amax );
    }

    sprintf( buf, "String(\"# Тема - %s \")", selecttask->name );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(Найти общее уравнение плоскости зная 3 точки.)" );
    plist->Add( strdup(buf) );

    // Point lines: A!(x,y,z), B!(x,y,z), C!(x,y,z).
    sprintf( buf, "A!(%d", a[0] );
    for ( i = 1; i < 3; i ++ ) { sprintf( buf1, ",%d", a[i] ); strcat( buf, buf1 ); }
    strcat( buf, ")" );
    plist->Add( strdup(buf) );
    sprintf( buf, "B!(%d", b[0] );
    for ( i = 1; i < 3; i ++ ) { sprintf( buf1, ",%d", b[i] ); strcat( buf, buf1 ); }
    strcat( buf, ")" );
    plist->Add( strdup(buf) );
    sprintf( buf, "C!(%d", c[0] );
    for ( i = 1; i < 3; i ++ ) { sprintf( buf1, ",%d", c[i] ); strcat( buf, buf1 ); }
    strcat( buf, ")" );
    plist->Add( strdup(buf) );

    // Row 0 = point B, rows 1-2 = direction vectors A-B and C-B.
    for ( i = 0; i < 3; i ++ ) {
        vert[0][i] = b[i];
        vert[1][i] = a[i] - b[i];
        vert[2][i] = c[i] - b[i];
    }
    // Normal components via signed 2x2 cofactors of the direction rows.
    for ( i = 0; i < 3; i ++ ) {
        k = 0;
        for ( j = 0; j < 3; j ++ ) {
            if ( j != i ) {
                vert_t[0][k] = vert[1][j];
                vert_t[1][k] = vert[2][j];
                k ++;
            }
        }
        plane[i] = pow(-1,i) * determ( 2, vert_t );
    }
    plane[3] = - determ ( 3, vert );

    // Five answer variants a)..e); every coefficient is offset by
    // n - (Right_Numb - 1), which vanishes for the correct variant.
    for( n = 0; n < 5; n ++ ) {
        sprintf( buf, "String(\"Вариант %c):\")", 'a' + n );
        plist->Add( strdup(buf) );
        strcpy ( buf, "" );
        if ( plane[0] - ( Right_Numb - 1 ) + n ) {
            if ( plane[0] - ( Right_Numb - 1 ) + n == 1 ) sprintf( buf1, "x" );
            else if ( plane[0] - ( Right_Numb - 1 ) + n == -1 ) sprintf( buf1, "-x" );
            else sprintf( buf1, "%d*x", plane[0] - ( Right_Numb - 1 ) + n );
            strcat ( buf, buf1 );
        }
        if ( plane[1] - ( Right_Numb - 1 ) + n ) {
            if ( plane[1] - ( Right_Numb - 1 ) + n == 1 ) sprintf( buf1, "+y" );
            else if ( plane[1] - ( Right_Numb - 1 ) + n == -1 ) sprintf( buf1, "-y" );
            else sprintf( buf1, "%+d*y", plane[1] - ( Right_Numb - 1 ) + n );
            strcat ( buf, buf1 );
        }
        if ( plane[2] - ( Right_Numb - 1 ) + n ) {
            if ( plane[2] - ( Right_Numb - 1 ) + n == 1 ) sprintf( buf1, "+z" );
            else if ( plane[2] - ( Right_Numb - 1 ) + n == -1 ) sprintf( buf1, "-z" );
            else sprintf( buf1, "%+d*z", plane[2] - ( Right_Numb - 1 ) + n );
            strcat ( buf, buf1 );
        }
        if ( plane[3] - ( Right_Numb - 1 ) + n ) {
            sprintf( buf1, "%+d", plane[3] - ( Right_Numb - 1 ) + n );
            strcat( buf, buf1 );
        }
        // NOTE(review): this guard checks the unshifted coefficients, unlike
        // the per-term guards above — confirm that is intentional.
        if ( plane[0] || plane[1] || plane[2] || plane[3] ) {
            strcat( buf, "=0" );
            plist->Add( strdup(buf) );
        }
    }

    /* teacher part kept disabled, as in the original:
    sprintf(buf,"String(@Часть преподавателя )"); plist->Add(strdup(buf));
    sprintf(buf,"String(\"Тема - %s \")",selecttask->name); plist->Add(strdup(buf));
    sprintf(buf,"String(ВАРИАНТ %i, решение задачи %i, ключ %i)", nvar, nzad, keygen ); plist->Add(strdup(buf));
    sprintf(buf,"String( Правильный ответ - %c)", 'a' + Right_Numb - 1 ); plist->Add(strdup(buf)); */

    t.pr_tst = 1;
    t.ch_ask = 5;
    t.right_ask = Right_Numb;
    t.msg = "Тест успешно сгенерирован.";
    keygen = 0;
    // BUG FIX: buf/buf1 were allocated with new[] but released with plain
    // delete (undefined behavior); the array form must be used.
    delete [] buf;
    delete [] buf1;
    return 0;
}
// Emits problem 24 ("find the inverse matrix") into plist: a random
// dim x dim integer matrix with nonzero determinant for the student, and
// its inverse — reduced fractions of signed cofactors over det(A) — for
// the teacher. Cyrillic format strings stay byte-identical.
quest24::Print(TList* plist) {
    drobi mtr[10][10];
    int i, j, k, l;
    int ma[10][10], A[10][10];
    int det_A;
    // (removed unused locals: n, det_B)
    char* buf = new char[256];
    char* buf1 = new char[256];

    // Stable per-task key so the same task can be regenerated from its key.
    if( keygen == 0 ) {
        keygen = random( 1000 ) + 1;
    }
    srand( keygen );

    // Start from the zero matrix (det == 0) and regenerate until the
    // determinant is nonzero, so the matrix is invertible.
    for( i = 0; i < dim; i++ ) {
        for( j = 0; j < dim; j++ ) {
            ma[i][j] = 0;
        }
    }
    while( !determ( dim, ma ) )
        for( i = 0; i < dim; i++ ) {
            for( j = 0; j < dim; j++ ) {
                ma[i][j] = rgen( keygen, 1, amin, amax );
            }
        }

    if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) {
        sprintf( buf, "String(\"# Тема - %s \")", selecttask->name );
        plist->Add( strdup(buf) );
    } else {
        sprintf( buf, "String(#)" );
        plist->Add( strdup(buf) );
    }
    sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad );
    plist->Add( strdup(buf) );
    if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) {
        sprintf( buf, "String(Найти обратную матрицу к матрице:)" );
        plist->Add( strdup(buf) );
    }
    sprintf( buf, "A=!(Matrix(%d,%d", dim, dim );
    for ( i = 0; i < dim; i ++ )
        for ( j = 0; j < dim; j ++ ) {
            sprintf( buf1, ",%d", ma[i][j] );
            strcat( buf, buf1 );
        }
    strcat( buf, "))" );
    plist->Add( strdup(buf) );
    sprintf( buf, "pow(A,-1)=..." );
    plist->Add( strdup(buf) );

    sprintf( buf, "String(@Часть преподавателя )" );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(\"Тема - %s \")", selecttask->name );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(ВАРИАНТ %i, решение задачи %i, ключ %i)", nvar, nzad, keygen );
    plist->Add( strdup(buf) );

    det_A = determ ( dim, ma );
    // Inverse entries: mtr[i][j] = (-1)^(i+j) * det(minor without row j,
    // column i) / det(A), reduced via sokrat().
    for ( i = 0; i < dim; i ++ )
        for ( j = 0; j < dim; j ++ ) {
            int ai, aj;
            ai = 0;
            aj = 0;
            // Build the minor of ma with row j and column i removed.
            for ( k = 0; k < dim; k ++ ) {
                if ( k == j ) continue;
                else {
                    for ( l = 0; l < dim; l ++ ) {
                        if ( l != i ) {
                            A[ai][aj] = ma[k][l];
                            aj ++;
                        }
                        else continue;
                    }
                    ai ++;
                    aj = 0;
                }
            }
            mtr[i][j] = drobi ( pow( -1, i + j ) * determ( dim - 1, A ), det_A );
            mtr[i][j].sokrat();  // reduce the fraction
        }

    sprintf( buf, "pow(A,-1)=!(Matrix(%d,%d", dim, dim );
    for ( i = 0; i < dim; i ++ )
        for ( j = 0; j < dim; j ++ ) {
            sprintf( buf1, ",%s", DrobiToStr( mtr[i][j] ) );
            strcat( buf, buf1 );
        }
    strcat( buf, "))" );
    plist->Add( strdup(buf) );

    keygen = 0;
    // BUG FIX: buf/buf1 were allocated with new[] but released with plain
    // delete (undefined behavior); the array form must be used.
    delete [] buf;
    delete [] buf1;
    return 0;
}
// Selects the single best discrete attribute for a tree split using the
// configured quality `measure`. Three paths depending on what the measure
// needs: class contingencies (Contingency_Class), the full domain
// contingency (DomainContingency), or raw per-attribute distributions.
// In every path, attributes failing checkDistribution(..., minSubset) are
// skipped, and ties on equal quality are broken uniformly at random via
// rgen.randbool(++wins) (reservoir-style selection seeded from the example
// count). Returns a TClassifierFromVarFD over the winning attribute, or
// returnNothing(...) when nothing wins or quality < worstAcceptable.
// NOTE(review): kept byte-identical — the win/tie-break conditions embed
// assignments ((wins=1)==1) whose evaluation order is load-bearing, and one
// returnNothing call omits `quality` (presumably a distinct overload).
PClassifier TTreeSplitConstructor_Attribute::operator()( PStringList &descriptions, PDiscDistribution &subsetSizes, float &quality, int &spentAttribute, PExampleGenerator gen, const int &weightID, PDomainContingency dcont, PDistribution apriorClass, const vector<bool> &candidates, PClassifier nodeClassifier ) { checkProperty(measure); measure->checkClassTypeExc(gen->domain->classVar->varType); bool cse = candidates.size()==0; vector<bool>::const_iterator ci(candidates.begin()), ce(candidates.end()); if (!cse) { if (noCandidates(candidates)) return returnNothing(descriptions, subsetSizes, quality, spentAttribute); ci = candidates.begin(); } int N = gen ? gen->numberOfExamples() : -1; if (N<0) N = dcont->classes->cases; TSimpleRandomGenerator rgen(N); int thisAttr = 0, bestAttr = -1, wins = 0; quality = 0.0; if (measure->needs == TMeasureAttribute::Contingency_Class) { vector<bool> myCandidates; if (cse) { myCandidates.reserve(gen->domain->attributes->size()); PITERATE(TVarList, vi, gen->domain->attributes) myCandidates.push_back((*vi)->varType == TValue::INTVAR); } else { myCandidates.reserve(candidates.size()); TVarList::const_iterator vi(gen->domain->attributes->begin()); for(; ci != ce; ci++, vi++) myCandidates.push_back(*ci && ((*vi)->varType == TValue::INTVAR)); } if (!dcont || dcont->classIsOuter) dcont = PDomainContingency(mlnew TDomainContingency(gen, weightID, myCandidates)); ci = myCandidates.begin(); ce = myCandidates.end(); TDomainContingency::iterator dci(dcont->begin()), dce(dcont->end()); for(; (ci != ce) && (dci!=dce); dci++, ci++, thisAttr++) if (*ci && checkDistribution((const TDiscDistribution &)((*dci)->outerDistribution.getReference()), minSubset)) { float thisMeas = measure->call(thisAttr, dcont, apriorClass); if ( ((!wins || (thisMeas>quality)) && ((wins=1)==1)) || ((thisMeas==quality) && rgen.randbool(++wins))) { quality = thisMeas; subsetSizes = (*dci)->outerDistribution; bestAttr = thisAttr; } } } else if (measure->needs == 
TMeasureAttribute::DomainContingency) { if (!dcont || dcont->classIsOuter) dcont = PDomainContingency(mlnew TDomainContingency(gen, weightID)); TDomainContingency::iterator dci(dcont->begin()), dce(dcont->end()); for(; (cse || (ci!=ce)) && (dci!=dce); dci++, thisAttr++) if ( (cse || *(ci++)) && ((*dci)->outerVariable->varType==TValue::INTVAR) && checkDistribution((const TDiscDistribution &)((*dci)->outerDistribution.getReference()), minSubset)) { float thisMeas = measure->call(thisAttr, dcont, apriorClass); if ( ((!wins || (thisMeas>quality)) && ((wins=1)==1)) || ((thisMeas==quality) && rgen.randbool(++wins))) { quality = thisMeas; subsetSizes = (*dci)->outerDistribution; bestAttr = thisAttr; } } } else { TDomainDistributions ddist(gen, weightID); TDomainDistributions::iterator ddi(ddist.begin()), dde(ddist.end()-1); for(; (cse || (ci!=ce)) && (ddi!=dde); ddi++, thisAttr++) if (cse || *(ci++)) { TDiscDistribution *discdist = (*ddi).AS(TDiscDistribution); if (discdist && checkDistribution(*discdist, minSubset)) { float thisMeas = measure->call(thisAttr, gen, apriorClass, weightID); if ( ((!wins || (thisMeas>quality)) && ((wins=1)==1)) || ((thisMeas==quality) && rgen.randbool(++wins))) { quality = thisMeas; subsetSizes = PDiscDistribution(*ddi); // not discdist - this would be double wrapping! bestAttr = thisAttr; } } } } if (!wins) return returnNothing(descriptions, subsetSizes, quality, spentAttribute); if (quality<worstAcceptable) return returnNothing(descriptions, subsetSizes, spentAttribute); PVariable attribute = gen->domain->attributes->at(bestAttr); TEnumVariable *evar = attribute.AS(TEnumVariable); if (evar) descriptions = mlnew TStringList(evar->values.getReference()); else descriptions = mlnew TStringList(subsetSizes->size(), ""); spentAttribute = bestAttr; TClassifierFromVarFD *cfv = mlnew TClassifierFromVarFD(attribute, gen->domain, bestAttr, subsetSizes); cfv->transformUnknowns = false; return cfv; }
int main() { // constexpr float PIO2 = 1.5707963267948966192; //constexpr float PIO4 = 0.7853981633974483096; std::mt19937 eng; std::uniform_real_distribution<float> rgen(-1.,1.); constexpr int NN=1000000; alignas(32) std::array<float, NN> x; alignas(32) std::array<float, NN> y; int ok=1; bool pr=true; // long long tl=0, ts0=0, ts1=0, tsc=0; for (int i=0;i!=NN;++i) { x[i]=rgen(eng); y[i]=rgen(eng); } { // reference loop long long tl = -refClock(); float sq=0.; for (int i=0;i!=NN;++i) sq+= x[i]+y[i]; tl += refClock(); if(pr) printf("sum %f : %f\n",double(tl)/double(NN*ok),sq); } { // pt cut long long tl = -refClock(); constexpr float ptcut = 0.5f; float sq=0.; // #pragma omp simd reduction(+: sq) for (int i=0; i<NN;++i) { // sq+= (pt(x[i],y[i])>ptcut) ? x[i]+y[i] : 0.f; if (pt(x[i],y[i])>ptcut) sq+= x[i]+y[i]; } tl += refClock(); if(pr) printf("pt %f : %f\n",double(tl)/double(NN*ok),sq); } auto dphi = [](float p1,float p2) { auto dp=std::abs(p1-p2); if (dp>float(M_PI)) dp-=float(2*M_PI); return std::abs(dp); }; { // phi cut long long tl = -refClock(); constexpr float phicut = 0.125f; float sq=0.; int tot =0; for (int i=0;i!=NN-1;++i) { for (int j=i+1;j<std::min(i+16,NN);++j) { if (dphi(phi(x[i],y[i]),phi(x[j],y[j]))<phicut) { sq+= x[j]+y[j]; ++tot;} } } tl += refClock(); if(pr) printf("phicut %f : %f %d\n",double(tl)/double(NN*ok),sq,tot); } return 0; }
// Micro-benchmark of the Mersenne-Twister fill and of a repeated reduction
// over a 1M-float heap buffer, instrumented with the external PerfStat
// counters (c11: first/cold fill, c12: second/warm fill, c2: summation).
int main(int argc, char**) {
  std::mt19937 eng; std::uniform_real_distribution<float> rgen(0.,1.);
  constexpr int NN = 1024*1024;
  // alignas(128) float r[NN];
  // 32-byte aligned heap buffer; the builtin lets the compiler assume alignment.
  float * r = (float*)__builtin_assume_aligned(::memalign(32,NN*sizeof(float)),32);
  // NOTE(review): sizeof(r)/alignof(r) report the POINTER's size/alignment,
  // not the buffer's — presumably intentional diagnostics, but worth confirming.
  std::cout << sizeof(r) << " " << alignof(r) << std::endl;
  PerfStat c12, c2, c11, c22;
  c12.header(std::cout,true); std::cout << std::endl;
  // First (cold) timed fill.
  c11.startAll();
  for (int i=0;i!=NN;++i) r[i]=rgen(eng);
  c11.stopAll();
  std::cout << "|rgen " << std::endl;
  c11.print(std::cout);
  // Second (warm) timed fill of the same buffer.
  c12.startAll();
  for (int i=0;i!=NN;++i) r[i]=rgen(eng);
  c12.stopAll();
  std::cout << "|rgen " << std::endl;
  c12.print(std::cout);
  std::cout << std::endl; std::cout << std::endl;
  constexpr int KK=10000;
  bool err=false;
  float s[KK+3];
  // Repeat the reduction KK+3 times; every pass must produce the identical
  // result (err flags any nondeterminism); progress printed every 1000 passes.
  for (int ok=0; ok!=KK+3; ++ok) {
    s[ok]=0;
    c2.start();
    for (int i=0;i!=NN;++i) s[ok]+=r[i];
    c2.stop();
    if (ok>0 && s[ok] != s[ok-1]) err=true;
    if ( (ok%1000)==2) { std::cout << "|sum " << ok << " "; c2.print(std::cout); }
  }
  if (err) std::cout << "a mess " << std::endl;
  std::cout << "end \n" << std::endl;
  c2.print(std::cout);
  ::free(r);
  return 0;
}
/*
 * Cache + virtual-memory simulator.
 *
 * Generates `accesos` random virtual accesses, builds a virtual->physical
 * translation table (`tabla`: page -> {resident flag, on-disk flag,
 * frame/disk index}), replays the accesses through a paged memory backed by
 * memoria.txt/disco.txt and through a set-associative write-back Cache, and
 * finally writes the modified memory and disk images back to their files.
 *
 * Returns {page faults, cache misses}; {0,0} when memoria.txt or disco.txt
 * is empty or missing.
 */
std::pair<int, int> simulacion(int bloques, int bloque_size, int vias, int accesos, int pagina_size) {
  int paginas_disco, paginas_mem, fallos_pagina, fallos_cache, bits_offset, div_virt, div_fisica;
  std::cout << "Numero de bloques: " << bloques << std::endl;
  std::cout << "Tamano de bloque: " << bloque_size << std::endl;
  std::cout << "Numero de vias: " << vias << std::endl;
  std::cout << "Numero de accesos: " << accesos << std::endl;
  std::cout << "Tamanio de pagina: " << pagina_size << std::endl;
  std::cout << "Inicializando...";
  std::random_device rseed;                                        // random seed source
  std::mt19937 rgen(rseed());                                      // mersenne_twister
  std::uniform_int_distribution<int> idist(0, DIR_VIRUTALES - 1);  // virtual addresses [0,4095]
  std::uniform_int_distribution<int> odist(0, 1);                  // read/write flag [0,1]
  std::uniform_int_distribution<int> ddist(0, 255);                // [0,255] (currently unused)
  std::uniform_int_distribution<int> nueva_dist(256, 511);         // data values [256,511]
  /* ins_virtuales[*][x], x: 0 - address, 1 - read/write, 2 - data */
  std::vector<std::vector<int> > ins_virtuales (accesos, std::vector<int> (3,0));
  std::vector<int> memoria (POS_MEMORIA);
  std::vector<int> disco (POS_DISCO);
  t_tabla tabla;
  /* Create the cache */
  Cache mem_cache (vias, bloques, bloque_size);
  /* Initialisation */
  paginas_disco = POS_DISCO / pagina_size;
  paginas_mem = POS_MEMORIA / pagina_size;
  std::uniform_int_distribution<int> mdist(0, paginas_mem-1);      // physical-frame picker
  fallos_pagina = 0;
  fallos_cache = 0;
  bits_offset = bits_para(pagina_size);
  div_virt = potencia(bits_offset);              // divisor that strips the page offset
  div_fisica = potencia(bits_para(bloque_size)); // divisor that strips the block offset
  std::cout << " Inicializacion terminada!" << std::endl;
  std::cout << "Paginas Memoria: " << paginas_mem << std::endl;
  std::cout << "Paginas Disco: " << paginas_disco << std::endl;
  std::cout << "Generando instrucciones..." << std::endl;
  /* Generate the random virtual instructions */
  for (int i = 0; i < accesos; ++i) {
    ins_virtuales[i][0] = idist(rgen);
    ins_virtuales[i][1] = odist(rgen);
    ins_virtuales[i][2] = nueva_dist(rgen);
  }
  std::cout << " Terminado!" << std::endl;
  std::cout << "Generando tabla de traduccion..." << std::endl;
  /* Build the translation table: first pages that may start resident... */
  int contador;
  for (contador = 0; contador < accesos; ++contador) {
    int tmp = ins_virtuales[contador][0]/div_virt;
    if(tabla.size() > paginas_mem) break;
    if(tabla.count(tmp) == 0) {
      tabla[tmp].push_back(odist(rgen)); /* 1 - resident in main memory */
      tabla[tmp].push_back(0);           /* 1 - data on disk, memory full */
      tabla[tmp].push_back(contador);    /* physical address */
    }
  }
  /* ...then the remaining pages live on disk only. */
  for (; contador < accesos; ++contador) {
    int tmp = ins_virtuales[contador][0]/div_virt;
    if(tabla.size() >= (paginas_mem + paginas_disco)) break;
    if(tabla.count(tmp) == 0) {
      tabla[tmp].push_back(0);        /* not in main memory */
      tabla[tmp].push_back(1);        /* data on disk */
      tabla[tmp].push_back(contador); /* disk address */
    }
  }
  std::cout << " Terminado!" << std::endl;
  std::cout << " Tamaño tabla: " << tabla.size() << std::endl;
  /* Read the initial memory and disk images */
  std::ifstream inputmem;
  std::ifstream inputdisc;
  std::string outmem;
  std::string outdisc;
  int valor_io;
  int contador_io = 0;
  std::cout << "Leyendo memoria..." << std::endl;
  inputmem.open("memoria.txt", std::ifstream::in);
  while(inputmem >> valor_io) { memoria[contador_io] = valor_io; contador_io++; }
  inputmem.close();
  std::cout << " Terminado!" << std::endl;
  if (contador_io == 0) {
    std::cout << "Memoria vacia, abortando!" << std::endl;
    return std::make_pair(0,0);
  }
  std::cout << "Leyendo disco..." << std::endl;
  inputdisc.open("disco.txt", std::ifstream::in);
  contador_io = 0;
  while(inputdisc >> valor_io) { disco[contador_io] = valor_io; contador_io++; }
  inputdisc.close();
  std::cout << " Terminado!" << std::endl;
  if (contador_io == 0) {
    std::cout << "Disco vacio, abortando!" << std::endl;
    return std::make_pair(0,0);
  }
  std::cout << "Procesando instrucciones..." << std::endl;
  /* Replay every instruction */
  int dir_fisica, tmp, tmp2;
  std::vector<int> movimiento (bloque_size,0);
  std::vector<int> respuesta_cache;
  for (int i = 0; i < accesos; ++i) {
    /* Translate the virtual address to a page-table index */
    dir_fisica = ins_virtuales[i][0]/div_virt;
    /* Not resident in main memory? */
    if(tabla[dir_fisica][0] == 0) {
      //std::cout << "Fallo Pagina!" << std::endl;
      tabla[dir_fisica][0] = 1;
      fallos_pagina++; // new page fault
      tmp2 = tabla[dir_fisica][2]; // disk address
      /* Not yet assigned a physical frame? */
      if(tabla[dir_fisica][1] == 1) {
        tabla[dir_fisica][1] = 0;
        tmp = mdist(rgen); // new frame assignment
        tabla[dir_fisica][2] = tmp;
        /* page is moved from disk to memory below */
      }
      else tmp = tmp2; // already assigned: disk and memory indices agree
      tmp = tmp * div_virt;
      tmp2 = tmp2 * div_virt;
      for(int j = 0; j < pagina_size; ++j) { memoria[tmp + j] = disco[tmp2 + j]; }
    }
    /* The page is now resident: build the physical address */
    dir_fisica = tabla[dir_fisica][2] * div_virt;
    /* Add the page offset */
    dir_fisica = dir_fisica + (ins_virtuales[i][0] % div_virt);
    /* Preload the containing block in case the cache misses */
    tmp = dir_fisica - (dir_fisica % div_fisica); // strip the block offset
    for (int j = 0; j < bloque_size; ++j) { movimiento[j] = memoria[tmp + j]; }
    /* Read or write */
    if (ins_virtuales[i][1] == 0) {
      //std::cout << "Read" << std::endl;
      respuesta_cache = mem_cache.read_cache(dir_fisica, movimiento);
    } else {
      //::cout << "Write" << std::endl;
      respuesta_cache = mem_cache.write_cache(dir_fisica, movimiento, ins_virtuales[i][2]);
    }
    /* Interpret the cache response: respuesta_cache[0]==1 means hit */
    if(respuesta_cache[0] != 1) fallos_cache++;
    /* respuesta_cache[1]==1: an evicted dirty block must be written back
       (write-back policy); block data follows from index 3 onwards */
    if (respuesta_cache[1] == 1) {
      //std::cout << "write-back" << std::endl;
      tmp = respuesta_cache[2]; // where to write
      tmp = tmp - (tmp % div_fisica); // strip the block offset
      for (int j = 0; j < bloque_size; ++j) { memoria[tmp + j] = respuesta_cache[3+j]; }
    }
  }
  std::cout << " Terminado!" << std::endl;
  std::cout << "Reescribiendo memoria..." << std::endl;
  /* Persist the final memory and disk images */
  std::ofstream ofm ("memoria.txt", std::ofstream::out);
  for (int i = 0; i < POS_MEMORIA; ++i) { ofm << memoria[i] << "\n"; }
  ofm.close();
  std::cout << "Terminado!" << std::endl;
  std::cout << "Reescribiendo disco..." << std::endl;
  std::ofstream ofd ("disco.txt", std::ofstream::out);
  for (int i = 0; i < POS_DISCO; ++i) { ofd << disco[i] << "\n"; }
  ofd.close();
  std::cout << "Terminado!" << std::endl;
  std::cout << fallos_pagina << " " << fallos_cache << std::endl;
  return std::make_pair(fallos_pagina, fallos_cache);
}
/*
 * Builds the composite-likelihood training set: decomposes every training
 * factor graph into acyclic components (`cover_count` independent
 * decompositions per instance) and registers each component as a separate,
 * conditioned training instance.
 *
 * training_data      - (FactorGraph*, observation) pairs; not owned here.
 * inference_methods  - one inference method per training instance
 *                      (sizes must match, asserted below).
 *
 * Side effects: fills fg_cc_var_label / fg_cc_count / fg_orig_index, adds
 * components via AddTrainingComponentCond(), then calls
 * SetupMLETrainingData().  Must be called on a fresh object (asserted).
 */
void MaximumCompositeLikelihood::SetupTrainingData(
  const std::vector<labeled_instance_type>& training_data,
  const std::vector<InferenceMethod*> inference_methods) {
  assert(comp_training_data.size() == 0);
  assert(comp_inference_methods.size() == 0);
  assert(inference_methods.size() == training_data.size());
  // Number of times each component will be covered
  unsigned int cover_count = 1;
  assert(decomp >= -1);
  if (decomp == DecomposePseudolikelihood) {
    cover_count = 1;
  } else if (decomp > 0) {
    cover_count = decomp; // decomp > 0 directly encodes the cover count
  }
  // Produce composite factor graphs
  boost::timer decomp_timer;
  int training_data_size = static_cast<int>(training_data.size());
  fg_cc_var_label.resize(cover_count * training_data_size);
  fg_cc_count.resize(cover_count * training_data_size);
  fg_orig_index.resize(cover_count * training_data_size);
  std::fill(fg_cc_count.begin(), fg_cc_count.end(), 0);
  unsigned int cn = 0; // running index over (instance, cover) pairs
  for (int n = 0; n < training_data_size; ++n) {
    FactorGraph* fg = training_data[n].first;
    size_t var_count = fg->Cardinalities().size();
    // Get observation
    const FactorGraphObservation* obs = training_data[n].second;
    // Obtain one or more decomposition(s)
    for (unsigned int cover_iter = 0; cover_iter < cover_count; ++cover_iter) {
      VAcyclicDecomposition vac(fg);
      std::vector<bool> factor_is_removed;
      if (decomp == DecomposePseudolikelihood) {
        // Pseudolikelihood: every factor is removed
        factor_is_removed.resize(fg->Factors().size());
        std::fill(factor_is_removed.begin(), factor_is_removed.end(), true);
      } else {
        std::vector<double> factor_weight(fg->Factors().size(), 0.0);
        if (decomp == DecomposeUniform) {
          // Use constant weights
          std::fill(factor_weight.begin(), factor_weight.end(), 1.0);
        } else {
          // Use uniform random weights
          boost::uniform_real<double> uniform_dist(0.0, 1.0);
          boost::variate_generator<boost::mt19937&, boost::uniform_real<double> > rgen(RandomSource::GlobalRandomSampler(), uniform_dist);
          for (unsigned int fi = 0; fi < factor_weight.size(); ++fi) factor_weight[fi] = rgen();
        }
        vac.ComputeDecompositionSP(factor_weight, factor_is_removed);
      }
      // Shatter factor graph into trees
      fg_cc_count[cn] += FactorGraphStructurizer::ConnectedComponents( fg, factor_is_removed, fg_cc_var_label[cn]);
#if 0
      std::cout << "MCL, instance " << n << " decomposed into " << cc_count << " components" << std::endl;
#endif
      // Add each component as separate factor graph
      for (unsigned int ci = 0; ci < fg_cc_count[cn]; ++ci) {
        std::vector<unsigned int> cond_var_set;
        cond_var_set.reserve(var_count);
        // Add all variables not in this component to the conditioning set
        for (size_t vi = 0; vi < var_count; ++vi) {
          if (fg_cc_var_label[cn][vi] != ci) cond_var_set.push_back(static_cast<unsigned int>(vi));
        }
        AddTrainingComponentCond(fg, obs, inference_methods[n], cond_var_set);
      }
      fg_orig_index[cn] = n; // remember which original instance this cover came from
      cn += 1;
    }
  }
  std::cout << "MCL, decomposed " << training_data.size() << " instances " << "into " << comp_training_data.size() << " instances " << (decomp == DecomposeUniform ? "(uniform)" : "(randomized)") << " in " << decomp_timer.elapsed() << "s." << std::endl;
  // Initialize MLE training data from created components
  SetupMLETrainingData();
}
/*
 * Light-curve generator: evolves blackbody (T) and power-law (a, b)
 * parameters for tnum objects over tate[81] days, alternating small and
 * large parameter variations with randomly drawn periods, and writes the
 * sampled fluxes r, j to "samp_hb_t2.dat" at the epochs listed in tate[].
 *
 * Fixes vs. previous revision:
 *  - the negative-flux check inside the evolution loop tested list[0].r[k]
 *    (day 0) instead of the day just generated (list[i].r[k]);
 *  - fopen() result is now checked before use;
 *  - explicit `int` return type (implicit int is invalid since C99).
 */
int main() {
  printf("%d\n", tate[81]);
  obj list[tate[81]+1];
  int i, k, modebb[tnum], modepl[tnum], modebbgen[tnum], modeplgen[tnum], internalcounter=0;

  /* Day-0 initialisation: draw fresh parameters for every object. */
  list[0].time = 0;
  for (k=0;k<tnum;k++) {
    list[0].T[k] = tempgen(T_avg,5e6);                /* bb param */
    printf("T[%d] = %e\n", k, list[0].T[k]);
    list[0].b[k] = plawexp(0,1);                      /* oldval = 0, condition = 1 -> pl param exp */
    printf("b[%d] = %e\n", k, list[0].b[k]);
    double totflux_c = totflux(limlowf,limhighf,list[0].T[k]);             /* bb flux */
    list[0].a[k] = totflux_c/powerlawint(limlowf,limhighf,list[0].b[k]);   /* pl param normalizer */
    printf("a[%d] = %e\n", k, list[0].a[k]);
    list[0].r[k] = rgen(list[0].T[k],list[0].a[k],list[0].b[k]);
    list[0].j[k] = jgen(list[0].T[k],list[0].a[k],list[0].b[k]);
    if (list[0].r[k] < 0) {printf("negative here! %d\n", k);}
    printf("%e\n", list[0].r[k]);
    printf("%e\n", list[0].j[k]);
    printf("Generated for object %d.\n", k+1);
  }

  /* Variation-mode counters: each object switches from small to large
     variations after a randomly drawn period. */
  for (i=0;i<tnum;i++) {
    modebb[i] = 0;
    modepl[i] = 0;
    modebbgen[i] = bb_var_period();
    modeplgen[i] = pl_var_period();
  }

  /* Evolution: one entry per day, parameters derived from the previous day. */
  for (i=1;i<=tate[81];i++) {
    list[i].time = list[0].time + i;
    printf("Working for day %d.\n", i+1);
    for (k=0;k<tnum;k++) {
      if (modebb[k]!=modebbgen[k]) {   /* small variation of BB */
        list[i].T[k] = tempgen(list[i-1].T[k], T_var_small);
        modebb[k] += 1;
      } else {                         /* large variation of BB */
        list[i].T[k] = tempgen(list[i-1].T[k], T_var_large);
        modebb[k] = 0;
        modebbgen[k] = bb_var_period();
      }
      if (modepl[k]!=modeplgen[k]) {   /* small variation of PL */
        list[i].b[k] = plawexp(list[i-1].b[k], 0);
        list[i].a[k] = aagen(list[i-1].a[k], 0);  /* t2 */
        modepl[k] += 1;
      } else {                         /* large variation of PL */
        list[i].b[k] = plawexp(0,1);
        list[i].a[k] = aagen(list[i-1].a[k], 1);  /* t2 */
        modepl[k] = 0;
        modeplgen[k] = pl_var_period();
      }
      /* (t1 normalisation variants kept for reference)
      double totflux_c = totflux(limlowf,limhighf,list[i].T[k]);
      list[i].a[k] = totflux_c/powerlawint(limlowf,limhighf,list[i].b[k]);
      list[i].a[k] = list[i-1].a[k] + ((list[i].a[k] - list[i-1].a[k])/4);
      */
      list[i].r[k] = rgen(list[i].T[k],list[i].a[k],list[i].b[k]);
      list[i].j[k] = jgen(list[i].T[k],list[i].a[k],list[i].b[k]);
      /* BUGFIX: check the flux generated for THIS day, not day 0. */
      if (list[i].r[k] < 0) {printf("negative here! %d\n", k);}
    }
  }

  /* File generation: one row per epoch listed in tate[]. */
  FILE *fp = fopen("samp_hb_t2.dat", "w+");
  if (!fp) {                       /* BUGFIX: fopen() was unchecked */
    printf("cannot open samp_hb_t2.dat\n");
    return 1;
  }
  fprintf(fp,"# date ");
  for (k=0;k<tnum;k++) {
    fprintf(fp,"r[%d] j[%d] ", k, k);
    printf("%d\n", k);
  }
  fprintf(fp,"\n");
  for (i=0;i<=tate[81];i++) {
    if (i==tate[internalcounter]) {
      fprintf(fp,"%lf ", list[i].time);
      for (k=0;k<tnum;k++) {
        fprintf(fp,"%e %e ", list[i].r[k], list[i].j[k]);
      }
      fprintf(fp, "\n");
      internalcounter++;
    }
  }
  fclose(fp);
  return 0;
}
quest24::Print(TList* plist, class test &t) { drobi mtr[10][10]; int i, j, k, l, n; int ma[10][10], A[10][10]; int det_A, det_B; int Right_Numb; char* buf = new char[256]; char* buf1 = new char[256]; if( keygen == 0 ) { keygen = random( 1000 ) + 1; } srand( keygen ); Right_Numb = random( 5 ) + 1; for( i = 0; i < dim; i++ ) { for( j = 0; j < dim; j++ ) { ma[i][j] = 0; } } while( !determ( dim, ma ) ) for( i = 0; i < dim; i++ ) { for( j = 0; j < dim; j++ ) { ma[i][j] = rgen( keygen, 1, amin, amax); } } sprintf( buf, "String(\"# Тема - %s \")", selecttask->name ); plist->Add( strdup(buf) ); sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad ); plist->Add( strdup(buf) ); sprintf( buf, "String(Найти обратную матрцу к матрице:)" ); plist->Add( strdup(buf) ); sprintf( buf, "A=!(Matrix(%d,%d", dim, dim ); for ( i = 0; i < dim; i ++ ) for ( j = 0; j < dim; j ++ ) { sprintf( buf1, ",%d", ma[i][j] ); strcat( buf, buf1 ); } strcat( buf, "))" ); plist->Add( strdup(buf) ); sprintf( buf, "pow(A,-1)=..." 
); plist->Add( strdup(buf) ); sprintf( buf, "String(Варианты ответов: )" ); plist->Add( strdup(buf) ); for ( n = 0; n < 5; n ++ ) { sprintf( buf, "String(\"Вариант %c):\")", 'a' + n ); plist->Add( strdup(buf) ); det_A = determ ( dim, ma ); if ( n != Right_Numb - 1 ) det_A = det_A + ( random ( 20 ) - 10 ); for ( i = 0; i < dim; i ++ ) for ( j = 0; j < dim; j ++ ) { int ai, aj; ai = 0; aj = 0; for ( k = 0; k < dim; k ++ ) { if ( k == j ) continue; else { for ( l = 0; l < dim; l ++ ) { if ( l != i ) { A[ai][aj] = ma[k][l]; aj ++; } else continue; } ai ++; aj = 0; } } det_B = determ( dim - 1, A ); if ( n != Right_Numb - 1 ) det_B = det_B + ( random ( 20 ) - 10 ); mtr[i][j] = drobi ( pow( -1, i + j ) * det_B, det_A ); mtr[i][j].sokrat(); } sprintf( buf, "pow(A,-1)=!(Matrix(%d,%d", dim, dim ); for ( i = 0; i < dim; i ++ ) for ( j = 0; j < dim; j ++ ) { sprintf( buf1, ",%s", DrobiToStr( mtr[i][j] ) ); strcat( buf, buf1 ); } strcat( buf, "))" ); plist->Add( strdup(buf) ); } /*sprintf(buf,"String(@Часть преподавателя )"); plist->Add(strdup(buf)); sprintf(buf,"String(\"Тема - %s \")",selecttask->name); plist->Add(strdup(buf)); sprintf(buf,"String(ВАРИАНТ %i, решение задачи %i, ключ %i)",nvar,nzad,keygen); plist->Add(strdup(buf)); sprintf(buf,"String( Правильный ответ - %c)", 'a' + Right_Numb - 1 ); plist->Add(strdup(buf)); GenHtml->Right_Number = Right_Numb;*/ t.pr_tst = 1; t.ch_ask = 5; t.right_ask = Right_Numb; t.msg = "Тест успешно сгенерирован."; keygen = 0; delete buf; delete buf1; return 0; }
quest15::Print(TList* plist) { int a, b, i; double c; drobi d; char * buf = new char[256]; if( keygen == 0 ) { keygen = random( 1000 ) + 1; } srand( keygen ); a = rgen(keygen, 1, amin, amax ); b = rgen(keygen, 1, amin, amax ); if( !a ) a ++; if( !b ) b ++; a = abs (a); b = abs (b); if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) { sprintf( buf, "String(\"# Тема - %s \")", selecttask->name ); plist->Add( strdup(buf) ); } else { sprintf( buf, "String(#)" ); plist->Add( strdup(buf) ); } sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad ); plist->Add( strdup(buf) ); if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) { sprintf( buf, "String(Выписать каноническое уравнение гиперболы зная её полуоси.)" ); plist->Add( strdup(buf) ); sprintf( buf, "String(Найти расстояние между фокусами этой гиперболы.)" ); plist->Add( strdup(buf) ); sprintf( buf, "String(Вычислить эксцентриситет этой гипербоы.)" ); plist->Add( strdup(buf) ); sprintf( buf, "String(Найти acимптоты этой гиперболы.)" ); plist->Add( strdup(buf) ); } sprintf( buf, "a=%d", a ); plist->Add( strdup(buf) ); sprintf( buf, "b=%d", b ); plist->Add( strdup(buf) ); sprintf( buf, "String(@Часть преподавателя )" ); plist->Add( strdup(buf) ); sprintf( buf, "String(\"Тема - %s \")", selecttask->name ); plist->Add( strdup(buf) ); sprintf( buf, "String(ВАРИАНТ %i, решение задачи %i, ключ %i)", nvar, nzad, keygen ); plist->Add( strdup(buf) ); c = sqrt( a*a + b*b ); //e = c / a; sprintf( buf, "String(Искомое уравнение: )" ); plist->Add( strdup(buf) ); sprintf( buf, "(x^2)/%d-(y^2)/%d=1", a*a, b*b ); plist->Add( strdup(buf) ); sprintf( buf, "String(Расстояние между фокусами:)" ); plist->Add( strdup(buf) ); if ( (ceil(c)) == c ) sprintf( buf, "c=%f", c ); else sprintf( buf, "c=sqrt(%d)", a*a + b*b ); plist->Add( strdup(buf) ); sprintf( buf, "String(Эксцентриситет гиперболы:)" ); plist->Add( strdup(buf) ); if ( (ceil(c)) == c ) { d = drobi( (int)c, a ); sprintf( buf, "e=%s", DrobiToStr( d ) ); //sprintf( buf, "e=%f", e ); 
} else sprintf( buf, "e=sqrt(%d)/%d", a*a + b*b,a ); plist->Add( strdup(buf) ); sprintf( buf, "String(Асимптоты гиперболы:)" ); plist->Add( strdup(buf) ); d = drobi( b, a ); /*if ( d.b == 1 ) sprintf( buf, "y=+-%f*x", d.c ); else sprintf( buf, "y=+-(%d/%d)*x", b, a );*/ if( d.znak > 0 ) if( d.c != 1) sprintf( buf, "y=+-(%s)*x", DrobiToStr( d ) ); else sprintf( buf, "y=+-x" ); else sprintf( buf, "y=+%s*x", DrobiToStr( d ) ); plist->Add( strdup(buf) ); /* Graphics::TBitmap* bmv = new Graphics::TBitmap(); y0 = funk( -100, a, b ); bmv->Width = 300; bmv->Height = 2 * y0 + 20; y = funk( -100, a, b); bmv->Canvas->MoveTo(0, 100 - y); for( i = -100; i <= -a; i ++ ) { y = funk( i, a, b); draw(100,y0,i,y,bmv); } for( i = -a; i >= -100; i -- ) { y = -funk( i, a, b); draw(100,y0,i,y,bmv); } y = funk( 100, a, b); bmv->Canvas->MoveTo(200, 100 - y); for( i = 100; i >= a; i -- ) { y = funk( i, a, b); draw(100,y0,i,y,bmv); } for( i = a; i <= 100; i ++ ) { y = -funk( i, a, b); draw(100,y0,i,y,bmv); } y = gline( -100, a, b); bmv->Canvas->MoveTo(0, y0 - y); y = gline(100,a,b); draw(100,y0,100,y,bmv); y = -gline( -100, a, b); bmv->Canvas->MoveTo(0, y0 - y); y = -gline(100,a,b); draw(100,y0,100,y,bmv); bmv->Canvas->MoveTo(100,0); bmv->Canvas->LineTo(100,2 * y0); bmv->Canvas->MoveTo(0,y0); bmv->Canvas->LineTo(200,y0); sprintf(buf,"Гипербола ВАРИАНТА %i, решение задачи %i, ключ %i",nvar,nzad,keygen); bmv->Canvas->TextOutA(0, 2 * y0, buf); bmv->SaveToFile("plane.bmp"); delete bmv;*/ keygen = 0; delete buf; return 0; }
quest15::Print(TList* plist, class test &t) { int a, b, i, y; int n, Right_Numb; double c; drobi d; char * buf = new char[256]; char * buf1 = new char[256]; if( keygen == 0 ) { keygen = random( 1000 ) + 1; } Right_Numb = random( 5 ) + 1; srand( keygen ); a = rgen( keygen, 1, amin, amax ); b = rgen( keygen, 1, amin, amax ); if( !a ) a ++; if( !b ) b ++; a = abs( a ); b = abs( b ); if( a <= b ) { if( a * a < 5 ) Right_Numb = a * a; } else { if( b * b < 5 ) Right_Numb = b * b; } sprintf( buf, "String(\"# Тема - %s \")", selecttask->name ); plist->Add( strdup(buf) ); sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad ); plist->Add( strdup(buf) ); sprintf( buf, "String(Выписать каноническое уравнение гиперболы зная её полуоси.)" ); plist->Add( strdup(buf) ); sprintf( buf, "String(Найти расстояние между фокусами этой гиперболы.)" ); plist->Add( strdup(buf) ); sprintf( buf, "String(Вычислить эксцентриситет этой гипербоы.)" ); plist->Add( strdup(buf) ); sprintf( buf, "String(Найти acимптоты этой гиперболы.)" ); plist->Add( strdup(buf) ); sprintf( buf, "a=%d", a ); plist->Add( strdup(buf) ); sprintf( buf, "b=%d", b ); plist->Add( strdup(buf) ); for( n = 0; n < 5; n ++ ) { c = sqrt( a * a + b * b - ( Right_Numb - 1 ) + n ); sprintf( buf, "String(\"Вариант %c):\")", 'a' + n ); plist->Add( strdup(buf) ); sprintf( buf, "String(Искомое уравнение: )" ); plist->Add( strdup(buf) ); /*sprintf( buf, "(x^2)/%d-(y^2)/%d=1", a * a - ( Right_Numb - 1 ) + n, b * b - ( Right_Numb - 1 ) + n ); plist->Add( strdup(buf) );*/ if( a * a - ( Right_Numb - 1 ) + n > 1 ) sprintf( buf, "(x^2)/%d", a * a - ( Right_Numb - 1 ) + n ); else sprintf( buf, "x^2" ); if( b * b - ( Right_Numb - 1 ) + n > 1 ) sprintf( buf1, "-(y^2)/%d=1", b * b - ( Right_Numb - 1 ) + n ); else sprintf( buf1, "-y^2=1" ); strcat( buf, buf1 ); plist->Add( strdup(buf) ); sprintf( buf, "String(Расстояние между фокусами:)" ); plist->Add( strdup(buf) ); if ( (ceil(c)) == c ) sprintf( buf, "c=%f", c ); else sprintf( buf, 
"c=sqrt(%d)", a * a + b * b - ( Right_Numb - 1 ) + n ); plist->Add( strdup(buf) ); sprintf( buf, "String(Эксцентриситет гиперболы:)" ); plist->Add( strdup(buf) ); if ( (ceil(c)) == c ) { d = drobi( (int)c, a ); sprintf( buf, "e=%s", DrobiToStr( d ) ); } else sprintf( buf, "e=sqrt(%d)/%d", a * a + b * b - ( Right_Numb - 1 ) + n, a ); plist->Add( strdup(buf) ); sprintf( buf, "String(Асимптоты гиперболы:)" ); plist->Add( strdup(buf) ); d = drobi( b - ( Right_Numb - 1 ) + n, a ); if( d.znak > 0 ) { if( !d.c ) sprintf( buf, "y=0" ); else if( d.c != 1) sprintf( buf, "y=+-(%s)*x", DrobiToStr( d ) ); else sprintf( buf, "y=+-x" ); } else sprintf( buf, "y=+%s*x", DrobiToStr( d ) ); plist->Add( strdup(buf) ); } /*sprintf(buf,"String(@Часть преподавателя )"); plist->Add(strdup(buf)); sprintf(buf,"String(\"Тема - %s \")",selecttask->name); plist->Add(strdup(buf)); sprintf(buf,"String(ВАРИАНТ %i, решение задачи %i, ключ %i)", nvar, nzad, keygen ); plist->Add(strdup(buf)); sprintf(buf,"String( Правильный ответ - %c)", 'a' + Right_Numb - 1 ); plist->Add(strdup(buf));*/ t.pr_tst = 1; t.ch_ask = 5; t.right_ask = Right_Numb; t.msg = "Тест успешно сгенерирован."; keygen = 0; delete buf; delete buf1; return 0; }
/*
 * Minimizes a finite-sum objective by stochastic subgradient descent.
 *
 * prob       - supplies dimensionality, element count N, starting point and
 *              per-element Eval(id, x, grad).
 * x_opt      - out: final iterate.
 * conv_tol   - stop when the epoch's accumulated-gradient norm falls below it.
 * max_epochs - epoch limit; 0 means run until convergence.
 * verbose    - print per-epoch statistics (header every 20 epochs).
 *
 * Returns the last epoch's accumulated objective (sum over the N sampled
 * evaluations; stochastic estimate, not an exact objective value).
 */
double StochasticFunctionMinimization::StochasticSubgradientMethodMinimize(
  StochasticFunctionMinimizationProblem& prob, std::vector<double>& x_opt,
  double conv_tol, unsigned int max_epochs, bool verbose) {
  unsigned int dim = prob.Dimensions();
  std::vector<double> grad(dim, 0.0);
  // Initialize x
  std::vector<double> x(dim);
  prob.ProvideStartingPoint(x);
  // Random instance generator: uniform over element ids [0, N-1],
  // seeded from wall-clock time.
  size_t N = prob.NumberOfElements();
  assert(N > 0);
  boost::mt19937 rgen(static_cast<const boost::uint32_t>(std::time(0))+1);
  boost::uniform_int<unsigned int> rdestd(0, static_cast<unsigned int>(N-1));
  boost::variate_generator<boost::mt19937, boost::uniform_int<unsigned int> > rand_n(rgen, rdestd);
  // Optimize a given number of epochs
  boost::timer total_timer;
  std::vector<double> avg_grad(dim, 0.0);
  double lambda = 1.0; // (should be lambda=1/C)
  double avg_obj = 0.0;
  for (unsigned int epoch = 0; max_epochs == 0 || epoch < max_epochs; ++epoch) {
    avg_obj = 0.0;
    std::fill(avg_grad.begin(), avg_grad.end(), 0.0);
    // Choose epoch-wide step size (1/t schedule)
    double alpha = 1.0 / (static_cast<double>(epoch + 1) * lambda);
    // Optimize by sampling instances (N samples with replacement per epoch)
    for (size_t n = 0; n < N; ++n) {
      unsigned int id = rand_n();
      // Update average objective and averaged gradient of this epoch
      avg_obj += prob.Eval(id, x, grad);
      std::transform(grad.begin(), grad.end(), avg_grad.begin(), avg_grad.begin(), std::plus<double>());
      // Perform incremental subgradient update
      for (unsigned int d = 0; d < dim; ++d) x[d] -= alpha * grad[d];
    }
    // Compute mean gradient and estimated objective
    double avg_grad_norm = 0.0;
    for (unsigned int d = 0; d < dim; ++d) avg_grad_norm += avg_grad[d]*avg_grad[d];
    avg_grad_norm = std::sqrt(avg_grad_norm);
    // Output statistics
    if (verbose && (epoch % 20 == 0)) {
      std::cout << std::endl;
      std::cout << " iter time avg_obj |avg_grad|" << std::endl;
    }
    if (verbose) {
      // Save stream state; restored after the formatted row below.
      std::ios_base::fmtflags original_format = std::cout.flags();
      std::streamsize original_prec = std::cout.precision();
      // Iteration
      std::cout << std::setiosflags(std::ios::left) << std::setiosflags(std::ios::adjustfield) << std::setw(6) << epoch << " ";
      // Total runtime
      std::cout << std::setiosflags(std::ios::left) << std::resetiosflags(std::ios::scientific) << std::setiosflags(std::ios::fixed) << std::setiosflags(std::ios::adjustfield) << std::setprecision(1) << std::setw(6) << total_timer.elapsed() << "s ";
      std::cout << std::resetiosflags(std::ios::fixed);
      // Objective function
      std::cout << std::setiosflags(std::ios::scientific) << std::setprecision(5) << std::setiosflags(std::ios::left) << std::setiosflags(std::ios::showpos) << std::setw(7) << avg_obj << " ";
      // Gradient norm
      std::cout << std::setiosflags(std::ios::scientific) << std::setprecision(2) << std::resetiosflags(std::ios::showpos) << std::setiosflags(std::ios::left) << avg_grad_norm;
      std::cout << std::endl;
      std::cout.precision(original_prec);
      std::cout.flags(original_format);
    }
    // Convergence check
    if (avg_grad_norm < conv_tol) break;
  }
  x_opt = x;
  return (avg_obj); // This is not exact, but stochastic anyway
}
quest18::Print(TList* plist, class test &t) { int i, j, k; int p[4], a[3], b[3], c[3]; int M[3], matr[2][2]; int n, Right_Numb; double absc, absa; drobi d; char * buf = new char[256]; char * buf1 = new char[256]; if( keygen == 0 ) { keygen = random( 1000 ) + 1; } Right_Numb = random( 5 ) + 1; srand( keygen ); for( i = 0; i < 4; i ++ ) { a[i] = rgen( keygen, 1, amin, amax ); M[i] = rgen( keygen, 1, amin, amax ); p[i] = rgen( keygen, 1, amin, amax ); while ( p[i] == M[i] ) p[i] = rgen( keygen, 1, amin, amax ); } sprintf( buf, "String(\"# Тема - %s \")", selecttask->name ); plist->Add( strdup(buf) ); sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad ); plist->Add( strdup(buf) ); sprintf( buf, "String(Найти расстояние от точки до прямой.)" ); plist->Add( strdup(buf) ); sprintf( buf, "String(Прямая задана уравнением:)" ); plist->Add( strdup(buf) ); if( M[0] ) sprintf( buf, "(x%+d)/%d=", -M[0], a[0] ); else sprintf( buf, "x/%d=", a[0] ); if( M[1] ) sprintf( buf1, "(y%+d)/%d=", -M[1], a[1] ); else sprintf( buf1, "y/%d=", a[1] ); strcat( buf, buf1 ); if( M[2] ) sprintf( buf1, "(z%+d)/%d", -M[2], a[2] ); else sprintf( buf1, "z/%d", a[2] ); strcat( buf, buf1 ); plist->Add( strdup(buf) ); sprintf( buf, "String(Координаты точки:)" ); plist->Add( strdup(buf) ); sprintf( buf, "A!(%d", p[0] ); for( i = 1; i < 3; i ++ ) { sprintf( buf1, ",%d", p[i] ); strcat( buf, buf1 ); } strcat( buf, ")" ); plist->Add( strdup(buf) ); sprintf( buf, "d=..." 
); plist->Add( strdup(buf) ); for ( i = 0; i < 3; i++ ) b[i] = M[i] - p[i]; for (i = 0; i < 3; i ++) { k = 0; for ( j = 0; j < 3; j ++ ) { if ( j != i ) { matr[0][k] = a[j]; matr[1][k] = b[j]; k ++; } } c[i] = pow ( -1, i ) * determ( 2, matr ); } absc = 0; for ( i = 0; i < 3; i ++ ) absc += c[i] * c[i]; absa = 0; for ( i = 0; i < 3; i ++ ) absa += a[i] * a[i]; if( absc < 5 ) Right_Numb = 1; for( n = 0; n < 5; n ++ ) { sprintf( buf, "String(\"Вариант %c):\")", 'a' + n ); plist->Add( strdup(buf) ); if ( !( absc - ( Right_Numb - 1 ) + n ) ) sprintf( buf, "d=0" ); else if ( ( ceil( sqrt( absc - ( Right_Numb - 1 ) + n ) ) ) == ( sqrt( absc - ( Right_Numb - 1 ) + n ) ) && ( ceil( sqrt( absa ) ) ) == ( sqrt( absa ) ) ) { d = drobi( sqrt( absc - ( Right_Numb - 1 ) + n ), sqrt( absa ) ); //sprintf( buf, "d=%d/%d", d.a, d.b ); sprintf( buf, "d=%s", DrobiToStr( d ) ); } else if ( ( ceil( sqrt( absc - ( Right_Numb - 1 ) + n ) ) ) == ( sqrt( absc - ( Right_Numb - 1 ) + n ) ) ) sprintf( buf, "d=%f/sqrt(%f)", sqrt( absc - ( Right_Numb - 1 ) + n ), absa ); else if ( ( ceil( sqrt( absa ) ) ) == ( sqrt( absa ) ) ) sprintf( buf, "d=sqrt(%f)/%f", absc - ( Right_Numb - 1 ) + n, sqrt( absa ) ); else sprintf( buf, "d=sqrt(%f)/sqrt(%f)", absc - ( Right_Numb - 1 ) + n, absa ); plist->Add( strdup(buf) ); } /*sprintf(buf,"String(@Часть преподавателя )"); plist->Add(strdup(buf)); sprintf(buf,"String(\"Тема - %s \")",selecttask->name); plist->Add(strdup(buf)); sprintf(buf,"String(ВАРИАНТ %i, решение задачи %i, ключ %i)", nvar, nzad, keygen ); plist->Add(strdup(buf)); sprintf(buf,"String( Правильный ответ - %c)", 'a' + Right_Numb - 1 ); plist->Add(strdup(buf));*/ t.pr_tst = 1; t.ch_ask = 5; t.right_ask = Right_Numb; t.msg = "Тест успешно сгенерирован."; keygen = 0; delete buf; delete buf1; return 0; }
bool load_nonface_patch(std::vector< std::unique_ptr<NeuralNet::Image> >& images, size_t num_image, size_t train_set) { const size_t set_size = 2000; const size_t patch_size = 32; const size_t sample_per_img = 10; const size_t max_sample_per_img = 1000; const float var_thresh = 0.0007; size_t lbound = set_size * train_set + 1; size_t ubound = lbound + set_size; std::vector<size_t> idxes; for (size_t i = lbound; i < ubound; i++) idxes.push_back(i); std::random_device rd; std::mt19937 rgen(rd()); std::shuffle(idxes.begin(), idxes.end(), rgen); size_t num_failed_imgs = 0; size_t num_accept_imgs = 0; size_t img_count = 0; size_t loaded_patch = 0; while (loaded_patch < num_image && img_count < idxes.size()) { std::ostringstream oss; oss << "image-nonface/img_" << idxes[img_count] << ".bmp"; auto img_ptr = NeuralNet::loadBitmapImage(oss.str().c_str()); if (!img_ptr) { std::cout << "missing file " << oss.str() << std::endl; return false; } std::uniform_int_distribution<size_t> dis_w(0, img_ptr->getWidth() - patch_size); std::uniform_int_distribution<size_t> dis_h(0, img_ptr->getHeight() - patch_size); size_t added_patch = 0; for (size_t i=0; i<max_sample_per_img && added_patch < sample_per_img && added_patch + loaded_patch < num_image; i++) { auto cropImage = NeuralNet::cropImage( img_ptr, dis_w(rgen), dis_h(rgen), patch_size, patch_size); auto grayImage = NeuralNet::grayscaleImage(cropImage); if (NeuralNet::getVariance(grayImage) <= var_thresh) { num_failed_imgs++; } else { num_accept_imgs++; } if (NeuralNet::getVariance(grayImage) <= var_thresh) continue; images.push_back(preprocessImage(cropImage)); added_patch++; } loaded_patch += added_patch; img_count++; } std::cout << "accept=" << num_accept_imgs << " reject=" << num_failed_imgs << std::endl; if (loaded_patch == num_image) return true; return false; }
typename std::shared_ptr<ModelPair> operator()(const std::vector< std::shared_ptr<ModelPair> >& pairs)const { tools::rgen_int_t rgen(0, pairs.size()-1); int ind = rgen.rand(); return pairs[ind]; }
// Builds the printable (teacher) version of the "distance from a point to a
// line in 3D" exercise into `plist`, including the "@Часть преподавателя"
// solution section with the computed answer.
// When qvar->MZad is set (multi-task variant) the common headings are only
// printed for the first variant. Returns 0 (return type declared at the
// definition site).
quest18::Print(TList* plist)
{
    int i, j, k;
    int p[4], a[3], b[3], c[3];
    int M[3], matr[2][2];
    double absc, absa;
    drobi d;
    char * buf = new char[256];
    char * buf1 = new char[256];

    // Lazily pick a task key so the same task can be regenerated.
    if( keygen == 0 ) {
        keygen = random( 1000 ) + 1;
    }
    srand( keygen );

    // FIX: the loop previously ran to i < 4, writing a[3] and M[3] past the
    // end of the 3-element arrays (undefined behavior). Only indices 0..2
    // are used below, and nothing after the loop consumes rgen().
    for( i = 0; i < 3; i ++ ) {
        a[i] = rgen( keygen, 1, amin, amax ); // line direction component
        M[i] = rgen( keygen, 1, amin, amax ); // point on the line
        p[i] = rgen( keygen, 1, amin, amax ); // the free point
        while ( p[i] == M[i] )
            p[i] = rgen( keygen, 1, amin, amax );
    }

    if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) {
        sprintf( buf, "String(\"# Тема - %s \")", selecttask->name );
        plist->Add( strdup(buf) );
    }
    else {
        sprintf( buf, "String(#)" );
        plist->Add( strdup(buf) );
    }
    sprintf( buf, "String(Вариант %i, задача %i.)", nvar, nzad );
    plist->Add( strdup(buf) );
    if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) {
        sprintf( buf, "String(Найти расстояние от точки до прямой.)" );
        plist->Add( strdup(buf) );
        sprintf( buf, "String(Прямая задана уравнением:)" );
        plist->Add( strdup(buf) );
    }

    // Render the canonical line equation, dropping "(x-0)" style terms.
    if( M[0] )
        sprintf( buf, "(x%+d)/%d=", -M[0], a[0] );
    else
        sprintf( buf, "x/%d=", a[0] );
    if( M[1] )
        sprintf( buf1, "(y%+d)/%d=", -M[1], a[1] );
    else
        sprintf( buf1, "y/%d=", a[1] );
    strcat( buf, buf1 );
    if( M[2] )
        sprintf( buf1, "(z%+d)/%d", -M[2], a[2] );
    else
        sprintf( buf1, "z/%d", a[2] );
    strcat( buf, buf1 );
    plist->Add( strdup(buf) );

    if ( !qvar->MZad || ( qvar->MZad && nvar == 1 ) ) {
        sprintf( buf, "String(Координаты точки:)" );
        plist->Add( strdup(buf) );
    }
    sprintf( buf, "A!(%d", p[0] );
    for( i = 1; i < 3; i ++ ) {
        sprintf( buf1, ",%d", p[i] );
        strcat( buf, buf1 );
    }
    strcat( buf, ")" );
    plist->Add( strdup(buf) );
    sprintf( buf, "d=..." );
    plist->Add( strdup(buf) );

    // Teacher section with the worked answer.
    sprintf( buf, "String(@Часть преподавателя )" );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(\"Тема - %s \")", selecttask->name );
    plist->Add( strdup(buf) );
    sprintf( buf, "String(ВАРИАНТ %i, решение задачи %i, ключ %i)", nvar, nzad, keygen );
    plist->Add( strdup(buf) );

    // b = M - p; c = a x b via 2x2 cofactor determinants.
    for ( i = 0; i < 3; i++ )
        b[i] = M[i] - p[i];
    for (i = 0; i < 3; i ++) {
        k = 0;
        for ( j = 0; j < 3; j ++ ) {
            if ( j != i ) {
                matr[0][k] = a[j];
                matr[1][k] = b[j];
                k ++;
            }
        }
        c[i] = pow ( -1, i ) * determ( 2, matr );
    }

    // |a x b|^2 and |a|^2; distance d = sqrt(absc)/sqrt(absa).
    absc = 0;
    for ( i = 0; i < 3; i ++ )
        absc += c[i] * c[i];
    absa = 0;
    for ( i = 0; i < 3; i ++ )
        absa += a[i] * a[i];

    // Print the answer in the simplest exact form available.
    if ( !absc )
        sprintf( buf, "d=0" );
    else if ( ( ceil( sqrt( absc ) ) ) == ( sqrt( absc ) ) && ( ceil( sqrt( absa ) ) ) == ( sqrt( absa ) ) ) {
        // Both square roots are exact: show a reduced fraction.
        d = drobi( sqrt( absc ), sqrt( absa ) );
        //sprintf( buf, "d=%d/%d", d.a, d.b );
        sprintf( buf, "d=%s", DrobiToStr( d ) );
    }
    else if ( ( ceil( sqrt( absc ) ) ) == ( sqrt( absc ) ) )
        sprintf( buf, "d=%f/sqrt(%f)", sqrt( absc ), absa );
    else if ( ( ceil( sqrt( absa ) ) ) == ( sqrt( absa ) ) )
        sprintf( buf, "d=sqrt(%f)/%f", absc, sqrt( absa ) );
    else
        sprintf( buf, "d=sqrt(%f)/sqrt(%f)", absc, absa );
    plist->Add( strdup(buf) );

    keygen = 0;
    // FIX: buffers come from new[], so they must be released with delete[]
    // (plain delete on array-new is undefined behavior).
    delete [] buf;
    delete [] buf1;
    return 0;
}
// Builds a conditional probability estimator P(class | x) for a continuous
// attribute by loess-style locally weighted regression: for each reference
// point in `xpoints` it fits the class distribution from a sliding window of
// neighbouring observed x-values, weighted by a tricube kernel, and also
// estimates per-class variances. The math follows the reference URL cited
// below (Cohn et al., JAIR 1996, eq. (10) area).
// Only `frequencies` is used; the remaining parameters are ignored.
// Raises on a non-continuous conditioning attribute or an empty distribution.
PConditionalProbabilityEstimator TConditionalProbabilityEstimatorConstructor_loess::operator()(PContingency frequencies, PDistribution, PExampleGenerator, const long &, const int &) const
{
  // The conditioning (outer) attribute must be continuous.
  if (frequencies->varType != TValue::FLOATVAR)
    if (frequencies->outerVariable)
      raiseError("attribute '%s' is not continuous", frequencies->outerVariable->get_name().c_str());
    else
      raiseError("continuous attribute expected for condition");

  if (!frequencies->continuous->size())
    // This is ugly, but: if you change this, you should also change the code which catches it in
    // Bayesian learner
    raiseError("distribution (of attribute values, probably) is empty or has only a single value");

  // Work on a clone; `points` maps observed x-values to class distributions.
  PContingency cont = CLONE(TContingency, frequencies);
  const TDistributionMap &points = *frequencies->continuous;

/*  if (frequencies->continuous->size() == 1) {
    TDiscDistribution *f = (TDiscDistribution *)(points.begin()->second.getUnwrappedPtr());
    f->normalize();
    f->variances = mlnew TFloatList(f->size(), 0.0);
    return mlnew TConditionalProbabilityEstimator_FromDistribution(cont);
  }
*/
  cont->continuous->clear();

  // Choose the reference x-values at which the curve will be evaluated.
  vector<float> xpoints;
  distributePoints(points, nPoints, xpoints, distributionMethod);

  if (!xpoints.size())
    raiseError("no points for the curve (check 'nPoints')");

  // Degenerate case: a single observed x-value — replicate its (normalized)
  // distribution with zero variance at every reference point.
  if (frequencies->continuous->size() == 1) {
    TDiscDistribution *f = (TDiscDistribution *)(points.begin()->second.getUnwrappedPtr());
    f->normalize();
    f->variances = mlnew TFloatList(f->size(), 0.0);
    const_ITERATE(vector<float>, pi, xpoints)
      (*cont->continuous)[*pi] = f;
    return mlnew TConditionalProbabilityEstimator_FromDistribution(cont);
  }

  TDistributionMap::const_iterator lowedge = points.begin();
  TDistributionMap::const_iterator highedge = points.end();

  // NOTE(review): needAll is assigned in both branches below but never read
  // afterwards in this function — presumably a leftover; confirm before removing.
  bool needAll;
  map<float, PDistribution>::const_iterator from, to;

  vector<float>::const_iterator pi(xpoints.begin()), pe(xpoints.end());
  float refx = *pi; // current reference x

  from = lowedge;
  to = highedge;

  // Window size: a `windowProportion` fraction of all cases, at least 3.
  int totalNumOfPoints = frequencies->outerDistribution->abs;
  int needpoints = int(ceil(totalNumOfPoints * windowProportion));
  if (needpoints<3)
    needpoints = 3;

  // Deterministic tie-breaking generator, seeded from the case count.
  TSimpleRandomGenerator rgen(frequencies->outerDistribution->cases);

  if ((needpoints<=0) || (needpoints>=totalNumOfPoints)) { //points.size()
    // Window covers everything.
    needAll = true;
    from = lowedge;
    to = highedge;
  }
  else {
    needAll = false;

    /* Find the window */
    from = points.lower_bound(refx);
    to = points.upper_bound(refx);
    // Guarantee a non-empty initial [from, to) range.
    if (from==to)
      if (to != highedge)
        to++;
      else
        from --;

    /* Extend the interval; we set from to highedge when it would go beyond lowedge, to indicate that only to can be modified now */
    while (needpoints > 0) {
      // Grow toward whichever side has the closer next point.
      if ((to == highedge) || ((from != highedge) && (refx - (*from).first < (*to).first - refx))) {
        if (from == lowedge)
          from = highedge;
        else {
          from--;
          needpoints -= (*from).second->cases;
        }
      }
      else {
        to++;
        if (to!=highedge)
          needpoints -= (*to).second->cases;
        else
          needpoints = 0;
      }
    }

    if (from == highedge)
      from = lowedge;
    /* else
      from++;*/
  }

  int numOfOverflowing = 0;

  // This follows http://www-2.cs.cmu.edu/afs/cs/project/jair/pub/volume4/cohn96a-html/node7.html
  for(;;) {
    TDistributionMap::const_iterator tt = to;
    --tt;
    if (tt == from) {
      // Window shrank to a single observed x: use its normalized
      // distribution directly, with zero variances.
      TDistribution *Sy = CLONE(TDistribution, (*tt).second);
      PDistribution wSy = Sy;
      Sy->normalize();
      (*cont->continuous)[refx] = (wSy);
      ((TDiscDistribution *)(Sy)) ->variances = mlnew TFloatList(Sy->variable->noOfValues(), 0.0);
    }
    else {
      // Kernel bandwidth h = distance to the farther window edge.
      float h = (refx - (*from).first);
      if ((*tt).first - refx > h)
        h = ((*tt).first - refx);

      /* Iterate through the window */
      tt = from;
      const float &x = (*tt).first;
      const PDistribution &y = (*tt).second;
      float cases = y->abs;

      // Tricube weight: w = (1 - |d/h|^3)^3.
      float w = fabs(refx - x) / h;
      w = 1 - w*w*w;
      w = w*w*w;

      const float num = y->abs; // number of instances with this x - value
      // Weighted sums seeded from the first window element.
      float n = w * num;
      float Sww = w * w * num;
      float Sx = w * x * num;
      float Swwx = w * w * x * num;
      float Swwxx = w * w * x * x * num;

      TDistribution *Sy = CLONE(TDistribution, y);
      PDistribution wSy = Sy;
      *Sy *= w;
      float Sxx = w * x * x * num;
      TDistribution *Syy = CLONE(TDistribution, y);
      PDistribution wSyy = Syy;
      *Syy *= w;
      TDistribution *Sxy = CLONE(TDistribution, y);
      PDistribution wSxy = Sxy;
      *Sxy *= w * x;

      // Accumulate the remaining window elements.
      if (tt!=to)
        while (++tt != to) {
          const float &x = (*tt).first;
          const PDistribution &y = (*tt).second;
          cases += y->abs;
          w = fabs(refx - x) / h;
          w = 1 - w*w*w;
          w = w*w*w;
          const float num = y->abs;
          n += w * num;
          Sww += w * w * num;
          Sx += w * x * num;
          Swwx += w * w * x * num;
          Swwxx += w * w * x * x * num;
          Sxx += w * x * x * num;
          TDistribution *ty = CLONE(TDistribution, y);
          PDistribution wty = ty;
          *ty *= w;
          *Sy += wty;
          *Syy += wty;
          *ty *= x;
          *Sxy += wty;
          //*ty *= PDistribution(y);
        }

      // Weighted variance of x inside the window.
      float sigma_x2 = n<1e-6 ? 0.0 : (Sxx - Sx * Sx / n)/n;
      if (sigma_x2<1e-10) {
        // NOTE(review): this stores a zeroed distribution but execution still
        // falls through to the regression below — confirm that is intended.
        *Sy *= 0;
        Sy->cases = cases;
        (*cont->continuous)[refx] = (wSy);
      }

      // sigma_y2 = (Syy - Sy^2/n) / n, per class (Sy*Sy is element-wise).
      TDistribution *sigma_y2 = CLONE(TDistribution, Sy);
      PDistribution wsigma_y2 = sigma_y2;
      *sigma_y2 *= wsigma_y2;
      *sigma_y2 *= -1/n;
      *sigma_y2 += wSyy;
      *sigma_y2 *= 1/n;

      // sigma_xy = (Sxy - Sx*Sy/n) / n, per class.
      TDistribution *sigma_xy = CLONE(TDistribution, Sy);
      PDistribution wsigma_xy = sigma_xy;
      *sigma_xy *= -Sx/n;
      *sigma_xy += wSxy;
      *sigma_xy *= 1/n;

      // This will be sigma_xy / sigma_x2, but we'll multiply it by whatever we need
      TDistribution *sigma_tmp = CLONE(TDistribution, sigma_xy);
      PDistribution wsigma_tmp = sigma_tmp;
      //*sigma_tmp *= wsigma_tmp;
      if (sigma_x2 > 1e-10)
        *sigma_tmp *= 1/sigma_x2;

      const float difx = refx - Sx/n;

      // computation of y
      // y(refx) = Sy/n + slope * (refx - mean_x).
      *sigma_tmp *= difx;
      *Sy *= 1/n;
      *Sy += *sigma_tmp;

      // probabilities that are higher than 0.9 normalize with a logistic function, which produces two positive
      // effects: prevents overfitting and avoids probabilities that are higher than 1.0. But, on the other hand, this
      // solution is rather unmathematical. Do the same for probabilities that are lower than 0.1.
      vector<float>::iterator syi(((TDiscDistribution *)(Sy))->distribution.begin());
      vector<float>::iterator sye(((TDiscDistribution *)(Sy))->distribution.end());
      for (; syi!=sye; syi++) {
        if (*syi > 0.9) {
          Sy->abs -= *syi;
          *syi = 1/(1+exp(-10*((*syi)-0.9)*log(9.0)-log(9.0)));
          Sy->abs += *syi;
        }
        if (*syi < 0.1) {
          Sy->abs -= *syi;
          *syi = 1/(1+exp(10*(0.1-(*syi))*log(9.0)+log(9.0)));
          Sy->abs += *syi;
        }
      }

      Sy->cases = cases;
      Sy->normalize();
      (*cont->continuous)[refx] = (wSy);

      // now for the variance
      // restore sigma_tmp and compute the conditional sigma
      if ((fabs(difx) > 1e-10) && (sigma_x2 > 1e-10)) {
        *sigma_tmp *= (1/difx);
        *sigma_tmp *= wsigma_xy;
        *sigma_tmp *= -1;
        *sigma_tmp += wsigma_y2;
        // fct corresponds to part of (10) in the brackets (see URL above)
        // float fct = Sww + difx*difx/sigma_x2/sigma_x2 * (Swwxx - 2/n * Sx*Swwx + 2/n/n * Sx*Sx*Sww);
        float fct = 1 + difx*difx/sigma_x2; //n + difx*difx/sigma_x2+n*n --- add this product to the overall fct sum if you are estimating error for a single user and not for the line.
        *sigma_tmp *= fct/n; // fct/n/n;
      }
      ((TDiscDistribution *)(Sy)) ->variances = mlnew TFloatList(((TDiscDistribution *)(sigma_tmp))->distribution);
    }

    // on to the next point
    pi++;
    if (pi==pe)
      break;
    refx = *pi;

    // Adjust the window
    // Slide the window right while that brings it closer to the new refx;
    // exact ties are broken randomly (deterministic seed above).
    while (to!=highedge) {
      float dif = (refx - (*from).first) - ((*to).first - refx);
      if ((dif>0) || (dif==0) && rgen.randbool()) {
        if (numOfOverflowing > 0) {
          from++;
          numOfOverflowing -= (*from).second->cases;
        }
        else {
          to++;
          if (to!=highedge)
            numOfOverflowing += (*to).second->cases;
        }
      }
      else
        break;
    }
  }

  return mlnew TConditionalProbabilityEstimator_FromDistribution(cont);
}
// The main function. It generates a PPM image to stdout. // Usage of the program is hence: ./card > erk.ppm int main(int argc, char **argv) { F(); int w = 512, h = 512; int num_threads = std::thread::hardware_concurrency(); if (num_threads==0) //8 threads is a reasonable assumption if we don't know how many cores there are num_threads=8; if (argc > 1) { w = atoi(argv[1]); } if (argc > 2) { h = atoi(argv[2]); } if (argc > 3) { num_threads = atoi(argv[3]); } printf("P6 %d %d 255 ", w, h); // The PPM Header is issued // The '!' are for normalizing each vectors with ! operator. vector g=!vector(-5.5f,-16,0), // Camera direction a=!(vector(0,0,1)^g)*.002f, // Camera up vector...Seem Z is pointing up :/ WTF ! b=!(g^a)*.002f, // The right vector, obtained via traditional cross-product c=(a+b)*-256+g; // WTF ? See https://news.ycombinator.com/item?id=6425965 for more. int s = 3*w*h; char *bytes = new char[s]; auto lambda=[&](unsigned int seed, int offset, int jump) { for (int y=offset; y<h; y+=jump) { //For each row int k = (h - y - 1) * w * 3; for(int x=w;x--;) { //For each pixel in a line //Reuse the vector class to store not XYZ but a RGB pixel color vector p(13,13,13); // Default pixel color is almost pitch black //Cast 64 rays per pixel (For blur (stochastic sampling) and soft-shadows. for(int r=64;r--;) { // The delta to apply to the origin of the view (For Depth of View blur). 
vector t=a*(R(seed)-.5f)*99+b*(R(seed)-.5f)*99; // A little bit of delta up/down and left/right // Set the camera focal point vector(17,16,8) and Cast the ray // Accumulate the color returned in the p variable p=S(vector(17,16,8)+t, //Ray Origin !(t*-1+(a*(R(seed)+x)+b*(y+R(seed))+c)*16) // Ray Direction with random deltas // for stochastic sampling , seed)*3.5f+p; // +p for color accumulation } bytes[k++] = (char)p.x; bytes[k++] = (char)p.y; bytes[k++] = (char)p.z; } } }; std::mt19937 rgen; std::vector<std::thread> threads; for(int i=0;i<num_threads;++i) { threads.emplace_back(lambda, rgen(), i, num_threads); } for(auto& t : threads) { t.join(); } fwrite(bytes, 1, s, stdout); delete [] bytes; }