void dragon64_state::page_rom(bool romswitch) { offs_t offset = romswitch ? 0x0000 // This is the 32k mode basic(64)/boot rom(alpha) : 0x8000; // This is the 64k mode basic(64)/basic rom(alpha) sam().set_bank_offset(1, offset); // 0x8000-0x9FFF sam().set_bank_offset(2, offset); // 0xA000-0xBFFF }
// Save the registry key named by `key` into a newly created file at the
// NT-native path `path` (RegSaveKey-style hive backup via the Zw* API).
//
// NOTE(review): `path` presumably must be an NT path (e.g. "\??\C:\...")
// for ZwCreateFile to resolve it -- TODO confirm against callers.
void save_key_to(UnicodeString &key,UnicodeString &path)
{
    // Saving a hive requires SE_BACKUP_PRIVILEGE to be enabled first.
    get_privilege(SE_BACKUP_PRIVILEGE);

    RegKey sam(key);
    // Flush pending registry writes so the saved image is current.
    sam.flush();

    // Describe the destination file for the native API.
    OBJECT_ATTRIBUTES file;
    InitializeObjectAttributes(
        &file,
        &path.unicode_string(),
        OBJ_CASE_INSENSITIVE,
        NULL,
        NULL);

    HANDLE hFile;
    IO_STATUS_BLOCK ios;
    // FILE_CREATE: the call fails if the destination already exists.
    ULONG status = ZwCreateFile(
        &hFile
        ,GENERIC_WRITE
        ,&file
        ,&ios
        ,0
        ,0
        ,0
        ,FILE_CREATE
        ,0
        ,0
        ,0);
    // CHECKER presumably reports/raises on a failing NTSTATUS -- TODO confirm.
    CHECKER(status);

    // NOTE(review): if save_to() throws, hFile is never closed (handle
    // leak); consider an RAII wrapper around the handle.
    sam.save_to(hFile);
    ZwClose(hFile);
}
//---------------------------------------------------------------------- void SepStratSampler::polar_draw(){ cand_ = mod_->Sigma(); sd_ = cand_.vectorize(true); // true means minimal, only upper triangle SigmaPolarTarget target(this); SliceSampler sam(target); sd_ = sam.draw(sd_); cand_.unvectorize(sd_, true); mod_->set_Sigma(cand_); }
// Verify that sam() throws: the fail() right after the call must be
// unreachable, and the catch-all handler must fire exactly once.
int main() {
  int caught = 0;
  try {
    sam();
    fail(__LINE__);    // reached only if sam() failed to throw
  } catch (...) {
    caught += 1;
  }
  if (caught != 1)
    fail(__LINE__);
  _PASS;
}
//---------------------------------------------------------------------- // driver function to draw a scalar variance parameter conditional // on the correlations and other variances. void SepStratSampler::draw_sigsq(int i){ i_ = i; j_ = i; SigmaTarget target(this); ScalarSliceSampler sam(target); sam.set_lower_limit(0); double ivar = 1.0/square(sd_[i]); ivar = sam.draw(ivar); sd_[i] = 1.0/sqrt(ivar); }
void EDS::draw(){ Vector nu(mod_->nu()); uint d = nu.size(); const Vector & sumlog(mod_->suf()->sumlog()); double nobs = mod_->suf()->n(); for(uint i=0; i<d; ++i){ target logp(sumlog, nobs, nu, i, pri_); ScalarSliceSampler sam(logp); sam.set_lower_limit(0); nu[i] = sam.draw(nu[i]); } mod_->set_nu(nu); }
void PDPS::draw(){ const Mat & sumlog(m_->suf()->sumlog()); double nobs(m_->suf()->n()); Mat Nu(m_->Nu()); uint d= nrow(Nu); for(uint i=0; i<d; ++i){ Vec sumlog_i(sumlog.row(i)); Vec nu(Nu.row(i)); for(uint j=0; j<d; ++j){ DirichletLogp logp(j, nu, sumlog_i, nobs, phi_row_prior_[i], alpha_row_prior_[i], min_nu_); ScalarSliceSampler sam(logp, true); sam.set_lower_limit(min_nu_); nu[j]= sam.draw(nu[j]); } Nu.row(i) = nu; } m_->set_Nu(Nu); }
static bool run2DSphereGeoNear(NamespaceDetails* nsDetails, int idxNo, BSONObj& cmdObj, const GeoNearArguments &parsedArgs, string& errmsg, BSONObjBuilder& result) { auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(nsDetails, idxNo)); auto_ptr<S2AccessMethod> sam(new S2AccessMethod(descriptor.get())); const S2IndexingParams& params = sam->getParams(); auto_ptr<S2NearIndexCursor> nic(new S2NearIndexCursor(descriptor.get(), params)); vector<string> geoFieldNames; BSONObjIterator i(descriptor->keyPattern()); while (i.more()) { BSONElement e = i.next(); if (e.type() == String && IndexNames::GEO_2DSPHERE == e.valuestr()) { geoFieldNames.push_back(e.fieldName()); } } // NOTE(hk): If we add a new argument to geoNear, we could have a // 2dsphere index with multiple indexed geo fields, and the geoNear // could pick the one to run over. Right now, we just require one. uassert(16552, "geoNear requires exactly one indexed geo field", 1 == geoFieldNames.size()); NearQuery nearQuery(geoFieldNames[0]); uassert(16679, "Invalid geometry given as arguments to geoNear: " + cmdObj.toString(), nearQuery.parseFromGeoNear(cmdObj, params.radius)); uassert(16683, "geoNear on 2dsphere index requires spherical", parsedArgs.isSpherical); // NOTE(hk): For a speedup, we could look through the query to see if // we've geo-indexed any of the fields in it. vector<GeoQuery> regions; nic->seek(parsedArgs.query, nearQuery, regions); // We do pass in the query above, but it's just so we can possibly use it in our index // scan. We have to do our own matching. 
auto_ptr<Matcher> matcher(new Matcher(parsedArgs.query)); double totalDistance = 0; BSONObjBuilder resultBuilder(result.subarrayStart("results")); double farthestDist = 0; int results; for (results = 0; results < parsedArgs.numWanted && !nic->isEOF(); ++results) { BSONObj currObj = nic->getValue().obj(); if (!matcher->matches(currObj)) { --results; nic->next(); continue; } double dist = nic->currentDistance(); // If we got the distance in radians, output it in radians too. if (nearQuery.fromRadians) { dist /= params.radius; } dist *= parsedArgs.distanceMultiplier; totalDistance += dist; if (dist > farthestDist) { farthestDist = dist; } BSONObjBuilder oneResultBuilder( resultBuilder.subobjStart(BSONObjBuilder::numStr(results))); oneResultBuilder.append("dis", dist); if (parsedArgs.includeLocs) { BSONElementSet geoFieldElements; currObj.getFieldsDotted(geoFieldNames[0], geoFieldElements, false); for (BSONElementSet::iterator oi = geoFieldElements.begin(); oi != geoFieldElements.end(); ++oi) { if (oi->isABSONObj()) { oneResultBuilder.appendAs(*oi, "loc"); } } } oneResultBuilder.append("obj", currObj); oneResultBuilder.done(); nic->next(); } resultBuilder.done(); BSONObjBuilder stats(result.subobjStart("stats")); stats.appendNumber("nscanned", nic->nscanned()); stats.append("avgDistance", totalDistance / results); stats.append("maxDistance", farthestDist); stats.append("time", cc().curop()->elapsedMillis()); stats.done(); return true; }
// creates an irregularly sampled SPD // may "truncate" the edges to fit the new resolution // wavelengths array containing the wavelength of each sample // samples array of sample values at the given wavelengths // n number of samples // resolution resampling resolution (in nm) IrregularSPD::IrregularSPD(const float* const wavelengths, const float* const samples, u_int n, float resolution, SPDResamplingMethod resamplignMethod) : SPD() { float lambdaMin = wavelengths[0]; float lambdaMax = wavelengths[n - 1]; u_int sn = Ceil2UInt((lambdaMax - lambdaMin) / resolution) + 1; std::vector<float> sam(sn); if (resamplignMethod == Linear) { u_int k = 0; for (u_int i = 0; i < sn; i++) { float lambda = lambdaMin + i * resolution; if (lambda < wavelengths[0] || lambda > wavelengths[n-1]) { sam[i] = 0.f; continue; } for (; k < n; ++k) { if (wavelengths[k] >= lambda) break; } if (wavelengths[k] == lambda) sam[i] = samples[k]; else { float intervalWidth = wavelengths[k] - wavelengths[k - 1]; float u = (lambda - wavelengths[k - 1]) / intervalWidth; sam[i] = Lerp(u, samples[k - 1], samples[k]); } } } else { std::vector<float> sd(n); calc_spline_data(wavelengths, samples, n, &sd[0]); u_int k = 0; for (u_int i = 0; i < sn; i++) { float lambda = lambdaMin + i * resolution; if (lambda < wavelengths[0] || lambda > wavelengths[n-1]) { sam[i] = 0.f; continue; } while (lambda > wavelengths[k+1]) k++; float h = wavelengths[k+1] - wavelengths[k]; float a = (wavelengths[k+1] - lambda) / h; float b = (lambda - wavelengths[k]) / h; sam[i] = Max(a*samples[k] + b*samples[k+1]+ ((a*a*a-a)*sd[k] + (b*b*b-b)*sd[k+1])*(h*h)/6.f, 0.f); } } init(lambdaMin, lambdaMax, &sam[0], sn); }
int main() { bar( (D*) 0 ); // bar( B * ) foo( (B*) 0 ); // foo( void * ) sam( (F2*) 0 ); // sam( F1 * ); _PASS; }
//----------------------------------------------------------------------
// Redistribute the combined value of units i and j within the group: a
// new value for unit i is drawn from its full conditional (given the
// regression means of the two units and their fixed pairwise total), and
// unit j receives the remainder.  Extensive consistency checks run
// before and after the draw; any violation goes to report_error()
// (presumably throws/aborts -- TODO confirm).
void Group::modify_unit_value(int i, int j){
  // if (i == 0) return;
  // Sanity check: the cached group total must agree (to within .01)
  // with the sum of the per-unit values.
  if(fabs(total_value_ - unit_values_.sum()) > .01){
    ostringstream err;
    err << "In BOOM::Agreg::Group::modify_unit_value: total_value and "
        << "unit_values_ have gotten out of sync.";
    report_error(err.str());
  }
  double total = unit_values_[i] + unit_values_[j];
  // Total is the total amount of value to be split between the two
  // assets.  If total is really small then both assets are zero, and
  // won't change.
  //
  if (total < .01) return;
  if(unit_values_[i] > total){
    ostringstream err;
    err << "unit_values_[" << i << "] on group " << name_
        << " is greater than the maximum possible value of " << total
        << " sum(unit values_) = " << sum(unit_values_)
        << " total group value = " << total_value_ << endl
        << "inidividual unit_values: " << unit_values_;
    report_error(err.str());
  }
  if(unit_values_[i] < 0 || unit_values_[j] < 0){
    ostringstream err;
    err << "unit_values_ must be positive:" << endl
        << "unit_values_[" << i << "] = " << unit_values_[i] << endl
        << "unit_values_[" << j << "] = " << unit_values_[j] << endl;
    report_error(err.str());
  }
  // Regression means for the two units given their covariates.
  double mu_i = beta_->dot(unit_data_[i]->x());
  double mu_j = beta_->dot(unit_data_[j]->x());
  // Full conditional for unit i's value given the pair total.
  // NOTE(review): 'f' is a name not visible in this chunk -- verify what
  // it supplies to UnitValueDistribution.
  UnitValueDistribution logf(mu_i, mu_j, sigma_, total, f);
  ScalarSliceSampler sam(logf);
  // Constrain the draw to [0, total] so both units stay non-negative.
  sam.set_limits(0, total);
  // Keep values at least slightly away from the boundary.
  // if (fabs(unit_values_[i] - total) <= .01){
  //   unit_values_[i] = total - .01;
  // }
  for(int k = 0; k < 3; ++k){
    // The slice sampler had trouble moving off of bad starting
    // values.  Iterating the slice sampler a few times gives us
    // close-to-direct draws from the target distribution
    unit_values_[i] = sam.draw(unit_values_[i]);
    unit_values_[j] = total - unit_values_[i];
  }
  // Post-draw sanity check: both values must lie in [0, total].
  if(unit_values_[i] < 0 || unit_values_[i] > total
     || unit_values_[j] < 0 || unit_values_[j] > total){
    ostringstream err;
    err << "unit values must be non-negative, but less than their sum: "
        << total << endl
        << "unit_values_[" << i << "] = " << unit_values_[i] << endl
        << "unit_values_[" << j << "] = " << unit_values_[j] << endl;
    report_error(err.str());
  }
}