TEST_F(DictTest, bool_cast) {
  EXPECT_TRUE (static_cast<bool>(this->_l));
  EXPECT_FALSE(static_cast<bool>(this->_r));
  configment::Dict d0("{}");
  EXPECT_FALSE(static_cast<bool>(d0));
  configment::Dict d1("{{}:{}}");
  EXPECT_TRUE (static_cast<bool>(d1));
}
TEST(DictionaryEquality, MixedTypes) {
  AmfDictionary d0(false);
  AmfArray a;
  AmfVector<int> v;
  AmfUndefined u;
  EXPECT_NE(d0, a);
  EXPECT_NE(d0, v);
  EXPECT_NE(d0, u);
}
bool Decimal::operator== (const tntdb::Decimal& other) const {
  tntdb::Decimal d0(*this);
  tntdb::Decimal d1(other);
  d0.normalize();
  d1.normalize();
  return d0.mantissa == d1.mantissa
      && d0.exponent == d1.exponent
      && d0.isPositive() == d1.isPositive();
}
double sqDistPointSegment(const SPoint3 &p, const SPoint3 &s0, const SPoint3 &s1)
{
  SVector3 d(s1 - s0);
  SVector3 d0(p - s0);
  SVector3 d1(p - s1);
  double dn2 = crossprod(d, d0).normSq();
  double dt2 = std::max(0., std::max(-dot(d, d0), dot(d, d1)));
  dt2 *= dt2;
  return (dt2 + dn2) / d.normSq();
}
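// A minimal standalone sketch of the same point-to-segment squared-distance
// formula, assuming nothing from the Gmsh SPoint3/SVector3 API; Vec3 and the
// helpers below are hypothetical stand-ins used only to check the arithmetic:
// the normal part comes from |d x d0|^2 / |d|^2 and the tangential overshoot
// from max(0, max(-d.d0, d.d1))^2 / |d|^2.
#include <algorithm>
#include <cassert>
#include <cmath>

struct Vec3 { double x, y, z; };
static Vec3 sub(const Vec3 &a, const Vec3 &b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static double dot(const Vec3 &a, const Vec3 &b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static Vec3 cross(const Vec3 &a, const Vec3 &b) {
  return {a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x};
}
static double normSq(const Vec3 &a) { return dot(a, a); }

static double sqDistPointSegmentSketch(const Vec3 &p, const Vec3 &s0, const Vec3 &s1)
{
  Vec3 d = sub(s1, s0), d0 = sub(p, s0), d1 = sub(p, s1);
  double dn2 = normSq(cross(d, d0));  // (perpendicular distance)^2 * |d|^2
  double dt2 = std::max(0.0, std::max(-dot(d, d0), dot(d, d1)));
  dt2 *= dt2;                         // (overshoot beyond an endpoint)^2 * |d|^2
  return (dt2 + dn2) / normSq(d);
}

int main() {
  // Point above the middle of the segment: distance is just the height (2 -> 4).
  assert(std::fabs(sqDistPointSegmentSketch({0, 2, 0}, {0, 0, 0}, {4, 0, 0}) - 4.0) < 1e-12);
  // Point past the far endpoint: distance is measured to that endpoint (2 -> 4).
  assert(std::fabs(sqDistPointSegmentSketch({6, 0, 0}, {0, 0, 0}, {4, 0, 0}) - 4.0) < 1e-12);
}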
TEST_F(DictTest, string_constructor) {
  configment::Dict d0("{1: 2, 'one': 'two', 'x': {}, {}: 'empty'}");
  EXPECT_EQ(4u, d0.size());
  configment::Dict d1(d0.repr());
  EXPECT_EQ(d0, d1);
  configment::Dict d2("{'a': 1, ' b ': 2, 'c': 3.5}");
  configment::Dict d3("dict(a=1, ' b '=2, c=3.5)");
  EXPECT_EQ(d2, d3);
}
TEST(DictionaryEquality, EmptyDictionary) {
  AmfDictionary d0(true);
  AmfDictionary d1(true, false);
  EXPECT_EQ(d0, d1);
  AmfDictionary d2(false);
  EXPECT_NE(d0, d2);
  AmfDictionary d3(false, true);
  EXPECT_NE(d2, d3);
  EXPECT_NE(d0, d3);
}
void RemoveTranslationOptimizerState::remove_translation() const {
  Particle *p0 = *pis_.begin();
  core::XYZ d0(p0);
  algebra::Vector3D coords = d0.get_coordinates();
  for (Particles::const_iterator pi = pis_.begin(); pi != pis_.end(); ++pi) {
    Particle *p = *pi;
    core::XYZ d(p);
    d.set_coordinates(d.get_coordinates() - coords);
  }
}
K eval(K x,K y,K z){
 K*k;S*b,s;SQLULEN w;SQLLEN*nb;SQLINTEGER*wb;H*tb,u,t,j=0,p,m;F f;C c[128];I n=xj<0;D d=d1(n?-xj:xj);U(d)x=y;
 Q(z->t!=-KJ||xt!=-KS&&xt!=KC,"type")
 if(z->j)SQLSetStmtAttr(d,SQL_ATTR_QUERY_TIMEOUT,(SQLPOINTER)(SQLULEN)z->j,0);
 if(xt==-KS)Q1(SQLColumns(d,(S)0,0,(S)0,0,xs,S0,(S)0,0))else{I e;K q=kpn(xG,xn);ja(&q,"\0");e=SQLExecDirect(d,q->G0,xn);r0(q);Q1(e)}
 SQLNumResultCols(d,&j);P(!j,(d0(d),knk(0)))
 b=malloc(j*SZ),tb=malloc(j*2),wb=malloc(j*SZ),nb=malloc(j*SZ),x=ktn(KS,j),y=ktn(0,j);// sqlserver: no bind past nonbind
 DO(j,Q1(SQLDescribeCol(d,(H)(i+1),c,128,&u,&t,&w,&p,&m))xS[i]=sn(c,u);if(t>90)t-=82;Q(t<-11||t>12,xS[i])wb[i]=ut[tb[i]=t=t>0?t:12-t]==KS&&w?w+1:wt[t];if(ut[t]==KS&&(n||!wb[i]||wb[i]>9))tb[i]=13)
 DO(j,kK(y)[i]=ktn(ut[t=tb[i]],0);if(w=wb[i])Q1(SQLBindCol(d,(H)(i+1),ct[t],b[i]=malloc(w),w,nb+i)))
 for(;SQL_SUCCEEDED(SQLFetch(d));)
  DO(j,k=kK(y)+i;u=ut[t=tb[i]];s=b[i];n=SQL_NULL_DATA==(int)nb[i];
   if(!u)jk(k,n?ktn(ct[t]?KC:KG,0):wb[i]?kp(s):gb(d,(H)(i+1),t));
   else ja(k,n?nu(u):u==KH&&wb[i]==1?(t=(H)*s,(S)&t):u==KS?(s=dtb(s,nb[i]),(S)&s):u<KD?s:u==KZ?(f=ds(s)+(vs(s+6)+*(I*)(s+12)/1e9)/8.64e4,(S)&f):(w=u==KD?ds(s):vs(s),(S)&w)))
 if(!SQLMoreResults(d))O("more\n");
 DO(j,if(wb[i])free(b[i]))
 R free(b),free(tb),free(wb),free(nb),d0(d),xT(xD(x,y));}
double CAAngleRestraint::unprotected_evaluate(DerivativeAccumulator *) const {
  Model *m = get_model();
  core::XYZ d0(m, p_[0]);
  core::XYZ d1(m, p_[1]);
  core::XYZ d2(m, p_[2]);
  double phi0 = core::internal::angle(d0, d1, d2, nullptr, nullptr, nullptr);
  int index = get_closest(phi0_, phi0);
  return score_[index];
}
string Particle::toString() const {
  stringstream out;
  out << "Particle information" << "\n";
  out << setw(30) << "energy" << setw(30) << "px" << setw(30) << "py" << setw(30) << "pz" << "\n";
  out << setw(30) << energy() << setw(30) << px() << setw(30) << py() << setw(30) << pz() << "\n";
  out << setw(30) << "phi" << setw(30) << "eta" << setw(30) << "theta" << setw(30) << " " << "\n";
  out << setw(30) << phi() << setw(30) << eta() << setw(30) << theta() << setw(30) << " " << "\n";
  out << setw(30) << "momentum" << setw(30) << "E_T" << setw(30) << "p_T" << setw(30) << " " << "\n";
  out << setw(30) << momentum() << setw(30) << et() << setw(30) << pt() << setw(30) << " " << "\n";
  out << setw(30) << "m_dyn" << setw(30) << "m_fix" << setw(30) << "charge" << setw(30) << " " << "\n";
  out << setw(30) << massFromEnergyAndMomentum() << setw(30) << mass() << setw(30) << charge() << setw(30) << " " << "\n";
  out << setw(30) << "d0 =" << setw(30) << "d0_bs" << setw(30) << " " << setw(30) << " " << "\n";
  out << setw(30) << d0() << setw(30) << d0_wrtBeamSpot() << setw(30) << " " << setw(30) << " " << "\n";
  return out.str();
}
bool Decimal::operator< (const tntdb::Decimal& other) const {
  tntdb::Decimal d0(*this);
  tntdb::Decimal d1(other);
  if (!d0.isPositive() && d1.isPositive())
    return true;
  if (d0.isPositive() && !d1.isPositive())
    return false;
  // Bring both operands to a common exponent by scaling up the mantissa of
  // the one with the larger exponent; decide early if that would overflow.
  if (d0.exponent > d1.exponent) {
    while (d0.exponent != d1.exponent) {
      if (d0.mantissa > std::numeric_limits<MantissaType>::max()/10)
        return !d0.isPositive();  // |*this| dwarfs |other|
      d0.mantissa *= 10;
      --d0.exponent;
    }
  }
  else if (d0.exponent < d1.exponent) {
    while (d0.exponent != d1.exponent) {
      if (d1.mantissa > std::numeric_limits<MantissaType>::max()/10)
        return d1.isPositive();   // |other| dwarfs |*this|
      d1.mantissa *= 10;
      --d1.exponent;
    }
  }
  if (d0.mantissa < d1.mantissa)
    return d0.isPositive();
  if (d0.mantissa > d1.mantissa)
    return !d0.isPositive();
  return false;
}
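// A minimal standalone sketch of the same comparison idea, assuming a toy
// sign/mantissa/exponent triple rather than the tntdb::Decimal class: the two
// operands are aligned to a common exponent by scaling the mantissa of the one
// with the larger exponent, and the overflow guard settles the result early
// when that scaling would leave the mantissa range.
#include <cassert>
#include <cstdint>
#include <limits>

struct ToyDecimal {
  bool positive;
  std::uint64_t mantissa;
  int exponent;  // value = (positive ? +1 : -1) * mantissa * 10^exponent
};

static bool lessThan(ToyDecimal a, ToyDecimal b) {
  if (!a.positive && b.positive) return true;
  if (a.positive && !b.positive) return false;
  while (a.exponent > b.exponent) {
    if (a.mantissa > std::numeric_limits<std::uint64_t>::max() / 10)
      return !a.positive;  // |a| dwarfs |b|
    a.mantissa *= 10;
    --a.exponent;
  }
  while (b.exponent > a.exponent) {
    if (b.mantissa > std::numeric_limits<std::uint64_t>::max() / 10)
      return b.positive;   // |b| dwarfs |a|
    b.mantissa *= 10;
    --b.exponent;
  }
  if (a.mantissa == b.mantissa) return false;
  return (a.mantissa < b.mantissa) == a.positive;
}

int main() {
  assert(lessThan({true, 25, -1}, {true, 3, 0}));   // 2.5 < 3
  assert(!lessThan({true, 3, 0}, {true, 25, -1}));  // 3 < 2.5 is false
  assert(lessThan({false, 1, 0}, {true, 1, -3}));   // -1 < 0.001
}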
int main( void ) {
  BASE b0( 1, 1 ); // change to no arg.s later
  BASE b1( 3, 4 );
  b0.check( 1, 1, "BASE() failed" );
  b1.check( 3, 4, "BASE( 3, 4 ) failed" );
  b0 = b1;
  b0.check( 3, 4, "b0 = b1 failed" );
  DERIVED d0( 1, 1 ); // change to no arg.s later
  DERIVED d1( 5, 6 );
  d0.check( 1, 1, "DERIVED() failed" );
  d1.check( 5, 6, "DERIVED( 5, 6 ) failed" );
  d0 = d1;
  d0.check( 5, 6, "d0 = d1 failed" );
  {
    DTOR dummy;
    dtor_called = FALSE;
    dummy = dummy;
  }
  if( ! dtor_called ) {
    printf( "DTOR not dtor'd properly\n" );
    ++ error_count;
  }
  {
    class DTOR_DERIVED : public DTOR {
    public:
      int x;
    };
    DTOR_DERIVED dummy;
    dtor_called = FALSE;
    dummy.x = 18;
  }
  if( ! dtor_called ) {
    printf( "DTOR not dtor'd properly\n" );
    ++ error_count;
  }
  if( error_count == 0 ) {
    printf( "CHKCL -- passed all tests\n\n" );
  } else {
    printf( "CHKCL -- %d errors noted\n\n", error_count );
  }
  return( error_count != 0 );
}
double Trajectory::calcVelCoherence( const _2Real::Vec2 &p, double time, double velTolerance ) const {
  if( !this->canCalcVelCoherence() )
    throw _2Real::Exception( "cannot calc velocity coherence -- not enough values" );
  _2Real::Vec2 d0( m_v1 - m_v0 );
  _2Real::Vec2 d1( p - m_v1 );
  double len0 = std::max<double>( d0.norm(), velTolerance );
  double len1 = std::max<double>( d1.norm(), velTolerance );
  double dt = time - m_prevTime;
  double compVel = 1.0 - 2.0 * std::sqrt( len0 * len1 ) / ( len0 + len1 );
  return compVel / ( dt * 30.0 );
}
static inline bool flat_enough_angular(const BezierPath::Point& p0, const BezierPath::Point& p1,
                                       const BezierPath::Point& p2, const BezierPath::Point& p3,
                                       float proj)
{
  vector2d<BezierPath::Coord> d0(p1-p0);
  vector2d<BezierPath::Coord> d1(p3-p2);
  vector2d<BezierPath::Coord> d(p3-p0);
  d /= norm(d);
  d0 /= norm(d0);
  if (abs(dot(d,d0)) > proj) return false;
  d1 /= norm(d1);
  if (abs(dot(d,d1)) > proj) return false;
  return true;
}
void test() {
  // Single element deleter
  {
    reset_counters();
    bml::default_delete<B> d2;
    bml::default_delete<A> d1;
    d1 = d2;
    A* p = new B;
    BOOST_TEST(A::count == 1);
    BOOST_TEST(B::count == 1);
    d1(p);
    BOOST_TEST(A::count == 0);
    BOOST_TEST(B::count == 0);
  }
  // Array element deleter
  {
    reset_counters();
    bml::default_delete<A[]> d2;
    bml::default_delete<const A[]> d1;
    d1 = d2;
    const A* p = new const A[2];
    BOOST_TEST(A::count == 2);
    d1(p);
    BOOST_TEST(A::count == 0);
  }
  // Bounded array element deleter
  {
    reset_counters();
    bml::default_delete<A[2]> d2;
    bml::default_delete<const A[2]> d1;
    d1 = d2;
    const A* p = new const A[2];
    BOOST_TEST(A::count == 2);
    d1(p);
    bml::default_delete<const A[]> d0;
    d0 = d1;
    d0(0);
    BOOST_TEST(A::count == 0);
  }
}
TEST(DictionaryEquality, SimpleValues) {
  AmfDictionary d0(true), d1(true), d2(false);
  d0.insert(AmfInteger(0), AmfString("foo"));
  d1.insert(AmfInteger(0), AmfString("foo"));
  d2.insert(AmfInteger(0), AmfString("foo"));
  EXPECT_EQ(d0, d1);
  EXPECT_NE(d0, d2);

  d0.insert(AmfString("qux"), AmfByteArray(v8 { 0x00 }));
  EXPECT_NE(d0, d1);
  d1.insert(AmfString("qux"), AmfByteArray(v8 { 0x00 }));
  EXPECT_EQ(d0, d1);

  d0.insert(AmfNull(), AmfUndefined());
  d1.insert(AmfUndefined(), AmfNull());
  EXPECT_NE(d0, d1);
  d0.insert(AmfUndefined(), AmfNull());
  d1.insert(AmfNull(), AmfUndefined());
  EXPECT_EQ(d0, d1);
}
/* Apply the restraint to two atoms, two Scales, one experimental value. */
double NOERestraint::unprotected_evaluate(DerivativeAccumulator *accum) const {
  core::XYZ d0(p0_);
  core::XYZ d1(p1_);
  Scale sigma_nuis(sigma_);
  Scale gamma_nuis(gamma_);
  /* compute Icalc */
  algebra::Vector3D c0 = d0.get_coordinates();
  algebra::Vector3D c1 = d1.get_coordinates();
  double diff = (c0 - c1).get_magnitude();
  double gamma_val = gamma_nuis.get_scale();
  double sigma_val = sigma_nuis.get_scale();
  double Icalc = gamma_val * pow(diff, -6);
  /* compute all arguments to FNormal */
  double FA = log(Vexp_);
  double FM = log(Icalc);
  double JA = 1.0 / Vexp_;
  IMP_NEW(FNormal, lognormal, (FA, JA, FM, sigma_val));
  //lognormal->set_was_used(true); // get rid of warning
  /* get score */
  double score = lognormal->evaluate();
  const_cast<NOERestraint *>(this)->set_chi(FA - FM);
  if (accum) {
    /* derivative for coordinates */
    double DFM = lognormal->evaluate_derivative_FM();
    double factor = -6 / diff; /* d(log(gamma*pow(diff,-6)))/d(diff) */
    algebra::Vector3D deriv = DFM * factor * (c0 - c1) / diff;
    d0.add_to_derivatives(deriv, *accum);
    d1.add_to_derivatives(-deriv, *accum);
    /* derivative for sigma */
    sigma_nuis.add_to_scale_derivative(lognormal->evaluate_derivative_sigma(), *accum);
    /* derivative for gamma */
    gamma_nuis.add_to_scale_derivative(DFM / gamma_val, *accum);
  }
  return score;
}
static inline bool flat_enough(const BezierPath::Point& p0, const BezierPath::Point& p1,
                               const BezierPath::Point& p2, const BezierPath::Point& p3,
                               BezierPath::Coord flat2)
{
  static const BezierPath::Coord epsilon = 0.001;
  vector2d<BezierPath::Coord> d(p3-p0);
  vector2d<BezierPath::Coord> d0(p1-p0);
  vector2d<BezierPath::Coord> d1(p3-p2);
  double n = norm(d);
  if (n < epsilon) {
    return sqr(norm(d0)) < flat2 && sqr(norm(d1)) < flat2;
  }
  if (dist2(p0,p3,p1) > flat2) return false;
  if (dist2(p0,p3,p2) > flat2) return false;
  return true;
}
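// A minimal standalone sketch of the chord-distance flatness test above,
// assuming plain 2D points instead of the BezierPath/vector2d types: a cubic
// segment is treated as flat when both interior control points lie within
// sqrt(flat2) of the chord p0-p3, with a fallback for a degenerate chord.
#include <cassert>
#include <cmath>

struct Pt { double x, y; };

// Squared distance from point q to the infinite line through a and b.
static double dist2ToLine(const Pt& a, const Pt& b, const Pt& q) {
  double dx = b.x - a.x, dy = b.y - a.y;
  double len2 = dx * dx + dy * dy;
  double cross = dx * (q.y - a.y) - dy * (q.x - a.x);
  return cross * cross / len2;
}

static bool flatEnoughSketch(const Pt& p0, const Pt& p1, const Pt& p2, const Pt& p3, double flat2) {
  double dx = p3.x - p0.x, dy = p3.y - p0.y;
  if (dx * dx + dy * dy < 1e-6) {
    // Degenerate chord: fall back to the lengths of the control arms themselves.
    auto arm2 = [](const Pt& a, const Pt& b) {
      double ex = b.x - a.x, ey = b.y - a.y;
      return ex * ex + ey * ey;
    };
    return arm2(p0, p1) < flat2 && arm2(p2, p3) < flat2;
  }
  return dist2ToLine(p0, p3, p1) <= flat2 && dist2ToLine(p0, p3, p2) <= flat2;
}

int main() {
  Pt p0{0, 0}, p3{10, 0};
  assert(flatEnoughSketch(p0, {3, 0.01}, {7, -0.01}, p3, 1e-4 + 1e-12));  // nearly straight
  assert(!flatEnoughSketch(p0, {3, 2.0}, {7, -2.0}, p3, 1e-4));           // clearly curved
}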
/** \param[in] accum If not nullptr, use this object to accumulate partial first derivatives.
    \return Current score.
*/
double TALOSRestraint::unprotected_evaluate(DerivativeAccumulator *accum) const {
  core::XYZ d0(p_[0]);
  core::XYZ d1(p_[1]);
  core::XYZ d2(p_[2]);
  core::XYZ d3(p_[3]);
  Scale kappascale(kappa_);
  double kappaval = kappascale.get_scale();

  // get angle
  algebra::VectorD<3> derv0, derv1, derv2, derv3;
  double angle;
  if (accum) {
    angle = core::internal::dihedral(d0, d1, d2, d3, &derv0, &derv1, &derv2, &derv3);
  } else {
    angle = core::internal::dihedral(d0, d1, d2, d3, nullptr, nullptr, nullptr, nullptr);
  }

  // score current angle
  mises_->set_x(angle);
  mises_->set_kappa(kappaval);
  double score = mises_->evaluate();

  // store derivatives if necessary
  if (accum) {
    double deriv = mises_->evaluate_derivative_x();
    d0.add_to_derivatives(derv0 * deriv, *accum);
    d1.add_to_derivatives(derv1 * deriv, *accum);
    d2.add_to_derivatives(derv2 * deriv, *accum);
    d3.add_to_derivatives(derv3 * deriv, *accum);
    kappascale.add_to_scale_derivative(mises_->evaluate_derivative_kappa(), *accum);
  }
  return score;
}
/* Apply the restraint to two atoms, two Scales, one experimental value. */
double AmbiguousNOERestraint::unprotected_evaluate(DerivativeAccumulator *accum) const {
  IMP_OBJECT_LOG;
  IMP_USAGE_CHECK(get_model(),
                  "You must at least register the restraint with the model"
                  << " before calling evaluate.");
  /* compute Icalc = 1/(gamma*d^6) where d = (sum d_i^-6)^(-1/6) */
  double vol = 0;
  Floats vols;
  IMP_CONTAINER_FOREACH(PairContainer, pc_, {
    core::XYZ d0(get_model(), _1[0]);
    core::XYZ d1(get_model(), _1[1]);
    algebra::Vector3D c0 = d0.get_coordinates();
    algebra::Vector3D c1 = d1.get_coordinates();
    // will raise an error if c0 == c1
    double tmp = 1.0 / (c0 - c1).get_squared_magnitude();
    vols.push_back(IMP::cube(tmp));  // store di^-6
    vol += vols.back();
  });
double RepulsiveDistancePairScore::evaluate_index(
    kernel::Model *m, const kernel::ParticleIndexPair &p,
    DerivativeAccumulator *da) const {
  core::XYZR d0(m, p[0]), d1(m, p[1]);
  algebra::VectorD<3> delta;
  for (int i = 0; i < 3; ++i) {
    delta[i] = d0.get_coordinate(i) - d1.get_coordinate(i);
  }
  double distance2 = delta.get_squared_magnitude();
  double distance = std::sqrt(distance2);
  double target = x0_ + d0.get_radius() + d1.get_radius();
  double shifted_distance = distance - target;
  if (shifted_distance > 0) return 0;
  double energy = .5 * k_ * pow(shifted_distance, 4);
  if (da) {
    double deriv = 4 * energy / shifted_distance;
    algebra::Vector3D uv = delta / distance;
    d0.add_to_derivatives(uv * deriv, *da);
    d1.add_to_derivatives(-uv * deriv, *da);
  }
  return energy;
}
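// A minimal standalone sketch of the scoring rule above, assuming plain
// doubles instead of the IMP particle/accumulator machinery: for overlap
// (distance below x0 + r0 + r1) the energy is 0.5*k*s^4 with
// s = distance - (x0 + r0 + r1), whose radial derivative 2*k*s^3 is what the
// original writes as 4*energy/s; the check below compares that against a
// central finite difference.
#include <cassert>
#include <cmath>

static double repulsiveEnergy(double distance, double x0, double r0, double r1, double k) {
  double s = distance - (x0 + r0 + r1);
  if (s > 0) return 0.0;  // no penalty once the spheres clear the target gap
  return 0.5 * k * std::pow(s, 4);
}

int main() {
  const double x0 = 0.5, r0 = 1.0, r1 = 2.0, k = 3.0, d = 3.0;  // overlapping: 3.0 < 3.5
  double e = repulsiveEnergy(d, x0, r0, r1, k);
  double s = d - (x0 + r0 + r1);
  double analytic = 4 * e / s;                                  // = 2*k*s^3
  double h = 1e-6;
  double numeric = (repulsiveEnergy(d + h, x0, r0, r1, k) -
                    repulsiveEnergy(d - h, x0, r0, r1, k)) / (2 * h);
  assert(std::fabs(analytic - numeric) < 1e-5);
}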
Float ExamplePairScore::evaluate_index(kernel::Model *m,
                                       const kernel::ParticleIndexPair &pip,
                                       DerivativeAccumulator *da) const {
  // turn on logging for this method
  IMP_OBJECT_LOG;
  // assume they have coordinates
  core::XYZ d0(m, pip[0]);
  core::XYZ d1(m, pip[1]);
  // log something
  double diff =
      (d0.get_coordinates() - d1.get_coordinates()).get_magnitude() - x0_;
  IMP_LOG_VERBOSE("The distance off from x0 is " << diff << std::endl);
  double score = .5 * k_ * square(diff);
  if (da) {
    // derivatives are requested
    algebra::Vector3D delta = d0.get_coordinates() - d1.get_coordinates();
    algebra::Vector3D udelta = delta.get_unit_vector();
    double dv = k_ * diff;
    // add to the particle derivatives
    d0.add_to_derivatives(udelta * dv, *da);
    d1.add_to_derivatives(-udelta * dv, *da);
  }
  return score;
}
void osgToy::Polyhedron::addTristrip( const osg::Vec3& u0, const osg::Vec3& u1,
                                      const osg::Vec3& v0, const osg::Vec3& v1,
                                      unsigned int numQuads )
{
    osg::Vec3Array* vAry = dynamic_cast<osg::Vec3Array*>( getVertexArray() );
    int start = vAry->size();

    vAry->push_back( u0 );
    vAry->push_back( u1 );

    osg::Vec3 d0( v0 - u0 );
    osg::Vec3 d1( v1 - u1 );
    for( unsigned int i = 1; i < numQuads; ++i )
    {
        float s = float(i) / numQuads;
        vAry->push_back( u0 + d0 * s );
        vAry->push_back( u1 + d1 * s );
    }

    vAry->push_back( v0 );
    vAry->push_back( v1 );

    int count = vAry->size() - start;
    addPrimitiveSet( new osg::DrawArrays( GL_TRIANGLE_STRIP, start, count ) );
}
double Trajectory::calcDirCoherence( const _2Real::Vec2 &p, double time, double dirTolerance ) const {
  if( !this->canCalcDirCoherence() )
    throw _2Real::Exception( "cannot calc directional coherence -- not enough values" );
  _2Real::Vec2 d0( m_v1 - m_v0 );
  _2Real::Vec2 d1( p - m_v1 );
  double len0 = d0.norm();
  double len1 = d1.norm();
  double dt = time - m_prevTime;
  double compDir = 0.0;
  if( len0 * len1 > std::numeric_limits<double>::epsilon() )
  {
    if( len0 > dirTolerance && len1 > dirTolerance )
      compDir = ( 1.0 - ( d0.dot( d1 ) / ( len0 * len1 ) ) ) * 0.5;
    else
      compDir = 0.5; // cannot make a decision -> tie value.
  }
  return compDir / ( dt * 30.0 );
}
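// A minimal standalone sketch of the directional term above, assuming plain
// 2D doubles instead of _2Real::Vec2 and ignoring the trajectory bookkeeping:
// (1 - cos(theta)) / 2 maps the turn between consecutive displacement vectors
// to [0, 1] (0 = straight ahead, 1 = full reversal), and 0.5 serves as the tie
// value when a displacement is too short to define a direction (the original
// further distinguishes a near-zero product, which it scores 0).
#include <cassert>
#include <cmath>

struct V2 { double x, y; };

static double dirCoherence(const V2& prev, const V2& next, double dirTolerance) {
  double len0 = std::hypot(prev.x, prev.y);
  double len1 = std::hypot(next.x, next.y);
  if (len0 <= dirTolerance || len1 <= dirTolerance)
    return 0.5;  // cannot make a decision -> tie value
  double cosTheta = (prev.x * next.x + prev.y * next.y) / (len0 * len1);
  return (1.0 - cosTheta) * 0.5;
}

int main() {
  assert(dirCoherence({1, 0}, {2, 0}, 1e-3) < 1e-12);                     // straight line
  assert(std::fabs(dirCoherence({1, 0}, {-1, 0}, 1e-3) - 1.0) < 1e-12);   // reversal
  assert(dirCoherence({1, 0}, {0, 0}, 1e-3) == 0.5);                      // too short: tie
}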
double CTestApp::PreciseStringToDouble(const CTempStringEx& s0)
{
    double best_ret = NStr::StringToDouble(s0);
    CDecimal d0(s0);
    CDecimal best_err = CDecimal(best_ret, 24)-d0, first_err = best_err;
    if ( best_err.m_Sign == 0 ) {
        return best_ret;
    }
    double last_v = best_ret;
    double toward_v;
    if ( (last_v > 0) == (first_err.m_Sign > 0) ) {
        toward_v = 0;
    }
    else {
        toward_v = last_v*2;
    }
    //LOG_POST(s0<<" err: "<<best_err);
    for ( ;; ) {
        double v = GetNextToward(last_v, toward_v);
        CDecimal err = CDecimal(v, 24)-d0;
        //LOG_POST(" new err: "<<err);
        if ( (abs(err) - abs(best_err)).m_Sign < 0 ) {
            if ( err.m_Sign == 0 ) {
                return v;
            }
            best_err = err;
            best_ret = v;
        }
        if ( (err.m_Sign > 0) != (first_err.m_Sign > 0) ) {
            break;
        }
        last_v = v;
    }
    //LOG_POST(s0<<" correct bits: "<<log(fabs(best_ret/best_err))/log(2.));
    return best_ret;
}
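// A minimal standalone sketch of the ULP-stepping idea used by the two
// benchmark routines here, assuming only the C++ standard library (no CDecimal
// or GetNextToward): starting from one double, repeatedly step one
// representable value toward a reference and count the steps, which is how the
// benchmark below sizes a conversion error in units of ULPs.
#include <cassert>
#include <cmath>

static int ulpsBetween(double from, double to, int maxSteps = 1000) {
  int steps = 0;
  for (double t = from; t != to && steps < maxSteps; ++steps)
    t = std::nextafter(t, to);  // one ULP toward the reference
  return steps;
}

int main() {
  double x = 1.0;
  double y = std::nextafter(std::nextafter(x, 2.0), 2.0);  // two ULPs above 1.0
  assert(ulpsBetween(x, y) == 2);
  assert(ulpsBetween(x, x) == 0);
}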
tmp<fvVectorMatrix> surfaceShearForce::correct(volVectorField& U)
{
    // local reference to film model
    const kinematicSingleLayer& film =
        static_cast<const kinematicSingleLayer&>(owner_);

    // local references to film fields
    const volScalarField& mu = film.mu();
    const volVectorField& Uw = film.Uw();
    const volScalarField& delta = film.delta();
    const volVectorField& Up = film.UPrimary();

    // film surface linear coeff to apply to velocity
    tmp<volScalarField> tCs;

    typedef compressible::turbulenceModel turbModel;
    if (film.primaryMesh().foundObject<turbModel>("turbulenceProperties"))
    {
        // local reference to turbulence model
        const turbModel& turb =
            film.primaryMesh().lookupObject<turbModel>("turbulenceProperties");

        // calculate and store the stress on the primary region
        const volSymmTensorField primaryReff(turb.devRhoReff());

        // create stress field on film
        // - note boundary condition types (mapped)
        // - to map, the field name must be the same as the field on the
        //   primary region
        volSymmTensorField Reff
        (
            IOobject
            (
                primaryReff.name(),
                film.regionMesh().time().timeName(),
                film.regionMesh(),
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            film.regionMesh(),
            dimensionedSymmTensor
            (
                "zero",
                primaryReff.dimensions(),
                symmTensor::zero
            ),
            film.mappedFieldAndInternalPatchTypes<symmTensor>()
        );

        // map stress from primary region to film region
        Reff.correctBoundaryConditions();

        dimensionedScalar U0("SMALL", U.dimensions(), SMALL);
        tCs = Cf_*mag(-film.nHat() & Reff)/(mag(Up - U) + U0);
    }
    else
    {
        // laminar case - employ simple coeff-based model
        const volScalarField& rho = film.rho();
        tCs = Cf_*rho*mag(Up - U);
    }

    dimensionedScalar d0("SMALL", delta.dimensions(), SMALL);

    // linear coeffs to apply to velocity
    const volScalarField& Cs = tCs();
    volScalarField Cw("Cw", mu/(0.3333*(delta + d0)));
    Cw.min(1.0e+06);

    return
    (
       - fvm::Sp(Cs, U) + Cs*Up // surface contribution
       - fvm::Sp(Cw, U) + Cw*Uw // wall contribution
    );
}
void dpps::Pattern_random::generate () {
    // We have to declare them all. We cannot use a switch or an if,
    // because the distributions would go out of scope.
    // The only other possibility would be to place the switch inside the
    // loop, but it would be very inefficient to call the constructor
    // every time.
    // So we have to pay for the overhead and initialize them all in the
    // beginning. It's probably a negligible amount of time and memory as
    // compared to the loop.
    // The meaning of the parameters depends on the engine. Some engines
    // need only one parameter.
    // For all user-changeable values (p1 and p2), 1.0 would have been the
    // default if we had not specified it, so it is an acceptable value for
    // the user as well.
    const double p1 {pattern_settings. p1} ;
    const double p2 {pattern_settings. p2} ;
    std::uniform_real_distribution<double> d0 (-p1, p1) ; // min, max
    // Normal-type
    std::normal_distribution<double> d1 (0.0, p1) ;       // average = 0, sigma
    std::lognormal_distribution<double> d2 (0.0, p1) ;    // average = 0, m
    std::chi_squared_distribution<double> d3 (p1) ;       // n
    std::cauchy_distribution<double> d4 (p1, p2) ;        // a, b
    std::fisher_f_distribution<double> d5 (p1, p2) ;      // m, n
    std::student_t_distribution<double> d6 (p1) ;         // n
    // Poisson-type
    std::exponential_distribution<double> d7 (p1) ;       // lambda
    std::gamma_distribution<double> d8 (p1, p2) ;         // alpha, beta
    std::weibull_distribution<double> d9 (p1, p2) ;       // a, b
    Polyline p ;
    p. closed = true ;
    double x {0.0}, y {0.0} ;
    long_unsigned_int i {0}, attempts {0} ;
    //const double s {pattern_settings. side / 2.0} ;
    if (pattern_settings. max_attempts < pattern_settings. number)
        pattern_settings. max_attempts =
            std::numeric_limits<long_unsigned_int>::max () ;
    //std::cout << "hello" << i << " " << pattern_settings. number << " "
    //          << attempts << " " << pattern_settings. max_attempts << std::endl ;
    while ((i < pattern_settings. number) &&
           (attempts < pattern_settings. max_attempts)) {
        bool overlap = false ;
        switch (pattern_settings. type) {
            case type_uniform_real_distribution: // 0
                x = d0 (pseudorandom_generator) ;
                y = d0 (pseudorandom_generator) ;
                //std::cout << "alea 0 : " << x << " " << y << std::endl ;
                break ;
            case type_normal_distribution: // 1
                x = d1 (pseudorandom_generator) ;
                y = d1 (pseudorandom_generator) ;
                break ;
            case type_lognormal_distribution: // 2
                x = d2 (pseudorandom_generator) ;
                y = d2 (pseudorandom_generator) ;
                break ;
            case type_chi_squared_distribution: // 3
                x = d3 (pseudorandom_generator) ;
                y = d3 (pseudorandom_generator) ;
                break ;
            case type_cauchy_distribution: // 4
                x = d4 (pseudorandom_generator) ;
                y = d4 (pseudorandom_generator) ;
                break ;
            case type_fisher_f_distribution: // 5
                x = d5 (pseudorandom_generator) ;
                y = d5 (pseudorandom_generator) ;
                break ;
            case type_student_t_distribution: // 6
                x = d6 (pseudorandom_generator) ;
                y = d6 (pseudorandom_generator) ;
                break ;
            case type_exponential_distribution: // 7
                x = d7 (pseudorandom_generator) ;
                y = d7 (pseudorandom_generator) ;
                break ;
            case type_gamma_distribution: // 8
                x = d8 (pseudorandom_generator) ;
                y = d8 (pseudorandom_generator) ;
                break ;
            case type_weibull_distribution: // 9
                x = d9 (pseudorandom_generator) ;
                y = d9 (pseudorandom_generator) ;
                break ;
            default:
                break ;
        }
        if (pattern_settings. avoid_overlap) {
            for (long_unsigned_int j = 0 ; j < polylines. size () ; j++) {
                if ((polylines[j]. vertices[0] - Vertex(x, y)).norm2_square() <
                    pattern_settings. diametre*pattern_settings. diametre) {
                    overlap = true ;
                    break ;
                }
            }
        }
        // Either we are inside lx (resp. ly), or the user set it to a
        // negative value to disable the check.
        bool within_limits =
            (((fabs (x - pattern_settings. x0) < pattern_settings. lx / 2.0) ||
              (pattern_settings. lx <= 0)) &&
             ((fabs (y - pattern_settings. y0) < pattern_settings. ly / 2.0) ||
              (pattern_settings. ly <= 0))) ;
        if (within_limits && !overlap) {
            p. push_back (Vertex (x, y)) ;
            p. dose = pattern_settings. diametre ;
            polylines. push_back (p) ;
            p. vertices. clear () ; // does not change that p. closed == true
            i++ ;
        }
        attempts++ ;
    }
}
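// A minimal standalone sketch of the rejection loop above, assuming only the
// standard library (no Polyline/Vertex/pattern_settings types): points drawn
// from a <random> distribution are kept only when they fall inside the allowed
// box and clear a minimum spacing, with an attempt cap so the loop terminates
// even when the constraints are hard to satisfy.
#include <cmath>
#include <cstddef>
#include <iostream>
#include <random>
#include <utility>
#include <vector>

int main() {
    std::mt19937 gen(12345);
    std::uniform_real_distribution<double> dist(-10.0, 10.0);

    const std::size_t wanted = 100;
    const std::size_t maxAttempts = 100000;
    const double halfLx = 8.0, halfLy = 8.0;  // half-sizes of the allowed box
    const double minSpacing = 1.0;

    std::vector<std::pair<double, double>> points;
    std::size_t attempts = 0;
    while (points.size() < wanted && attempts < maxAttempts) {
        ++attempts;
        double x = dist(gen), y = dist(gen);
        if (std::fabs(x) >= halfLx || std::fabs(y) >= halfLy)
            continue;                          // outside the box: reject
        bool overlap = false;
        for (const auto& q : points) {
            double dx = x - q.first, dy = y - q.second;
            if (dx * dx + dy * dy < minSpacing * minSpacing) {
                overlap = true;                // too close to a kept point: reject
                break;
            }
        }
        if (!overlap)
            points.emplace_back(x, y);
    }
    std::cout << points.size() << " points kept in " << attempts << " attempts\n";
}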
void CoreCloseBipartitePairContainer::do_before_evaluate() {
  IMP_OBJECT_LOG;
  if (covers_[0] == base::get_invalid_index<ParticleIndexTag>() ||
      algebra::get_distance(get_model()->get_sphere(covers_[0]),
                            get_model()->get_sphere(covers_[1])) < distance_ ||
      reset_) {
    if (!reset_ && were_close_ &&
        !internal::get_if_moved(get_model(), slack_, xyzrs_[0], rbs_[0],
                                constituents_, rbs_backup_[0], xyzrs_backup_[0]) &&
        !internal::get_if_moved(get_model(), slack_, xyzrs_[1], rbs_[1],
                                constituents_, rbs_backup_[1], xyzrs_backup_[1])) {
      // all ok
    } else {
      // rebuild
      IMP_LOG(TERSE, "Recomputing bipartite close pairs list." << std::endl);
      internal::reset_moved(get_model(), xyzrs_[0], rbs_[0], constituents_,
                            rbs_backup_[0], xyzrs_backup_[0]);
      internal::reset_moved(get_model(), xyzrs_[1], rbs_[1], constituents_,
                            rbs_backup_[1], xyzrs_backup_[1]);
      ParticleIndexPairs pips;
      internal::fill_list(get_model(), access_pair_filters(), key_,
                          2*slack_+distance_, xyzrs_, rbs_, constituents_, pips);
      reset_ = false;
      update_list(pips);
    }
    were_close_ = true;
  } else {
    ParticleIndexPairs none;
    update_list(none);
  }
  IMP_IF_CHECK(base::USAGE_AND_INTERNAL) {
    for (unsigned int i = 0; i < sc_[0]->get_number_of_particles(); ++i) {
      XYZR d0(sc_[0]->get_particle(i));
      for (unsigned int j = 0; j < sc_[1]->get_number_of_particles(); ++j) {
        XYZR d1(sc_[1]->get_particle(j));
        double dist = get_distance(d0, d1);
        if (dist < .9*distance_) {
          ParticleIndexPair pip(d0.get_particle_index(), d1.get_particle_index());
          bool filtered = false;
          for (unsigned int i = 0; i < get_number_of_pair_filters(); ++i) {
            if (get_pair_filter(i)->get_value_index(get_model(), pip)) {
              filtered = true;
              break;
            }
          }
          IMP_INTERNAL_CHECK(filtered ||
                             std::find(get_access().begin(), get_access().end(),
                                       pip) != get_access().end(),
                             "Pair " << pip
                             << " not found in list with coordinates "
                             << d0 << " and " << d1
                             << " list is " << get_access());
        }
      }
    }
  }
}
void CTestApp::RunPrecisionBenchmark(void)
{
    const CArgs& args = GetArgs();
    const int COUNT = args["count"].AsInteger();
    double threshold = args["threshold"].AsDouble();
    const int kCallPosix = 0;
    const int kCallPosixOld = 1;
    const int kCallstrtod = 2;
    int call_type = kCallPosix;
    if ( args["precision"].AsString() == "Posix" ) {
        call_type = kCallPosix;
    }
    if ( args["precision"].AsString() == "PosixOld" ) {
        call_type = kCallPosixOld;
    }
    if ( args["precision"].AsString() == "strtod" ) {
        call_type = kCallstrtod;
    }

    char str[200];
    char* errptr = 0;
    const int MAX_DIGITS = 24;
    typedef map<int, int> TErrCount;
    int err_close = 0;
    TErrCount err_count;
    for ( int test = 0; test < COUNT; ++test ) {
        {
            // generate a random decimal string like [-].dddddde±EEE
            int digits = 1+rand()%MAX_DIGITS;
            int exp = rand()%600-300;
            char* ptr = str;
            if ( rand()%2 ) *ptr++ = '-';
            *ptr++ = '.';
            for ( int i = 0; i < digits; ++i ) {
                *ptr++ = '0'+rand()%10;
            }
            sprintf(ptr, "e%d", exp);
        }
        double v_ref = PreciseStringToDouble(str);

        errno = 0;
        double v = 0;
        switch ( call_type ) {
        case kCallPosix:
            v = NStr::StringToDoublePosix(str, &errptr);
            break;
        case kCallPosixOld:
            v = StringToDoublePosixOld(str, &errptr);
            break;
        case kCallstrtod:
            v = strtod(str, &errptr);
            break;
        }
        if ( errno || (errptr && (*errptr || errptr == str)) ) {
            // error
            ERR_POST("Failed to convert: "<< str);
            err_count[-1] += 1;
            continue;
        }
        if ( v == v_ref ) {
            continue;
        }

        CDecimal d0(str);
        CDecimal d_ref(v_ref, 24);
        CDecimal d_v(v, 24);
        int exp_shift = 0;
        if ( d0.m_Exponent > 200 ) exp_shift = -100;
        if ( d0.m_Exponent < -200 ) exp_shift = 100;
        double err_ref = fabs((d_ref-d0).ToDouble(exp_shift));
        double err_v = fabs((d_v-d0).ToDouble(exp_shift));
        if ( err_v <= err_ref*(1+threshold) ) {
            if ( m_VerboseLevel >= 2 ) {
                LOG_POST("d_str: "<<d0);
                LOG_POST("d_ref: "<<d_ref<<" err="<<err_ref);
                LOG_POST("d_cur: "<<d_v<<" err="<<err_v);
            }
            ++err_close;
            continue;
        }
        if ( m_VerboseLevel >= 1 ) {
            LOG_POST("d_str: "<<d0);
            LOG_POST("d_ref: "<<d_ref<<" err="<<err_ref);
            LOG_POST("d_cur: "<<d_v<<" err="<<err_v);
        }

        // count how many ULP steps separate the converted value from the reference
        int err = 0;
        for ( double t = v; t != v_ref; ) {
            //LOG_POST(setprecision(20)<<t<<" - "<<v_ref<<" = "<<(t-v_ref));
            ++err;
            t = GetNextToward(t, v_ref);
        }
        err_count[err] += 1;
    }
    NcbiCout << "Close errors: "<<err_close<<"/"<<COUNT
             << " = " << 1e2*err_close/COUNT<<"%" << NcbiEndl;
    ITERATE ( TErrCount, it, err_count ) {
        NcbiCout << "Errors["<<it->first<<"] = "<<it->second<<"/"<<COUNT
                 << " = " << 1e2*it->second/COUNT<<"%" << NcbiEndl;
    }
}
template <class P>
static void Apply( const GenericImage<P>& image, MultiscaleMedianTransform& T )
{
   InitializeStructures();

   bool statusInitialized = false;
   StatusMonitor& status = (StatusMonitor&)image.Status();
   try
   {
      if ( status.IsInitializationEnabled() )
      {
         status.Initialize( String( T.m_medianWaveletTransform ? "Median-wavelet" : "Multiscale median" ) + " transform",
                            image.NumberOfSelectedSamples()*T.m_numberOfLayers*(T.m_medianWaveletTransform ? 2 : 1) );
         status.DisableInitialization();
         statusInitialized = true;
      }

      GenericImage<P> cj0( image );
      cj0.Status().Clear();

      for ( int j = 1, j0 = 0; ; ++j, ++j0 )
      {
         GenericImage<P> cj( cj0 );
         cj.Status() = status;
         MedianFilterLayer( cj, T.FilterSize( j0 ), T.m_parallel, T.m_maxProcessors );

         if ( T.m_medianWaveletTransform )
         {
            GenericImage<P> w0( cj0 );
            GenericImage<P> d0( cj0 );
            d0 -= cj;
            for ( int c = 0; c < d0.NumberOfChannels(); ++c )
            {
               w0.SelectChannel( c );
               d0.SelectChannel( c );
               cj.SelectChannel( c );
               double t = T.m_medianWaveletThreshold*d0.MAD( d0.Median() )/0.6745;
               for ( typename GenericImage<P>::sample_iterator iw( w0 ), id( d0 ), ic( cj ); iw; ++iw, ++id, ++ic )
                  if ( Abs( *id ) > t )
                     *iw = *ic;
            }
            w0.ResetSelections();
            cj.ResetSelections();
            w0.Status() = cj.Status();
            LinearFilterLayer( w0, T.FilterSize( j0 ), T.m_parallel, T.m_maxProcessors );
            cj = w0;
         }

         status = cj.Status();
         cj.Status().Clear();

         if ( T.m_layerEnabled[j0] )
         {
            cj0 -= cj;
            T.m_transform[j0] = Image( cj0 );
         }

         if ( j == T.m_numberOfLayers )
         {
            if ( T.m_layerEnabled[j] )
               T.m_transform[j] = Image( cj );
            break;
         }

         cj0 = cj;
      }

      if ( statusInitialized )
         status.EnableInitialization();
   }
   catch ( ... )
   {
      T.DestroyLayers();
      if ( statusInitialized )
         status.EnableInitialization();
      throw;
   }
}