bool StringData::checkSane() const {
  static_assert(size_t(MaxSize) <= size_t(INT_MAX), "Beware int wraparound");
  static_assert(offsetof(StringData, m_count) == FAST_REFCOUNT_OFFSET,
                "m_count at wrong offset");
  static_assert(MaxSmallSize == sizeof(StringData) -
                                offsetof(StringData, m_small) - 1,
                "layout bustage");
  assert(uint32_t(size()) <= MaxSize);
  assert(uint32_t(capacity()) < MaxSize);
  assert(size() < capacity());
  if (isSmall()) {
    assert(m_data == m_small && m_len <= MaxSmallSize);
  } else {
    assert(m_data && m_data != m_small);
  }
  return true;
}
bool StringData::checkSane() const {
  static_assert(sizeof(Format) == 8, "enum Format is wrong size");
  static_assert(offsetof(StringData, _count) == FAST_REFCOUNT_OFFSET,
                "_count at wrong offset");
  static_assert(MaxSmallSize == sizeof(StringData) -
                                offsetof(StringData, m_small) - 1,
                "layout bustage");
  ASSERT(uint32_t(size()) <= MaxSize);
  ASSERT(uint32_t(capacity()) <= MaxSize);
  ASSERT(size() <= capacity());
  ASSERT(rawdata()[size()] == 0); // all strings must be null-terminated
  if (isSmall()) {
    ASSERT(m_data == m_small && m_len <= MaxSmallSize);
  } else {
    ASSERT(m_data && m_data != m_small);
  }
  return true;
}
inline double BasisSet::pointS(BasisSet *set, unsigned int moIndex,
                               const double &dr2, unsigned int indexMO)
{
  // If the MO coefficient is very small skip it
  if (isSmall(set->m_moMatrix.coeffRef(set->m_moIndices[moIndex], indexMO))) {
    return 0.0;
  }

  // S type orbitals - the simplest of the calculations with one component
  double tmp = 0.0;
  unsigned int cIndex = set->m_cIndices[moIndex];
  for (unsigned int i = set->m_gtoIndices[moIndex];
       i < set->m_gtoIndices[moIndex+1]; ++i) {
    tmp += set->m_gtoCN[cIndex++] * exp(-set->m_gtoA[i] * dr2);
  }
  // There is one MO coefficient per S shell basis
  return tmp * set->m_moMatrix.coeffRef(set->m_moIndices[moIndex], indexMO);
}
void Deallocator::processObjectLog()
{
    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
    Heap* heap = PerProcess<Heap>::getFastCase();

    for (auto object : m_objectLog) {
        if (isSmall(object)) {
            SmallLine* line = SmallLine::get(object);
            heap->derefSmallLine(lock, line);
        } else {
            BASSERT(isMedium(object));
            MediumLine* line = MediumLine::get(object);
            heap->derefMediumLine(lock, line);
        }
    }

    m_objectLog.clear();
}
void Deallocator::processObjectLog()
{
    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());

    for (auto object : m_objectLog) {
        if (isSmall(object)) {
            SmallLine* line = SmallLine::get(object);
            if (!line->deref(lock))
                continue;
            deallocateSmallLine(lock, line);
        } else {
            BASSERT(isSmallOrMedium(object));
            MediumLine* line = MediumLine::get(object);
            if (!line->deref(lock))
                continue;
            deallocateMediumLine(lock, line);
        }
    }

    m_objectLog.clear();
}
bool analyseRoughRodRodCollision( const ElasticStrand* sP, const ElasticStrand* sQ,
        const int iP, const int iQ, Vec3 &depl, Scalar &s, Scalar &t, Scalar &d )
{
    const CollisionParameters &cpP = sP->collisionParameters();
    const CollisionParameters &cpQ = sQ->collisionParameters();

    const Vec3 &P0 = sP->getFutureVertex( iP );
    const Vec3 &P1 = sP->getFutureVertex( iP + 1 );
    const Vec3 &Q0 = sQ->getFutureVertex( iQ );
    const Vec3 &Q1 = sQ->getFutureVertex( iQ + 1 );

    const Scalar BCRad = cpP.collisionRadius( iP ) + cpQ.collisionRadius( iQ );
    Scalar sqDist = SquareDistSegmentToSegment<Vec3, Scalar, Vec3>( P0, P1, Q0, Q1, s, t );

    // Required for determinism -- are x87 registers sometimes used?
    s = ( float ) s;
    t = ( float ) t;

    if ( sqDist > BCRad * BCRad )
        return false;

    // see FIXME in DistSegmentToSegment
    Vec3 PC = ( ( 1. - s ) * P0 + s * P1 );
    Vec3 QC = ( ( 1. - t ) * Q0 + t * Q1 );

    depl = ( PC - QC );

    const Scalar n2depl = depl.squaredNorm();
    if ( isSmall( n2depl ) )
        return false;

    depl /= std::sqrt( n2depl );

    if ( depl.dot( ( P1 - P0 ).normalized() ) > COS_PARALLEL_ENOUGH
            || depl.dot( ( Q1 - Q0 ).normalized() ) < -COS_PARALLEL_ENOUGH )
        return false;

    d = std::sqrt( sqDist );

    return true;
}
HOT_FUNC
void StringData::releaseDataSlowPath() {
  assert(!isSmall());
  assert(checkSane());

  auto const loadedMode = mode();

  if (LIKELY(loadedMode == Mode::Smart)) {
    smart_free(m_data);
    return;
  }

  if (loadedMode == Mode::Shared) {
    assert(checkSane());
    m_big.shared->decRef();
    delist();
    return;
  }

  assert(loadedMode == Mode::Malloc);
  assert(checkSane());
  free(m_data);
}
SkRect getFilterRect() const {
    return isSmall() ? SkRect::MakeWH(FILTER_WIDTH_SMALL, FILTER_HEIGHT_SMALL)
                     : SkRect::MakeWH(FILTER_WIDTH_LARGE, FILTER_HEIGHT_LARGE);
}
const char* onGetName() override { return isSmall() ? "colorfilter_dim_bright_small" : "colorfilter_dim_bright_large"; }
const char* onGetName() override { return isSmall() ? "colorfilter_gray_small" : "colorfilter_gray_large"; }
void ImplicitStepper::solveNonLinear()
{
    StrandDynamics& dynamics = m_strand.dynamics() ;
    Scalar minErr = 1.e99, prevErr = 1.e99;
    m_newtonIter = 0;

    JacobianMatrixType bestLHS;
    VecXx bestRhs, prevRhs;

    Scalar alpha = 0.5;          // Current step length
    const Scalar minAlpha = 0.1; // Minimum step length

    bool foundOneSPD = false;
    m_strand.requireExactJacobian( false );

    // Newton loop -- try to zero-out m_rhs
    for ( m_newtonIter = 0; m_newtonIter < m_params.m_maxNewtonIterations; ++m_newtonIter )
    {
        dynamics.setDisplacements( m_dt * m_futureVelocities );

        prevRhs = m_rhs;
        computeRHS();

        if ( m_newtonIter )
        {
            VecXx residual = m_rhs;

            dynamics.getScriptingController()->fixRHS( residual );
            const Scalar err = residual.squaredNorm() / residual.size();

            if ( err < minErr || ( !foundOneSPD && !m_linearSolver.notSPD() ) )
            {
                foundOneSPD = !m_linearSolver.notSPD();
                minErr = err;

                if ( isSmall( err ) || ( m_newtonIter > 3 && minErr < 1.e-6 ) )
                {
                    m_rhs = prevRhs;
                    break;
                }

                bestLHS = Lhs();
                bestRhs = prevRhs;
            }

            // Decrease or increase the step length based on current convergence
            if ( err < prevErr )
            {
                alpha = std::min( 1.0, 1.5 * alpha );
            }
            else
            {
                alpha = std::max( minAlpha, 0.5 * alpha );
            }

            prevErr = err;
        }

        computeLHS();

        m_rhs = m_rhs * alpha;
        Lhs().multiply( m_rhs, 1., m_futureVelocities );
        dynamics.getScriptingController()->fixLHSAndRHS( Lhs(), m_rhs, m_dt );

        m_linearSolver.store( Lhs() );
        m_notSPD = m_linearSolver.notSPD();
        m_linearSolver.solve( m_futureVelocities, m_rhs );
    }

    // If the non-linear solve failed, return to the least problematic step
    if ( m_newtonIter == m_params.m_maxNewtonIterations )
    {
        m_rhs = bestRhs;
        Lhs() = bestLHS;

        m_linearSolver.store( Lhs() );
        m_linearSolver.solve( m_futureVelocities, rhs() );
        m_notSPD = m_linearSolver.notSPD();
    }

    m_usedNonlinearSolver = true;
}
void StringData::append(const char *s, int len) {
  ASSERT(!isStatic()); // never mess around with static strings!

  if (len == 0) return;

  if (UNLIKELY(uint32_t(len) > MaxSize)) {
    throw InvalidArgumentException("len>=2^30", len);
  }
  if (UNLIKELY(len + m_len > MaxSize)) {
    throw FatalErrorException(0, "String length exceeded 2^30 - 1: %u",
                              len + m_len);
  }

  int newlen;
  // TODO: t1122987: in any of the cases below where we need a bigger buffer,
  // we can probably assume we're in a concat-loop and pick a good buffer
  // size to avoid O(N^2) copying cost.
  if (isShared() || isLiteral()) {
    // buffer is immutable, don't modify it.
    // We are mutating, so we don't need to repropagate our own taint
    StringSlice r = slice();
    char* newdata = string_concat(r.ptr, r.len, s, len, newlen);
    if (isShared()) m_big.shared->decRef();
    m_len = newlen;
    m_data = newdata;
    m_big.cap = newlen | IsMalloc;
    m_hash = 0;
  } else if (rawdata() == s) {
    // appending ourself to ourself, be conservative.
    // We are mutating, so we don't need to repropagate our own taint
    StringSlice r = slice();
    char *newdata = string_concat(r.ptr, r.len, s, len, newlen);
    releaseData();
    m_len = newlen;
    m_data = newdata;
    m_big.cap = newlen | IsMalloc;
    m_hash = 0;
  } else if (isSmall()) {
    // we're currently small but might not be after append.
    // We are mutating, so we don't need to repropagate our own taint
    int oldlen = m_len;
    newlen = oldlen + len;
    if (unsigned(newlen) <= MaxSmallSize) {
      // win.
      memcpy(&m_small[oldlen], s, len);
      m_small[newlen] = 0;
      m_small[MaxSmallSize] = 0;
      m_len = newlen;
      m_data = m_small;
      m_hash = 0;
    } else {
      // small->big string transition.
      char *newdata = string_concat(m_small, oldlen, s, len, newlen);
      m_len = newlen;
      m_data = newdata;
      m_big.cap = newlen | IsMalloc;
      m_hash = 0;
    }
  } else {
    // generic "big string concat" path. realloc buffer.
    int oldlen = m_len;
    char* oldp = m_data;
    ASSERT((oldp > s && oldp - s > len) ||
           (oldp < s && s - oldp > oldlen)); // no overlapping
    newlen = oldlen + len;
    char* newdata = (char*) realloc(oldp, newlen + 1);
    memcpy(newdata + oldlen, s, len);
    newdata[newlen] = 0;
    m_len = newlen;
    m_data = newdata;
    m_big.cap = newlen | IsMalloc;
    m_hash = 0;
  }
  ASSERT(uint32_t(newlen) <= MaxSize);

  TAINT_OBSERVER_REGISTER_MUTATED(m_taint_data, rawdata());

  ASSERT(checkSane());
}
const char* onGetName() override { return isSmall() ? "displacement_full_small" : "displacement_full_large"; }