// @mfunc Set the value of this <c OMWeakObjectReference>. // The value is a pointer to the referenced <c OMStorable>. // @parm TBS // @parm A pointer to the new <c OMStorable>. // @rdesc A pointer to previous <c OMStorable>, if any. OMStorable* OMWeakObjectReference::setValue( const void* identification, const OMStorable* value) { TRACE("OMWeakObjectReference::setValue"); PRECONDITION("Valid container property", _property != 0); PRECONDITION("Valid identification", (_identification != 0) && (_identificationSize > 0)); PRECONDITION("Valid new identification", identification != 0); ASSERT("Valid identification", IMPLIES(value != 0, !isNullIdentification(identification, _identificationSize))); ASSERT("Valid identification", IMPLIES(value == 0, !isNullIdentification(identification, _identificationSize))); OMStorable* oldObject = _pointer; _pointer = const_cast<OMStorable*>(value); memcpy(_identification, identification, _identificationSize); #if defined(OM_VALIDATE_WEAK_REFERENCES) #if 0 ASSERT("Consistent source and target", IMPLIES(_pointer != 0, set()->contains(_identification))); #endif #endif POSTCONDITION("Element properly set", _pointer == value); return oldObject; }
// Initialize m_type (and possibly m_namedEntity) from m_typeName.
// On first call, populates the static name -> Type lookup table.
void TypeConstraint::init() {
  // Lazily build the map from primitive constraint names to their
  // DataType/MetaType pairs; UNLIKELY because it is empty only once.
  if (UNLIKELY(s_typeNamesToTypes.empty())) {
    const struct Pair {
      const StringData* name;
      Type type;
    } pairs[] = {
      { makeStaticString("HH\\bool"),     { KindOfBoolean, MetaType::Precise }},
      { makeStaticString("HH\\int"),      { KindOfInt64, MetaType::Precise }},
      { makeStaticString("HH\\float"),    { KindOfDouble, MetaType::Precise }},
      { makeStaticString("HH\\string"),   { KindOfString, MetaType::Precise }},
      { makeStaticString("array"),        { KindOfArray, MetaType::Precise }},
      { makeStaticString("HH\\resource"), { KindOfResource, MetaType::Precise }},
      { makeStaticString("HH\\num"),      { KindOfDouble, MetaType::Number }},
      { makeStaticString("self"),         { KindOfObject, MetaType::Self }},
      { makeStaticString("parent"),       { KindOfObject, MetaType::Parent }},
      { makeStaticString("callable"),     { KindOfObject, MetaType::Callable }},
    };
    for (unsigned i = 0; i < sizeof(pairs) / sizeof(Pair); ++i) {
      s_typeNamesToTypes[pairs[i].name] = pairs[i].type;
    }
  }

  if (isTypeVar()) {
    // We kept the type variable type constraint to correctly check child
    // classes implementing abstract methods or interfaces.
    m_type.dt = KindOfInvalid;
    m_type.metatype = MetaType::Precise;
    return;
  }

  if (m_typeName == nullptr) {
    // No constraint name at all: record an unconstrained type.
    m_type.dt = KindOfInvalid;
    m_type.metatype = MetaType::Precise;
    return;
  }

  Type dtype;
  TRACE(5, "TypeConstraint: this %p type %s, nullable %d\n",
        this, m_typeName->data(), isNullable());
  auto const mptr = folly::get_ptr(s_typeNamesToTypes, m_typeName);
  if (mptr) dtype = *mptr;
  // Any name not in the table — or a primitive name used where only the
  // listed kinds are allowed outside HH mode — is assumed to name a
  // class/interface and is resolved through the named-entity table.
  if (!mptr ||
      !(isHHType() ||
        dtype.dt == KindOfArray ||
        dtype.metatype == MetaType::Parent ||
        dtype.metatype == MetaType::Self ||
        dtype.metatype == MetaType::Callable)) {
    TRACE(5, "TypeConstraint: this %p no such type %s, treating as object\n",
          this, m_typeName->data());
    m_type = { KindOfObject, MetaType::Precise };
    m_namedEntity = Unit::GetNamedEntity(m_typeName);
    TRACE(5, "TypeConstraint: NamedEntity: %p\n", m_namedEntity);
    return;
  }
  m_type = dtype;
  assert(m_type.dt != KindOfStaticString);
  assert(IMPLIES(isParent(), m_type.dt == KindOfObject));
  assert(IMPLIES(isSelf(), m_type.dt == KindOfObject));
  assert(IMPLIES(isCallable(), m_type.dt == KindOfObject));
}
// @mfunc Equality. // This operator provides value semantics for <c OMSet>. // This operator does not provide equality of object references. // @parm The <c OMStrongReferenceSetElement> to be compared. // @rdesc True if the values are the same, false otherwise. bool OMStrongReferenceSetElement::operator== ( const OMStrongReferenceSetElement& rhs) const { TRACE("OMStrongReferenceSetElement::operator=="); bool result; if ((_identification != 0) && (rhs._identification != 0)) { if (memcmp(_identification, rhs._identification, _identificationSize) == 0) { result = true; } else { result = false; } } else if ((_identification == 0) && (rhs._identification == 0)) { result = true; } else { result = false; } ASSERT("Consistent", IMPLIES(result, _referenceCount == rhs._referenceCount)); #if defined (OM_DEBUG) bool check = OMStrongReferenceVectorElement::operator==(rhs); #endif ASSERT("Consistent", IMPLIES(result, check)); return result; }
// C reference implementation of the high-bitdepth 64-level alpha blend
// with a per-row (vertical) mask: each output row is a weighted average
// of the corresponding src0 and src1 rows using weight mask[row].
void aom_highbd_blend_a64_vmask_c(uint8_t *dst_8, uint32_t dst_stride,
                                  const uint8_t *src0_8, uint32_t src0_stride,
                                  const uint8_t *src1_8, uint32_t src1_stride,
                                  const uint8_t *mask, int h, int w, int bd) {
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);
  const uint16_t *src0 = CONVERT_TO_SHORTPTR(src0_8);
  const uint16_t *src1 = CONVERT_TO_SHORTPTR(src1_8);
  int row, col;

  (void)bd;  // bit depth only matters for the assertions below

  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));
  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));
  assert(bd == 8 || bd == 10 || bd == 12);

  for (row = 0; row < h; ++row) {
    const int m = mask[row];
    uint16_t *const d = dst + row * dst_stride;
    const uint16_t *const p0 = src0 + row * src0_stride;
    const uint16_t *const p1 = src1 + row * src1_stride;
    for (col = 0; col < w; ++col) {
      d[col] = AOM_BLEND_A64(m, p0[col], p1[col]);
    }
  }
}
// Prepare env to translate the bytecode instruction at newSk.  Called once
// per HHBC instruction while emitting a region.
void prepareForNextHHBC(IRGS& env,
                        const NormalizedInstruction* ni,
                        SrcKey newSk,
                        bool lastBcInst) {
  FTRACE(1, "------------------- prepareForNextHHBC ------------------\n");
  env.currentNormalizedInstruction = ni;

  // A region may neither end nor start while we are still inside an
  // inlined callee.
  always_assert_flog(
    IMPLIES(isInlining(env), !env.lastBcInst),
    "Tried to end trace while inlining."
  );

  always_assert_flog(
    IMPLIES(isInlining(env), !env.firstBcInst),
    "Inlining while still at the first region instruction."
  );

  always_assert(env.bcStateStack.size() >= env.inlineLevel + 1);
  // Discard SrcKey state left over from deeper, already-finished inlined
  // frames so the stack depth matches the current inline level.
  auto pops = env.bcStateStack.size() - 1 - env.inlineLevel;
  while (pops--) env.bcStateStack.pop_back();

  always_assert_flog(env.bcStateStack.back().func() == newSk.func(),
                     "Tried to update current SrcKey with a different func");

  env.bcStateStack.back().setOffset(newSk.offset());
  updateMarker(env);
  env.lastBcInst = lastBcInst;
  env.catchCreator = nullptr;
  env.irb->prepareForNextHHBC();
}
// SSE4.1 dispatcher for the 64-level alpha blend with a vertical mask.
// Selects a width-specialized kernel; widths 1 and 2 fall back to the C
// implementation.
void aom_blend_a64_vmask_sse4_1(uint8_t *dst, uint32_t dst_stride,
                                const uint8_t *src0, uint32_t src0_stride,
                                const uint8_t *src1, uint32_t src1_stride,
                                const uint8_t *mask, int w, int h) {
  typedef void (*blend_fn)(uint8_t * dst, uint32_t dst_stride,
                           const uint8_t *src0, uint32_t src0_stride,
                           const uint8_t *src1, uint32_t src1_stride,
                           const uint8_t *mask, int w, int h);

  // Dimension: width_index
  static const blend_fn blend[9] = {
    blend_a64_vmask_w16n_sse4_1,  // w % 16 == 0
    aom_blend_a64_vmask_c,        // w == 1
    aom_blend_a64_vmask_c,        // w == 2
    NULL,                         // INVALID
    blend_a64_vmask_w4_sse4_1,    // w == 4
    NULL,                         // INVALID
    NULL,                         // INVALID
    NULL,                         // INVALID
    blend_a64_vmask_w8_sse4_1,    // w == 8
  };

  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  // w is asserted to be a power of two, so (w & 0xf) is w itself for
  // w <= 8 and 0 for any multiple of 16; the NULL slots are unreachable.
  blend[w & 0xf](dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
                 w, h);
}
/*
 * VOP_WRITE for tmpfs: copies the uio data into the node's anonymous
 * backing object, growing the file first when the write extends it.
 * On failure after a successful extension, the original size is restored.
 */
static int
tmpfs_write(struct vop_write_args *v)
{
	struct vnode *vp;
	struct uio *uio;
	struct tmpfs_node *node;
	off_t oldsize;
	int error, ioflag;
	boolean_t extended;

	vp = v->a_vp;
	uio = v->a_uio;
	ioflag = v->a_ioflag;
	error = 0;
	node = VP_TO_TMPFS_NODE(vp);
	oldsize = node->tn_size;

	/* Only regular files at non-negative offsets are writable. */
	if (uio->uio_offset < 0 || vp->v_type != VREG)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;
	/* Enforce the filesystem's and the process's file-size limits. */
	if (uio->uio_offset + uio->uio_resid >
	    VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
		return (EFBIG);
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);
	/* Grow the backing object before copying if the write extends EOF. */
	extended = uio->uio_offset + uio->uio_resid > node->tn_size;
	if (extended) {
		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
		    FALSE);
		if (error != 0)
			goto out;
	}

	error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
	    (extended ? TMPFS_NODE_CHANGED : 0);

	/* Writing by a non-privileged user clears the setuid/setgid bits. */
	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}

	/* Undo the extension if the copy itself failed. */
	if (error != 0)
		(void)tmpfs_reg_resize(vp, oldsize, TRUE);

out:
	MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
	MPASS(IMPLIES(error != 0, oldsize == node->tn_size));
	return (error);
}
// Attach this ship to a mission and side.  A ship may not hop directly
// between two missions: it must first return to the lobby (null mission).
void CFSShip::SetSide(CFSMission * pfsMission, IsideIGC * pside)
{
    // they can't hop between mission without first going to lobby
    assert (IMPLIES(pfsMission, !m_pfsMission) || pfsMission == m_pfsMission);
    // a side only makes sense in the context of a mission
    assert (IMPLIES(pside, pfsMission));

    m_pfsMission = pfsMission;

    // With no mission, the ship belongs to the global lobby core.
    if (m_pfsMission)
    {
        GetIGCShip()->SetMission(m_pfsMission->GetIGCMission());
    }
    else
    {
        GetIGCShip()->SetMission(g.trekCore);
    }

    GetIGCShip()->SetSide(pside);

    if (pside)
    {
        if (pside->GetObjectID() >= 0)
        {
            GetIGCShip()->SetBaseHullType(pside->GetCivilization()->GetLifepod());
        }
    }
}
bool OMDataStreamProperty::hasStreamAccess(void) const { TRACE("OMDataStreamProperty::hasStreamAccess"); bool result; if (_streamAccess != 0) { result = true; } else { result = false; } POSTCONDITION("Consistent result", IMPLIES(_streamAccess == 0, !result)); POSTCONDITION("Consistent result", IMPLIES(_streamAccess != 0, result)); return result; }
/* Wrapper around Common->free_memory that keeps CHOLMOD's allocation
 * statistics (malloc_count, memory_inuse) in sync.  Always returns NULL
 * so the caller can write p = CHOLMOD(free)(n, size, p, Common). */
void *CHOLMOD(free)	/* always returns NULL */
(
    /* ---- input ---- */
    size_t n,		/* number of items */
    size_t size,	/* size of each item */
    /* ---- in/out --- */
    void *p,		/* block of memory to free */
    /* --------------- */
    cholmod_common *Common
)
{
    RETURN_IF_NULL_COMMON (NULL) ;
    if (p != NULL)
    {
	/* only free the object if the pointer is not NULL */
	/* call free, or its equivalent */
	(Common->free_memory) (p) ;
	Common->malloc_count-- ;
	Common->memory_inuse -= (n * size) ;
	PRINTM (("cholmod_free %p %g cnt: %g inuse %g\n",
		p, (double) n*size, (double) Common->malloc_count,
		(double) Common->memory_inuse)) ;
	/* This assertion will fail if the user calls cholmod_malloc and
	 * cholmod_free with mismatched memory sizes.  It shouldn't fail
	 * otherwise. */
	ASSERT (IMPLIES (Common->malloc_count == 0, Common->memory_inuse == 0));
    }
    /* return NULL, and the caller should assign this to p.  This avoids
     * freeing the same pointer twice. */
    return (NULL) ;
}
// @mfunc Constructor. Create an <c OMFile> object representing // an existing external file on the given <c OMRawStorage>. OMFile::OMFile(OMRawStorage* rawStorage, void* clientOnRestoreContext, OMStoredObjectEncoding encoding, const OMAccessMode mode, const OMClassFactory* factory, OMDictionary* dictionary, const OMLoadMode loadMode) : _root(0), _rootStore(0), _dictionary(dictionary), _classFactory(factory), _referencedProperties(0), _mode(mode), _loadMode(loadMode), _fileName(0), _encoding(encoding), _clientOnSaveContext(0), _clientOnRestoreContext(clientOnRestoreContext), _rawStorage(rawStorage), _isOpen(false), _isClosed(false), _isNew(false), _isValid(true), _byteOrder(unspecified) { TRACE("OMFile::OMFile"); PRECONDITION("Valid raw storage", _rawStorage != 0); PRECONDITION("Consistent access modes", IMPLIES(((mode == modifyMode) || (mode == writeOnlyMode)), rawStorage->isWritable())); PRECONDITION("Valid dictionary", _dictionary != 0); POSTCONDITION("File not yet open", !_isOpen); }
// Translate a single normalized bytecode instruction to HHIR, either by
// emitting IR for it or by falling back to the interpreter.
void IRTranslator::translateInstr(const NormalizedInstruction& ni) {
  auto& ht = m_hhbcTrans;
  ht.setBcOff(ni.source.offset(),
              ni.breaksTracelet && !m_hhbcTrans.isInlining());
  FTRACE(1, "\n{:-^60}\n",
         folly::format("Translating {}: {} with stack:\n{}",
                       ni.offset(), ni.toString(), ht.showStack()));
  // When profiling, we disable type predictions to avoid side exits
  assert(IMPLIES(JIT::tx->mode() == TransKind::Profile, !ni.outputPredicted));

  if (ni.guardedThis) {
    // Task #2067635: This should really generate an AssertThis
    ht.setThisAvailable();
  }

  ht.emitRB(RBTypeBytecodeStart, ni.source, 2);

  // Assert the flavor-implied types of the instruction's stack inputs.
  auto pc = reinterpret_cast<const Op*>(ni.pc());
  for (auto i = 0, num = instrNumPops(pc); i < num; ++i) {
    auto const type = flavorToType(instrInputFlavor(pc, i));
    if (type != Type::Gen) m_hhbcTrans.assertTypeStack(i, type);
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts >= 2) {
    ht.emitDbgAssertRetAddr();
  }

  // Instructions flagged as must-interp (or explicitly marked) go through
  // the interpreter instead of being compiled.
  if (instrMustInterp(ni) || ni.interp) {
    interpretInstr(ni);
  } else {
    translateInstrWork(ni);
  }

  passPredictedAndInferredTypes(ni);
}
// Translate a single normalized bytecode instruction to HHIR.  Nops are
// skipped, must-interp instructions go to the interpreter, everything
// else is compiled.
void IRTranslator::translateInstr(const NormalizedInstruction& ni) {
  auto& ht = m_hhbcTrans;
  ht.setBcOff(ni.source.offset(),
              ni.endsRegion && !m_hhbcTrans.isInlining());
  FTRACE(1, "\n{:-^60}\n",
         folly::format("Translating {}: {} with stack:\n{}",
                       ni.offset(), ni.toString(), ht.showStack()));
  // When profiling, we disable type predictions to avoid side exits
  assert(IMPLIES(mcg->tx().mode() == TransKind::Profile, !ni.outputPredicted));

  ht.emitRB(RBTypeBytecodeStart, ni.source, 2);
  ht.emitIncStat(Stats::Instr_TC, 1, false);

  // Assert the flavor-implied types of the instruction's stack inputs.
  auto pc = reinterpret_cast<const Op*>(ni.pc());
  for (auto i = 0, num = instrNumPops(pc); i < num; ++i) {
    auto const type = flavorToType(instrInputFlavor(pc, i));
    if (type != Type::Gen) m_hhbcTrans.assertTypeStack(i, type);
  }

  if (RuntimeOption::EvalHHIRGenerateAsserts >= 2) {
    ht.emitDbgAssertRetAddr();
  }

  if (isAlwaysNop(ni.op())) {
    // Do nothing
  } else if (instrMustInterp(ni) || ni.interp) {
    interpretInstr(ni);
  } else {
    translateInstrWork(ni);
  }
}
// Map an outer/inner DataType pair to the corresponding Type bit set.
// 'inner' is only meaningful when 'outer' is KindOfRef (a boxed value);
// callers pass KindOfNone otherwise (enforced by the assertions).
Type::bits_t Type::bitsFromDataType(DataType outer, DataType inner) {
  assert(outer != KindOfInvalid);
  assert(inner != KindOfRef);
  assert(IMPLIES(inner == KindOfNone, outer != KindOfRef));

  switch (outer) {
    case KindOfUninit        : return kUninit;
    case KindOfNull          : return kInitNull;
    case KindOfBoolean       : return kBool;
    case KindOfInt64         : return kInt;
    case KindOfDouble        : return kDbl;
    case KindOfStaticString  : return kStaticStr;
    case KindOfString        : return kStr;
    case KindOfArray         : return kArr;
    case KindOfResource      : return kRes;
    case KindOfObject        : return kObj;
    case KindOfClass         : return kCls;
    case KindOfUncountedInit : return kUncountedInit;
    case KindOfUncounted     : return kUncounted;
    case KindOfAny           : return kGen;
    case KindOfRef: {
      if (inner == KindOfAny) {
        return kBoxedCell;
      } else {
        assert(inner != KindOfUninit);
        // Compute the unboxed bits for the inner type, then shift them
        // into the boxed range.
        return bitsFromDataType(inner, KindOfNone) << kBoxShift;
      }
    }
    default                  : always_assert(false && "Unsupported DataType");
  }
}
// @mfunc Assignment. // This operator provides value semantics for <c OMContainer>. // This operator does not provide assignment of object references. // @parm The <c OMWeakObjectReference> to be assigned. // @rdesc The <c OMWeakObjectReference> resulting from the assignment. OMWeakObjectReference& OMWeakObjectReference::operator= (const OMWeakObjectReference& rhs) { TRACE("OMWeakObjectReference::operator="); PRECONDITION("Valid identification", IMPLIES(_identification != 0, (rhs._identificationSize == 0) || (rhs._identificationSize == _identificationSize))); if (this == &rhs) { return *this; // early return ! } OMObjectReference::operator=(rhs); _identificationSize = rhs._identificationSize; delete [] _identification; _identification = 0; // for BoundsChecker if (rhs._identification != 0) { _identification = new OMByte[_identificationSize]; ASSERT("Valid heap pointer", _identification != 0); memcpy(_identification, rhs._identification, _identificationSize); } _targetTag = rhs._targetTag; _targetSet = 0; return *this; }
// SSE4.1 dispatcher for the high-bitdepth 64-level alpha blend with a
// vertical mask.  Blocks with w or h < 4 fall back to the C version.
void aom_highbd_blend_a64_vmask_sse4_1(
    uint8_t *dst_8, uint32_t dst_stride, const uint8_t *src0_8,
    uint32_t src0_stride, const uint8_t *src1_8, uint32_t src1_stride,
    const uint8_t *mask, int w, int h, int bd) {
  typedef void (*blend_fn)(uint16_t * dst, uint32_t dst_stride,
                           const uint16_t *src0, uint32_t src0_stride,
                           const uint16_t *src1, uint32_t src1_stride,
                           const uint8_t *mask, int w, int h);

  // Dimensions are: bd_index X width_index
  static const blend_fn blend[2][2] = {
    {
      // bd == 8 or 10
      blend_a64_vmask_b10_w8n_sse4_1,  // w % 8 == 0
      blend_a64_vmask_b10_w4_sse4_1,   // w == 4
    },
    {
      // bd == 12
      blend_a64_vmask_b12_w8n_sse4_1,  // w % 8 == 0
      blend_a64_vmask_b12_w4_sse4_1,   // w == 4
    }
  };

  assert(IMPLIES(src0_8 == dst_8, src0_stride == dst_stride));
  assert(IMPLIES(src1_8 == dst_8, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  assert(bd == 8 || bd == 10 || bd == 12);

  if (UNLIKELY((h | w) & 3)) {  // if (w <= 2 || h <= 2)
    aom_highbd_blend_a64_vmask_c(dst_8, dst_stride, src0_8, src0_stride,
                                 src1_8, src1_stride, mask, w, h, bd);
  } else {
    uint16_t *const dst = CONVERT_TO_SHORTPTR(dst_8);
    const uint16_t *const src0 = CONVERT_TO_SHORTPTR(src0_8);
    const uint16_t *const src1 = CONVERT_TO_SHORTPTR(src1_8);

    // (w >> 2) & 1 selects the w == 4 kernel (index 1) vs the
    // w % 8 == 0 kernel (index 0), since w is a power of two >= 4 here.
    blend[bd == 12][(w >> 2) & 1](dst, dst_stride, src0, src0_stride, src1,
                                  src1_stride, mask, w, h);
  }
}
// Initialize m_type (and possibly m_namedEntity) from m_typeName, using
// the typeNameToType lookup for primitive constraint names.
void TypeConstraint::init() {
  if (isTypeVar()) {
    // We kept the type variable type constraint to correctly check child
    // classes implementing abstract methods or interfaces.
    m_type.dt = folly::none;
    m_type.metatype = MetaType::Precise;
    return;
  }

  if (m_typeName == nullptr) {
    // No constraint name at all: record an unconstrained type.
    m_type.dt = folly::none;
    m_type.metatype = MetaType::Precise;
    return;
  }

  Type dtype;
  TRACE(5, "TypeConstraint: this %p type %s, nullable %d\n",
        this, m_typeName->data(), isNullable());
  auto const mptr = typeNameToType(m_typeName);
  if (mptr) dtype = *mptr;
  // Any name not in the table — or a primitive name used where only the
  // listed kinds are accepted outside HH mode — is assumed to name a
  // class/interface and is resolved through the named-entity table.
  if (!mptr ||
      !(isHHType() ||
        dtype.dt == KindOfArray ||
        dtype.dt == KindOfBoolean ||
        dtype.dt == KindOfString ||
        dtype.dt == KindOfInt64 ||
        dtype.dt == KindOfDouble ||
        dtype.dt == KindOfResource ||
        dtype.metatype == MetaType::ArrayKey ||
        dtype.metatype == MetaType::Number ||
        dtype.metatype == MetaType::Parent ||
        dtype.metatype == MetaType::Self ||
        dtype.metatype == MetaType::Callable)) {
    TRACE(5, "TypeConstraint: this %p no such type %s, treating as object\n",
          this, m_typeName->data());
    m_type = { KindOfObject, MetaType::Precise };
    m_namedEntity = NamedEntity::get(m_typeName);
    TRACE(5, "TypeConstraint: NamedEntity: %p\n", m_namedEntity);
    return;
  }
  m_type = dtype;
  assert(m_type.dt != KindOfStaticString);
  assert(IMPLIES(isParent(), m_type.dt == KindOfObject));
  assert(IMPLIES(isSelf(), m_type.dt == KindOfObject));
  assert(IMPLIES(isCallable(), m_type.dt == KindOfObject));
}
// RAII helper around a Lease.  Optionally tries to acquire the lease in
// the constructor; m_haveLock records whether this thread owns it now.
LeaseHolderBase::LeaseHolderBase(Lease& l, LeaseAcquire acquire, bool blocking)
    : m_lease(l), m_haveLock(false), m_acquired(false) {
  // Blocking only makes sense when the caller asked us to acquire.
  assert(IMPLIES(blocking, acquire == ACQUIRE));

  const bool wantLease = (acquire == ACQUIRE);
  if (wantLease && !m_lease.amOwner()) {
    m_acquired = m_lease.acquire(blocking);
  }
  m_haveLock = m_lease.amOwner();
}
// Emit an LdStackAddr for the cell at 'index' relative to 'sp', typed
// with a pointer to the best known type of that stack slot.
SSATmp* TraceBuilder::genLdStackAddr(SSATmp* sp, int64_t index) {
  Type type;
  bool spansCall;
  UNUSED SSATmp* val = getStackValue(sp, index, spansCall, type);
  type = noneToGen(type);
  // If we know the actual value, the tracked type must agree with it.
  assert(IMPLIES(val != nullptr, val->type().equals(type)));
  assert(type.notPtr());
  return gen(LdStackAddr, type.ptr(), sp, cns(index));
}
// @mfunc Attempt to read the number of bytes given by <p bytes> // from the data stream into the buffer at address // <p buffer>. The actual number of bytes read is returned // in <p bytesRead>. // @parm The address of the buffer into which the bytes should be read. // @parm The number of bytes to read. // @parm The actual number of bytes that were read. // @this const void OMDataStreamProperty::read(OMByte* buffer, const OMUInt32 bytes, OMUInt32& bytesRead) const { TRACE("OMDataStreamProperty::read"); PRECONDITION("Optional property is present", IMPLIES(isOptional(), isPresent())); stream()->read(buffer, bytes, bytesRead); }
/** make_char_array
  *
  *     Builds a char_array view over the given buffer.  The structure
  *     does not own the memory; it merely records the pointer and length.
  *
  * PARAMETERS:
  *     IN s   : A string.  Might not be NUL-terminated.
  *              May be NULL.
  *     len    : The length of s, excluding any NUL terminator.
  *
  * RETURNS:
  *     The constructed char_array structure.
  */
static char_array
make_char_array(const dropt_char * s, size_t len)
{
    char_array out;

    /* A NULL buffer must be paired with a zero length. */
    assert(IMPLIES(s == NULL, len == 0));

    out.len = len;
    out.s = s;
    return out;
}
// Rewrite this control-flow instruction in place into an unconditional
// Jmp_, dropping its sources, destinations, and type parameter.
void IRInstruction::convertToJmp() {
  assert(isControlFlow());
  // Only the last instruction of a block may be converted.
  assert(IMPLIES(block(), block()->back() == this));
  m_op = Jmp_;
  m_typeParam = Type::None;
  m_numSrcs = 0;
  m_numDsts = 0;
  m_srcs = nullptr;
  m_dst = nullptr;
  // Instructions in the simplifier don't have blocks yet.
  if (block()) block()->setNext(nullptr);
}
// C reference implementation of the 64-level alpha blend with a per-row
// (vertical) mask: each output row mixes the matching src0 and src1 rows
// with the weight mask[row].
void aom_blend_a64_vmask_c(uint8_t *dst, uint32_t dst_stride,
                           const uint8_t *src0, uint32_t src0_stride,
                           const uint8_t *src1, uint32_t src1_stride,
                           const uint8_t *mask, int h, int w) {
  int row, col;

  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));
  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  for (row = 0; row < h; ++row) {
    const int m = mask[row];
    uint8_t *const d = dst + row * dst_stride;
    const uint8_t *const p0 = src0 + row * src0_stride;
    const uint8_t *const p1 = src1 + row * src1_stride;
    for (col = 0; col < w; ++col) {
      d[col] = AOM_BLEND_A64(m, p0[col], p1[col]);
    }
  }
}
// Rewrite this control-flow instruction in place into an unconditional
// Jmp, clearing its sources, destinations, extra data, and type parameter.
void IRInstruction::convertToJmp() {
  assert(isControlFlow());
  // Only the last instruction of a block may be converted.
  assert(IMPLIES(block(), &block()->back() == this));
  m_op = Jmp;
  m_typeParam.clear();
  m_numSrcs = 0;
  m_numDsts = 0;
  m_srcs = nullptr;
  m_dst = nullptr;
  m_extra = nullptr;
  // Instructions in the simplifier don't have blocks yet.
  setNext(nullptr);
}
/*static*/ bool OMWeakObjectReference::isNullIdentification( const void* identification, size_t identificationSize) { TRACE("OMWeakObjectReference::isNullIdentification"); PRECONDITION("Valid identification", IMPLIES(identification != 0, identificationSize > 0)); PRECONDITION("Valid identification", IMPLIES(identification == 0, identificationSize == 0)); bool result = true; if (identification != 0) { const OMByte* bytes = reinterpret_cast<const OMByte*>(identification); for (size_t i=0; i<identificationSize; i++) { if (bytes[i] != 0) { result = false; break; } } } return result; }
// Save the current trace/block/marker/insertion-point state and switch to
// emitting into trace 't' at block 'b' (position 'where', if given).
// The saved state is restored by the matching pop.
void TraceBuilder::pushTrace(IRTrace* t, BCMarker marker, Block* b,
                             const boost::optional<Block::iterator>& where) {
  FTRACE(2, "TraceBuilder saving {}@{} and using {}@{}\n",
         m_curTrace, m_state.marker().show(), t, marker.show());
  assert(t);
  // A target block and an insertion point must be supplied together.
  assert(bool(b) == bool(where));
  assert(IMPLIES(b, b->trace() == t));

  m_savedTraces.push(
    TraceState{ m_curTrace, m_curBlock, m_state.marker(), m_curWhere });
  m_curTrace = t;
  m_curBlock = b;
  setMarker(marker);
  m_curWhere = where;
}
// @mfunc Get the value of this <c OMWeakObjectReference>. // The value is a pointer to the referenced <c OMStorable>. // @rdesc A pointer to the referenced <c OMStorable>. // @this const OMStorable* OMWeakObjectReference::getValue(void) const { TRACE("OMWeakObjectReference::getValue"); OMWeakObjectReference* nonConstThis = const_cast<OMWeakObjectReference*>(this); if ((_pointer == 0) && (!isNullIdentification(_identification, _identificationSize))) { OMStorable* object = 0; set()->find(_identification, object); nonConstThis->_pointer = object; } #if 1 // HACK4MEIP2 if ((_pointer == 0) && (!isNullIdentification(_identification, _identificationSize))) { // We failed to resolve the reference as an object id, try again as a label // We should only come here for KLV encoded files. ASSERT("Referenced object ID can be a label", _identificationSize == sizeof(OMUniqueObjectIdentification)); OMUniqueObjectIdentification bid; memcpy(&bid, _identification, sizeof(OMUniqueObjectIdentification)); if (hostByteOrder() != bigEndian) { OMUniqueObjectIdentificationType::instance()->reorder( reinterpret_cast<OMByte*>(&bid), sizeof(bid)); } OMKLVKey k; memcpy(&k, &bid, sizeof(OMKLVKey)); OMUniqueObjectIdentification id; convert(id, k); memcpy(_identification, &id, sizeof(OMUniqueObjectIdentification)); OMStorable* object = 0; set()->find(_identification, object); nonConstThis->_pointer = object; } #endif // If the following assertion is violated we have a dangling weak // reference. The reference illegally designates an object that is // not present in the target set. Code elsewhere prevents the // removal of objects that are weakly referenced hence a dangling // reference is an assertion violation rather than a run-time error. // POSTCONDITION("Object found", IMPLIES(!isNullIdentification(_identification, _identificationSize), _pointer != 0)); return _pointer; }
// Prepare env to translate the bytecode instruction at offset newOff.
// Called once per HHBC instruction while emitting a trace.
void prepareForNextHHBC(HTS& env,
                        const NormalizedInstruction* ni,
                        Offset newOff,
                        bool lastBcOff) {
  FTRACE(1, "------------------- prepareForNextHHBC ------------------\n");
  env.currentNormalizedInstruction = ni;
  // A trace may not end while we are still inside an inlined callee.
  always_assert_log(
    IMPLIES(isInlining(env), !env.lastBcOff),
    [&] {
      return folly::format("Tried to end trace while inlining:\n{}",
                           env.unit).str();
    }
  );
  env.bcStateStack.back().setOffset(newOff);
  updateMarker(env);
  env.lastBcOff = lastBcOff;
  env.irb->prepareForNextHHBC();
}
// Apply type-constraint 'tc' to the guard that produced the value of
// local 'locId' (reached through typeSrc).  Returns true iff a guard was
// actually constrained.  'why' is a human-readable reason for tracing.
bool IRBuilder::constrainLocal(uint32_t locId, SSATmp* typeSrc,
                               TypeConstraint tc,
                               const std::string& why) {
  if (!shouldConstrainGuards()) return false;
  always_assert(IMPLIES(tc.innerCat > DataTypeGeneric,
                        tc.category >= DataTypeCountness));

  ITRACE(1, "constrainLocal({}, {}, {}, {})\n",
         locId, typeSrc ? typeSrc->inst()->toString() : "null", tc, why);
  Indent _i;

  if (!typeSrc) return false;
  if (!typeSrc->isA(Type::FramePtr)) {
    return constrainValue(typeSrc, tc);
  }

  // When typeSrc is a FramePtr, that means we loaded the value the local had
  // coming into the trace. Trace through the FramePtr chain, looking for a
  // guard for this local id. If we find it, constrain the guard. If we don't
  // find it, there wasn't a guard for this local so there's nothing to
  // constrain.
  auto guard = guardForLocal(locId, typeSrc);
  while (guard) {
    if (guard->is(AssertLoc)) {
      // If the refined type of the local satisfies the constraint we're
      // trying to apply, we can stop here. This can happen if we assert a
      // more general type than what we already know. Otherwise we need to
      // keep tracing back to the guard.
      if (typeFitsConstraint(guard->typeParam(), tc)) return false;
      guard = guardForLocal(locId, guard->src(0));
    } else {
      assert(guard->is(GuardLoc, CheckLoc));
      ITRACE(2, "found guard to constrain\n");
      return constrainGuard(guard, tc);
    }
  }
  ITRACE(2, "no guard to constrain\n");
  return false;
}
/* Debug helper: print matrix 'xname' (columns Xp/Xi) and check that the
 * degree counts in Xdeg are consistent with the stored pattern.  Entries
 * whose row degree in Ydeg is negative are treated as dead and skipped. */
PRIVATE void dump_mat
(
    char *xname,
    char *yname,
    Int nx,
    Int ny,
    const Int Xp [ ],
    const Int Xi [ ],
    Int Xdeg [ ],
    Int Ydeg [ ]
)
{
    Int x, y, p, p1, p2, xdeg, do_xdeg, ydeg ;

    DEBUG6 (("\n ==== Dump %s mat:\n", xname)) ;
    for (x = 0 ; x < nx ; x++)
    {
	p1 = Xp [x] ;
	p2 = Xp [x+1] ;
	xdeg = Xdeg [x] ;
	DEBUG6 (("Dump %s "ID" p1 "ID" p2 "ID" deg "ID"\n",
	    xname, x, p1, p2, xdeg)) ;
	/* only verify the degree when it is valid (non-negative) */
	do_xdeg = (xdeg >= 0) ;
	for (p = p1 ; p < p2 ; p++)
	{
	    y = Xi [p] ;
	    DEBUG7 ((" %s "ID" deg: ", yname, y)) ;
	    ASSERT (y >= 0 && y < ny) ;
	    ydeg = Ydeg [y] ;
	    DEBUG7 ((ID"\n", ydeg)) ;
	    /* count only live entries (partner degree non-negative) */
	    if (do_xdeg && ydeg >= 0)
	    {
		xdeg-- ;
	    }
	}
	/* every live entry must have been accounted for by the degree */
	ASSERT (IMPLIES (do_xdeg, xdeg == 0)) ;
    }
}