bool JSFunction::getOwnPropertySlot(JSCell* cell, ExecState* exec, PropertyName propertyName, PropertySlot& slot)
{
    JSFunction* thisObject = jsCast<JSFunction*>(cell);
    if (thisObject->isHostFunction())
        return Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);

    if (propertyName == exec->propertyNames().prototype) {
        JSGlobalData& globalData = exec->globalData();
        PropertyOffset offset = thisObject->getDirectOffset(globalData, propertyName);
        if (!isValidOffset(offset)) {
            JSObject* prototype = constructEmptyObject(exec);
            prototype->putDirect(globalData, exec->propertyNames().constructor, thisObject, DontEnum);
            thisObject->putDirect(globalData, exec->propertyNames().prototype, prototype, DontDelete | DontEnum);
            offset = thisObject->getDirectOffset(globalData, exec->propertyNames().prototype);
            ASSERT(isValidOffset(offset));
        }
        slot.setValue(thisObject, thisObject->getDirect(offset), offset);
    }

    if (propertyName == exec->propertyNames().arguments) {
        if (thisObject->jsExecutable()->isStrictMode()) {
            bool result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
            if (!result) {
                thisObject->putDirectAccessor(exec, propertyName, thisObject->globalObject()->throwTypeErrorGetterSetter(exec), DontDelete | DontEnum | Accessor);
                result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
                ASSERT(result);
            }
            return result;
        }
        slot.setCacheableCustom(thisObject, argumentsGetter);
        return true;
    }

    if (propertyName == exec->propertyNames().length) {
        slot.setCacheableCustom(thisObject, lengthGetter);
        return true;
    }

    if (propertyName == exec->propertyNames().name) {
        slot.setCacheableCustom(thisObject, nameGetter);
        return true;
    }

    if (propertyName == exec->propertyNames().caller) {
        if (thisObject->jsExecutable()->isStrictMode()) {
            bool result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
            if (!result) {
                thisObject->putDirectAccessor(exec, propertyName, thisObject->globalObject()->throwTypeErrorGetterSetter(exec), DontDelete | DontEnum | Accessor);
                result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
                ASSERT(result);
            }
            return result;
        }
        slot.setCacheableCustom(thisObject, callerGetter);
        return true;
    }

    return Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
}
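// Every function in this collection leans on one convention: a property lookup
// yields a PropertyOffset, and isValidOffset() separates a real slot from the
// "not found" sentinel. A minimal self-contained sketch of that convention
// follows, purely for illustration and assuming a plain int offset; the real
// definitions live in JSC's PropertyOffset.h and additionally distinguish
// inline from out-of-line storage, so treat this as an approximation.
typedef int PropertyOffset;

static const PropertyOffset invalidOffset = -1; // sentinel: "no such property"

inline bool isValidOffset(PropertyOffset offset)
{
    return offset != invalidOffset;
}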
bool setUpStaticFunctionSlot(ExecState* exec, const HashTableValue* entry, JSObject* thisObj, PropertyName propertyName, PropertySlot& slot)
{
    ASSERT(thisObj->globalObject());
    ASSERT(entry->attributes() & BuiltinOrFunction);
    VM& vm = exec->vm();
    unsigned attributes;
    PropertyOffset offset = thisObj->getDirectOffset(vm, propertyName, attributes);

    if (!isValidOffset(offset)) {
        // If a property is ever deleted from an object with a static table, then we reify
        // all static functions at that time - after this we shouldn't be re-adding anything.
        if (thisObj->staticFunctionsReified())
            return false;

        if (entry->attributes() & Builtin)
            thisObj->putDirectBuiltinFunction(vm, thisObj->globalObject(), propertyName, entry->builtinGenerator()(vm), entry->attributes());
        else {
            thisObj->putDirectNativeFunction(
                vm, thisObj->globalObject(), propertyName, entry->functionLength(),
                entry->function(), entry->intrinsic(), entry->attributes());
        }

        offset = thisObj->getDirectOffset(vm, propertyName, attributes);
        ASSERT(isValidOffset(offset));
    }

    slot.setValue(thisObj, attributes, thisObj->getDirect(offset), offset);
    return true;
}
bool setUpStaticFunctionSlot(VM& vm, const HashTableValue* entry, JSObject* thisObject, PropertyName propertyName, PropertySlot& slot)
{
    ASSERT(thisObject->globalObject());
    ASSERT(entry->attributes() & BuiltinOrFunctionOrAccessorOrLazyProperty);

    unsigned attributes;
    bool isAccessor = entry->attributes() & Accessor;
    PropertyOffset offset = thisObject->getDirectOffset(vm, propertyName, attributes);

    if (!isValidOffset(offset)) {
        // If a property is ever deleted from an object with a static table, then we reify
        // all static functions at that time - after this we shouldn't be re-adding anything.
        if (thisObject->staticPropertiesReified())
            return false;

        reifyStaticProperty(vm, propertyName, *entry, *thisObject);

        offset = thisObject->getDirectOffset(vm, propertyName, attributes);
        if (!isValidOffset(offset)) {
            dataLog("Static hashtable initialization for ", propertyName, " did not produce a property.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    if (isAccessor)
        slot.setCacheableGetterSlot(thisObject, attributes, jsCast<GetterSetter*>(thisObject->getDirect(offset)), offset);
    else
        slot.setValue(thisObject, attributes, thisObject->getDirect(offset), offset);
    return true;
}
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfStructureStubInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);

    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
    if (!stubInfo.seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    if (stubInfo.resetByGC)
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);

    switch (stubInfo.accessType) {
    case access_unset:
        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);

    case access_put_by_id_replace: {
        PropertyOffset offset = stubInfo.u.putByIdReplace.baseObjectStructure->get(
            *profiledBlock->vm(), ident);
        if (isValidOffset(offset)) {
            return PutByIdStatus(
                SimpleReplace,
                stubInfo.u.putByIdReplace.baseObjectStructure.get(),
                0, 0, offset);
        }
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
    }

    case access_put_by_id_transition_normal:
    case access_put_by_id_transition_direct: {
        ASSERT(stubInfo.u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
        PropertyOffset offset = stubInfo.u.putByIdTransition.structure->get(
            *profiledBlock->vm(), ident);
        if (isValidOffset(offset)) {
            return PutByIdStatus(
                SimpleTransition,
                stubInfo.u.putByIdTransition.previousStructure.get(),
                stubInfo.u.putByIdTransition.structure.get(),
                stubInfo.u.putByIdTransition.chain.get(),
                offset);
        }
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
    }

    default:
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
    }
#else // ENABLE(JIT)
    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
#endif // ENABLE(JIT)
}
bool setUpStaticFunctionSlot(VM& vm, const HashTableValue* entry, JSObject* thisObject, PropertyName propertyName, PropertySlot& slot)
{
    ASSERT(thisObject->globalObject());
    ASSERT(entry->attributes() & BuiltinOrFunctionOrAccessorOrLazyProperty);

    unsigned attributes;
    bool isAccessor = entry->attributes() & Accessor;
    PropertyOffset offset = thisObject->getDirectOffset(vm, propertyName, attributes);

    if (!isValidOffset(offset)) {
        // If a property is ever deleted from an object with a static table, then we reify
        // all static functions at that time - after this we shouldn't be re-adding anything.
        if (thisObject->staticFunctionsReified())
            return false;

        if (entry->attributes() & Builtin)
            thisObject->putDirectBuiltinFunction(vm, thisObject->globalObject(), propertyName, entry->builtinGenerator()(vm), attributesForStructure(entry->attributes()));
        else if (entry->attributes() & Function) {
            thisObject->putDirectNativeFunction(
                vm, thisObject->globalObject(), propertyName, entry->functionLength(),
                entry->function(), entry->intrinsic(), attributesForStructure(entry->attributes()));
        } else if (isAccessor)
            reifyStaticAccessor(vm, *entry, *thisObject, propertyName);
        else if (entry->attributes() & CellProperty) {
            LazyCellProperty* property = bitwise_cast<LazyCellProperty*>(
                bitwise_cast<char*>(thisObject) + entry->lazyCellPropertyOffset());
            JSCell* result = property->get(thisObject);
            thisObject->putDirect(vm, propertyName, result, attributesForStructure(entry->attributes()));
        } else if (entry->attributes() & ClassStructure) {
            LazyClassStructure* structure = bitwise_cast<LazyClassStructure*>(
                bitwise_cast<char*>(thisObject) + entry->lazyClassStructureOffset());
            structure->get(jsCast<JSGlobalObject*>(thisObject));
        } else if (entry->attributes() & PropertyCallback) {
            JSValue result = entry->lazyPropertyCallback()(vm, thisObject);
            thisObject->putDirect(vm, propertyName, result, attributesForStructure(entry->attributes()));
        } else {
            dataLog("Static hashtable entry for ", propertyName, " has weird attributes: ", entry->attributes(), "\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        offset = thisObject->getDirectOffset(vm, propertyName, attributes);
        if (!isValidOffset(offset)) {
            dataLog("Static hashtable initialization for ", propertyName, " did not produce a property.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    if (isAccessor)
        slot.setCacheableGetterSlot(thisObject, attributes, jsCast<GetterSetter*>(thisObject->getDirect(offset)), offset);
    else
        slot.setValue(thisObject, attributes, thisObject->getDirect(offset), offset);
    return true;
}
GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
{
    // For now we only handle the super simple self access case. We could handle the
    // prototype case in the future.

    if (!structure)
        return GetByIdStatus(TakesSlowPath);

    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return GetByIdStatus(TakesSlowPath);

    if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
        return GetByIdStatus(TakesSlowPath);

    if (!structure->propertyAccessesAreCacheable())
        return GetByIdStatus(TakesSlowPath);

    unsigned attributes;
    JSCell* specificValue;
    PropertyOffset offset = structure->getConcurrently(vm, uid, attributes, specificValue);
    if (!isValidOffset(offset))
        return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
    if (attributes & Accessor)
        return GetByIdStatus(MakesCalls);
    if (structure->isDictionary())
        specificValue = 0;
    return GetByIdStatus(
        Simple, false, GetByIdVariant(StructureSet(structure), offset, specificValue));
}
GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, Identifier& ident)
{
    // For now we only handle the super simple self access case. We could handle the
    // prototype case in the future.

    if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex)
        return GetByIdStatus(TakesSlowPath);

    if (structure->typeInfo().overridesGetOwnPropertySlot())
        return GetByIdStatus(TakesSlowPath);

    if (!structure->propertyAccessesAreCacheable())
        return GetByIdStatus(TakesSlowPath);

    GetByIdStatus result;
    result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, Identifier&) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
    unsigned attributes;
    JSCell* specificValue;
    result.m_offset = structure->get(vm, ident, attributes, specificValue);
    if (!isValidOffset(result.m_offset))
        return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
    if (attributes & Accessor)
        return GetByIdStatus(MakesCalls);
    if (structure->isDictionary())
        specificValue = 0;
    result.m_structureSet.add(structure);
    result.m_specificValue = JSValue(specificValue);
    return result;
}
GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(LLINT)
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
        return GetByIdStatus(NoInformation, false);

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return GetByIdStatus(NoInformation, false);

    unsigned attributesIgnored;
    JSCell* specificValue;
    PropertyOffset offset = structure->get(
        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
    if (structure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(offset))
        return GetByIdStatus(NoInformation, false);

    return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
#else
    return GetByIdStatus(NoInformation, false);
#endif
}
GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);

    VM& vm = *profiledBlock->vm();

    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length))
        return GetByIdStatus(NoInformation, false);

    StructureID structureID = instruction[4].u.structureID;
    if (!structureID)
        return GetByIdStatus(NoInformation, false);

    Structure* structure = vm.heap.structureIDTable().get(structureID);

    if (structure->takesSlowPathInDFGForImpureProperty())
        return GetByIdStatus(NoInformation, false);

    unsigned attributesIgnored;
    PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored);
    if (!isValidOffset(offset))
        return GetByIdStatus(NoInformation, false);

    return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
}
GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, StringImpl* uid)
{
    // For now we only handle the super simple self access case. We could handle the
    // prototype case in the future.

    if (set.isEmpty())
        return GetByIdStatus();

    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return GetByIdStatus(TakesSlowPath);

    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = false;
    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];
        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
            return GetByIdStatus(TakesSlowPath);

        if (!structure->propertyAccessesAreCacheable())
            return GetByIdStatus(TakesSlowPath);

        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (!isValidOffset(offset))
            return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
        if (attributes & Accessor)
            return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.

        if (!result.appendVariant(GetByIdVariant(structure, offset)))
            return GetByIdStatus(TakesSlowPath);
    }

    return result;
}
bool DS345::setOffset(const double& new_offset) const
{
    if (!isValidOffset(new_offset))
        return false;

    QString command = "OFFS " + QString::number(new_offset);
    return sendCommand(command);
}
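// DS345::setOffset() above delegates its range check to an isValidOffset()
// helper that is not shown. A minimal sketch of what such a guard might look
// like, assuming a hypothetical symmetric +/-5 V limit; the real DS345
// constraint also depends on the programmed amplitude, so the bound here is a
// placeholder, not the instrument's documented spec.
bool DS345::isValidOffset(const double& offset) const
{
    const double maxOffsetVolts = 5.0; // assumed limit, not from the manual

    // Reject NaN (offset != offset for NaN) and values outside the assumed range.
    return offset == offset && offset >= -maxOffsetVolts && offset <= maxOffsetVolts;
}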
PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);

    VM& vm = *profiledBlock->vm();

    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    StructureID structureID = instruction[4].u.structureID;
    if (!structureID)
        return PutByIdStatus(NoInformation);

    Structure* structure = vm.heap.structureIDTable().get(structureID);

    StructureID newStructureID = instruction[6].u.structureID;
    if (!newStructureID) {
        PropertyOffset offset = structure->getConcurrently(uid);
        if (!isValidOffset(offset))
            return PutByIdStatus(NoInformation);

        return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
    }

    Structure* newStructure = vm.heap.structureIDTable().get(newStructureID);

    ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());

    PropertyOffset offset = newStructure->getConcurrently(uid);
    if (!isValidOffset(offset))
        return PutByIdStatus(NoInformation);

    ObjectPropertyConditionSet conditionSet;
    if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) {
        conditionSet = generateConditionsForPropertySetterMissConcurrently(
            *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid);
        if (!conditionSet.isValid())
            return PutByIdStatus(NoInformation);
    }

    return PutByIdVariant::transition(
        structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid));
}
void StructureRareData::setObjectToStringValue(ExecState* exec, VM& vm, Structure* ownStructure, JSString* value, PropertySlot toStringTagSymbolSlot)
{
    if (m_giveUpOnObjectToStringValueCache)
        return;

    ObjectPropertyConditionSet conditionSet;
    if (toStringTagSymbolSlot.isValue()) {
        // We don't handle the own property case of Symbol.toStringTag because we would never know if a new
        // object transitioning to the same structure had the same value stored in Symbol.toStringTag.
        // Additionally, this is a super unlikely case anyway.
        if (!toStringTagSymbolSlot.isCacheable() || toStringTagSymbolSlot.slotBase()->structure(vm) == ownStructure)
            return;

        // This will not create a condition for the current structure but that is good because we know the Symbol.toStringTag
        // is not on the ownStructure so we will transition if one is added and this cache will no longer be used.
        conditionSet = generateConditionsForPrototypePropertyHit(vm, this, exec, ownStructure, toStringTagSymbolSlot.slotBase(), vm.propertyNames->toStringTagSymbol.impl());
        ASSERT(!conditionSet.isValid() || conditionSet.hasOneSlotBaseCondition());
    } else if (toStringTagSymbolSlot.isUnset())
        conditionSet = generateConditionsForPropertyMiss(vm, this, exec, ownStructure, vm.propertyNames->toStringTagSymbol.impl());
    else
        return;

    if (!conditionSet.isValid()) {
        m_giveUpOnObjectToStringValueCache = true;
        return;
    }

    ObjectPropertyCondition equivCondition;
    for (const ObjectPropertyCondition& condition : conditionSet) {
        if (condition.condition().kind() == PropertyCondition::Presence) {
            ASSERT(isValidOffset(condition.offset()));
            condition.object()->structure(vm)->startWatchingPropertyForReplacements(vm, condition.offset());
            equivCondition = condition.attemptToMakeEquivalenceWithoutBarrier();

            // The equivalence condition won't be watchable if we have already seen a replacement.
            if (!equivCondition.isWatchable()) {
                m_giveUpOnObjectToStringValueCache = true;
                return;
            }
        } else if (!condition.isWatchable()) {
            m_giveUpOnObjectToStringValueCache = true;
            return;
        }
    }

    ASSERT(conditionSet.structuresEnsureValidity());
    for (ObjectPropertyCondition condition : conditionSet) {
        if (condition.condition().kind() == PropertyCondition::Presence) {
            m_objectToStringAdaptiveInferredValueWatchpoint = std::make_unique<ObjectToStringAdaptiveInferredPropertyValueWatchpoint>(equivCondition, this);
            m_objectToStringAdaptiveInferredValueWatchpoint->install();
        } else
            m_objectToStringAdaptiveWatchpointSet.add(condition, this)->install();
    }

    m_objectToStringValue.set(vm, this, value);
}
PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);

    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return PutByIdStatus(NoInformation);

    if (instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_out_of_line)) {
        PropertyOffset offset = structure->getConcurrently(uid);
        if (!isValidOffset(offset))
            return PutByIdStatus(NoInformation);

        return PutByIdVariant::replace(structure, offset);
    }

    ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());

    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct_out_of_line)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal_out_of_line));

    Structure* newStructure = instruction[6].u.structure.get();
    StructureChain* chain = instruction[7].u.structureChain.get();
    ASSERT(newStructure);
    ASSERT(chain);

    PropertyOffset offset = newStructure->getConcurrently(uid);
    if (!isValidOffset(offset))
        return PutByIdStatus(NoInformation);

    RefPtr<IntendedStructureChain> intendedChain;
    if (chain)
        intendedChain = adoptRef(new IntendedStructureChain(profiledBlock, structure, chain));

    return PutByIdVariant::transition(structure, newStructure, intendedChain.get(), offset);
}
PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(LLINT)
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);

    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id)
        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) {
        PropertyOffset offset = structure->get(*profiledBlock->vm(), ident);
        if (!isValidOffset(offset))
            return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);

        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
    }

    ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());

    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct)
        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal)
        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line)
        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line));

    Structure* newStructure = instruction[6].u.structure.get();
    StructureChain* chain = instruction[7].u.structureChain.get();
    ASSERT(newStructure);
    ASSERT(chain);

    PropertyOffset offset = newStructure->get(*profiledBlock->vm(), ident);
    if (!isValidOffset(offset))
        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);

    return PutByIdStatus(SimpleTransition, structure, newStructure, chain, offset);
#else
    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
#endif
}
static ResolveGlobalStatus computeForStructure(CodeBlock* codeBlock, Structure* structure, Identifier& identifier)
{
    unsigned attributesIgnored;
    JSCell* specificValue;
    PropertyOffset offset = structure->get(*codeBlock->globalData(), identifier, attributesIgnored, specificValue);
    if (structure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(offset))
        return ResolveGlobalStatus();

    return ResolveGlobalStatus(ResolveGlobalStatus::Simple, structure, offset, specificValue);
}
bool GetByIdStatus::computeForChain(CodeBlock* profiledBlock, StringImpl* uid, PassRefPtr<IntendedStructureChain> passedChain)
{
#if ENABLE(JIT)
    RefPtr<IntendedStructureChain> chain = passedChain;

    // Validate the chain. If the chain is invalid, then currently the best thing
    // we can do is to assume that TakesSlow is true. In the future, it might be
    // worth exploring reifying the structure chain from the structure we've got
    // instead of using the one from the cache, since that will do the right things
    // if the structure chain has changed. But that may be harder, because we may
    // then end up having a different type of access altogether. And it currently
    // does not appear to be worth it to do so -- effectively, the heuristic we
    // have now is that if the structure chain has changed between when it was
    // cached on in the baseline JIT and when the DFG tried to inline the access,
    // then we fall back on a polymorphic access.
    if (!chain->isStillValid())
        return false;

    if (chain->head()->takesSlowPathInDFGForImpureProperty())
        return false;
    size_t chainSize = chain->size();
    for (size_t i = 0; i < chainSize; i++) {
        if (chain->at(i)->takesSlowPathInDFGForImpureProperty())
            return false;
    }

    JSObject* currentObject = chain->terminalPrototype();
    Structure* currentStructure = chain->last();

    ASSERT_UNUSED(currentObject, currentObject);

    unsigned attributesIgnored;
    JSCell* specificValue;

    PropertyOffset offset = currentStructure->getConcurrently(
        *profiledBlock->vm(), uid, attributesIgnored, specificValue);
    if (currentStructure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(offset))
        return false;

    m_variants.append(
        GetByIdVariant(StructureSet(chain->head()), offset, specificValue, chain));
    return true;
#else // ENABLE(JIT)
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(uid);
    UNUSED_PARAM(passedChain);
    UNREACHABLE_FOR_PLATFORM();
    return false;
#endif // ENABLE(JIT)
}
bool IntendedStructureChain::mayInterceptStoreTo(StringImpl* uid)
{
    for (unsigned i = 0; i < m_vector.size(); ++i) {
        unsigned attributes;
        PropertyOffset offset = m_vector[i]->getConcurrently(uid, attributes);
        if (!isValidOffset(offset))
            continue;
        if (attributes & (ReadOnly | Accessor))
            return true;
        return false;
    }
    return false;
}
bool setUpStaticFunctionSlot(ExecState* exec, const HashEntry* entry, JSObject* thisObj, PropertyName propertyName, PropertySlot& slot)
{
    ASSERT(thisObj->globalObject());
    ASSERT(entry->attributes() & Function);
    PropertyOffset offset = thisObj->getDirectOffset(exec->vm(), propertyName);

    if (!isValidOffset(offset)) {
        // If a property is ever deleted from an object with a static table, then we reify
        // all static functions at that time - after this we shouldn't be re-adding anything.
        if (thisObj->staticFunctionsReified())
            return false;

        thisObj->putDirectNativeFunction(
            exec, thisObj->globalObject(), propertyName, entry->functionLength(),
            entry->function(), entry->intrinsic(), entry->attributes());
        offset = thisObj->getDirectOffset(exec->vm(), propertyName);
        ASSERT(isValidOffset(offset));
    }

    slot.setValue(thisObj, thisObj->getDirect(offset), offset);
    return true;
}
void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, Identifier& ident, Structure* structure)
{
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    // Validate the chain. If the chain is invalid, then currently the best thing
    // we can do is to assume that TakesSlow is true. In the future, it might be
    // worth exploring reifying the structure chain from the structure we've got
    // instead of using the one from the cache, since that will do the right things
    // if the structure chain has changed. But that may be harder, because we may
    // then end up having a different type of access altogether. And it currently
    // does not appear to be worth it to do so -- effectively, the heuristic we
    // have now is that if the structure chain has changed between when it was
    // cached on in the baseline JIT and when the DFG tried to inline the access,
    // then we fall back on a polymorphic access.
    Structure* currentStructure = structure;
    JSObject* currentObject = 0;
    for (unsigned i = 0; i < result.m_chain.size(); ++i) {
        ASSERT(!currentStructure->isDictionary());
        currentObject = asObject(currentStructure->prototypeForLookup(profiledBlock));
        currentStructure = result.m_chain[i];
        if (currentObject->structure() != currentStructure)
            return;
    }

    ASSERT(currentObject);

    unsigned attributesIgnored;
    JSCell* specificValue;

    result.m_offset = currentStructure->get(
        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
    if (currentStructure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(result.m_offset))
        return;

    result.m_structureSet.add(structure);
    result.m_specificValue = JSValue(specificValue);
#else
    UNUSED_PARAM(result);
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(ident);
    UNUSED_PARAM(structure);
    UNREACHABLE_FOR_PLATFORM();
#endif
}
ComplexGetStatus ComplexGetStatus::computeFor(
    CodeBlock* profiledBlock, Structure* headStructure, StructureChain* chain,
    unsigned chainCount, AtomicStringImpl* uid)
{
    // FIXME: We should assert that we never see a structure that
    // hasImpureGetOwnPropertySlot() but for which we don't
    // newImpurePropertyFiresWatchpoints(). We're not at a point where we can do
    // that, yet.
    // https://bugs.webkit.org/show_bug.cgi?id=131810

    if (headStructure->takesSlowPathInDFGForImpureProperty())
        return takesSlowPath();

    ComplexGetStatus result;
    result.m_kind = Inlineable;

    if (chain && chainCount) {
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock, headStructure, chain, chainCount));

        if (!result.m_chain->isStillValid())
            return skip();

        if (headStructure->takesSlowPathInDFGForImpureProperty()
            || result.m_chain->takesSlowPathInDFGForImpureProperty())
            return takesSlowPath();

        JSObject* currentObject = result.m_chain->terminalPrototype();
        Structure* currentStructure = result.m_chain->last();

        ASSERT_UNUSED(currentObject, currentObject);

        result.m_offset = currentStructure->getConcurrently(uid, result.m_attributes);
    } else
        result.m_offset = headStructure->getConcurrently(uid, result.m_attributes);

    if (!isValidOffset(result.m_offset))
        return takesSlowPath();

    return result;
}
PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, Identifier& ident, bool isDirect)
{
    if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex)
        return PutByIdStatus(TakesSlowPath);

    if (structure->typeInfo().overridesGetOwnPropertySlot())
        return PutByIdStatus(TakesSlowPath);

    if (!structure->propertyAccessesAreCacheable())
        return PutByIdStatus(TakesSlowPath);

    unsigned attributes;
    JSCell* specificValue;
    PropertyOffset offset = structure->get(vm, ident, attributes, specificValue);
    if (isValidOffset(offset)) {
        if (attributes & (Accessor | ReadOnly))
            return PutByIdStatus(TakesSlowPath);
        if (specificValue) {
            // We need the PutById slow path to verify that we're storing the right value into
            // the specialized slot.
            return PutByIdStatus(TakesSlowPath);
        }
        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
    }

    // Our hypothesis is that we're doing a transition. Before we prove that this is really
    // true, we want to do some sanity checks.

    // Don't cache put transitions on dictionaries.
    if (structure->isDictionary())
        return PutByIdStatus(TakesSlowPath);

    // If the structure corresponds to something that isn't an object, then give up, since
    // we don't want to be adding properties to strings.
    if (structure->typeInfo().type() == StringType)
        return PutByIdStatus(TakesSlowPath);

    if (!isDirect) {
        // If the prototype chain has setters or read-only properties, then give up.
        if (structure->prototypeChainMayInterceptStoreTo(vm, ident))
            return PutByIdStatus(TakesSlowPath);

        // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
        // then give up. The dictionary case would only happen if this structure has not been
        // used in an optimized put_by_id transition. And really the only reason why we would
        // bail here is that I don't really feel like having the optimizing JIT go and flatten
        // dictionaries if we have evidence to suggest that those objects were never used as
        // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
        // the other checks below will fail.
        if (!isPrototypeChainNormalized(globalObject, structure))
            return PutByIdStatus(TakesSlowPath);
    }

    // We only optimize if there is already a structure that the transition is cached to.
    // Among other things, this allows us to guard against a transition with a specific
    // value.
    //
    // - If we're storing a value that could be specific: this would only be a problem if
    //   the existing transition did have a specific value already, since if it didn't,
    //   then we would behave "as if" we were not storing a specific value. If it did
    //   have a specific value, then we'll know - the fact that we pass 0 for
    //   specificValue will tell us.
    //
    // - If we're not storing a value that could be specific: again, this would only be a
    //   problem if the existing transition did have a specific value, which we check for
    //   by passing 0 for the specificValue.
    Structure* transition = Structure::addPropertyTransitionToExistingStructure(structure, ident, 0, 0, offset);
    if (!transition)
        return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above.
    ASSERT(!transition->transitionDidInvolveSpecificValue());
    ASSERT(isValidOffset(offset));

    return PutByIdStatus(
        SimpleTransition, structure, transition,
        structure->prototypeChain(vm, globalObject), offset);
}
PutByIdStatus PutByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    StringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->seen)
        return PutByIdStatus();

    switch (stubInfo->accessType) {
    case access_unset:
        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
        return PutByIdStatus(TakesSlowPath);

    case access_put_by_id_replace: {
        PropertyOffset offset =
            stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(uid);
        if (isValidOffset(offset)) {
            return PutByIdVariant::replace(
                stubInfo->u.putByIdReplace.baseObjectStructure.get(), offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

    case access_put_by_id_transition_normal:
    case access_put_by_id_transition_direct: {
        ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
        PropertyOffset offset = stubInfo->u.putByIdTransition.structure->getConcurrently(uid);
        if (isValidOffset(offset)) {
            RefPtr<IntendedStructureChain> chain;
            if (stubInfo->u.putByIdTransition.chain) {
                chain = adoptRef(new IntendedStructureChain(
                    profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
                    stubInfo->u.putByIdTransition.chain.get()));
            }
            return PutByIdVariant::transition(
                stubInfo->u.putByIdTransition.previousStructure.get(),
                stubInfo->u.putByIdTransition.structure.get(),
                chain.get(), offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

    case access_put_by_id_list: {
        PolymorphicPutByIdList* list = stubInfo->u.putByIdList.list;

        PutByIdStatus result;
        result.m_state = Simple;

        State slowPathState = TakesSlowPath;
        for (unsigned i = 0; i < list->size(); ++i) {
            const PutByIdAccess& access = list->at(i);

            switch (access.type()) {
            case PutByIdAccess::Setter:
            case PutByIdAccess::CustomSetter:
                slowPathState = MakesCalls;
                break;
            default:
                break;
            }
        }

        for (unsigned i = 0; i < list->size(); ++i) {
            const PutByIdAccess& access = list->at(i);

            PutByIdVariant variant;

            switch (access.type()) {
            case PutByIdAccess::Replace: {
                Structure* structure = access.structure();
                PropertyOffset offset = structure->getConcurrently(uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(slowPathState);
                variant = PutByIdVariant::replace(structure, offset);
                break;
            }

            case PutByIdAccess::Transition: {
                PropertyOffset offset = access.newStructure()->getConcurrently(uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(slowPathState);
                RefPtr<IntendedStructureChain> chain;
                if (access.chain()) {
                    chain = adoptRef(new IntendedStructureChain(
                        profiledBlock, access.oldStructure(), access.chain()));
                    if (!chain->isStillValid())
                        continue;
                }
                variant = PutByIdVariant::transition(
                    access.oldStructure(), access.newStructure(), chain.get(), offset);
                break;
            }

            case PutByIdAccess::Setter: {
                Structure* structure = access.structure();

                ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                    profiledBlock, structure, access.chain(), access.chainCount(), uid);

                switch (complexGetStatus.kind()) {
                case ComplexGetStatus::ShouldSkip:
                    continue;

                case ComplexGetStatus::TakesSlowPath:
                    return PutByIdStatus(slowPathState);

                case ComplexGetStatus::Inlineable: {
                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                        access.stubRoutine());
                    std::unique_ptr<CallLinkStatus> callLinkStatus =
                        std::make_unique<CallLinkStatus>(
                            CallLinkStatus::computeFor(
                                locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));

                    variant = PutByIdVariant::setter(
                        structure, complexGetStatus.offset(), complexGetStatus.chain(),
                        std::move(callLinkStatus));
                } }
                break;
            }

            case PutByIdAccess::CustomSetter:
                return PutByIdStatus(MakesCalls);

            default:
                return PutByIdStatus(slowPathState);
            }

            if (!result.appendVariant(variant))
                return PutByIdStatus(slowPathState);
        }

        return result;
    }

    default:
        return PutByIdStatus(TakesSlowPath);
    }
}
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    StringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);

    PolymorphicGetByIdList* list = 0;
    State slowPathState = TakesSlowPath;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            const GetByIdAccess& access = list->at(i);
            if (access.doesCalls())
                slowPathState = MakesCalls;
        }
    }

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);

    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(slowPathState, true);
        unsigned attributesIgnored;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(slowPathState, true);

        variant.m_structureSet.add(structure);
        bool didAppend = result.appendVariant(variant);
        ASSERT_UNUSED(didAppend, didAppend);
        return result;
    }

    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            Structure* structure = list->at(listIndex).structure();

            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                profiledBlock, structure, list->at(listIndex).chain(),
                list->at(listIndex).chainCount(), uid);

            switch (complexGetStatus.kind()) {
            case ComplexGetStatus::ShouldSkip:
                continue;

            case ComplexGetStatus::TakesSlowPath:
                return GetByIdStatus(slowPathState, true);

            case ComplexGetStatus::Inlineable: {
                std::unique_ptr<CallLinkStatus> callLinkStatus;
                switch (list->at(listIndex).type()) {
                case GetByIdAccess::SimpleInline:
                case GetByIdAccess::SimpleStub: {
                    break;
                }
                case GetByIdAccess::Getter: {
                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                        list->at(listIndex).stubRoutine());
                    callLinkStatus = std::make_unique<CallLinkStatus>(
                        CallLinkStatus::computeFor(
                            locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
                    break;
                }
                case GetByIdAccess::CustomGetter:
                case GetByIdAccess::WatchedStub: {
                    // FIXME: It would be totally sweet to support this at some point in the future.
                    // https://bugs.webkit.org/show_bug.cgi?id=133052
                    return GetByIdStatus(slowPathState, true);
                }
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }

                GetByIdVariant variant(
                    StructureSet(structure), complexGetStatus.offset(),
                    complexGetStatus.chain(), std::move(callLinkStatus));

                if (!result.appendVariant(variant))
                    return GetByIdStatus(slowPathState, true);
                break;
            } }
        }

        return result;
    }

    default:
        return GetByIdStatus(slowPathState, true);
    }

    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, StringImpl* uid, bool isDirect)
{
    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return PutByIdStatus(TakesSlowPath);

    if (set.isEmpty())
        return PutByIdStatus();

    PutByIdStatus result;
    result.m_state = Simple;
    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];

        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
            return PutByIdStatus(TakesSlowPath);

        if (!structure->propertyAccessesAreCacheable())
            return PutByIdStatus(TakesSlowPath);

        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (isValidOffset(offset)) {
            if (attributes & CustomAccessor)
                return PutByIdStatus(MakesCalls);

            if (attributes & (Accessor | ReadOnly))
                return PutByIdStatus(TakesSlowPath);

            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
            if (!replaceSet || replaceSet->isStillValid()) {
                // When this executes, it'll create, and fire, this replacement watchpoint set.
                // That means that this has probably never executed or that something fishy is
                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
                // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
                // So, better leave this alone and take slow path.
                return PutByIdStatus(TakesSlowPath);
            }

            if (!result.appendVariant(PutByIdVariant::replace(structure, offset)))
                return PutByIdStatus(TakesSlowPath);
            continue;
        }

        // Our hypothesis is that we're doing a transition. Before we prove that this is really
        // true, we want to do some sanity checks.

        // Don't cache put transitions on dictionaries.
        if (structure->isDictionary())
            return PutByIdStatus(TakesSlowPath);

        // If the structure corresponds to something that isn't an object, then give up, since
        // we don't want to be adding properties to strings.
        if (structure->typeInfo().type() == StringType)
            return PutByIdStatus(TakesSlowPath);

        RefPtr<IntendedStructureChain> chain;
        if (!isDirect) {
            chain = adoptRef(new IntendedStructureChain(globalObject, structure));

            // If the prototype chain has setters or read-only properties, then give up.
            if (chain->mayInterceptStoreTo(uid))
                return PutByIdStatus(TakesSlowPath);

            // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
            // then give up. The dictionary case would only happen if this structure has not been
            // used in an optimized put_by_id transition. And really the only reason why we would
            // bail here is that I don't really feel like having the optimizing JIT go and flatten
            // dictionaries if we have evidence to suggest that those objects were never used as
            // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
            // the other checks below will fail.
            if (structure->isProxy() || !chain->isNormalized())
                return PutByIdStatus(TakesSlowPath);
        }

        // We only optimize if there is already a structure that the transition is cached to.
        Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
        if (!transition)
            return PutByIdStatus(TakesSlowPath);
        ASSERT(isValidOffset(offset));

        bool didAppend = result.appendVariant(
            PutByIdVariant::transition(structure, transition, chain.get(), offset));
        if (!didAppend)
            return PutByIdStatus(TakesSlowPath);
    }

    return result;
}
GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->everConsidered)
        return GetByIdStatus(NoInformation);

    PolymorphicAccess* list = 0;
    State slowPathState = TakesSlowPath;
    if (stubInfo->cacheType == CacheType::Stub) {
        list = stubInfo->u.stub;
        for (unsigned i = 0; i < list->size(); ++i) {
            const AccessCase& access = list->at(i);
            if (access.doesCalls())
                slowPathState = MakesCalls;
        }
    }

    if (stubInfo->tookSlowPath)
        return GetByIdStatus(slowPathState);

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->cacheType) {
    case CacheType::Unset:
        return GetByIdStatus(NoInformation);

    case CacheType::GetByIdSelf: {
        Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(slowPathState, true);
        unsigned attributesIgnored;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(slowPathState, true);

        variant.m_structureSet.add(structure);
        bool didAppend = result.appendVariant(variant);
        ASSERT_UNUSED(didAppend, didAppend);
        return result;
    }

    case CacheType::Stub: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            const AccessCase& access = list->at(listIndex);
            if (access.viaProxy())
                return GetByIdStatus(slowPathState, true);

            Structure* structure = access.structure();
            if (!structure) {
                // The null structure cases arise due to array.length and string.length. We have no way
                // of creating a GetByIdVariant for those, and we don't really have to since the DFG
                // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
                // shouldn't have to use value profiling to discover something that the AccessCase
                // could have told us. But, it works well enough. So, our only concern here is to not
                // crash on null structure.
                return GetByIdStatus(slowPathState, true);
            }

            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                structure, access.conditionSet(), uid);

            switch (complexGetStatus.kind()) {
            case ComplexGetStatus::ShouldSkip:
                continue;

            case ComplexGetStatus::TakesSlowPath:
                return GetByIdStatus(slowPathState, true);

            case ComplexGetStatus::Inlineable: {
                std::unique_ptr<CallLinkStatus> callLinkStatus;
                JSFunction* intrinsicFunction = nullptr;

                switch (access.type()) {
                case AccessCase::Load: {
                    break;
                }
                case AccessCase::IntrinsicGetter: {
                    intrinsicFunction = access.intrinsicFunction();
                    break;
                }
                case AccessCase::Getter: {
                    CallLinkInfo* callLinkInfo = access.callLinkInfo();
                    ASSERT(callLinkInfo);
                    callLinkStatus = std::make_unique<CallLinkStatus>(
                        CallLinkStatus::computeFor(
                            locker, profiledBlock, *callLinkInfo, callExitSiteData));
                    break;
                }
                default: {
                    // FIXME: It would be totally sweet to support more of these at some point in the
                    // future. https://bugs.webkit.org/show_bug.cgi?id=133052
                    return GetByIdStatus(slowPathState, true);
                } }

                GetByIdVariant variant(
                    StructureSet(structure), complexGetStatus.offset(),
                    complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
                    intrinsicFunction);

                if (!result.appendVariant(variant))
                    return GetByIdStatus(slowPathState, true);
                break;
            } }
        }

        return result;
    }

    default:
        return GetByIdStatus(slowPathState, true);
    }

    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
bool JSFunction::getOwnPropertySlot(JSObject* object, ExecState* exec, PropertyName propertyName, PropertySlot& slot)
{
    JSFunction* thisObject = jsCast<JSFunction*>(object);
    if (thisObject->isHostOrBuiltinFunction())
        return Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);

    if (propertyName == exec->propertyNames().prototype) {
        VM& vm = exec->vm();
        unsigned attributes;
        PropertyOffset offset = thisObject->getDirectOffset(vm, propertyName, attributes);
        if (!isValidOffset(offset)) {
            JSObject* prototype = constructEmptyObject(exec);
            prototype->putDirect(vm, exec->propertyNames().constructor, thisObject, DontEnum);
            thisObject->putDirect(vm, exec->propertyNames().prototype, prototype, DontDelete | DontEnum);
            offset = thisObject->getDirectOffset(vm, exec->propertyNames().prototype, attributes);
            ASSERT(isValidOffset(offset));
        }
        slot.setValue(thisObject, attributes, thisObject->getDirect(offset), offset);
    }

    if (propertyName == exec->propertyNames().arguments) {
        if (thisObject->jsExecutable()->isStrictMode()) {
            bool result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
            if (!result) {
                thisObject->putDirectAccessor(exec, propertyName, thisObject->globalObject()->throwTypeErrorGetterSetter(exec->vm()), DontDelete | DontEnum | Accessor);
                result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
                ASSERT(result);
            }
            return result;
        }
        slot.setCacheableCustom(thisObject, ReadOnly | DontEnum | DontDelete, argumentsGetter);
        return true;
    }

    if (propertyName == exec->propertyNames().length) {
        slot.setCacheableCustom(thisObject, ReadOnly | DontEnum | DontDelete, lengthGetter);
        return true;
    }

    if (propertyName == exec->propertyNames().name) {
        slot.setCacheableCustom(thisObject, ReadOnly | DontEnum | DontDelete, nameGetter);
        return true;
    }

    if (propertyName == exec->propertyNames().caller) {
        if (thisObject->jsExecutable()->isStrictMode()) {
            bool result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
            if (!result) {
                thisObject->putDirectAccessor(exec, propertyName, thisObject->globalObject()->throwTypeErrorGetterSetter(exec->vm()), DontDelete | DontEnum | Accessor);
                result = Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
                ASSERT(result);
            }
            return result;
        }
        slot.setCacheableCustom(thisObject, ReadOnly | DontEnum | DontDelete, callerGetter);
        return true;
    }

    return Base::getOwnPropertySlot(thisObject, exec, propertyName, slot);
}
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    StringImpl* uid)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);

    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicGetByIdList* list = 0;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            if (list->at(i).doesCalls())
                return GetByIdStatus(MakesCalls, true);
        }
    }

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);

    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(TakesSlowPath, true);

        if (structure->isDictionary())
            specificValue = 0;

        variant.m_structureSet.add(structure);
        variant.m_specificValue = JSValue(specificValue);
        result.appendVariant(variant);
        return result;
    }

    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            ASSERT(!list->at(listIndex).doesCalls());

            Structure* structure = list->at(listIndex).structure();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);

            if (list->at(listIndex).chain()) {
                RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
                    profiledBlock, structure, list->at(listIndex).chain(),
                    list->at(listIndex).chainCount()));
                if (!result.computeForChain(profiledBlock, uid, chain))
                    return GetByIdStatus(TakesSlowPath, true);
                continue;
            }

            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;

            if (!isValidOffset(myOffset))
                return GetByIdStatus(TakesSlowPath, true);

            bool found = false;
            for (unsigned variantIndex = 0; variantIndex < result.m_variants.size(); ++variantIndex) {
                GetByIdVariant& variant = result.m_variants[variantIndex];
                if (variant.m_chain)
                    continue;

                if (variant.m_offset != myOffset)
                    continue;

                found = true;
                if (variant.m_structureSet.contains(structure))
                    break;

                if (variant.m_specificValue != JSValue(specificValue))
                    variant.m_specificValue = JSValue();

                variant.m_structureSet.add(structure);
                break;
            }

            if (found)
                continue;

            if (!result.appendVariant(GetByIdVariant(StructureSet(structure), myOffset, specificValue)))
                return GetByIdStatus(TakesSlowPath, true);
        }

        return result;
    }

    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));
        if (result.computeForChain(profiledBlock, uid, chain))
            return result;
        return GetByIdStatus(TakesSlowPath, true);
    }

    default:
        return GetByIdStatus(TakesSlowPath, true);
    }

    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfStructureStubInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    // First check if it makes either calls, in which case we want to be super careful, or
    // if it's not set at all, in which case we punt.
    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
    if (!stubInfo.seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    if (stubInfo.resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo.accessType) {
    case access_get_by_id_self_list:
        list = stubInfo.u.getByIdSelfList.structureList;
        listSize = stubInfo.u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo.u.getByIdProtoList.structureList;
        listSize = stubInfo.u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }

    // Next check if it takes slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo.accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    case access_get_by_id_self: {
        Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->get(
            *profiledBlock->vm(), ident, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;

        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);

            Structure* structure = list->list[i].base.get();
            if (result.m_structureSet.contains(structure))
                continue;

            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->get(
                *profiledBlock->vm(), ident, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;

            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }

            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();

            result.m_structureSet.add(structure);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_proto: {
        if (!stubInfo.u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain.append(stubInfo.u.getByIdProto.prototypeStructure.get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdProto.baseObjectStructure.get());
        break;
    }

    case access_get_by_id_chain: {
        if (!stubInfo.u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        for (unsigned i = 0; i < stubInfo.u.getByIdChain.count; ++i)
            result.m_chain.append(stubInfo.u.getByIdChain.chain->head()[i].get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdChain.baseObjectStructure.get());
        break;
    }

    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }

    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;

    return result;
#else // ENABLE(JIT)
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT)
}
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
#if ENABLE(JIT)
    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!stubInfo || !stubInfo->seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo->accessType) {
    case access_get_by_id_self_list:
        list = stubInfo->u.getByIdSelfList.structureList;
        listSize = stubInfo->u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo->u.getByIdProtoList.structureList;
        listSize = stubInfo->u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }

    // Next check if it takes slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;

        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);

            Structure* structure = list->list[i].base.get();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);

            if (result.m_structureSet.contains(structure))
                continue;

            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;

            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }

            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();

            result.m_structureSet.add(structure);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_proto: {
        if (!stubInfo->u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdProto.baseObjectStructure.get(),
            stubInfo->u.getByIdProto.prototypeStructure.get()));
        computeForChain(result, profiledBlock, uid);
        break;
    }

    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));
        computeForChain(result, profiledBlock, uid);
        break;
    }

    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }

    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;

    return result;
#else // ENABLE(JIT)
    UNUSED_PARAM(map);
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT)
}