void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    JmpSrc failureCase = checkStructure(X86::eax, structure);
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());
    ASSERT(code);

    // Use the repatch information to link the failure case back to the original slow case routine.
    void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCase, lastProtoBegin);

    // On success, return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    structure->ref();
    polymorphicStructures->list[currentIndex].set(code, structure);

    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
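The function above compiles one entry of a polymorphic inline cache: a structure check guarding a direct property load, with the failure branch linked to the previous stub (or the slow case) and the hot-path jump repatched to enter the new stub first. A minimal standalone C++ model of that dispatch idea follows; all types and names here (PolymorphicCache, Stub, slots) are illustrative stand-ins, not the JSC API.

#include <cstdio>
#include <vector>

struct Structure { int id; };
struct Object { Structure* structure; int slots[4]; };

using Stub = int (*)(Object&);

// Each cached stub is keyed by a Structure identity and tried in order;
// a miss falls through to the slow path, which may append a new stub -
// the moral equivalent of the failure-case linking done above.
struct PolymorphicCache {
    struct Entry { Structure* structure; Stub stub; };
    std::vector<Entry> list;

    int get(Object& obj)
    {
        for (const Entry& e : list) {
            if (e.structure == obj.structure)
                return e.stub(obj); // fast path: specialized stub
        }
        return slowGet(obj); // miss: generic lookup
    }

    int slowGet(Object& obj)
    {
        // A real JIT would compile a new stub here and link its failure
        // branch to the previous list head.
        return obj.slots[0];
    }
};

int readSlot0(Object& obj) { return obj.slots[0]; }

int main()
{
    Structure s { 1 };
    Object obj { &s, { 42, 0, 0, 0 } };
    PolymorphicCache cache;
    std::printf("miss -> %d\n", cache.get(obj)); // slow path
    cache.list.push_back({ &s, readSlot0 });     // "compile" a stub for s
    std::printf("hit  -> %d\n", cache.get(obj)); // fast path via the stub
}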
void checkObject(const SIMPLE_OBJECT *psObject, const char *const location_description, const char *function, const int recurse)
{
    if (recurse < 0)
    {
        return;
    }

    ASSERT(psObject != nullptr, "NULL pointer");

    switch (psObject->type)
    {
    case OBJ_DROID:
        checkDroid((const DROID *)psObject, location_description, function, recurse - 1);
        break;

    case OBJ_STRUCTURE:
        checkStructure((const STRUCTURE *)psObject, location_description, function, recurse - 1);
        break;

    case OBJ_PROJECTILE:
        checkProjectile((const PROJECTILE *)psObject, location_description, function, recurse - 1);
        break;

    case OBJ_FEATURE:
    case OBJ_TARGET:
        break;

    default:
        ASSERT_HELPER(!"invalid object type", location_description, function, "CHECK_OBJECT: Invalid object type (type num %u)", (unsigned int)psObject->type);
        break;
    }
}
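checkObject is a recursion-bounded sanity check: it dispatches on the object's type tag and lets the per-type checkers descend into referenced objects with a decremented budget, so mutually referencing objects cannot recurse forever. A self-contained sketch of that pattern, with hypothetical types (BaseObject and OBJ_A/OBJ_B are illustrative, not the Warzone API):

#include <cassert>
#include <cstdio>

enum ObjectType { OBJ_A, OBJ_B };
struct BaseObject { ObjectType type; BaseObject* partner; };

void checkObjectSketch(const BaseObject* obj, int recurse)
{
    if (recurse < 0)
        return; // budget exhausted: stop descending, do not fail
    assert(obj != nullptr);
    switch (obj->type) {
    case OBJ_A:
    case OBJ_B:
        if (obj->partner)
            checkObjectSketch(obj->partner, recurse - 1); // bounded descent
        break;
    default:
        assert(!"invalid object type");
    }
}

int main()
{
    BaseObject a { OBJ_A, nullptr }, b { OBJ_B, &a };
    a.partner = &b; // a cycle: terminates only because of the budget
    checkObjectSketch(&a, 3);
    std::puts("ok");
}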
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);

    // On success, return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(code, structure, chain);

    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
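The chain-list stub emits one cmp/jne pair per prototype hop: the cached offset is only valid while the base object and every prototype on the path still have the structures observed when the lookup was cached. A standalone sketch of what those guards validate, using hypothetical types rather than the JSC ones:

#include <cstddef>
#include <optional>
#include <vector>

struct Structure { };
struct Object { const Structure* structure; const Object* prototype; std::vector<int> storage; };

std::optional<int> cachedChainGet(const Object& base, const Structure* baseStructure,
                                  const std::vector<const Structure*>& chain, size_t cachedOffset)
{
    if (chain.empty())
        return std::nullopt; // mirrors ASSERT(count): a chain access has at least one hop
    if (base.structure != baseStructure)
        return std::nullopt; // base structure check failed
    const Object* holder = base.prototype;
    for (size_t i = 0; i < chain.size(); ++i) {
        if (!holder || holder->structure != chain[i])
            return std::nullopt; // chain mutated: fall back to the slow path
        if (i + 1 < chain.size())
            holder = holder->prototype;
    }
    return holder->storage[cachedOffset]; // safe: the cached layout still holds
}

int main()
{
    Structure sBase, sProto;
    Object proto { &sProto, nullptr, { 7 } };
    Object obj { &sBase, &proto, {} };
    return cachedChainGet(obj, &sBase, { &sProto }, 0).value_or(-1) == 7 ? 0 : 1;
}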
void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Checks out okay! - putDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}
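The testl of JSImmediate::TagMask before the structure check is a pointer-tagging guard: immediate values are encoded with nonzero low tag bits, so any tagged value must bail out before being dereferenced as a cell. A small sketch of that encoding, with hypothetical tag values (the real JSImmediate layout differs):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t TagMask = 0x3; // illustrative: low two bits carry the tag

bool isCellPointer(uintptr_t value)
{
    return (value & TagMask) == 0; // aligned heap pointers have zero tag bits
}

uintptr_t encodeInt(int32_t i)
{
    // Illustrative immediate encoding: payload shifted up, tag bit set.
    return (static_cast<uintptr_t>(static_cast<uint32_t>(i)) << 2) | 0x1;
}

int main()
{
    alignas(4) static int cell = 42;
    std::printf("cell is cell: %d\n", isCellPointer(reinterpret_cast<uintptr_t>(&cell)));
    std::printf("imm is cell:  %d\n", isCellPointer(encodeInt(7)));
}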
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    X86Assembler::link(code, failureCases1, lastProtoBegin);
    X86Assembler::link(code, failureCases2, lastProtoBegin);

    // On success, return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);

    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
void fixupNode(Node& node)
{
    if (!node.shouldGenerate())
        return;

    NodeType op = node.op();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
#endif

    switch (op) {
    case GetById: {
        if (!isInt32Speculation(m_graph[m_compileIndex].prediction()))
            break;
        if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
            break;
        bool isArray = isArraySpeculation(m_graph[node.child1()].prediction());
        bool isArguments = isArgumentsSpeculation(m_graph[node.child1()].prediction());
        bool isString = isStringSpeculation(m_graph[node.child1()].prediction());
        bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
        bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
        bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
        bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
        bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
        bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
        bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
        bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
        bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
        if (!isArray && !isArguments && !isString && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
            break;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
#endif
        if (isArray) {
            node.setOp(GetArrayLength);
            ASSERT(node.flags() & NodeMustGenerate);
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);

            ArrayProfile* arrayProfile = m_graph.baselineCodeBlockFor(node.codeOrigin)->getArrayProfile(node.codeOrigin.bytecodeIndex);
            if (!arrayProfile)
                break;
            arrayProfile->computeUpdatedPrediction();
            if (!arrayProfile->hasDefiniteStructure())
                break;
            m_graph.ref(node.child1());
            Node checkStructure(CheckStructure, node.codeOrigin, OpInfo(m_graph.addStructureSet(arrayProfile->expectedStructure())), node.child1().index());
            checkStructure.ref();
            NodeIndex checkStructureIndex = m_graph.size();
            m_graph.append(checkStructure);
            m_insertionSet.append(m_indexInBlock, checkStructureIndex);
            break;
        }
        if (isArguments)
            node.setOp(GetArgumentsLength);
        else if (isString)
            node.setOp(GetStringLength);
        else if (isInt8Array)
            node.setOp(GetInt8ArrayLength);
        else if (isInt16Array)
            node.setOp(GetInt16ArrayLength);
        else if (isInt32Array)
            node.setOp(GetInt32ArrayLength);
        else if (isUint8Array)
            node.setOp(GetUint8ArrayLength);
        else if (isUint8ClampedArray)
            node.setOp(GetUint8ClampedArrayLength);
        else if (isUint16Array)
            node.setOp(GetUint16ArrayLength);
        else if (isUint32Array)
            node.setOp(GetUint32ArrayLength);
        else if (isFloat32Array)
            node.setOp(GetFloat32ArrayLength);
        else if (isFloat64Array)
            node.setOp(GetFloat64ArrayLength);
        else
            ASSERT_NOT_REACHED();
        // No longer MustGenerate.
        ASSERT(node.flags() & NodeMustGenerate);
        node.clearFlags(NodeMustGenerate);
        m_graph.deref(m_compileIndex);
        break;
    }
    case GetIndexedPropertyStorage: {
        if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].shouldSpeculateInteger() || m_graph[node.child1()].shouldSpeculateArguments()) {
            node.setOpAndDefaultFlags(Nop);
            m_graph.clearAndDerefChild1(node);
            m_graph.clearAndDerefChild2(node);
            m_graph.clearAndDerefChild3(node);
            node.setRefCount(0);
        }
        break;
    }
    case GetByVal:
    case StringCharAt:
    case StringCharCodeAt: {
        if (!!node.child3() && m_graph[node.child3()].op() == Nop)
            node.children.child3() = Edge();
        break;
    }
    case ValueToInt32: {
        if (m_graph[node.child1()].shouldSpeculateNumber() && node.mustGenerate()) {
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);
        }
        break;
    }
    case BitAnd:
    case BitOr:
    case BitXor:
    case BitRShift:
    case BitLShift:
    case BitURShift: {
        fixIntEdge(node.children.child1());
        fixIntEdge(node.children.child2());
        break;
    }
    case CompareEq:
    case CompareLess:
    case CompareLessEq:
    case CompareGreater:
    case CompareGreaterEq:
    case CompareStrictEq: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case LogicalNot: {
        if (m_graph[node.child1()].shouldSpeculateInteger())
            break;
        if (!m_graph[node.child1()].shouldSpeculateNumber())
            break;
        fixDoubleEdge(0);
        break;
    }
    case Branch: {
        if (!m_graph[node.child1()].shouldSpeculateInteger() && m_graph[node.child1()].shouldSpeculateNumber())
            fixDoubleEdge(0);

        Node& myNode = m_graph[m_compileIndex]; // Reload because the graph may have changed.
        Edge logicalNotEdge = myNode.child1();
        Node& logicalNot = m_graph[logicalNotEdge];
        if (logicalNot.op() == LogicalNot && logicalNot.adjustedRefCount() == 1) {
            Edge newChildEdge = logicalNot.child1();
            if (m_graph[newChildEdge].hasBooleanResult()) {
                m_graph.ref(newChildEdge);
                m_graph.deref(logicalNotEdge);
                myNode.children.setChild1(newChildEdge);

                BlockIndex toBeTaken = myNode.notTakenBlockIndex();
                BlockIndex toBeNotTaken = myNode.takenBlockIndex();
                myNode.setTakenBlockIndex(toBeTaken);
                myNode.setNotTakenBlockIndex(toBeNotTaken);
            }
        }
        break;
    }
    case SetLocal: {
        if (node.variableAccessData()->isCaptured())
            break;
        if (!node.variableAccessData()->shouldUseDoubleFormat())
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithAdd:
    case ValueAdd: {
        if (m_graph.addShouldSpeculateInteger(node))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithSub: {
        if (m_graph.addShouldSpeculateInteger(node) && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithNegate: {
        if (m_graph.negateShouldSpeculateInteger(node))
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithMin:
    case ArithMax:
    case ArithMod: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithMul: {
        if (m_graph.mulShouldSpeculateInteger(node))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithDiv: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()) {
            if (isX86())
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);

            Node& oldDivision = m_graph[m_compileIndex];
            Node newDivision = oldDivision;
            newDivision.setRefCount(2);
            newDivision.predict(SpecDouble);
            NodeIndex newDivisionIndex = m_graph.size();

            oldDivision.setOp(DoubleAsInt32);
            oldDivision.children.initialize(Edge(newDivisionIndex, DoubleUse), Edge(), Edge());

            m_graph.append(newDivision);
            m_insertionSet.append(m_indexInBlock, newDivisionIndex);
            break;
        }
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithAbs: {
        if (m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithSqrt: {
        fixDoubleEdge(0);
        break;
    }
    case PutByVal:
    case PutByValSafe: {
        Edge child1 = m_graph.varArgChild(node, 0);
        Edge child2 = m_graph.varArgChild(node, 1);
        Edge child3 = m_graph.varArgChild(node, 2);
        if (!m_graph[child1].prediction() || !m_graph[child2].prediction())
            break;
        if (!m_graph[child2].shouldSpeculateInteger())
            break;
        if (isActionableIntMutableArraySpeculation(m_graph[child1].prediction())) {
            if (m_graph[child3].isConstant())
                break;
            if (m_graph[child3].shouldSpeculateInteger())
                break;
            fixDoubleEdge(2);
            break;
        }
        if (isActionableFloatMutableArraySpeculation(m_graph[child1].prediction())) {
            fixDoubleEdge(2);
            break;
        }
        break;
    }
    default:
        break;
    }

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    if (!(node.flags() & NodeHasVarArgs)) {
        dataLog("new children: ");
        node.dumpChildren(WTF::dataFile());
    }
    dataLog("\n");
#endif
}
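The GetById arm above is a profile-driven strength reduction: a generic property load of "length" is rewritten into a specialized length op, and when the array profile pins down a single structure, a CheckStructure guard is appended through an insertion set so block order stays intact. A compact model of that rewrite over a hypothetical toy IR (Op, Node, and fixupGetById here are illustrative, not the DFG types):

#include <cstdio>
#include <string>
#include <vector>

enum class Op { GetById, GetArrayLength, CheckStructure };
struct Node { Op op; std::string property; int child; };

void fixupGetById(std::vector<Node>& graph, int index, bool receiverIsArrayProfile)
{
    Node& node = graph[index];
    if (node.op != Op::GetById || node.property != "length" || !receiverIsArrayProfile)
        return;
    int guardChild = node.child;  // read before appending: push_back may reallocate
    node.op = Op::GetArrayLength; // specialize the generic load
    // Append the guard node; a real compiler records it in an insertion set
    // (as m_insertionSet does above) so it executes before the load.
    graph.push_back(Node { Op::CheckStructure, "", guardChild });
}

int main()
{
    std::vector<Node> graph { Node { Op::GetById, "length", -1 } };
    fixupGetById(graph, 0, true);
    std::printf("op=%d guards=%zu\n", static_cast<int>(graph[0].op), graph.size() - 1);
}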
bool run()
{
    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;
        variable->clearVotes();
    }

    // Identify the set of variables that are always subject to the same structure
    // checks. For now, only consider monomorphic structure checks (one structure).
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            NodeIndex nodeIndex = block->at(indexInBlock);
            Node& node = m_graph[nodeIndex];
            if (!node.shouldGenerate())
                continue;
            switch (node.op()) {
            case CheckStructure: {
                Node& child = m_graph[node.child1()];
                if (child.op() != GetLocal)
                    break;
                VariableAccessData* variable = child.variableAccessData();
                variable->vote(VoteStructureCheck);
                if (variable->isCaptured() || variable->structureCheckHoistingFailed())
                    break;
                if (!isCellSpeculation(variable->prediction()))
                    break;
                noticeStructureCheck(variable, node.structureSet());
                break;
            }

            case ForwardCheckStructure:
            case ForwardStructureTransitionWatchpoint:
                // We currently rely on the fact that we're the only ones who would
                // insert this node.
                ASSERT_NOT_REACHED();
                break;

            case GetByOffset:
            case PutByOffset:
            case PutStructure:
            case StructureTransitionWatchpoint:
            case AllocatePropertyStorage:
            case ReallocatePropertyStorage:
            case GetPropertyStorage:
            case GetByVal:
            case PutByVal:
            case PutByValAlias:
            case GetArrayLength:
            case CheckArray:
            case GetIndexedPropertyStorage:
            case Phantom:
                // Don't count these uses.
                break;

            default:
                m_graph.vote(node, VoteOther);
                break;
            }
        }
    }

    // Disable structure hoisting on variables that appear to mostly be used in
    // contexts where it doesn't make sense.
    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;
        if (variable->voteRatio() >= Options::structureCheckVoteRatioForHoisting())
            continue;
        HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
        if (iter == m_map.end())
            continue;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog("Zeroing the structure to hoist for %s because the ratio is %lf.\n", m_graph.nameOfVariableAccessData(variable), variable->voteRatio());
#endif
        iter->second.m_structure = 0;
    }

    // Identify the set of variables that are live across a structure clobber.
    Operands<VariableAccessData*> live(
        m_graph.m_blocks[0]->variablesAtTail.numberOfArguments(),
        m_graph.m_blocks[0]->variablesAtTail.numberOfLocals());
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        ASSERT(live.numberOfArguments() == block->variablesAtTail.numberOfArguments());
        ASSERT(live.numberOfLocals() == block->variablesAtTail.numberOfLocals());
        for (unsigned i = live.size(); i--;) {
            NodeIndex indexAtTail = block->variablesAtTail[i];
            VariableAccessData* variable;
            if (indexAtTail == NoNode)
                variable = 0;
            else
                variable = m_graph[indexAtTail].variableAccessData();
            live[i] = variable;
        }
        for (unsigned indexInBlock = block->size(); indexInBlock--;) {
            NodeIndex nodeIndex = block->at(indexInBlock);
            Node& node = m_graph[nodeIndex];
            if (!node.shouldGenerate())
                continue;
            switch (node.op()) {
            case GetLocal:
            case Flush:
                // This is a birth.
                live.operand(node.local()) = node.variableAccessData();
                break;

            case SetLocal:
            case SetArgument:
                ASSERT(live.operand(node.local())); // Must be live.
                ASSERT(live.operand(node.local()) == node.variableAccessData()); // Must have the variable we expected.
                // This is a death.
                live.operand(node.local()) = 0;
                break;

            // Use the CFA's notion of what clobbers the world.
            case ValueAdd:
                if (m_graph.addShouldSpeculateInteger(node))
                    break;
                if (Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
                    break;
                clobber(live);
                break;

            case CompareLess:
            case CompareLessEq:
            case CompareGreater:
            case CompareGreaterEq:
            case CompareEq: {
                Node& left = m_graph[node.child1()];
                Node& right = m_graph[node.child2()];
                if (Node::shouldSpeculateInteger(left, right))
                    break;
                if (Node::shouldSpeculateNumber(left, right))
                    break;
                if (node.op() == CompareEq) {
                    if ((m_graph.isConstant(node.child1().index()) && m_graph.valueOfJSConstant(node.child1().index()).isNull())
                        || (m_graph.isConstant(node.child2().index()) && m_graph.valueOfJSConstant(node.child2().index()).isNull()))
                        break;
                    if (Node::shouldSpeculateFinalObject(left, right))
                        break;
                    if (Node::shouldSpeculateArray(left, right))
                        break;
                    if (left.shouldSpeculateFinalObject() && right.shouldSpeculateFinalObjectOrOther())
                        break;
                    if (right.shouldSpeculateFinalObject() && left.shouldSpeculateFinalObjectOrOther())
                        break;
                    if (left.shouldSpeculateArray() && right.shouldSpeculateArrayOrOther())
                        break;
                    if (right.shouldSpeculateArray() && left.shouldSpeculateArrayOrOther())
                        break;
                }
                clobber(live);
                break;
            }

            case GetByVal:
            case PutByVal:
            case PutByValAlias:
                if (m_graph.byValIsPure(node))
                    break;
                clobber(live);
                break;

            case GetMyArgumentsLengthSafe:
            case GetMyArgumentByValSafe:
            case GetById:
            case GetByIdFlush:
            case PutStructure:
            case PhantomPutStructure:
            case PutById:
            case PutByIdDirect:
            case Call:
            case Construct:
            case Resolve:
            case ResolveBase:
            case ResolveBaseStrictPut:
            case ResolveGlobal:
                clobber(live);
                break;

            default:
                ASSERT(node.op() != Phi);
                break;
            }
        }
    }

    bool changed = false;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    for (HashMap<VariableAccessData*, CheckData>::iterator it = m_map.begin(); it != m_map.end(); ++it) {
        if (!it->second.m_structure) {
            dataLog("Not hoisting checks for %s because of heuristics.\n", m_graph.nameOfVariableAccessData(it->first));
            continue;
        }
        if (it->second.m_isClobbered && !it->second.m_structure->transitionWatchpointSetIsStillValid()) {
            dataLog("Not hoisting checks for %s because the structure is clobbered and has an invalid watchpoint set.\n", m_graph.nameOfVariableAccessData(it->first));
            continue;
        }
        dataLog("Hoisting checks for %s\n", m_graph.nameOfVariableAccessData(it->first));
    }
#endif // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)

    // Make changes:
    // 1) If a variable's live range does not span a clobber, then inject structure
    //    checks before the SetLocal.
    // 2) If a variable's live range spans a clobber but is watchpointable, then
    //    inject structure checks before the SetLocal and replace all other structure
    //    checks on that variable with structure transition watchpoints.
    InsertionSet<NodeIndex> insertionSet;
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            NodeIndex nodeIndex = block->at(indexInBlock);
            Node& node = m_graph[nodeIndex];
            // Be careful not to use 'node' after appending to the graph. In those switch
            // cases where we need to append, we first carefully extract everything we need
            // from the node, before doing any appending.
            if (!node.shouldGenerate())
                continue;
            switch (node.op()) {
            case SetArgument: {
                ASSERT(!blockIndex);
                // Insert a GetLocal and a CheckStructure immediately following this
                // SetArgument, if the variable was a candidate for structure hoisting.
                // If the basic block previously only had the SetArgument as its
                // variable-at-tail, then replace it with this GetLocal.
                VariableAccessData* variable = node.variableAccessData();
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
                if (iter == m_map.end())
                    break;
                if (!iter->second.m_structure)
                    break;
                if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid())
                    break;

                node.ref();

                CodeOrigin codeOrigin = node.codeOrigin;

                Node getLocal(GetLocal, codeOrigin, OpInfo(variable), nodeIndex);
                getLocal.predict(variable->prediction());
                getLocal.ref();
                NodeIndex getLocalIndex = m_graph.size();
                m_graph.append(getLocal);
                insertionSet.append(indexInBlock + 1, getLocalIndex);

                Node checkStructure(CheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), getLocalIndex);
                checkStructure.ref();
                NodeIndex checkStructureIndex = m_graph.size();
                m_graph.append(checkStructure);
                insertionSet.append(indexInBlock + 1, checkStructureIndex);

                if (block->variablesAtTail.operand(variable->local()) == nodeIndex)
                    block->variablesAtTail.operand(variable->local()) = getLocalIndex;

                m_graph.substituteGetLocal(*block, indexInBlock, variable, getLocalIndex);

                changed = true;
                break;
            }

            case SetLocal: {
                VariableAccessData* variable = node.variableAccessData();
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable);
                if (iter == m_map.end())
                    break;
                if (!iter->second.m_structure)
                    break;
                if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid())
                    break;

                // First insert a dead SetLocal to tell OSR that the child's value should
                // be dropped into this bytecode variable if the CheckStructure decides
                // to exit.
                CodeOrigin codeOrigin = node.codeOrigin;
                NodeIndex child1 = node.child1().index();

                Node setLocal(SetLocal, codeOrigin, OpInfo(variable), child1);
                NodeIndex setLocalIndex = m_graph.size();
                m_graph.append(setLocal);
                insertionSet.append(indexInBlock, setLocalIndex);
                m_graph[child1].ref();

                // Use a ForwardCheckStructure to indicate that we should exit to the
                // next bytecode instruction rather than reexecuting the current one.
                Node checkStructure(ForwardCheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), child1);
                checkStructure.ref();
                NodeIndex checkStructureIndex = m_graph.size();
                m_graph.append(checkStructure);
                insertionSet.append(indexInBlock, checkStructureIndex);

                changed = true;
                break;
            }

            case CheckStructure: {
                Node& child = m_graph[node.child1()];
                if (child.op() != GetLocal)
                    break;
                HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(child.variableAccessData());
                if (iter == m_map.end())
                    break;
                if (!iter->second.m_structure)
                    break;
                if (!iter->second.m_isClobbered) {
                    node.setOpAndDefaultFlags(Phantom);
                    ASSERT(node.refCount() == 1);
                    break;
                }
                if (!iter->second.m_structure->transitionWatchpointSetIsStillValid())
                    break;
                ASSERT(iter->second.m_structure == node.structureSet().singletonStructure());
                node.convertToStructureTransitionWatchpoint();
                changed = true;
                break;
            }

            default:
                break;
            }
        }
        insertionSet.execute(*block);
    }

    return changed;
}
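The pass above decides whether to hoist by voting: every CheckStructure whose child is a GetLocal votes "structure check" for that variable, most other uses vote "other", and hoisting is abandoned when the check ratio falls below a threshold. A standalone sketch of that heuristic (the 0.5 threshold is a hypothetical stand-in for Options::structureCheckVoteRatioForHoisting()):

#include <cstdio>

struct VariableVotes {
    unsigned structureCheckVotes = 0;
    unsigned otherVotes = 0;

    double voteRatio() const
    {
        unsigned total = structureCheckVotes + otherVotes;
        return total ? static_cast<double>(structureCheckVotes) / total : 0.0;
    }
};

int main()
{
    const double hoistThreshold = 0.5; // hypothetical threshold value
    VariableVotes v { 7, 2 };          // mostly used under structure checks
    std::printf("hoist: %s\n", v.voteRatio() >= hoistThreshold ? "yes" : "no");
}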
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success, return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success, return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases3 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
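All of these stubs end the same way: the freshly compiled code is linked into the running program by rewriting an existing call or branch (ctiRepatchCallByReturnAddress, repatchBranchOffset). A standalone model of that lifecycle follows, swapping a function pointer at the call site instead of rewriting machine code; CallSite and the handler names here are hypothetical.

#include <cstdio>

struct Object { int slot; };

using Handler = int (*)(Object&);

int genericGet(Object& obj) { std::puts("generic path"); return obj.slot; }
int compiledStub(Object& obj) { std::puts("compiled stub"); return obj.slot; }

struct CallSite {
    Handler target = genericGet; // starts out pointing at the slow path
    int invoke(Object& obj) { return target(obj); }
    void repatch(Handler h) { target = h; } // moral equivalent of rewriting the call
};

int main()
{
    Object obj { 42 };
    CallSite site;
    site.invoke(obj);           // slow path; a real JIT would compile a stub here
    site.repatch(compiledStub); // link the new stub into the call site
    site.invoke(obj);           // later calls take the fast path
}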