void Graph::predictArgumentTypes(CodeBlock* codeBlock) { ASSERT(codeBlock); ASSERT(codeBlock->alternative()); CodeBlock* profiledCodeBlock = codeBlock->alternative(); ASSERT(codeBlock->numParameters() >= 1); for (size_t arg = 0; arg < static_cast<size_t>(codeBlock->numParameters()); ++arg) { ValueProfile* profile = profiledCodeBlock->valueProfileForArgument(arg); if (!profile) continue; at(m_arguments[arg]).variableAccessData()->predict(profile->computeUpdatedPrediction()); #if DFG_ENABLE(DEBUG_VERBOSE) printf("Argument [%zu] prediction: %s\n", arg, predictionToString(at(m_arguments[arg]).variableAccessData()->prediction())); #endif } }
// Prints a one-line, human-readable description of a single DFG node to
// stdout. See the annotated example below for the output format. The optional
// codeBlock, when non-null, lets the dump resolve identifier and constant
// indices to their actual string/value representations.
void Graph::dump(NodeIndex nodeIndex, CodeBlock* codeBlock)
{
    Node& node = at(nodeIndex);
    NodeType op = node.op;

    unsigned refCount = node.refCount();
    bool skipped = !refCount;
    bool mustGenerate = node.mustGenerate();
    // The printed count is the 'real' reference count, excluding the
    // implicit 'mustGenerate' reference (which is flagged with '!' instead).
    if (mustGenerate) {
        ASSERT(refCount);
        --refCount;
    }

    dumpCodeOrigin(nodeIndex);
    // Indent two spaces per level of inlining so inlined code reads nested.
    printWhiteSpace((node.codeOrigin.inlineDepth() - 1) * 2);

    // Example/explanation of dataflow dump output
    //
    //   14:   <!2:7>  GetByVal(@3, @13)
    //   ^1  ^2 ^3     ^4       ^5
    //
    // (1) The nodeIndex of this operation.
    // (2) The reference count. The number printed is the 'real' count,
    //     not including the 'mustGenerate' ref. If the node is
    //     'mustGenerate' then the count it prefixed with '!'.
    // (3) The virtual register slot assigned to this node.
    // (4) The name of the operation.
    // (5) The arguments to the operation. The may be of the form:
    //     @#   - a NodeIndex referencing a prior node in the graph.
    //     arg# - an argument number.
    //     $#   - the index in the CodeBlock of a constant { for numeric constants the value is displayed | for integers, in both decimal and hex }.
    //     id#  - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }.
    //     var# - the index of a var on the global object, used by GetGlobalVar/PutGlobalVar operations.
    printf("% 4d:%s<%c%u:", (int)nodeIndex, skipped ? " skipped " : " ", mustGenerate ? '!' : ' ', refCount);
    if (node.hasResult() && !skipped && node.hasVirtualRegister())
        printf("%u", node.virtualRegister());
    else
        printf("-");
    printf(">\t%s(", opName(op));
    // hasPrinted tracks whether any operand has been emitted yet, so each
    // subsequent operand knows whether to prefix itself with ", ".
    bool hasPrinted = false;
    if (op & NodeHasVarArgs) {
        // Variable-arity nodes store their children in m_varArgChildren.
        for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++) {
            if (hasPrinted)
                printf(", ");
            else
                hasPrinted = true;
            printf("@%u", m_varArgChildren[childIdx].index());
        }
    } else {
        // Fixed-arity nodes have up to three children; children are filled
        // in order, so child1 being set implies something was printed.
        if (!!node.child1())
            printf("@%u", node.child1().index());
        if (!!node.child2())
            printf(", @%u", node.child2().index());
        if (!!node.child3())
            printf(", @%u", node.child3().index());
        hasPrinted = !!node.child1();
    }
    if (node.hasArithNodeFlags()) {
        printf("%s%s", hasPrinted ? ", " : "", arithNodeFlagsAsString(node.rawArithNodeFlags()));
        hasPrinted = true;
    }
    if (node.hasVarNumber()) {
        printf("%svar%u", hasPrinted ? ", " : "", node.varNumber());
        hasPrinted = true;
    }
    if (node.hasIdentifier()) {
        // With a codeBlock we can show the identifier's actual text.
        if (codeBlock)
            printf("%sid%u{%s}", hasPrinted ? ", " : "", node.identifierNumber(), codeBlock->identifier(node.identifierNumber()).ustring().utf8().data());
        else
            printf("%sid%u", hasPrinted ? ", " : "", node.identifierNumber());
        hasPrinted = true;
    }
    if (node.hasStructureSet()) {
        for (size_t i = 0; i < node.structureSet().size(); ++i) {
            printf("%sstruct(%p)", hasPrinted ? ", " : "", node.structureSet()[i]);
            hasPrinted = true;
        }
    }
    if (node.hasStructureTransitionData()) {
        printf("%sstruct(%p -> %p)", hasPrinted ? ", " : "", node.structureTransitionData().previousStructure, node.structureTransitionData().newStructure);
        hasPrinted = true;
    }
    if (node.hasStorageAccessData()) {
        StorageAccessData& storageAccessData = m_storageAccessData[node.storageAccessDataIndex()];
        if (codeBlock)
            printf("%sid%u{%s}", hasPrinted ? ", " : "", storageAccessData.identifierNumber, codeBlock->identifier(storageAccessData.identifierNumber).ustring().utf8().data());
        else
            printf("%sid%u", hasPrinted ? ", " : "", storageAccessData.identifierNumber);
        printf(", %lu", static_cast<unsigned long>(storageAccessData.offset));
        hasPrinted = true;
    }
    // A node refers to a local/argument iff it carries VariableAccessData.
    ASSERT(node.hasVariableAccessData() == node.hasLocal());
    if (node.hasVariableAccessData()) {
        VariableAccessData* variableAccessData = node.variableAccessData();
        int operand = variableAccessData->operand();
        if (operandIsArgument(operand))
            printf("%sarg%u(%s)", hasPrinted ? ", " : "", operandToArgument(operand), nameOfVariableAccessData(variableAccessData));
        else
            printf("%sr%u(%s)", hasPrinted ? ", " : "", operand, nameOfVariableAccessData(variableAccessData));
        hasPrinted = true;
    }
    if (node.hasConstantBuffer() && codeBlock) {
        if (hasPrinted)
            printf(", ");
        printf("%u:[", node.startConstant());
        for (unsigned i = 0; i < node.numConstants(); ++i) {
            if (i)
                printf(", ");
            printf("%s", codeBlock->constantBuffer(node.startConstant())[i].description());
        }
        printf("]");
        hasPrinted = true;
    }
    if (op == JSConstant) {
        printf("%s$%u", hasPrinted ? ", " : "", node.constantNumber());
        if (codeBlock) {
            // Only with a codeBlock can we resolve the constant's value.
            JSValue value = valueOfJSConstant(codeBlock, nodeIndex);
            printf(" = %s", value.description());
        }
        hasPrinted = true;
    }
    if (op == WeakJSConstant) {
        printf("%s%p", hasPrinted ? ", " : "", node.weakConstant());
        hasPrinted = true;
    }
    // Terminal nodes: jumps print their taken target; branches also print
    // the not-taken target.
    if (node.isBranch() || node.isJump()) {
        printf("%sT:#%u", hasPrinted ? ", " : "", node.takenBlockIndex());
        hasPrinted = true;
    }
    if (node.isBranch()) {
        printf("%sF:#%u", hasPrinted ? ", " : "", node.notTakenBlockIndex());
        hasPrinted = true;
    }
    (void)hasPrinted;

    printf(")");

    // For live nodes, append the prediction information relevant to the
    // kind of node this is.
    if (!skipped) {
        if (node.hasVariableAccessData())
            printf(" predicting %s, double ratio %lf%s", predictionToString(node.variableAccessData()->prediction()), node.variableAccessData()->doubleVoteRatio(), node.variableAccessData()->shouldUseDoubleFormat() ? ", forcing double" : "");
        else if (node.hasHeapPrediction())
            printf(" predicting %s", predictionToString(node.getHeapPrediction()));
        else if (node.hasVarNumber())
            printf(" predicting %s", predictionToString(getGlobalVarPrediction(node.varNumber())));
    }

    printf("\n");
}
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex) { #if ENABLE(DFG_OSR_ENTRY) ASSERT(codeBlock->getJITType() == JITCode::DFGJIT); ASSERT(codeBlock->alternative()); ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT); #if ENABLE(JIT_VERBOSE_OSR) printf("OSR in %p(%p) from bc#%u\n", codeBlock, codeBlock->alternative(), bytecodeIndex); #endif JSGlobalData* globalData = &exec->globalData(); CodeBlock* baselineCodeBlock = codeBlock->alternative(); // The code below checks if it is safe to perform OSR entry. It may find // that it is unsafe to do so, for any number of reasons, which are documented // below. If the code decides not to OSR then it returns 0, and it's the caller's // responsibility to patch up the state in such a way as to ensure that it's // both safe and efficient to continue executing baseline code for now. This // should almost certainly include calling either codeBlock->optimizeAfterWarmUp() // or codeBlock->dontOptimizeAnytimeSoon(). // 1) Check if the DFG code set a code map. If it didn't, it means that it // cannot handle OSR entry. This currently only happens if we disable // dynamic speculation termination and end up with a DFG code block that // was compiled entirely with the non-speculative JIT. The non-speculative // JIT does not support OSR entry and probably never will, since it is // kind of a deprecated compiler right now. #if ENABLE(DYNAMIC_TERMINATE_SPECULATION) ASSERT(codeBlock->jitCodeMap()); #else if (!codeBlock->jitCodeMap()) { #if ENABLE(JIT_VERBOSE_OSR) printf(" OSR failed because of a missing JIT code map.\n"); #endif return 0; } #endif // 2) Verify predictions. If the predictions are inconsistent with the actual // values, then OSR entry is not possible at this time. It's tempting to // assume that we could somehow avoid this case. We can certainly avoid it // for first-time loop OSR - that is, OSR into a CodeBlock that we have just // compiled. 
Then we are almost guaranteed that all of the predictions will // check out. It would be pretty easy to make that a hard guarantee. But // then there would still be the case where two call frames with the same // baseline CodeBlock are on the stack at the same time. The top one // triggers compilation and OSR. In that case, we may no longer have // accurate value profiles for the one deeper in the stack. Hence, when we // pop into the CodeBlock that is deeper on the stack, we might OSR and // realize that the predictions are wrong. Probably, in most cases, this is // just an anomaly in the sense that the older CodeBlock simply went off // into a less-likely path. So, the wisest course of action is to simply not // OSR at this time. PredictionTracker* predictions = baselineCodeBlock->predictions(); if (predictions->numberOfArguments() > exec->argumentCountIncludingThis()) return 0; for (unsigned i = 1; i < predictions->numberOfArguments(); ++i) { if (!predictionIsValid(globalData, exec->argument(i - 1), predictions->getArgumentPrediction(i))) { #if ENABLE(JIT_VERBOSE_OSR) printf(" OSR failed because argument %u is %s, expected %s.\n", i, exec->argument(i - 1).description(), predictionToString(predictions->getArgumentPrediction(i))); #endif return 0; } } // FIXME: we need to know if at an OSR entry, a variable is live. If it isn't // then we shouldn't try to verify its prediction. for (unsigned i = 0; i < predictions->numberOfVariables(); ++i) { if (!predictionIsValid(globalData, exec->registers()[i].jsValue(), predictions->getPrediction(i))) { #if ENABLE(JIT_VERBOSE_OSR) printf(" OSR failed because variable %u is %s, expected %s.\n", i, exec->registers()[i].jsValue().description(), predictionToString(predictions->getPrediction(i))); #endif return 0; } } // 3) Check the stack height. The DFG JIT may require a taller stack than the // baseline JIT, in some cases. If we can't grow the stack, then don't do // OSR right now. 
That's the only option we have unless we want basic block // boundaries to start throwing RangeErrors. Although that would be possible, // it seems silly: you'd be diverting the program to error handling when it // would have otherwise just kept running albeit less quickly. if (!globalData->interpreter->registerFile().grow(&exec->registers()[codeBlock->m_numCalleeRegisters])) { #if ENABLE(JIT_VERBOSE_OSR) printf(" OSR failed because stack growth failed..\n"); #endif return 0; } #if ENABLE(JIT_VERBOSE_OSR) printf(" OSR should succeed.\n"); #endif // 4) Fix the call frame. exec->setCodeBlock(codeBlock); // 5) Find and return the destination machine code address. The DFG stores // the machine code offsets of OSR targets in a CompactJITCodeMap. // Decoding it is not super efficient, but we expect that OSR entry // happens sufficiently rarely, and that OSR entrypoints are sufficiently // few, that this won't hurt throughput. Note that the only real // reason why we use a CompactJITCodeMap is to avoid having to introduce // yet another data structure for mapping between bytecode indices and // machine code offsets. CompactJITCodeMap::Decoder decoder(codeBlock->jitCodeMap()); unsigned machineCodeOffset = std::numeric_limits<unsigned>::max(); while (decoder.numberOfEntriesRemaining()) { unsigned currentBytecodeIndex; unsigned currentMachineCodeOffset; decoder.read(currentBytecodeIndex, currentMachineCodeOffset); if (currentBytecodeIndex == bytecodeIndex) { machineCodeOffset = currentMachineCodeOffset; break; } } ASSERT(machineCodeOffset != std::numeric_limits<unsigned>::max()); void* result = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(codeBlock->getJITCode().start()) + machineCodeOffset); #if ENABLE(JIT_VERBOSE_OSR) printf(" OSR returning machine code address %p.\n", result); #endif return result; #else // ENABLE(DFG_OSR_ENTRY) UNUSED_PARAM(exec); UNUSED_PARAM(codeBlock); UNUSED_PARAM(bytecodeIndex); return 0; #endif }