void Graph::predictArgumentTypes()
{
    ASSERT(m_codeBlock->numParameters() >= 1);
    for (size_t arg = 0; arg < static_cast<size_t>(m_codeBlock->numParameters()); ++arg) {
        ValueProfile* profile = m_profiledBlock->valueProfileForArgument(arg);
        if (!profile)
            continue;

        at(m_arguments[arg]).variableAccessData()->predict(profile->computeUpdatedPrediction());

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Argument [%zu] prediction: %s\n", arg, speculationToString(at(m_arguments[arg]).variableAccessData()->prediction()));
#endif
    }
}
void recordType(TypeProfileKey key, DataType dt) {
  if (!profiles) return;
  if (!isProfileRequest()) return;
  assert(dt != KindOfUninit);
  // Normalize strings to KindOfString.
  if (dt == KindOfStaticString) dt = KindOfString;
  TRACE(1, "recordType lookup: %s -> %d\n", key.m_name->data(), dt);
  ValueProfile *prof = keyToVP(key, KeyToVPMode::Write);
  if (prof->m_totalSamples != kMaxCounter) {
    prof->m_totalSamples++;
    // NB: we can't quite assert that we have fewer than kMaxCounter samples,
    // because other threads are updating this structure without locks.
    int dtIndex = getDataTypeIndex(dt);
    if (prof->m_samples[dtIndex] < kMaxCounter) {
      prof->m_samples[dtIndex]++;
    }
  }
  ONTRACE(2, prof->dump());
}
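For illustration, the following is a minimal, self-contained sketch of the same technique recordType() uses: saturating per-type counters that tolerate lock-free, racy updates and can later be read back to find a dominant observed type. ToyValueProfile, its kMaxCounter value, and the simplified DataType enum are invented for this example and are not HHVM's real types.

// Toy sketch of a saturating value profile (assumed names, not HHVM's ValueProfile).
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum class DataType : uint8_t { Null, Bool, Int, Double, String, Object, Count };

struct ToyValueProfile {
    static constexpr uint32_t kMaxCounter = 0xffff; // saturation point (assumed value)
    uint32_t totalSamples = 0;
    std::array<uint32_t, static_cast<std::size_t>(DataType::Count)> samples{};

    void record(DataType dt) {
        if (totalSamples == kMaxCounter)
            return;                       // profile is saturated; stop counting
        ++totalSamples;
        uint32_t& slot = samples[static_cast<std::size_t>(dt)];
        if (slot < kMaxCounter)
            ++slot;                       // per-type counter saturates independently
    }

    // Report the dominant type if every sample seen so far had that type,
    // i.e. the profiled site looks monomorphic; otherwise report no prediction.
    bool dominantType(DataType& out) const {
        for (std::size_t i = 0; i < samples.size(); ++i) {
            if (totalSamples > 0 && samples[i] == totalSamples) {
                out = static_cast<DataType>(i);
                return true;
            }
        }
        return false;
    }
};

int main() {
    ToyValueProfile profile;
    for (int i = 0; i < 100; ++i)
        profile.record(DataType::Int);    // monomorphic site: always an int

    DataType dt;
    if (profile.dominantType(dt))
        std::printf("predicted type index: %d\n", static_cast<int>(dt));
}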
bool run()
{
    ASSERT(m_graph.m_form == ThreadedCPS);
    ASSERT(m_graph.m_unificationState == GloballyUnified);

    ASSERT(codeBlock()->numParameters() >= 1);
    // Seed each argument's prediction from the baseline profiler's value profile.
    for (size_t arg = 0; arg < static_cast<size_t>(codeBlock()->numParameters()); ++arg) {
        ValueProfile* profile = profiledBlock()->valueProfileForArgument(arg);
        if (!profile)
            continue;

        m_graph.m_arguments[arg]->variableAccessData()->predict(profile->computeUpdatedPrediction());

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog(
            "Argument [", arg, "] prediction: ",
            SpeculationDump(m_graph.m_arguments[arg]->variableAccessData()->prediction()), "\n");
#endif
    }

    // At the OSR entry target block, also inject predictions for the concrete
    // values this compilation must be able to handle on entry.
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;
        if (block->bytecodeBegin != m_graph.m_osrEntryBytecodeIndex)
            continue;
        for (size_t i = 0; i < m_graph.m_mustHandleValues.size(); ++i) {
            Node* node = block->variablesAtHead.operand(
                m_graph.m_mustHandleValues.operandForIndex(i));
            if (!node)
                continue;
            ASSERT(node->hasLocal());
            node->variableAccessData()->predict(
                speculationFromValue(m_graph.m_mustHandleValues[i]));
        }
    }

    return true;
}
bool run()
{
    ASSERT(m_graph.m_form == ThreadedCPS);
    ASSERT(m_graph.m_unificationState == GloballyUnified);

    ASSERT(codeBlock()->numParameters() >= 1);
    {
        // Hold the profiled block's lock while reading its value profiles,
        // since the baseline tiers may be updating them concurrently.
        ConcurrentJITLocker locker(profiledBlock()->m_lock);
        for (size_t arg = 0; arg < static_cast<size_t>(codeBlock()->numParameters()); ++arg) {
            ValueProfile* profile = profiledBlock()->valueProfileForArgument(arg);
            if (!profile)
                continue;

            m_graph.m_arguments[arg]->variableAccessData()->predict(
                profile->computeUpdatedPrediction(locker));
        }
    }

    // At the OSR entry target block, also inject predictions for the concrete
    // values this compilation plan must be able to handle on entry.
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;
        if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
            continue;
        for (size_t i = 0; i < m_graph.m_plan.mustHandleValues.size(); ++i) {
            int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
            Node* node = block->variablesAtHead.operand(operand);
            if (!node)
                continue;
            ASSERT(node->hasLocal(m_graph));
            node->variableAccessData()->predict(
                speculationFromValue(m_graph.m_plan.mustHandleValues[i]));
        }
    }

    return true;
}
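As a rough illustration of what the predict() calls in these phases conceptually do, the toy sketch below treats a prediction as a bitset of possible types and merges (ORs) each new observation into it, whether it comes from an argument's value profile or from an OSR must-handle value. The Speculation typedef, the bit assignments, and ToyVariableAccessData are invented for this example and do not mirror JSC's real SpeculatedType machinery.

// Toy sketch of merging type speculations (assumed names, not JSC's real API).
#include <cstdint>
#include <cstdio>

using Speculation = uint32_t;                 // one bit per speculated type
constexpr Speculation SpecNone   = 0;
constexpr Speculation SpecInt32  = 1u << 0;
constexpr Speculation SpecDouble = 1u << 1;
constexpr Speculation SpecString = 1u << 2;

struct ToyVariableAccessData {
    Speculation prediction = SpecNone;

    // Merge an observed speculation into the running prediction; returns true
    // if the prediction actually widened (useful for fixpoint-style passes).
    bool predict(Speculation observed) {
        Speculation merged = prediction | observed;
        bool changed = merged != prediction;
        prediction = merged;
        return changed;
    }
};

int main() {
    ToyVariableAccessData arg0;
    arg0.predict(SpecInt32);                  // e.g. from the argument's value profile
    arg0.predict(SpecDouble);                 // e.g. from an OSR must-handle value
    std::printf("prediction bits: 0x%x\n",
                static_cast<unsigned>(arg0.prediction));  // 0x3: Int32|Double
}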