static void generateCheckInICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        StructureStubInfo& stubInfo = *generator.m_stub;
        auto call = generator.m_slowCall;
        auto slowPathBegin = generator.m_beginLabel;

        CCallHelpers fastPathJIT(&vm, codeBlock);

        auto jump = fastPathJIT.patchableJump();
        auto done = fastPathJIT.label();

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        LinkBuffer fastPath(vm, fastPathJIT, startOfIC, sizeOfIC);
        LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;

        // Note: we could handle the !isValid() case. We just don't appear to have a
        // reason to do so, yet.
        RELEASE_ASSERT(fastPath.isValid());

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + fastPath.size(), sizeOfIC - fastPath.size());

        state.finalizer->sideCodeLinkBuffer->link(
            ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

        CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
        fastPath.link(jump, slowPathBeginLoc);

        CodeLocationCall callReturnLocation = slowPath.locationOf(call);

        stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
            callReturnLocation, fastPath.locationOf(done));

        stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
            callReturnLocation, fastPath.locationOf(jump));
        stubInfo.callReturnLocation = callReturnLocation;
        stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
            callReturnLocation, slowPathBeginLoc);
    }
}
static void generateCheckInICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        StructureStubInfo& stubInfo = *generator.m_stub;
        auto call = generator.m_slowCall;
        auto slowPathBegin = generator.m_beginLabel;

        CCallHelpers fastPathJIT(&vm, codeBlock);

        auto jump = fastPathJIT.patchableJump();
        auto done = fastPathJIT.label();

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        auto postLink = [&] (LinkBuffer& fastPath, CCallHelpers&, bool) {
            LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;

            state.finalizer->sideCodeLinkBuffer->link(
                ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

            CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
            fastPath.link(jump, slowPathBeginLoc);

            CodeLocationCall callReturnLocation = slowPath.locationOf(call);

            stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(done));

            stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(jump));
            stubInfo.callReturnLocation = callReturnLocation;
            stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, slowPathBeginLoc);
        };

        generateInlineIfPossibleOutOfLineIfNot(
            state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC,
            "CheckIn inline cache", postLink);
    }
}
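// The helper called above is not part of this excerpt. What follows is a
// minimal sketch of the contract it would have to satisfy, not WebKit's actual
// implementation: if the assembled code fits in the reserved inline area, link
// it in place and nop-pad the remainder (which is what the first version of the
// function did unconditionally, asserting isValid()); otherwise, link it into
// out-of-line executable memory and rewrite the inline area as a jump to that
// copy. The callback signature is taken from the postLink lambda above; the
// body, including the use of m_assembler.buffer().codeSize() and FINALIZE_CODE,
// is an assumption.
static void generateInlineIfPossibleOutOfLineIfNot(
    State& state, VM& vm, CodeBlock* codeBlock, CCallHelpers& code,
    char* startOfInlineCode, size_t sizeOfInlineCode, const char* codeDescription,
    const std::function<void(LinkBuffer&, CCallHelpers&, bool wasCompiledInline)>& callback)
{
    size_t actualCodeSize = code.m_assembler.buffer().codeSize();

    if (actualCodeSize <= sizeOfInlineCode) {
        // The code fits: link it into the reserved inline slot and fill the
        // rest of the slot with nops.
        LinkBuffer inlineBuffer(vm, code, startOfInlineCode, sizeOfInlineCode);
        MacroAssembler::AssemblerType_T::fillNops(
            startOfInlineCode + actualCodeSize, sizeOfInlineCode - actualCodeSize);
        callback(inlineBuffer, code, true);
        return;
    }

    // The code is too large for the inline area (possible on targets like
    // ARM64, where instruction sequences are longer). Append a jump back to
    // the first instruction after the inline area, then link the code into
    // fresh executable memory.
    CCallHelpers::Jump returnJump = code.jump();
    LinkBuffer outOfLineBuffer(vm, code, codeBlock, JITCompilationMustSucceed);
    outOfLineBuffer.link(returnJump, CodeLocationLabel(startOfInlineCode + sizeOfInlineCode));
    callback(outOfLineBuffer, code, false);
    MacroAssemblerCodeRef outOfLineCode =
        FINALIZE_CODE(outOfLineBuffer, ("%s out of line", codeDescription));

    // Rewrite the inline area as a single nop-padded jump to the out-of-line copy.
    CCallHelpers trampolineJIT(&vm, codeBlock);
    CCallHelpers::Jump jumpToCode = trampolineJIT.jump();
    LinkBuffer trampolineBuffer(vm, trampolineJIT, startOfInlineCode, sizeOfInlineCode);
    trampolineBuffer.link(jumpToCode, CodeLocationLabel(outOfLineCode.code()));
    MacroAssembler::AssemblerType_T::fillNops(
        startOfInlineCode + trampolineBuffer.size(), sizeOfInlineCode - trampolineBuffer.size());

    // A real implementation must also keep outOfLineCode alive for the lifetime
    // of the compiled function, e.g. by handing it to state.finalizer; that
    // bookkeeping is elided from this sketch.
    UNUSED_PARAM(state);
}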