void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned i, unsigned)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    JmpSrc wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx, i);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(i, Interpreter::cti_op_call_eval);
        __ cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::eax);
        wasEval = __ jne();
    }

    emitGetVirtualRegister(callee, X86::ecx, i);
    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(X86::ecx, i);
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
    m_slowCases.append(SlowCaseEntry(__ jne(), i));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx, i);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_i32r(argCount, X86::edx);

    emitNakedCall(i, m_interpreter->m_ctiVirtualCall);

    if (opcodeID == op_call_eval)
        __ link(wasEval, __ label());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JitFragmentWriter::_emitSideExit(RewriterVar* var, RewriterVar* val_constant, CFGBlock* next_block, RewriterVar* next_block_var)
{
    assert(val_constant->is_constant);
    assert(next_block_var->is_constant);

    uint64_t val = val_constant->constant_value;

    assembler::Register var_reg = var->getInReg();
    if (isLargeConstant(val)) {
        assembler::Register reg = val_constant->getInReg(Location::any(), true, /* otherThan */ var_reg);
        assembler->cmp(var_reg, reg);
    } else {
        assembler->cmp(var_reg, assembler::Immediate(val));
    }

    {
        assembler::ForwardJump jne(*assembler, assembler::COND_EQUAL);
        int exit_size = 0;
        _emitJump(next_block, next_block_var, exit_size);
        if (exit_size) {
            RELEASE_ASSERT(!side_exit_patch_location.first,
                           "if we start to emit more than one side exit we should make this a vector");
            side_exit_patch_location = std::make_pair(next_block, assembler->bytesWritten() - exit_size);
        }
    }

    var->bumpUse();
    val_constant->bumpUse();

    assertConsistent();
}
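// Illustration only (hypothetical helper, not part of the JIT): the guard
// above compares the live value against the speculated constant; COND_EQUAL
// makes the ForwardJump skip the _emitJump scope when they match, so a match
// stays on the fast path and a mismatch side-exits to next_block.
#include <cstdint>
template <typename FastPath, typename SideExit>
void guardedDispatch(uint64_t actual, uint64_t speculated, FastPath fast, SideExit sideExit) {
    if (actual == speculated)
        fast();       // conditional jump taken: the side exit is skipped
    else
        sideExit();   // fall into _emitJump; patchable exit to next_block
}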
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);

    // On success, return to the hot patch code, at a point where it will perform the store to dst for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(code, structure, chain);

    // Finally, repatch the jump to the slow case in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));

    // Check eax is an array
    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), 0, X86::eax);
    JmpSrc failureCases1 = __ jne();

    // Checks out okay! - get the length from the storage
    __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);

    __ cmpl_ir(JSImmediate::maxImmediateInt, X86::ecx);
    JmpSrc failureCases2 = __ ja();

    // Re-tag the length as an immediate integer: (length << 1) | 1.
    __ addl_rr(X86::ecx, X86::ecx);
    __ addl_ir(1, X86::ecx);
    __ movl_rr(X86::ecx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success, return to the hot patch code, at a point where it will perform the store to dst for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally, repatch the jump to the slow case in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
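// Illustration (hypothetical helper, not part of JSC): the re-tagging the
// stub above emits. A length fits in an immediate only up to
// JSImmediate::maxImmediateInt, which is why larger lengths branch to the
// slow case (the `ja` above) before this encoding is applied.
static inline uint32_t toImmediateInt(uint32_t length) {
    return (length << 1) | 1; // addl_rr doubles, addl_ir(1, ...) sets the tag bit
}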
void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Checks out okay! - putDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    X86Assembler::link(code, failureCases1, lastProtoBegin);
    X86Assembler::link(code, failureCases2, lastProtoBegin);

    // On success, return to the hot patch code, at a point where it will perform the store to dst for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);

    // Finally, repatch the jump to the slow case in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
    // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
    // to the array-length / prototype access trampolines), and finally we also use the property-map access offset as a
    // label to jump back to if one of these trampolines finds a match.

    emitGetVirtualRegister(baseVReg, X86::eax);

    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);

    JmpDst hotPathBegin = __ label();
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    __ cmpl_im_force32(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
    addSlowCase(__ jne());
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);

    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);

    emitPutVirtualRegister(resultVReg);
}
void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned propertyAccessInstructionIndex)
{
    // In order to be able to repatch both the Structure and the object offset, we store one pointer ('hotPathBegin'),
    // to just after the point where the arguments have been loaded into registers, and we generate code such that the
    // Structure & offset are always at the same distance from it.

    emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);

    JmpDst hotPathBegin = __ label();
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
    __ cmpl_im_force32(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdStructure);
    addSlowCase(__ jne());

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdPropertyMapOffset);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc callLinkFailNotObject = __ jne();
    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), 0, X86::ecx);
    JmpSrc callLinkFailNotJSFunction = __ jne();

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    __ movl_i32r(argCount, X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    JmpSrc storeResultForFirstRun = __ jmp();

    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = __ label();

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc isNotObject = __ jne();
    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), 0, X86::ecx);
    JmpSrc isJSFunction = __ je();

    // This handles host functions
    JmpDst notJSFunctionlabel = __ label();
    __ link(isNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotJSFunction, notJSFunctionlabel);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    JmpSrc wasNotJSFunction = __ jmp();

    // Next, handle JSFunctions...
    __ link(isJSFunction, __ label());

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_i32r(argCount, X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    JmpDst storeResult = __ label();
    __ link(wasNotJSFunction, storeResult);
    __ link(storeResultForFirstRun, storeResult);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    JmpSrc wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        __ cmpl_ir(asInteger(JSImmediate::impossibleValue()), X86::eax);
        wasEval = __ jne();
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below.
    emitGetVirtualRegister(callee, X86::ecx);
    __ cmpl_ir_force32(asInteger(JSImmediate::impossibleValue()), X86::ecx);
    JmpDst addressOfLinkedFunctionCheck = __ label();
    addSlowCase(__ jne());
    ASSERT(X86Assembler::getDifferenceBetweenLabels(addressOfLinkedFunctionCheck, __ label()) == repatchOffsetOpCallCall);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(X86::ecx, 1);
        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.
    __ movl_i32m(asInteger(noValue()), (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::ecx, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_mr(FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node), X86::ecx, X86::edx); // newScopeChain
    __ movl_i32m(argCount, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edi, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edx, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * sizeof(Register), X86::edi);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        __ link(wasEval, __ label());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
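// Self-contained sketch (hypothetical type, not JSC's; field order is
// illustrative only) of what the five stores above write into the new call
// frame header, relative to edi:
struct CallFrameHeaderSketch {
    intptr_t optionalCalleeArguments; // <- noValue()
    intptr_t callee;                  // <- ecx, the cached JSFunction
    intptr_t argumentCount;           // <- the argCount immediate
    intptr_t callerFrame;             // <- old edi, before addl_ir advances it
    intptr_t scopeChain;              // <- callee->m_scopeChain.m_node
    // RegisterFile::CodeBlock is deliberately left for the callee to set.
};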
void compile(RuntimeBlockInfo* block, bool force_checks, bool reset, bool staging, bool optimise)
{
    mov(rax, (size_t)&cycle_counter);
    sub(dword[rax], block->guest_cycles);

    sub(rsp, 0x28);

    for (size_t i = 0; i < block->oplist.size(); i++) {
        shil_opcode& op = block->oplist[i];

        switch (op.op) {
        case shop_ifb:
            if (op.rs1._imm) {
                mov(rax, (size_t)&next_pc);
                mov(dword[rax], op.rs2._imm);
            }
            mov(call_regs[0], op.rs3._imm);
            call((void*)OpDesc[op.rs3._imm]->oph);
            break;

        case shop_jcond:
        case shop_jdyn:
        {
            mov(rax, (size_t)op.rs1.reg_ptr());
            mov(ecx, dword[rax]);

            if (op.rs2.is_imm())
                add(ecx, op.rs2._imm);

            mov(rdx, (size_t)op.rd.reg_ptr());
            mov(dword[rdx], ecx);
        }
        break;

        case shop_mov32:
        {
            verify(op.rd.is_reg());
            verify(op.rs1.is_reg() || op.rs1.is_imm());

            sh_to_reg(op.rs1, mov, ecx);
            reg_to_sh(op.rd, ecx);
        }
        break;

        case shop_mov64:
        {
            verify(op.rd.is_reg());
            verify(op.rs1.is_reg() || op.rs1.is_imm());

            sh_to_reg(op.rs1, mov, rcx);
            reg_to_sh(op.rd, rcx);
        }
        break;

        case shop_readm:
        {
            sh_to_reg(op.rs1, mov, call_regs[0]);
            sh_to_reg(op.rs3, add, call_regs[0]);

            u32 size = op.flags & 0x7f;
            if (size == 1) {
                call((void*)ReadMem8);
                movsx(rcx, al);
            } else if (size == 2) {
                call((void*)ReadMem16);
                movsx(rcx, ax);
            } else if (size == 4) {
                call((void*)ReadMem32);
                mov(rcx, rax);
            } else if (size == 8) {
                call((void*)ReadMem64);
                mov(rcx, rax);
            } else {
                die("1..8 bytes");
            }

            if (size != 8)
                reg_to_sh(op.rd, ecx);
            else
                reg_to_sh(op.rd, rcx);
        }
        break;

        case shop_writem:
        {
            u32 size = op.flags & 0x7f;
            sh_to_reg(op.rs1, mov, call_regs[0]);
            sh_to_reg(op.rs3, add, call_regs[0]);

            if (size != 8)
                sh_to_reg(op.rs2, mov, call_regs[1]);
            else
                sh_to_reg(op.rs2, mov, call_regs64[1]);

            if (size == 1)
                call((void*)WriteMem8);
            else if (size == 2)
                call((void*)WriteMem16);
            else if (size == 4)
                call((void*)WriteMem32);
            else if (size == 8)
                call((void*)WriteMem64);
            else
                die("1..8 bytes");
        }
        break;

        default:
            shil_chf[op.op](&op);
            break;
        }
    }

    mov(rax, (size_t)&next_pc);

    switch (block->BlockType) {
    case BET_StaticJump:
    case BET_StaticCall:
        // next_pc = block->BranchBlock;
        mov(dword[rax], block->BranchBlock);
        break;

    case BET_Cond_0:
    case BET_Cond_1:
    {
        // next_pc = next_pc_value;
        // if (*jdyn == 0) next_pc = branch_pc_value;
        mov(dword[rax], block->NextBlock);

        if (block->has_jcond)
            mov(rdx, (size_t)&Sh4cntx.jdyn);
        else
            mov(rdx, (size_t)&sr.T);

        cmp(dword[rdx], block->BlockType & 1);
        Xbyak::Label branch_not_taken;
        jne(branch_not_taken, T_SHORT);
        mov(dword[rax], block->BranchBlock);
        L(branch_not_taken);
    }
    break;

    case BET_DynamicJump:
    case BET_DynamicCall:
    case BET_DynamicRet:
        // next_pc = *jdyn;
        mov(rdx, (size_t)&Sh4cntx.jdyn);
        mov(edx, dword[rdx]);
        mov(dword[rax], edx);
        break;

    case BET_DynamicIntr:
    case BET_StaticIntr:
        if (block->BlockType == BET_DynamicIntr) {
            // next_pc = *jdyn;
            mov(rdx, (size_t)&Sh4cntx.jdyn);
            mov(edx, dword[rdx]);
            mov(dword[rax], edx);
        } else {
            // next_pc = next_pc_value;
            mov(dword[rax], block->NextBlock);
        }
        call((void*)UpdateINTC);
        break;

    default:
        die("Invalid block end type");
    }

    add(rsp, 0x28);
    ret();

    ready();

    block->code = (DynarecCodeEntryPtr)getCode();

    emit_Skip(getSize());
}
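// Illustration (not emitter code): the next_pc selection that the BET_Cond_0/1
// epilogue above encodes. `cond` is Sh4cntx.jdyn when the block carries a
// delayed condition, otherwise sr.T; the low bit of BlockType is the value
// that makes the branch taken.
static u32 selectNextPc(const RuntimeBlockInfo* block, u32 cond) {
    u32 next = block->NextBlock;              // fall-through PC
    if (cond == (u32)(block->BlockType & 1))  // cmp ... / jne branch_not_taken
        next = block->BranchBlock;            // branch-taken PC
    return next;
}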
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success, return to the hot patch code, at a point where it will perform the store to dst for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally, repatch the jump to the slow case in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);

    // On success, return to the hot patch code, at a point where it will perform the store to dst for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally, repatch the jump to the slow case in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases3 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
{
    Vector<JmpSrc, 16> failureCases;

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(__ jne());
    __ cmpl_im(reinterpret_cast<uint32_t>(oldStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
    failureCases.append(__ jne());

    Vector<JmpSrc> successCases;

    // ecx = baseObject->m_structure
    __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
    // Check the base is an object, then load ecx = baseObject->structure()->prototype()
    __ cmpl_im(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
    failureCases.append(__ jne());
    __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);

    // Walk the prototype chain; ecx holds the current prototype object.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // null check the prototype
        __ cmpl_ir(asInteger(jsNull()), X86::ecx);
        successCases.append(__ je());

        // Check the structure id
        __ cmpl_im(reinterpret_cast<uint32_t>(it->get()), FIELD_OFFSET(JSCell, m_structure), X86::ecx);
        failureCases.append(__ jne());

        __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
        __ cmpl_im(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
        failureCases.append(__ jne());
        __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
    }
    failureCases.append(__ jne());

    for (unsigned i = 0; i < successCases.size(); ++i)
        __ link(successCases[i], __ label());

    JmpSrc callTarget;

    // Emit a call only if storage realloc is needed.
    if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
        __ push_r(X86::edx);
        __ push_i32(newStructure->propertyStorageCapacity());
        __ push_i32(oldStructure->propertyStorageCapacity());
        __ push_r(X86::eax);
        callTarget = __ call();
        __ addl_ir(3 * sizeof(void*), X86::esp);
        __ pop_r(X86::edx);
    }

    // Assumes m_refCount can be decremented easily; the refcount decrement is safe as
    // the codeblock should ensure oldStructure->m_refCount > 0.
    __ subl_im(1, reinterpret_cast<void*>(oldStructure));
    __ addl_im(1, reinterpret_cast<void*>(newStructure));
    __ movl_i32m(reinterpret_cast<uint32_t>(newStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);

    // write the value
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);

    __ ret();

    JmpSrc failureJump;
    if (failureCases.size()) {
        for (unsigned i = 0; i < failureCases.size(); ++i)
            __ link(failureCases[i], __ label());
        restoreArgumentReferenceForTrampoline();
        failureJump = __ jmp();
    }

    void* code = __ executableCopy(m_codeBlock->executablePool());

    if (failureCases.size())
        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
}
int encode_op(char *opcode, char *op_data)
{
    int rd, rs, rt, imm, funct, shaft, target;
    char tmp[256];

    /* sscanf formats (named after operand kinds): g = general register,
       f = float register, i = immediate, l = label. Each leading %s
       consumes the repeated mnemonic at the start of op_data. */
    const char *fi   = "%s %d";
    const char *fg   = "%s %%g%d";
    const char *ff   = "%s %%f%d";
    const char *fl   = "%s %s";
    const char *fgi  = "%s %%g%d, %d";
    const char *fgl  = "%s %%g%d, %s";
    const char *fgg  = "%s %%g%d, %%g%d";
    const char *fggl = "%s %%g%d, %%g%d, %s";
    const char *fggi = "%s %%g%d, %%g%d, %d";
    const char *fggg = "%s %%g%d, %%g%d, %%g%d";
    const char *fff  = "%s %%f%d, %%f%d";
    const char *fgf  = "%s %%g%d, %%f%d";
    const char *ffg  = "%s %%f%d, %%g%d";
    const char *fffl = "%s %%f%d, %%f%d, %s";
    const char *ffff = "%s %%f%d, %%f%d, %%f%d";
    const char *ffgi = "%s %%f%d, %%g%d, %d";
    const char *ffgg = "%s %%f%d, %%g%d, %%g%d";
    char lname[256];

    shaft = funct = target = 0;

    if (strcmp(opcode, "mvhi") == 0) { if (sscanf(op_data, fgi, tmp, &rs, &imm) == 3) return mvhi(rs, 0, imm); }
    if (strcmp(opcode, "mvlo") == 0) { if (sscanf(op_data, fgi, tmp, &rs, &imm) == 3) return mvlo(rs, 0, imm); }
    if (strcmp(opcode, "add") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return add(rs, rt, rd, 0); }
    if (strcmp(opcode, "nor") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return nor(rs, rt, rd, 0); }
    if (strcmp(opcode, "sub") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return sub(rs, rt, rd, 0); }
    if (strcmp(opcode, "mul") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return mul(rs, rt, rd, 0); }
    if (strcmp(opcode, "addi") == 0) { if (sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4) return addi(rs, rt, imm); }
    if (strcmp(opcode, "subi") == 0) { if (sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4) return subi(rs, rt, imm); }
    if (strcmp(opcode, "muli") == 0) { if (sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4) return muli(rs, rt, imm); }
    if (strcmp(opcode, "input") == 0) { if (sscanf(op_data, fg, tmp, &rd) == 2) return input(0, 0, rd, 0); }
    if (strcmp(opcode, "inputw") == 0) { if (sscanf(op_data, fg, tmp, &rd) == 2) return inputw(0, 0, rd, 0); }
    if (strcmp(opcode, "inputf") == 0) { if (sscanf(op_data, ff, tmp, &rd) == 2) return inputf(0, 0, rd, 0); }
    if (strcmp(opcode, "output") == 0) { if (sscanf(op_data, fg, tmp, &rs) == 2) return output(rs, 0, 0, 0); }
    if (strcmp(opcode, "outputw") == 0) { if (sscanf(op_data, fg, tmp, &rs) == 2) return outputw(rs, 0, 0, 0); }
    if (strcmp(opcode, "outputf") == 0) { if (sscanf(op_data, ff, tmp, &rs) == 2) return outputf(rs, 0, 0, 0); }
    if (strcmp(opcode, "and") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return _and(rs, rt, rd, 0); }
    if (strcmp(opcode, "or") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return _or(rs, rt, rd, 0); }
    if (strcmp(opcode, "sll") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return sll(rs, rt, rd, 0); }
    if (strcmp(opcode, "srl") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return srl(rs, rt, rd, 0); }
    if (strcmp(opcode, "slli") == 0) { if (sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4) return slli(rs, rt, imm); }
    if (strcmp(opcode, "srli") == 0) { if (sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4) return srli(rs, rt, imm); }
    if (strcmp(opcode, "b") == 0) { if (sscanf(op_data, fg, tmp, &rs) == 2) return b(rs, 0, 0, 0); }
    if (strcmp(opcode, "jmp") == 0) {
        if (sscanf(op_data, fl, tmp, lname) == 2) { strcpy(label_name[label_cnt], lname); return jmp(label_cnt++); }
    }
    if (strcmp(opcode, "jeq") == 0) {
        if (sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) { strcpy(label_name[label_cnt], lname); return jeq(rs, rt, label_cnt++); }
    }
    if (strcmp(opcode, "jne") == 0) {
        if (sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) { strcpy(label_name[label_cnt], lname); return jne(rs, rt, label_cnt++); }
    }
    if (strcmp(opcode, "jlt") == 0) {
        if (sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) { strcpy(label_name[label_cnt], lname); return jlt(rs, rt, label_cnt++); }
    }
    if (strcmp(opcode, "jle") == 0) {
        if (sscanf(op_data, fggl, tmp, &rs, &rt, lname) == 4) { strcpy(label_name[label_cnt], lname); return jle(rs, rt, label_cnt++); }
    }
    if (strcmp(opcode, "call") == 0) {
        if (sscanf(op_data, fl, tmp, lname) == 2) { strcpy(label_name[label_cnt], lname); return call(label_cnt++); }
    }
    if (strcmp(opcode, "callR") == 0) { if (sscanf(op_data, fg, tmp, &rs) == 2) return callr(rs, 0, 0, 0); }
    if (strcmp(opcode, "return") == 0) { return _return(0); }
    if (strcmp(opcode, "ld") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return ld(rs, rt, rd, 0); }
    if (strcmp(opcode, "ldi") == 0) { if (sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4) return ldi(rs, rt, imm); }
    if (strcmp(opcode, "ldlr") == 0) { if (sscanf(op_data, fgi, tmp, &rs, &imm) == 3) return ldlr(rs, 0, imm); }
    if (strcmp(opcode, "fld") == 0) { if (sscanf(op_data, ffgg, tmp, &rd, &rs, &rt) == 4) return fld(rs, rt, rd, 0); }
    if (strcmp(opcode, "st") == 0) { if (sscanf(op_data, fggg, tmp, &rd, &rs, &rt) == 4) return st(rs, rt, rd, 0); }
    if (strcmp(opcode, "sti") == 0) { if (sscanf(op_data, fggi, tmp, &rt, &rs, &imm) == 4) return sti(rs, rt, imm); }
    if (strcmp(opcode, "stlr") == 0) { if (sscanf(op_data, fgi, tmp, &rs, &imm) == 3) return stlr(rs, 0, imm); }
    if (strcmp(opcode, "fst") == 0) { if (sscanf(op_data, ffgg, tmp, &rd, &rs, &rt) == 4) return fst(rs, rt, rd, 0); }
    if (strcmp(opcode, "fadd") == 0) { if (sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4) return fadd(rs, rt, rd, 0); }
    if (strcmp(opcode, "fsub") == 0) { if (sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4) return fsub(rs, rt, rd, 0); }
    if (strcmp(opcode, "fmul") == 0) { if (sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4) return fmul(rs, rt, rd, 0); }
    if (strcmp(opcode, "fdiv") == 0) { if (sscanf(op_data, ffff, tmp, &rd, &rs, &rt) == 4) return fdiv(rs, rt, rd, 0); }
    if (strcmp(opcode, "fsqrt") == 0) { if (sscanf(op_data, fff, tmp, &rd, &rs) == 3) return fsqrt(rs, 0, rd, 0); }
    if (strcmp(opcode, "fabs") == 0) { if (sscanf(op_data, fff, tmp, &rd, &rs) == 3) return _fabs(rs, 0, rd, 0); }
    if (strcmp(opcode, "fmov") == 0) { if (sscanf(op_data, fff, tmp, &rd, &rs) == 3) return fmov(rs, 0, rd, 0); }
    if (strcmp(opcode, "fneg") == 0) { if (sscanf(op_data, fff, tmp, &rd, &rs) == 3) return fneg(rs, 0, rd, 0); }
    if (strcmp(opcode, "fldi") == 0) { if (sscanf(op_data, ffgi, tmp, &rt, &rs, &imm) == 4) return fldi(rs, rt, imm); }
    if (strcmp(opcode, "fsti") == 0) { if (sscanf(op_data, ffgi, tmp, &rt, &rs, &imm) == 4) return fsti(rs, rt, imm); }
    if (strcmp(opcode, "fjeq") == 0) {
        if (sscanf(op_data, fffl, tmp, &rs, &rt, lname) == 4) { strcpy(label_name[label_cnt], lname); return fjeq(rs, rt, label_cnt++); }
    }
    if (strcmp(opcode, "fjlt") == 0) {
        if (sscanf(op_data, fffl, tmp, &rs, &rt, lname) == 4) { strcpy(label_name[label_cnt], lname); return fjlt(rs, rt, label_cnt++); }
    }
    if (strcmp(opcode, "halt") == 0) { return halt(0, 0, 0, 0); }
    if (strcmp(opcode, "setL") == 0) {
        if (sscanf(op_data, fgl, tmp, &rd, lname) == 3) { strcpy(label_name[label_cnt], lname); return setl(0, rd, label_cnt++); }
    }
    if (strcmp(opcode, "padd") == 0) { if (sscanf(op_data, fgi, tmp, &rt, &imm) == 3) return padd(0, rt, imm); }
    if (strcmp(opcode, "link") == 0) { if (sscanf(op_data, fi, tmp, &imm) == 2) return link(0, 0, imm); }
    if (strcmp(opcode, "movlr") == 0) { return movlr(0, 0, 0, 0); }
    if (strcmp(opcode, "btmplr") == 0) { return btmplr(0, 0, 0, 0); }
    /*
    if (strcmp(opcode, "padd") == 0) {
        if (sscanf(op_data, fgg, tmp, &rd, &rt) == 3) {
            return padd(0, rt, d, 0);
        }
    }
    */

    /* Unknown opcode or malformed operands. */
    return -1;
}
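/* Example (hypothetical input, grounded in the formats above): op_data
   repeats the mnemonic because each format's leading %s consumes it before
   the operands are scanned. */
void encode_example(void)
{
    int word = encode_op("add", "add %g3, %g1, %g2"); /* -> add(rs=1, rt=2, rd=3, 0) */
    (void)word;
}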