Code example #1
File: Jit.cpp Project: Almamu/ppsspp
bool Jit::JitSafeMem::PrepareSlowRead(void *safeFunc)
{
	if (!g_Config.bFastMemory)
	{
		if (iaddr_ != (u32) -1)
		{
			// No slow read necessary.
			if (Memory::IsValidAddress(iaddr_))
				return false;
			jit_->MOV(32, R(EAX), Imm32(iaddr_));
		}
		else
		{
			PrepareSlowAccess();
			jit_->LEA(32, EAX, MDisp(xaddr_, offset_));
		}

		jit_->ABI_CallFunctionA(jit_->thunks.ProtectFunction(safeFunc, 1), R(EAX));
		needsCheck_ = true;
		return true;
	}
	else
		return false;
}
Code example #2
void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
{
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    add32(Imm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
}
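The pair of instructions above performs a 64-bit increment on a counter stored in memory as two 32-bit words: add32 updates the low word and sets the carry, and addWithCarry32 folds that carry into the high word. Below is a minimal, standalone C++ sketch of the same semantics (names are mine; it assumes the low word precedes the high word, as on little-endian x86):

#include <cassert>
#include <cstdint>

// Increment a 64-bit counter held as two 32-bit words, mirroring the
// emitted add32 / addWithCarry32 pair.
void incrementSplitCounter(uint32_t words[2], uint32_t increment) {
    uint32_t oldLo = words[0];
    words[0] += increment;     // add32(Imm32(increment), low word)
    if (words[0] < oldLo)      // unsigned wraparound == carry out
        words[1] += 1;         // addWithCarry32(Imm32(0), high word)
}

int main() {
    uint32_t counter[2] = { 0xFFFFFFFFu, 0 };
    incrementSplitCounter(counter, 1);
    assert(counter[0] == 0 && counter[1] == 1);  // carry propagated
}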
Code example #3
File: Jit.cpp Project: ToadKing/ppsspp
const u8 *Jit::DoJit(u32 em_address, JitBlock *b)
{
	js.cancel = false;
	js.blockStart = js.compilerPC = mips_->pc;
	js.nextExit = 0;
	js.downcountAmount = 0;
	js.curBlock = b;
	js.compiling = true;
	js.inDelaySlot = false;
	js.afterOp = JitState::AFTER_NONE;
	js.PrefixStart();

	// We add a check before the block, used when entering from a linked block.
	b->checkedEntry = GetCodePtr();
	// Downcount flag check. The last block decremented the downcounter, and the flag should still be available.
	FixupBranch skip = J_CC(CC_NBE);
	MOV(32, M(&mips_->pc), Imm32(js.blockStart));
	JMP(asm_.outerLoop, true);  // downcount hit zero - go advance.
	SetJumpTarget(skip);

	b->normalEntry = GetCodePtr();

	MIPSAnalyst::AnalysisResults analysis = MIPSAnalyst::Analyze(em_address);

	gpr.Start(mips_, analysis);
	fpr.Start(mips_, analysis);

	js.numInstructions = 0;
	while (js.compiling) {
		// Jit breakpoints are quite fast, so let's do them in release too.
		CheckJitBreakpoint(js.compilerPC, 0);

		MIPSOpcode inst = Memory::Read_Opcode_JIT(js.compilerPC);
		js.downcountAmount += MIPSGetInstructionCycleEstimate(inst);

		MIPSCompileOp(inst);

		if (js.afterOp & JitState::AFTER_CORE_STATE) {
			// TODO: Save/restore?
			FlushAll();

			// If we're rewinding, CORE_NEXTFRAME should not cause a rewind.
			// It doesn't really matter either way if we're not rewinding.
			// CORE_RUNNING is <= CORE_NEXTFRAME.
			CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME));
			FixupBranch skipCheck = J_CC(CC_LE);
			if (js.afterOp & JitState::AFTER_REWIND_PC_BAD_STATE)
				MOV(32, M(&mips_->pc), Imm32(js.compilerPC));
			else
				MOV(32, M(&mips_->pc), Imm32(js.compilerPC + 4));
			WriteSyscallExit();
			SetJumpTarget(skipCheck);

			js.afterOp = JitState::AFTER_NONE;
		}
		if (js.afterOp & JitState::AFTER_MEMCHECK_CLEANUP) {
			js.afterOp &= ~JitState::AFTER_MEMCHECK_CLEANUP;
		}

		js.compilerPC += 4;
		js.numInstructions++;

		// Safety check, in case we get a bunch of really large jit ops without a lot of branching.
		if (GetSpaceLeft() < 0x800)
		{
			FlushAll();
			WriteExit(js.compilerPC, js.nextExit++);
			js.compiling = false;
		}
	}

	b->codeSize = (u32)(GetCodePtr() - b->normalEntry);
	NOP();
	AlignCode4();
	b->originalSize = js.numInstructions;
	return b->normalEntry;
}
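For context, the checkedEntry guard at the top of the block only makes sense together with the dispatcher's downcount scheme: every block subtracts its cycle estimate from a counter, and a block entered through a link from another block first checks whether that counter has run out. A rough C++ model of the contract (hypothetical names, not the actual PPSSPP dispatcher):

#include <cstdint>

struct CpuState { int32_t downcount; uint32_t pc; };

// What a compiled block conceptually does; enteredFromLink corresponds to
// jumping to checkedEntry rather than normalEntry.
void runBlock(CpuState &cpu, uint32_t blockStart, int cycleEstimate,
              bool enteredFromLink) {
    if (enteredFromLink && cpu.downcount <= 0) {
        // Mirrors the J_CC(CC_NBE) guard: bail to the outer loop so
        // pending events (timers, interrupts) can be serviced.
        cpu.pc = blockStart;             // MOV(32, M(&mips_->pc), Imm32(js.blockStart))
        return;                          // JMP(asm_.outerLoop)
    }
    // ... execute the block body ...
    cpu.downcount -= cycleEstimate;      // emitted by WriteDowncount at block exits
}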
Code example #4
File: JITCall.cpp Project: fatman2021/webkitgtk
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    killLastResultRegister();

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        lshift32(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        neg32(regT0);
        signExtend32ToPtr(regT0, regT0);
        end.append(branchAdd64(Zero, TrustedImm32(1), regT0));
        // regT0: -argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue, regT0);
    stubCall.addArgument(arguments, regT0);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT1);

    if (canOptimize)
        end.link(this);
}
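The argument-copy loop above iterates with a negated count so that a single add-and-branch instruction both advances the index and detects completion. A small C++ model of that indexing (illustrative only; oldThisSlot/newThisSlot point at each frame's 'this' slot, with arguments at negative slot offsets, matching the BaseIndex addressing):

#include <cstdint>

void copyVarargs(const int64_t *oldThisSlot, int64_t *newThisSlot,
                 int32_t argumentCountIncludingThis) {
    int64_t idx = -int64_t(argumentCountIncludingThis);  // neg32 + signExtend32ToPtr
    if (++idx == 0)                      // branchAdd64(Zero, TrustedImm32(1), regT0)
        return;                          // only 'this' is present; nothing to copy
    do {                                 // copyLoop
        newThisSlot[idx] = oldThisSlot[idx];  // load64/store64 via BaseIndex
    } while (++idx != 0);                // branchAdd64(NonZero, TrustedImm32(1), regT0)
}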
Code example #5
File: BaselineIC-mips.cpp Project: JasonGross/mozjs
bool
ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
{
    // Guard that R0 is an integer and R1 is an integer.
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);

    // Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg.
    Register scratchReg = R2.payloadReg();

    // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
    GeneralRegisterSet savedRegs = availableGeneralRegs(2);
    savedRegs = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs);
    ValueOperand savedValue = savedRegs.takeAnyValue();

    Label goodMul, divTest1, divTest2;
    switch(op_) {
      case JSOP_ADD:
        // We know R0.typeReg() already contains the integer tag. No boxing
        // required.
        masm.ma_addTestOverflow(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), &failure);
        break;
      case JSOP_SUB:
        masm.ma_subTestOverflow(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), &failure);
        break;
      case JSOP_MUL: {
        masm.ma_mul_branch_overflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);

        masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump);

        // Result is -0 if operands have different signs.
        masm.as_xor(t8, R0.payloadReg(), R1.payloadReg());
        masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump);

        masm.bind(&goodMul);
        masm.move32(scratchReg, R0.payloadReg());
        break;
      }
      case JSOP_DIV:
      case JSOP_MOD: {
        // Check for INT_MIN / -1, it results in a double.
        masm.ma_b(R0.payloadReg(), Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump);
        masm.ma_b(R1.payloadReg(), Imm32(-1), &failure, Assembler::Equal, ShortJump);
        masm.bind(&divTest1);

        // Check for division by zero
        masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::Equal, ShortJump);

        // Check for 0 / X with X < 0 (results in -0).
        masm.ma_b(R0.payloadReg(), Imm32(0), &divTest2, Assembler::NotEqual, ShortJump);
        masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
        masm.bind(&divTest2);

        masm.as_div(R0.payloadReg(), R1.payloadReg());

        if (op_ == JSOP_DIV) {
            // Result is a double if the remainder != 0.
            masm.as_mfhi(scratchReg);
            masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump);
            masm.as_mflo(scratchReg);
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        } else {
            Label done;
            // If X % Y == 0 and X < 0, the result is -0.
            masm.as_mfhi(scratchReg);
            masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump);
            masm.ma_b(R0.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
            masm.bind(&done);
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        }
        break;
      }
      case JSOP_BITOR:
        masm.ma_or(R0.payloadReg() , R0.payloadReg(), R1.payloadReg());
        break;
      case JSOP_BITXOR:
        masm.ma_xor(R0.payloadReg() , R0.payloadReg(), R1.payloadReg());
        break;
      case JSOP_BITAND:
        masm.ma_and(R0.payloadReg() , R0.payloadReg(), R1.payloadReg());
        break;
      case JSOP_LSH:
        // MIPS only uses the lowest 5 bits of R1 as the shift amount.
        masm.ma_sll(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
        break;
      case JSOP_RSH:
        masm.ma_sra(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
        break;
      case JSOP_URSH:
        masm.ma_srl(scratchReg, R0.payloadReg(), R1.payloadReg());
        if (allowDouble_) {
            Label toUint;
            masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump);

            // Move result and box for return.
            masm.move32(scratchReg, R0.payloadReg());
            EmitReturnFromIC(masm);

            masm.bind(&toUint);
            masm.convertUInt32ToDouble(scratchReg, FloatReg1);
            masm.boxDouble(FloatReg1, R0);
        } else {
            masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump);
            // Move result for return.
            masm.move32(scratchReg, R0.payloadReg());
        }
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unhandled op for BinaryArith_Int32.");
    }

    EmitReturnFromIC(masm);

    // Failure case - jump to next stub
    masm.bind(&failure);
    EmitStubGuardFailure(masm);

    return true;
}
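The MUL and DIV/MOD guards above bail out precisely when an int32 fast path would produce a value that is not an int32 under JavaScript semantics (-0, an overflowing quotient, or Infinity/NaN). A compact standalone restatement of those conditions (illustrative C++, not SpiderMonkey code):

#include <climits>
#include <cstdint>

// JSOP_DIV / JSOP_MOD: true if the operation may stay on the int32 path.
// The stub still bails later if a nonzero remainder forces a double result.
bool int32DivFastPathOk(int32_t lhs, int32_t rhs) {
    if (lhs == INT_MIN && rhs == -1) return false;  // quotient 2^31 overflows int32
    if (rhs == 0)                    return false;  // x / 0 is Infinity or NaN
    if (lhs == 0 && rhs < 0)         return false;  // 0 / negative is -0
    return true;
}

// JSOP_MUL: a zero product must become -0 (a double) when the operand signs
// differ, which is what the as_xor / LessThan check detects.
bool int32MulProducesNegZero(int32_t product, int32_t lhs, int32_t rhs) {
    return product == 0 && ((lhs ^ rhs) < 0);
}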
Code example #6
void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    // === Stage 1 - Function header code generation ===
    //
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.

    // This is the main entry point, without performing an arity check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);

    // Set up a pointer to the codeblock in the CallFrameHeader.
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();


    // === Stage 2 - Function body code generation ===
    //
    // We generate the speculative code path, followed by the non-speculative
    // code for the function. Next we need to link the two together, making
    // bail-outs from the speculative path jump to the corresponding point on
    // the non-speculative one (and generating any code necessary to juggle
    // register values around, rebox values, and ensure they are spilled, to match
    // the non-speculative path's requirements).

#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
    // Handy debug tool!
    breakpoint();
#endif

    // First generate the speculative path.
    Label speculativePathBegin = label();
    SpeculativeJIT speculative(*this);
#if !DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE
    bool compiledSpeculative = speculative.compile();
#else
    bool compiledSpeculative = false;
#endif

    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
    // to allow it to check which nodes in the graph may bail out, and may need to reenter the
    // non-speculative path.
    if (compiledSpeculative) {
        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);

        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
        linkSpeculationChecks(speculative, nonSpeculative);
    } else {
        // If compilation through the SpeculativeJIT failed, throw away the code we generated.
        m_calls.clear();
        rewindToLabel(speculativePathBegin);

        SpeculationCheckVector noChecks;
        SpeculationCheckIndexIterator checkIterator(noChecks);
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);
    }

    // === Stage 3 - Function footer code generation ===
    //
    // Generate code to lookup and jump to exception handlers, to perform the slow
    // register file check (if the fast one in the function header fails), and
    // generate the entry point with arity check.

    // Iterate over the m_calls vector, checking for exception checks,
    // and linking them to here.
    unsigned exceptionCheckCount = 0;
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            ++exceptionCheckCount;
        }
    }
    // If any exception checks were linked, generate code to lookup a handler.
    if (exceptionCheckCount) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // an identifier for the operation that threw the exception, which we can use
        // to look up handler information. The identifier we use is the return address
        // of the call out from JIT code that threw the exception; this is still
        // available on the stack, just below the stack pointer!
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        peek(GPRInfo::argumentGPR1, -1);
        m_calls.append(CallRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }

    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);


    // === Stage 4 - Link ===
    //
    // Link the code, populate data in CodeBlock data structures.

    LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);

#if DFG_DEBUG_VERBOSE
    fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
        for (unsigned i = 0; i < m_calls.size(); ++i) {
            if (m_calls[i].m_exceptionCheck.isSet()) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
                unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
            }
        }
    }

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = linkBuffer.finalizeCode();
}
Code example #7
File: CompFPU.cpp Project: AmesianX/ppsspp
void Jit::Comp_FPU2op(MIPSOpcode op) {
	CONDITIONAL_DISABLE(FPU);
	
	int fs = _FS;
	int fd = _FD;

	auto execRounding = [&](void (XEmitter::*conv)(X64Reg, OpArg), int setMXCSR) {
		fpr.SpillLock(fd, fs);
		fpr.MapReg(fd, fs == fd, true);

		// Small optimization: 0 is our default mode anyway.
		if (setMXCSR == 0 && !js.hasSetRounding) {
			setMXCSR = -1;
		}
		if (setMXCSR != -1) {
			STMXCSR(MIPSSTATE_VAR(mxcsrTemp));
			MOV(32, R(TEMPREG), MIPSSTATE_VAR(mxcsrTemp));
			AND(32, R(TEMPREG), Imm32(~(3 << 13)));
			OR(32, R(TEMPREG), Imm32(setMXCSR << 13));
			MOV(32, MIPSSTATE_VAR(temp), R(TEMPREG));
			LDMXCSR(MIPSSTATE_VAR(temp));
		}

		(this->*conv)(TEMPREG, fpr.R(fs));

		// Did we get an indefinite integer value?
		CMP(32, R(TEMPREG), Imm32(0x80000000));
		FixupBranch skip = J_CC(CC_NE);
		if (fd != fs) {
			CopyFPReg(fpr.RX(fd), fpr.R(fs));
		}
		XORPS(XMM1, R(XMM1));
		CMPSS(fpr.RX(fd), R(XMM1), CMP_LT);

		// At this point, -inf = 0xffffffff, inf/nan = 0x00000000.
		// We want -inf to be 0x80000000, and inf/nan to be 0x7fffffff, so we flip those bits.
		MOVD_xmm(R(TEMPREG), fpr.RX(fd));
		XOR(32, R(TEMPREG), Imm32(0x7fffffff));

		SetJumpTarget(skip);
		MOVD_xmm(fpr.RX(fd), R(TEMPREG));

		if (setMXCSR != -1) {
			LDMXCSR(MIPSSTATE_VAR(mxcsrTemp));
		}
	};

	switch (op & 0x3f) {
	case 5:	//F(fd)	= fabsf(F(fs)); break; //abs
		fpr.SpillLock(fd, fs);
		fpr.MapReg(fd, fd == fs, true);
		MOV(PTRBITS, R(TEMPREG), ImmPtr(&ssNoSignMask[0]));
		if (fd != fs && fpr.IsMapped(fs)) {
			MOVAPS(fpr.RX(fd), MatR(TEMPREG));
			ANDPS(fpr.RX(fd), fpr.R(fs));
		} else {
			if (fd != fs) {
				MOVSS(fpr.RX(fd), fpr.R(fs));
			}
			ANDPS(fpr.RX(fd), MatR(TEMPREG));
		}
		break;

	case 6:	//F(fd)	= F(fs);				break; //mov
		if (fd != fs) {
			fpr.SpillLock(fd, fs);
			fpr.MapReg(fd, fd == fs, true);
			CopyFPReg(fpr.RX(fd), fpr.R(fs));
		}
		break;

	case 7:	//F(fd)	= -F(fs);			 break; //neg
		fpr.SpillLock(fd, fs);
		fpr.MapReg(fd, fd == fs, true);
		MOV(PTRBITS, R(TEMPREG), ImmPtr(&ssSignBits2[0]));
		if (fd != fs && fpr.IsMapped(fs)) {
			MOVAPS(fpr.RX(fd), MatR(TEMPREG));
			XORPS(fpr.RX(fd), fpr.R(fs));
		} else {
			if (fd != fs) {
				MOVSS(fpr.RX(fd), fpr.R(fs));
			}
			XORPS(fpr.RX(fd), MatR(TEMPREG));
		}
		break;

	case 4:	//F(fd)	= sqrtf(F(fs)); break; //sqrt
		fpr.SpillLock(fd, fs);
		fpr.MapReg(fd, fd == fs, true);
		SQRTSS(fpr.RX(fd), fpr.R(fs));
		break;

	case 13: //FsI(fd) = F(fs)>=0 ? (int)floorf(F(fs)) : (int)ceilf(F(fs)); break; //trunc.w.s
		execRounding(&XEmitter::CVTTSS2SI, -1);
		break;

	case 32: //F(fd)	= (float)FsI(fs);			break; //cvt.s.w
		fpr.SpillLock(fd, fs);
		fpr.MapReg(fd, fs == fd, true);
		if (fpr.IsMapped(fs)) {
			CVTDQ2PS(fpr.RX(fd), fpr.R(fs));
		} else {
			// If fs was fd, we'd be in the case above since we mapped fd.
			MOVSS(fpr.RX(fd), fpr.R(fs));
			CVTDQ2PS(fpr.RX(fd), fpr.R(fd));
		}
		break;

	case 36: //FsI(fd) = (int)	F(fs);			 break; //cvt.w.s
		// Uses the current rounding mode.
		execRounding(&XEmitter::CVTSS2SI, -1);
		break;

	case 12: //FsI(fd) = (int)floorf(F(fs)+0.5f); break; //round.w.s
		execRounding(&XEmitter::CVTSS2SI, 0);
		break;
	case 14: //FsI(fd) = (int)ceilf (F(fs)); break; //ceil.w.s
		execRounding(&XEmitter::CVTSS2SI, 2);
		break;
	case 15: //FsI(fd) = (int)floorf(F(fs)); break; //floor.w.s
		execRounding(&XEmitter::CVTSS2SI, 1);
		break;
	default:
		DISABLE;
		return;
	}
	fpr.ReleaseSpillLocks();
}
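The STMXCSR/AND/OR/LDMXCSR sequence in execRounding rewrites bits 13-14 of MXCSR, the SSE rounding-control field (0 = nearest, 1 = down, 2 = up, 3 = truncate), which is why the setMXCSR argument is shifted left by 13. A standalone C++ illustration of the same runtime effect using intrinsics (a sketch, not the emitter code):

#include <cstdio>
#include <xmmintrin.h>

// Replace the 2-bit rounding-control field of MXCSR.
void setSseRounding(unsigned rc) {   // 0=nearest, 1=down, 2=up, 3=truncate
    unsigned mxcsr = _mm_getcsr();   // STMXCSR
    mxcsr &= ~(3u << 13);            // AND(32, ..., Imm32(~(3 << 13)))
    mxcsr |= rc << 13;               // OR(32, ..., Imm32(setMXCSR << 13))
    _mm_setcsr(mxcsr);               // LDMXCSR
}

int main() {
    setSseRounding(2);                          // round up, as for ceil.w.s
    int i = _mm_cvtss_si32(_mm_set_ss(1.25f));  // CVTSS2SI honours MXCSR
    std::printf("%d\n", i);                     // prints 2 in round-up mode
}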
Code example #8
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below
    emitGetVirtualRegister(callee, X86::ecx);
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump jumpToSlow = jnePtrWithPatch(X86::ecx, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    addSlowCase(jumpToSlow);
    ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(X86::ecx, 1);
        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits setting up RegisterFile::CodeBlock, which is set in the callee.
    storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
    storePtr(X86::ecx, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(X86::edx, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
Code example #9
void Jit64::lfd(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(bJITLoadStoreFloatingOff);
	FALLBACK_IF(js.memcheck || !inst.RA);

	int d = inst.RD;
	int a = inst.RA;

	s32 offset = (s32)(s16)inst.SIMM_16;
	gpr.FlushLockX(ABI_PARAM1);
	gpr.Lock(a);
	MOV(32, R(ABI_PARAM1), gpr.R(a));
	// TODO - optimize. This has to load the previous value - upper double should stay unmodified.
	fpr.Lock(d);
	fpr.BindToRegister(d, true);
	X64Reg xd = fpr.RX(d);

	if (cpu_info.bSSSE3)
	{
#if _M_X86_64
		MOVQ_xmm(XMM0, MComplex(RBX, ABI_PARAM1, SCALE_1, offset));
#else
		AND(32, R(ABI_PARAM1), Imm32(Memory::MEMVIEW32_MASK));
		MOVQ_xmm(XMM0, MDisp(ABI_PARAM1, (u32)Memory::base + offset));
#endif
		PSHUFB(XMM0, M((void *)bswapShuffle1x8Dupe));
		MOVSD(xd, R(XMM0));
	} else {
#if _M_X86_64
		LoadAndSwap(64, EAX, MComplex(RBX, ABI_PARAM1, SCALE_1, offset));
		MOV(64, M(&temp64), R(EAX));

		MEMCHECK_START

		MOVSD(XMM0, M(&temp64));
		MOVSD(xd, R(XMM0));

		MEMCHECK_END
#else
		AND(32, R(ABI_PARAM1), Imm32(Memory::MEMVIEW32_MASK));
		MOV(32, R(EAX), MDisp(ABI_PARAM1, (u32)Memory::base + offset));
		BSWAP(32, EAX);
		MOV(32, M((void*)((u8 *)&temp64+4)), R(EAX));

		MEMCHECK_START

		MOV(32, R(EAX), MDisp(ABI_PARAM1, (u32)Memory::base + offset + 4));
		BSWAP(32, EAX);
		MOV(32, M(&temp64), R(EAX));
		MOVSD(XMM0, M(&temp64));
		MOVSD(xd, R(XMM0));

		MEMCHECK_END
#endif
	}

	gpr.UnlockAll();
	gpr.UnlockAllX();
	fpr.UnlockAll();
}
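Both branches of lfd reduce to the same idea: read 8 bytes of big-endian guest memory on a little-endian host, byte-swap them, and reinterpret the result as a double (the PSHUFB/bswapShuffle1x8Dupe path swaps inside an XMM register; the BSWAP path swaps two 32-bit halves). A scalar C++ sketch of that semantics (assumes a little-endian host and a GCC/Clang builtin; no relation to Dolphin's real memory layer):

#include <cstdint>
#include <cstring>

double loadBigEndianDouble(const uint8_t *guestMem, uint32_t addr) {
    uint64_t raw;
    std::memcpy(&raw, guestMem + addr, sizeof raw);  // 64-bit guest load
    raw = __builtin_bswap64(raw);                    // LoadAndSwap / PSHUFB equivalent
    double d;
    std::memcpy(&d, &raw, sizeof d);                 // reinterpret bits as double
    return d;
}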
Code example #10
File: CompFPU.cpp Project: AmesianX/ppsspp
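// Fragment: the ctc1 (fs == 31) tail of Jit::Comp_mxc1, which writes fcr31
// and refreshes the host rounding mode; Code example #15 shows the surrounding function.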
			if (gpr.IsImm(rt)) {
				gpr.SetImm(MIPS_REG_FPCOND, (gpr.GetImm(rt) >> 23) & 1);
				MOV(32, MIPSSTATE_VAR(fcr31), Imm32(gpr.GetImm(rt) & 0x0181FFFF));
				if ((gpr.GetImm(rt) & 0x1000003) == 0) {
					// Default nearest / no-flush mode, just leave it cleared.
				} else {
					UpdateRoundingMode(gpr.GetImm(rt));
					ApplyRoundingMode();
				}
			} else {
				gpr.Lock(rt, MIPS_REG_FPCOND);
				gpr.MapReg(rt, true, false);
				gpr.MapReg(MIPS_REG_FPCOND, false, true);
				MOV(32, gpr.R(MIPS_REG_FPCOND), gpr.R(rt));
				SHR(32, gpr.R(MIPS_REG_FPCOND), Imm8(23));
				AND(32, gpr.R(MIPS_REG_FPCOND), Imm32(1));
				MOV(32, MIPSSTATE_VAR(fcr31), gpr.R(rt));
				AND(32, MIPSSTATE_VAR(fcr31), Imm32(0x0181FFFF));
				gpr.UnlockAll();
				UpdateRoundingMode();
				ApplyRoundingMode();
			}
		} else {
			Comp_Generic(op);
		}
		return;
	}
}

}	// namespace MIPSComp
Code example #11
void Jit64::stfd(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(bJITLoadStoreFloatingOff);
	FALLBACK_IF(js.memcheck || !inst.RA);

	int s = inst.RS;
	int a = inst.RA;

	u32 mem_mask = Memory::ADDR_MASK_HW_ACCESS;
	if (Core::g_CoreStartupParameter.bMMU ||
		Core::g_CoreStartupParameter.bTLBHack) {
			mem_mask |= Memory::ADDR_MASK_MEM1;
	}
#ifdef ENABLE_MEM_CHECK
	if (Core::g_CoreStartupParameter.bEnableDebugging)
	{
		mem_mask |= Memory::EXRAM_MASK;
	}
#endif

	gpr.FlushLockX(ABI_PARAM1);
	gpr.Lock(a);
	fpr.Lock(s);
	gpr.BindToRegister(a, true, false);

	s32 offset = (s32)(s16)inst.SIMM_16;
	LEA(32, ABI_PARAM1, MDisp(gpr.R(a).GetSimpleReg(), offset));
	TEST(32, R(ABI_PARAM1), Imm32(mem_mask));
	FixupBranch safe = J_CC(CC_NZ);

	// Fast routine
	if (cpu_info.bSSSE3) {
		MOVAPD(XMM0, fpr.R(s));
		PSHUFB(XMM0, M((void*)bswapShuffle1x8));
#if _M_X86_64
		MOVQ_xmm(MComplex(RBX, ABI_PARAM1, SCALE_1, 0), XMM0);
#else
		AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK));
		MOVQ_xmm(MDisp(ABI_PARAM1, (u32)Memory::base), XMM0);
#endif
	} else {
		MOVAPD(XMM0, fpr.R(s));
		MOVD_xmm(R(EAX), XMM0);
		UnsafeWriteRegToReg(EAX, ABI_PARAM1, 32, 4);

		PSRLQ(XMM0, 32);
		MOVD_xmm(R(EAX), XMM0);
		UnsafeWriteRegToReg(EAX, ABI_PARAM1, 32, 0);
	}
	FixupBranch exit = J(true);
	SetJumpTarget(safe);

	// Safe but slow routine
	MOVAPD(XMM0, fpr.R(s));
	PSRLQ(XMM0, 32);
	MOVD_xmm(R(EAX), XMM0);
	SafeWriteRegToReg(EAX, ABI_PARAM1, 32, 0, RegistersInUse() | (1 << (16 + XMM0)));

	MOVAPD(XMM0, fpr.R(s));
	MOVD_xmm(R(EAX), XMM0);
	LEA(32, ABI_PARAM1, MDisp(gpr.R(a).GetSimpleReg(), offset));
	SafeWriteRegToReg(EAX, ABI_PARAM1, 32, 4, RegistersInUse());

	SetJumpTarget(exit);

	gpr.UnlockAll();
	gpr.UnlockAllX();
	fpr.UnlockAll();
}
Code example #12
void JIT::emit_op_ret(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain()) {
        Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
        JITStubCall(this, cti_op_ret_scopeChain).call();
        activationNotCreated.link(this);
    }
    emitLoad(dst, regT1, regT0);
#ifdef OPT_SCHED
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regLink);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
#else
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
#endif
    ret();
}
Code example #13
void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}
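The condition pairs above are chosen for NaN correctness: op_jlesseq jumps when op1 <= op2, and the inverted branch (op_jnlesseq) cannot simply use "greater than", because any comparison involving NaN must count as not-less-or-equal. Hence DoubleGreaterThanOrEqual (taken only for ordered operands) versus DoubleLessThanOrUnordered (also taken on NaN). A small C++ check of the intended truth table (illustrative only):

#include <cassert>
#include <cmath>

// Note the operand order: the JIT compares (op2, op1) in fpRegT1, fpRegT0.
bool jumpLessEq(double op1, double op2)    { return op2 >= op1; }    // DoubleGreaterThanOrEqual
bool jumpNotLessEq(double op1, double op2) { return !(op1 <= op2); } // DoubleLessThanOrUnordered

int main() {
    double nan = std::nan("");
    assert(!jumpLessEq(nan, 1.0));    // ordered-only condition rejects NaN
    assert(jumpNotLessEq(nan, 1.0));  // inverted condition fires on NaN
}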
Code example #14
File: JITCall32_64.cpp Project: KnightSwarm/WebKitTi
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        neg32(regT2);
        end.append(branchAdd32(Zero, TrustedImm32(1), regT2));
        // regT2: -argumentCount;

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchAdd32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue);
    stubCall.addArgument(arguments);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT3);

    if (canOptimize)
        end.link(this);
}
Code example #15
File: CompFPU.cpp Project: AmesianX/ppsspp
void Jit::Comp_mxc1(MIPSOpcode op) {
	CONDITIONAL_DISABLE(FPU_XFER);

	int fs = _FS;
	MIPSGPReg rt = _RT;

	switch ((op >> 21) & 0x1f) {
	case 0: // R(rt) = FI(fs); break; //mfc1
		if (rt == MIPS_REG_ZERO)
			return;
		gpr.MapReg(rt, false, true);
		// If fs is not mapped, most likely it's being abandoned.
		// Just load from memory in that case.
		if (fpr.R(fs).IsSimpleReg()) {
			MOVD_xmm(gpr.R(rt), fpr.RX(fs));
		} else {
			MOV(32, gpr.R(rt), fpr.R(fs));
		}
		break;

	case 2: // R(rt) = currentMIPS->ReadFCR(fs); break; //cfc1
		if (rt == MIPS_REG_ZERO)
			return;
		if (fs == 31) {
			bool wasImm = gpr.IsImm(MIPS_REG_FPCOND);
			if (!wasImm) {
				gpr.Lock(rt, MIPS_REG_FPCOND);
				gpr.MapReg(MIPS_REG_FPCOND, true, false);
			}
			gpr.MapReg(rt, false, true);
			MOV(32, gpr.R(rt), MIPSSTATE_VAR(fcr31));
			if (wasImm) {
				if (gpr.GetImm(MIPS_REG_FPCOND) & 1) {
					OR(32, gpr.R(rt), Imm32(1 << 23));
				} else {
					AND(32, gpr.R(rt), Imm32(~(1 << 23)));
				}
			} else {
				AND(32, gpr.R(rt), Imm32(~(1 << 23)));
				MOV(32, R(TEMPREG), gpr.R(MIPS_REG_FPCOND));
				AND(32, R(TEMPREG), Imm32(1));
				SHL(32, R(TEMPREG), Imm8(23));
				OR(32, gpr.R(rt), R(TEMPREG));
			}
			gpr.UnlockAll();
		} else if (fs == 0) {
			gpr.SetImm(rt, MIPSState::FCR0_VALUE);
		} else {
			Comp_Generic(op);
		}
		return;

	case 4: //FI(fs) = R(rt);	break; //mtc1
		fpr.MapReg(fs, false, true);
		if (gpr.IsImm(rt) && gpr.GetImm(rt) == 0) {
			XORPS(fpr.RX(fs), fpr.R(fs));
		} else {
			gpr.KillImmediate(rt, true, false);
			MOVD_xmm(fpr.RX(fs), gpr.R(rt));
		}
		return;

	case 6: //currentMIPS->WriteFCR(fs, R(rt)); break; //ctc1
		if (fs == 31) {
			// Must clear before setting, since ApplyRoundingMode() assumes it was cleared.
			RestoreRoundingMode();
			if (gpr.IsImm(rt)) {
				gpr.SetImm(MIPS_REG_FPCOND, (gpr.GetImm(rt) >> 23) & 1);
				MOV(32, MIPSSTATE_VAR(fcr31), Imm32(gpr.GetImm(rt) & 0x0181FFFF));
				if ((gpr.GetImm(rt) & 0x1000003) == 0) {
					// Default nearest / no-flush mode, just leave it cleared.
				} else {
					UpdateRoundingMode(gpr.GetImm(rt));
					ApplyRoundingMode();
				}
			} else {
Code example #16
File: CompFPU.cpp Project: ANR2ME/ppsspp
void Jit::Comp_mxc1(MIPSOpcode op)
{
	CONDITIONAL_DISABLE;

	int fs = _FS;
	MIPSGPReg rt = _RT;

	switch((op >> 21) & 0x1f) 
	{
	case 0: // R(rt) = FI(fs); break; //mfc1
		if (rt != MIPS_REG_ZERO) {
			fpr.MapReg(fs, true, false);  // TODO: Seems the V register becomes dirty here? It shouldn't.
			gpr.MapReg(rt, false, true);
			MOVD_xmm(gpr.R(rt), fpr.RX(fs));
		}
		break;

	case 2: // R(rt) = currentMIPS->ReadFCR(fs); break; //cfc1
		if (fs == 31) {
			bool wasImm = gpr.IsImm(MIPS_REG_FPCOND);
			if (!wasImm) {
				gpr.Lock(rt, MIPS_REG_FPCOND);
				gpr.MapReg(MIPS_REG_FPCOND, true, false);
			}
			gpr.MapReg(rt, false, true);
			MOV(32, gpr.R(rt), M(&mips_->fcr31));
			if (wasImm) {
				if (gpr.GetImm(MIPS_REG_FPCOND) & 1) {
					OR(32, gpr.R(rt), Imm32(1 << 23));
				} else {
					AND(32, gpr.R(rt), Imm32(~(1 << 23)));
				}
			} else {
				AND(32, gpr.R(rt), Imm32(~(1 << 23)));
				MOV(32, R(EAX), gpr.R(MIPS_REG_FPCOND));
				AND(32, R(EAX), Imm32(1));
				SHL(32, R(EAX), Imm8(23));
				OR(32, gpr.R(rt), R(EAX));
			}
			gpr.UnlockAll();
		} else if (fs == 0) {
			gpr.SetImm(rt, MIPSState::FCR0_VALUE);
		} else {
			Comp_Generic(op);
		}
		return;

	case 4: //FI(fs) = R(rt);	break; //mtc1
		gpr.MapReg(rt, true, false);
		fpr.MapReg(fs, false, true);
		MOVD_xmm(fpr.RX(fs), gpr.R(rt));
		return;

	case 6: //currentMIPS->WriteFCR(fs, R(rt)); break; //ctc1
		if (fs == 31) {
			if (gpr.IsImm(rt)) {
				gpr.SetImm(MIPS_REG_FPCOND, (gpr.GetImm(rt) >> 23) & 1);
				MOV(32, M(&mips_->fcr31), Imm32(gpr.GetImm(rt) & 0x0181FFFF));
			} else {
				gpr.Lock(rt, MIPS_REG_FPCOND);
				gpr.MapReg(rt, true, false);
				gpr.MapReg(MIPS_REG_FPCOND, false, true);
				MOV(32, gpr.R(MIPS_REG_FPCOND), gpr.R(rt));
				SHR(32, gpr.R(MIPS_REG_FPCOND), Imm8(23));
				AND(32, gpr.R(MIPS_REG_FPCOND), Imm32(1));
				MOV(32, M(&mips_->fcr31), gpr.R(rt));
				AND(32, M(&mips_->fcr31), Imm32(0x0181FFFF));
				gpr.UnlockAll();
			}
		} else {
Code example #17
// ICBinaryArith_Int32
// by wangqing, 2013-11-22
bool
ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
{
    // Guard that R0 is an integer and R1 is an integer.
    Label failure;
	
    masm.movl(ImmTag(JSVAL_TAG_INT32), cmpTempRegister);
    masm.bne(R0.typeReg(), cmpTempRegister, &failure);
    masm.nop();

    masm.movl(ImmTag(JSVAL_TAG_INT32), cmpTempRegister);
    masm.bne(R1.typeReg(), cmpTempRegister, &failure);
    masm.nop();

    // Add R0 and R1.  Don't need to explicitly unbox, just use the TailCallReg which
    // should be available.
    Register scratchReg = BaselineTailCallReg;

    Label revertRegister, maybeNegZero;
    // xsb:fix me
    // fixed by weizhenwei, 2013.11.05
    switch(op_) {
      case JSOP_ADD:
        // Add R0 and R1.  Don't need to explicitly unbox.
      
        // Move the two operands into the cmp registers in preparation for the overflow check.
        masm.cmpl(R0.payloadReg(), R1.payloadReg());
        masm.negl(cmpTemp2Register);

        // do the add
        masm.movl(R0.payloadReg(), scratchReg);
        masm.addl(R1.payloadReg(), scratchReg);

        // Just jump to failure on overflow.  R0 and R1 are preserved, so we can just jump to
        // the next stub.
        masm.xorInsn(dataTempRegister, cmpTempRegister, cmpTemp2Register);
        masm.bgez(dataTempRegister, 7);
        masm.nop();
        masm.subu(dataTempRegister, cmpTempRegister, cmpTemp2Register);
        masm.xorInsn(dataTempRegister, dataTempRegister, cmpTempRegister);
        masm.bgez(dataTempRegister, 3);
        masm.nop();
        masm.b(&failure);
        masm.nop();

        // Just overwrite the payload, the tag is still fine.
        masm.movl(scratchReg, R0.payloadReg());
        break;
      case JSOP_SUB:
        masm.cmpl(R0.payloadReg(), R1.payloadReg());

        masm.movl(R0.payloadReg(), scratchReg);
        masm.subl(R1.payloadReg(), scratchReg);

        // Jump to failure on overflow, by wangqing, 2013-11-22.
        masm.xorInsn(dataTempRegister, cmpTempRegister, cmpTemp2Register);
        masm.bgez(dataTempRegister, 7);
        masm.nop();
        masm.subu(dataTempRegister, cmpTempRegister, cmpTemp2Register);
        masm.xorInsn(dataTempRegister, dataTempRegister, cmpTempRegister);
        masm.bgez(dataTempRegister, 3);
        masm.nop();
        masm.b(&failure);
        masm.nop();

        masm.movl(scratchReg, R0.payloadReg());
        break;
      case JSOP_MUL:
        masm.movl(R0.payloadReg(), scratchReg);
        masm.imull(R1.payloadReg(), scratchReg);

        // test whether signed multiply overflow. by weizhenwei, 2013.11.01
        masm.mfhi(cmpTempRegister);
        masm.mflo(cmpTemp2Register);
        masm.sarl(Imm32(0x1f), cmpTemp2Register);
        masm.bne(cmpTempRegister, cmpTemp2Register, &failure);
        masm.nop();

        masm.beq(scratchReg, zero, &maybeNegZero);
        masm.nop();

        masm.movl(scratchReg, R0.payloadReg());
        break;
      case JSOP_DIV:
        // Prevent division by 0.
        masm.beq(R1.payloadReg(), zero, &failure);
        masm.nop();

        // Prevent negative 0 and -2147483648 / -1.
        /* Rewritten testl(reg, imm) to avoid using cmpTemp2Register, by wangqing, 2013-11-22. */
        masm.movl(R0.payloadReg(), cmpTempRegister);
        masm.andl(Imm32(0x7fffffff), cmpTempRegister);
        masm.beq(cmpTempRegister, zero, &failure);
        masm.nop();

        // Preserve R0.payloadReg()
        masm.div(R0.payloadReg(), R1.payloadReg());

        // A remainder implies a double result.
        // by weizhenwei, 2013.11.02
        masm.mfhi(cmpTempRegister);
        masm.bne(cmpTempRegister, zero, &failure);
        masm.nop();

        // by weizhenwei, 2013.11.05
        masm.mflo(R0.payloadReg());
        break;
      case JSOP_MOD:
      {
        // x % 0 always results in NaN.
        masm.beq(R1.payloadReg(), zero, &failure);
        masm.nop();

        // Prevent negative 0 and -2147483648 % -1.
        /* Rewritten testl(reg, imm) to avoid using cmpTemp2Register, by wangqing, 2013-11-22. */
        masm.movl(R0.payloadReg(), cmpTempRegister);
        masm.andl(Imm32(0x7fffffff), cmpTempRegister);
        masm.beq(cmpTempRegister, zero, &failure);
        masm.nop();

        masm.div(R0.payloadReg(), R1.payloadReg());

        // Fail when we would need a negative remainder.
        Label done;
        masm.mfhi(cmpTempRegister);
        masm.bne(cmpTempRegister, zero, &done);
        masm.nop();

        masm.bltz(R0.payloadReg(), &failure);
        masm.nop();

        masm.bltz(R1.payloadReg(), &failure);
        masm.nop();

        masm.bindBranch(&done);

        // Move remainder to R0.payloadReg, by weizhenwei, 2013.11.05.
        masm.mfhi(R0.payloadReg());
        break;
      }
      case JSOP_BITOR:
        // We can overwrite R0, because the instruction is infallible.
        // The R0.typeReg() is also still intact.
        masm.orl(R1.payloadReg(), R0.payloadReg());
        break;
      case JSOP_BITXOR:
        masm.xorl(R1.payloadReg(), R0.payloadReg());
        break;
      case JSOP_BITAND:
        masm.andl(R1.payloadReg(), R0.payloadReg());
        break;
      case JSOP_LSH:
        // R0.payloadReg() is the result, R1.payloadReg() is the shift amount.
        // rewrite by weizhenwei, 2013.11.06
        masm.sllv(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());

        // We need to tag again, because we overwrote it.
        masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
        break;
      case JSOP_RSH:
        // R0.payloadReg() is the result, R1.payloadReg() is the shift amount.
        // rewrite by weizhenwei, 2013.11.06
        masm.srav(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());

        // We need to tag again, because we overwrote it.
        masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
        break;
      case JSOP_URSH:
        if (!allowDouble_)
            masm.movl(R0.payloadReg(), scratchReg);

        // R0.payloadReg() is result, R1.payloadReg() is shiftAmount.
        // rewrite by weizhenwei, 2013.11.06
        masm.srlv(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());

        // by wangqing. 2013-11-22
        if (allowDouble_) {
            Label toUint;
            masm.bltz(R0.payloadReg(), &toUint);
            masm.nop();

            // Box and return.
            masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
            EmitReturnFromIC(masm);

            masm.bindBranch(&toUint);
            masm.convertUInt32ToDouble(R0.payloadReg(), ScratchFloatReg);
            masm.boxDouble(ScratchFloatReg, R0);
        } else {
            masm.bltz(R0.payloadReg(), &revertRegister);
            masm.nop();
            masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
        }
        break;
      default:
        JS_NOT_REACHED("Unhandled op for BinaryArith_Int32.");
        return false;
    }

    // Return.
    EmitReturnFromIC(masm);

    switch(op_) {
      case JSOP_MUL:
        masm.bindBranch(&maybeNegZero);

        // Result is -0 if exactly one of lhs or rhs is negative.
        masm.movl(R0.payloadReg(), scratchReg);
        masm.orl(R1.payloadReg(), scratchReg);
        // Added by QuQiuwen.
        masm.bltz(scratchReg, &failure);
        masm.nop();

        // Result is +0.
        masm.xorl(R0.payloadReg(), R0.payloadReg());
        EmitReturnFromIC(masm);
        break;
      case JSOP_URSH:
        // Revert the content of R0 in the fallible >>> case.
        if (!allowDouble_) {
            masm.bindBranch(&revertRegister);
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        }
        break;
      default:
        // No special failure handling required.
        // Fall through to failure.
        break;
    }

    // Failure case - jump to next stub
    masm.bindBranch(&failure);
    EmitStubGuardFailure(masm);

    return true;
}
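The xorInsn/bgez/subu ladder repeated in the ADD and SUB cases is the classic branch-free signed-overflow test for a machine with no flags register: for d = a - b, overflow can only occur when a and b have different signs, and it did occur exactly when d's sign differs from a's. Addition is funneled through the same check by negating the second operand first (a + b == a - (-b)), which is what the negl above does. The identity in standalone C++ (a sketch, using unsigned arithmetic to keep the subtraction well defined):

#include <cstdint>

bool subOverflows(int32_t a, int32_t b) {
    uint32_t ua = static_cast<uint32_t>(a);
    uint32_t ub = static_cast<uint32_t>(b);
    uint32_t diff = ua - ub;  // like MIPS subu: wraps, never traps
    // Signs differ AND the result's sign differs from a's sign => overflow.
    return static_cast<int32_t>((ua ^ ub) & (ua ^ diff)) < 0;
}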
Code example #18
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    move(Imm32(argCount), X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
        emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    Jump storeResultForFirstRun = jump();

// FIXME: this label can be removed, since it is a fixed offset from 'callReturnLocation'.
    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    Jump isNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // This handles host functions
    isNotObject.link(this);
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    Jump wasNotJSFunction = jump();

    // Next, handle JSFunctions...
    isJSFunction.link(this);

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    wasNotJSFunction.link(this);
    storeResultForFirstRun.link(this);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
Code example #19
bool
ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm)
{
    // by wangqing, 2013-11-28
    Label failure, notNaN, isTrue;

    /* If R0 is a double, load it into FloatReg0. If R0 is int32,
       convert it to double. Else, branch to failure.
       by wangqing, 2013-11-28 */
    Label isDouble, done;

    masm.movl(ImmTag(JSVAL_TAG_CLEAR), cmpTempRegister);
    masm.sltu(cmpTempRegister, R0.typeReg(), cmpTempRegister);
    masm.bgtz(cmpTempRegister, &isDouble);
    masm.nop();

    masm.movl(ImmTag(JSVAL_TAG_INT32), cmpTempRegister);
    masm.bne(R0.typeReg(), cmpTempRegister, &failure);
    masm.nop();

    masm.convertInt32ToDouble(R0.payloadReg(), FloatReg0);
    masm.b(&done);
    masm.nop();

    masm.bindBranch(&isDouble);
    masm.unboxDouble(R0, FloatReg0);

    masm.bindBranch(&done);

    /* If R1 is a double, load it into FloatReg1. If R1 is int32,
       convert it to double. Else, branch to failure.
       by wangqing, 2013-11-28 */
    Label isDouble1, done1;

    masm.movl(ImmTag(JSVAL_TAG_CLEAR), cmpTempRegister);
    masm.sltu(cmpTempRegister, R1.typeReg(), cmpTempRegister);
    masm.bgtz(cmpTempRegister, &isDouble1);
    masm.nop();

    masm.movl(ImmTag(JSVAL_TAG_INT32), cmpTempRegister);
    masm.bne(R1.typeReg(), cmpTempRegister, &failure);
    masm.nop();

    masm.convertInt32ToDouble(R1.payloadReg(), FloatReg1);
    masm.b(&done1);
    masm.nop();

    masm.bindBranch(&isDouble1);
    masm.unboxDouble(R1, FloatReg1);

    masm.bindBranch(&done1);

    Register dest = R0.scratchReg();

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(op);
    masm.addiu(dest, zero, 1);
    masm.branchDoubleLocal(cond, FloatReg0, FloatReg1, &isTrue);
    masm.xorl(dest, dest);
    masm.bindBranch(&isTrue);


    // Check for NaN, if needed.
    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (nanCond != Assembler::NaN_HandledByCond) {
        // Check for unordered (NaN) operands, by wangqing, 2013-11-28.
        masm.cud(FloatReg0, FloatReg1);
        masm.bc1f(&notNaN);
        masm.nop();
        masm.mov(Imm32(nanCond == Assembler::NaN_IsTrue), dest);
        masm.bindBranch(&notNaN);
    }

    masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
    EmitReturnFromIC(masm);

    // Failure case - jump to next stub
    masm.bindBranch(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
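The NaN epilogue works because branchDoubleLocal leaves dest holding the ordered answer, and the cud/bc1f pair then overwrites dest only when the operands compare unordered. A C++ model of that two-step evaluation (DoubleCond here is a simplified stand-in; in the real code NaN_IsTrue corresponds to conditions such as != that are true on NaN):

#include <cmath>

enum class DoubleCond { LessThan, GreaterThan, Equal, NotEqual };

bool evalDoubleCond(DoubleCond cond, double a, double b) {
    bool dest;
    switch (cond) {                           // branchDoubleLocal + addiu/xorl
      case DoubleCond::LessThan:    dest = a <  b; break;
      case DoubleCond::GreaterThan: dest = a >  b; break;
      case DoubleCond::Equal:       dest = a == b; break;
      case DoubleCond::NotEqual:    dest = a != b; break;
    }
    if (std::isnan(a) || std::isnan(b))       // cud: unordered operands?
        dest = (cond == DoubleCond::NotEqual);  // NaN_IsTrue only for !=
    return dest;
}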
Code example #20
File: CompFPU.cpp Project: A671DR218/ppsspp
void Jit::Comp_FPU2op(MIPSOpcode op) {
	CONDITIONAL_DISABLE;
	
	int fs = _FS;
	int fd = _FD;

	switch (op & 0x3f) 
	{
	case 5:	//F(fd)	= fabsf(F(fs)); break; //abs
		fpr.SpillLock(fd, fs);
		fpr.MapReg(fd, fd == fs, true);
		MOVSS(fpr.RX(fd), fpr.R(fs));
		PAND(fpr.RX(fd), M(ssNoSignMask));
		break;

	case 6:	//F(fd)	= F(fs);				break; //mov
		if (fd != fs) {
			fpr.SpillLock(fd, fs);
			fpr.MapReg(fd, fd == fs, true);
			MOVSS(fpr.RX(fd), fpr.R(fs));
		}
		break;

	case 7:	//F(fd)	= -F(fs);			 break; //neg
		fpr.SpillLock(fd, fs);
		fpr.MapReg(fd, fd == fs, true);
		MOVSS(fpr.RX(fd), fpr.R(fs));
		PXOR(fpr.RX(fd), M(ssSignBits2));
		break;


	case 4:	//F(fd)	= sqrtf(F(fs)); break; //sqrt
		fpr.SpillLock(fd, fs); // this probably works, just badly tested
		fpr.MapReg(fd, fd == fs, true);
		SQRTSS(fpr.RX(fd), fpr.R(fs));
		break;

	case 13: //FsI(fd) = F(fs)>=0 ? (int)floorf(F(fs)) : (int)ceilf(F(fs)); break;//trunc.w.s
		{
			fpr.SpillLock(fs, fd);
			fpr.StoreFromRegister(fd);
			CVTTSS2SI(EAX, fpr.R(fs));

			// Did we get an indefinite integer value?
			CMP(32, R(EAX), Imm32(0x80000000));
			FixupBranch skip = J_CC(CC_NE);
			MOVSS(XMM0, fpr.R(fs));
			XORPS(XMM1, R(XMM1));
			CMPSS(XMM0, R(XMM1), CMP_LT);

			// At this point, -inf = 0xffffffff, inf/nan = 0x00000000.
			// We want -inf to be 0x80000000, and inf/nan to be 0x7fffffff, so we flip those bits.
			MOVD_xmm(R(EAX), XMM0);
			XOR(32, R(EAX), Imm32(0x7fffffff));

			SetJumpTarget(skip);
			MOV(32, fpr.R(fd), R(EAX));
		}
		break;

	case 32: //F(fd)	= (float)FsI(fs);			break; //cvt.s.w
		// Store to memory so we can read it as an integer value.
		fpr.StoreFromRegister(fs);
		CVTSI2SS(XMM0, fpr.R(fs));
		MOVSS(fpr.R(fd), XMM0);
		break;

	case 12: //FsI(fd) = (int)floorf(F(fs)+0.5f); break; //round.w.s
	case 14: //FsI(fd) = (int)ceilf (F(fs)); break; //ceil.w.s
	case 15: //FsI(fd) = (int)floorf(F(fs)); break; //floor.w.s
	case 36: //FsI(fd) = (int)	F(fs);			 break; //cvt.w.s
	default:
		DISABLE;
		return;
	}
	fpr.ReleaseSpillLocks();
}
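
The case 13 (trunc.w.s) fixup above is the subtlest part of this function, so here is a hedged scalar restatement in C++. The cast models what CVTTSS2SI actually returns on x86 (the "integer indefinite" value 0x80000000 for out-of-range or NaN inputs); strictly speaking the out-of-range cast is undefined behavior in portable C++.

#include <cstdint>

// Sketch of the emitted logic: saturate +inf/NaN to 0x7fffffff and -inf
// to 0x80000000, as the PSP expects.
static int32_t truncWS(float f)
{
    int32_t v = static_cast<int32_t>(f);  // models CVTTSS2SI
    if (v == static_cast<int32_t>(0x80000000)) {
        // CMPSS(..., CMP_LT) produces an all-ones mask for f < 0 and zero
        // otherwise (NaN compares false); XOR with 0x7fffffff then yields
        // 0x80000000 for -inf and 0x7fffffff for +inf/NaN.
        uint32_t mask = (f < 0.0f) ? 0xffffffffu : 0u;
        v = static_cast<int32_t>(mask ^ 0x7fffffffu);
    }
    return v;
}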
Code example #21
File: CompFPU.cpp Project: achurch/ppsspp
		if (fs == 31) {
			ClearRoundingMode();
			if (gpr.IsImm(rt)) {
				gpr.SetImm(MIPS_REG_FPCOND, (gpr.GetImm(rt) >> 23) & 1);
				MOV(32, M(&mips_->fcr31), Imm32(gpr.GetImm(rt) & 0x0181FFFF));
				if ((gpr.GetImm(rt) & 0x1000003) == 0) {
					// Default nearest / no-flush mode, just leave it cleared.
				} else {
					SetRoundingMode();
				}
			} else {
				gpr.Lock(rt, MIPS_REG_FPCOND);
				gpr.MapReg(rt, true, false);
				gpr.MapReg(MIPS_REG_FPCOND, false, true);
				MOV(32, gpr.R(MIPS_REG_FPCOND), gpr.R(rt));
				SHR(32, gpr.R(MIPS_REG_FPCOND), Imm8(23));
				AND(32, gpr.R(MIPS_REG_FPCOND), Imm32(1));
				MOV(32, M(&mips_->fcr31), gpr.R(rt));
				AND(32, M(&mips_->fcr31), Imm32(0x0181FFFF));
				gpr.UnlockAll();
				SetRoundingMode();
			}
		} else {
			Comp_Generic(op);
		}
		return;
	}
}

}	// namespace MIPSComp
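
As a reading aid for the ctc1 handling above, a hedged sketch of the fcr31 bit fields implied by the masks in the snippet (bit positions are inferred from the code, not quoted from PSP documentation):

#include <cstdint>

// Illustrative view of the FPU control/status register as the JIT uses it:
// bit 23 is the condition flag mirrored into MIPS_REG_FPCOND, bits 0-1 are
// the rounding mode, bit 24 is flush-to-zero. Hence (value & 0x1000003) == 0
// means "round to nearest, no flush" -- the default the JIT leaves cleared.
struct Fcr31View {
    uint32_t raw;
    uint32_t RoundingMode() const { return raw & 3; }
    bool     FlushToZero() const  { return (raw >> 24) & 1; }
    bool     Condition() const    { return (raw >> 23) & 1; }
};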
Code example #22
File: Jit.cpp Project: ToadKing/ppsspp
void Jit::WriteDowncount(int offset)
{
	const int downcount = js.downcountAmount + offset;
	SUB(32, M(&currentMIPS->downcount), downcount > 127 ? Imm32(downcount) : Imm8(downcount));
}
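
The Imm8/Imm32 split in WriteDowncount is an x86 encoding optimization: SUB has a form that sign-extends an 8-bit immediate. A hedged sketch of the size difference, assuming the 32-bit absolute-address operand used here (byte counts are my arithmetic, not read out of the emitter):

#include <cstddef>
#include <cstdint>

// Hypothetical helper: encoded length of "SUB dword ptr [abs32], imm".
// opcode (1) + ModRM (1) + disp32 (4) + imm8 (1)  = 7 bytes
// opcode (1) + ModRM (1) + disp32 (4) + imm32 (4) = 10 bytes
static size_t subMemImmLength(int32_t imm)
{
    return (imm >= -128 && imm <= 127) ? 7 : 10;
}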
Code example #23
bool
ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
{
    // Guard that R0 is an integer and R1 is an integer.
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);

    // No need to explicitly unbox the payloads; the tail-call register
    // should be available as a scratch register here.
    Register scratchReg = BaselineTailCallReg;

    Label revertRegister, maybeNegZero;
    switch(op_) {
      case JSOP_ADD:
        // Add R0 and R1.  Don't need to explicitly unbox.
        masm.movl(R0.payloadReg(), scratchReg);
        masm.addl(R1.payloadReg(), scratchReg);

        // Just jump to failure on overflow.  R0 and R1 are preserved, so we can just jump to
        // the next stub.
        masm.j(Assembler::Overflow, &failure);

        // Just overwrite the payload, the tag is still fine.
        masm.movl(scratchReg, R0.payloadReg());
        break;
      case JSOP_SUB:
        masm.movl(R0.payloadReg(), scratchReg);
        masm.subl(R1.payloadReg(), scratchReg);
        masm.j(Assembler::Overflow, &failure);
        masm.movl(scratchReg, R0.payloadReg());
        break;
      case JSOP_MUL:
        masm.movl(R0.payloadReg(), scratchReg);
        masm.imull(R1.payloadReg(), scratchReg);
        masm.j(Assembler::Overflow, &failure);

        masm.testl(scratchReg, scratchReg);
        masm.j(Assembler::Zero, &maybeNegZero);

        masm.movl(scratchReg, R0.payloadReg());
        break;
      case JSOP_DIV:
        // Prevent division by 0.
        masm.branchTest32(Assembler::Zero, R1.payloadReg(), R1.payloadReg(), &failure);

        // Prevent negative 0 and -2147483648 / -1.
        // (payload & 0x7fffffff) == 0 exactly when the payload is 0 or INT32_MIN.
        masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);

        // For idiv we need eax.
        JS_ASSERT(R1.typeReg() == eax);
        masm.movl(R0.payloadReg(), eax);
        // Preserve R0.payloadReg()/edx, eax is JSVAL_TYPE_INT32.
        masm.movl(R0.payloadReg(), scratchReg);
        // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
        masm.cdq();
        masm.idiv(R1.payloadReg());

        // A remainder implies a double result.
        masm.branchTest32(Assembler::NonZero, edx, edx, &revertRegister);

        masm.movl(eax, R0.payloadReg());
        break;
      case JSOP_MOD:
      {
        // x % 0 always results in NaN.
        masm.branchTest32(Assembler::Zero, R1.payloadReg(), R1.payloadReg(), &failure);

        // Prevent negative 0 and -2147483648 % -1.
        masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);

        // For idiv we need eax.
        JS_ASSERT(R1.typeReg() == eax);
        masm.movl(R0.payloadReg(), eax);
        // Preserve R0.payloadReg()/edx, eax is JSVAL_TYPE_INT32.
        masm.movl(R0.payloadReg(), scratchReg);
        // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
        masm.cdq();
        masm.idiv(R1.payloadReg());

        // Fail when we would need a negative remainder.
        Label done;
        masm.branchTest32(Assembler::NonZero, edx, edx, &done);
        masm.branchTest32(Assembler::Signed, scratchReg, scratchReg, &revertRegister);
        masm.branchTest32(Assembler::Signed, R1.payloadReg(), R1.payloadReg(), &revertRegister);

        masm.bind(&done);
        // Result is in edx, tag in ecx remains untouched.
        JS_ASSERT(R0.payloadReg() == edx);
        JS_ASSERT(R0.typeReg() == ecx);
        break;
      }
      case JSOP_BITOR:
        // We can override R0, because the instruction is infallible.
        // The R0.typeReg() is also still intact.
        masm.orl(R1.payloadReg(), R0.payloadReg());
        break;
      case JSOP_BITXOR:
        masm.xorl(R1.payloadReg(), R0.payloadReg());
        break;
      case JSOP_BITAND:
        masm.andl(R1.payloadReg(), R0.payloadReg());
        break;
      case JSOP_LSH:
        // RHS needs to be in ecx for shift operations.
        JS_ASSERT(R0.typeReg() == ecx);
        masm.movl(R1.payloadReg(), ecx);
        masm.shll_cl(R0.payloadReg());
        // We need to tag again, because we overwrote it.
        masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
        break;
      case JSOP_RSH:
        masm.movl(R1.payloadReg(), ecx);
        masm.sarl_cl(R0.payloadReg());
        masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
        break;
      case JSOP_URSH:
        if (!allowDouble_)
            masm.movl(R0.payloadReg(), scratchReg);

        masm.movl(R1.payloadReg(), ecx);
        masm.shrl_cl(R0.payloadReg());
        masm.testl(R0.payloadReg(), R0.payloadReg());
        if (allowDouble_) {
            Label toUint;
            masm.j(Assembler::Signed, &toUint);

            // Box and return.
            masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
            EmitReturnFromIC(masm);

            masm.bind(&toUint);
            masm.convertUInt32ToDouble(R0.payloadReg(), ScratchFloatReg);
            masm.boxDouble(ScratchFloatReg, R0);
        } else {
            masm.j(Assembler::Signed, &revertRegister);
            masm.tagValue(JSVAL_TYPE_INT32, R0.payloadReg(), R0);
        }
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unhandled op for BinaryArith_Int32.");
    }

    // Return.
    EmitReturnFromIC(masm);

    switch(op_) {
      case JSOP_MUL:
        masm.bind(&maybeNegZero);

        // Result is -0 if exactly one of lhs or rhs is negative.
        masm.movl(R0.payloadReg(), scratchReg);
        masm.orl(R1.payloadReg(), scratchReg);
        masm.j(Assembler::Signed, &failure);

        // Result is +0.
        masm.xorl(R0.payloadReg(), R0.payloadReg());
        EmitReturnFromIC(masm);
        break;
      case JSOP_DIV:
      case JSOP_MOD:
        masm.bind(&revertRegister);
        // Restore R0's payload from scratchReg and re-tag R1 (its type
        // register, eax, was clobbered by idiv); fall through to failure.
        masm.movl(scratchReg, R0.payloadReg());
        masm.movl(ImmType(JSVAL_TYPE_INT32), R1.typeReg());
        break;
      case JSOP_URSH:
        // Revert the content of R0 in the fallible >>> case.
        if (!allowDouble_) {
            masm.bind(&revertRegister);
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        }
        break;
      default:
        // No special failure handling required.
        // Fall through to failure.
        break;
    }

    // Failure case - jump to next stub
    masm.bind(&failure);
    EmitStubGuardFailure(masm);

    return true;
}
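
The maybeNegZero path above exists because JS multiplication follows IEEE-754: a zero product with exactly one negative operand is -0.0, which an int32 payload cannot represent. A stand-alone illustration:

#include <cmath>
#include <cstdio>

int main()
{
    // -3 * 0 is -0.0 in IEEE-754; only the sign bit distinguishes it from
    // +0.0, so an int32-producing stub has to fail over to doubles here.
    double p = -3.0 * 0.0;
    std::printf("p = %g, signbit = %d\n", p, (int)std::signbit(p));
    return 0;
}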
Code example #24
File: BaselineIC-x64.cpp Project: JasonGross/mozjs
bool
ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
{
    // Guard that R0 is an integer and R1 is an integer.
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);

    Label revertRegister, maybeNegZero;
    switch(op_) {
      case JSOP_ADD:
        masm.unboxInt32(R0, ExtractTemp0);
        // Just jump to failure on overflow. R0 and R1 are preserved, so we can just jump to
        // the next stub.
        masm.addl(R1.valueReg(), ExtractTemp0);
        masm.j(Assembler::Overflow, &failure);

        // Box the result
        masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
        break;
      case JSOP_SUB:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.subl(R1.valueReg(), ExtractTemp0);
        masm.j(Assembler::Overflow, &failure);
        masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
        break;
      case JSOP_MUL:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.imull(R1.valueReg(), ExtractTemp0);
        masm.j(Assembler::Overflow, &failure);

        masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &maybeNegZero);

        masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
        break;
      case JSOP_DIV:
      {
        JS_ASSERT(R2.scratchReg() == rax);
        JS_ASSERT(R0.valueReg() != rdx);
        JS_ASSERT(R1.valueReg() != rdx);
        masm.unboxInt32(R0, eax);
        masm.unboxInt32(R1, ExtractTemp0);

        // Prevent division by 0.
        masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &failure);

        // Bail on an INT32_MIN dividend, which would overflow for
        // -2147483648 / -1.
        masm.branch32(Assembler::Equal, eax, Imm32(INT32_MIN), &failure);

        // Prevent negative 0: a zero dividend with a negative divisor
        // would have to produce -0, which int32 cannot represent.
        Label notZero;
        masm.branch32(Assembler::NotEqual, eax, Imm32(0), &notZero);
        masm.branchTest32(Assembler::Signed, ExtractTemp0, ExtractTemp0, &failure);
        masm.bind(&notZero);

        // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
        masm.cdq();
        masm.idiv(ExtractTemp0);

        // A remainder implies a double result.
        masm.branchTest32(Assembler::NonZero, edx, edx, &failure);

        masm.boxValue(JSVAL_TYPE_INT32, eax, R0.valueReg());
        break;
      }
      case JSOP_MOD:
      {
        JS_ASSERT(R2.scratchReg() == rax);
        JS_ASSERT(R0.valueReg() != rdx);
        JS_ASSERT(R1.valueReg() != rdx);
        masm.unboxInt32(R0, eax);
        masm.unboxInt32(R1, ExtractTemp0);

        // x % 0 always results in NaN.
        masm.branchTest32(Assembler::Zero, ExtractTemp0, ExtractTemp0, &failure);

        // Prevent negative 0 and -2147483648 % -1.
        masm.branchTest32(Assembler::Zero, eax, Imm32(0x7fffffff), &failure);

        // Sign extend eax into edx to make (edx:eax), since idiv is 64-bit.
        masm.cdq();
        masm.idiv(ExtractTemp0);

        // Fail when we would need a negative remainder.
        Label done;
        masm.branchTest32(Assembler::NonZero, edx, edx, &done);
        masm.orl(ExtractTemp0, eax);
        masm.branchTest32(Assembler::Signed, eax, eax, &failure);

        masm.bind(&done);
        masm.boxValue(JSVAL_TYPE_INT32, edx, R0.valueReg());
        break;
      }
      case JSOP_BITOR:
        // We can override R0, because the instruction is infallible.
        // Because the tag bits are the same, we don't need to retag.
        masm.orq(R1.valueReg(), R0.valueReg());
        break;
      case JSOP_BITXOR:
        masm.xorl(R1.valueReg(), R0.valueReg());
        masm.tagValue(JSVAL_TYPE_INT32, R0.valueReg(), R0);
        break;
      case JSOP_BITAND:
        masm.andq(R1.valueReg(), R0.valueReg());
        break;
      case JSOP_LSH:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ecx); // Unboxing R1 to ecx, clobbers R0.
        masm.shll_cl(ExtractTemp0);
        masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
        break;
      case JSOP_RSH:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ecx);
        masm.sarl_cl(ExtractTemp0);
        masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
        break;
      case JSOP_URSH:
        if (!allowDouble_)
            masm.movq(R0.valueReg(), ScratchReg);

        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ecx); // This clobbers R0

        masm.shrl_cl(ExtractTemp0);
        masm.testl(ExtractTemp0, ExtractTemp0);
        if (allowDouble_) {
            Label toUint;
            masm.j(Assembler::Signed, &toUint);

            // Box and return.
            masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
            EmitReturnFromIC(masm);

            masm.bind(&toUint);
            masm.convertUInt32ToDouble(ExtractTemp0, ScratchFloatReg);
            masm.boxDouble(ScratchFloatReg, R0);
        } else {
            masm.j(Assembler::Signed, &revertRegister);
            masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
        }
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unhandled op in BinaryArith_Int32");
    }

    // Return from stub.
    EmitReturnFromIC(masm);

    if (op_ == JSOP_MUL) {
        masm.bind(&maybeNegZero);

        // Result is -0 if exactly one of lhs or rhs is negative.
        masm.movl(R0.valueReg(), ScratchReg);
        masm.orl(R1.valueReg(), ScratchReg);
        masm.j(Assembler::Signed, &failure);

        // Result is +0.
        masm.moveValue(Int32Value(0), R0);
        EmitReturnFromIC(masm);
    }

    // Revert the content of R0 in the fallible >>> case.
    if (op_ == JSOP_URSH && !allowDouble_) {
        masm.bind(&revertRegister);
        // Restore tag and payload.
        masm.movq(ScratchReg, R0.valueReg());
        // Fall through to failure.
    }
    // Failure case - jump to next stub
    masm.bind(&failure);
    EmitStubGuardFailure(masm);

    return true;
}
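
In both the x86 and x64 stubs, the JSOP_URSH special-casing follows from JS's >>> operator producing a uint32: any result with the top bit set has no int32 representation, hence the Signed test and the allowDouble_ split. A minimal illustration:

#include <cstdint>
#include <cstdio>

int main()
{
    // JS: (-1 >>> 0) === 4294967295. After the shift, a set sign bit means
    // the value cannot be boxed as an int32; the stub then either boxes a
    // double (allowDouble_) or reverts R0 and falls through to the next stub.
    uint32_t u = static_cast<uint32_t>(-1) >> 0;
    std::printf("%u fits in int32: %s\n", u,
                static_cast<int32_t>(u) >= 0 ? "yes" : "no");
    return 0;
}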
Code example #25
void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
{
    // On a 64-bit target the counter is a single machine word, so one
    // pointer-width add covers the full count.
    addPtr(Imm32(increment), AbsoluteAddress(counter.addressOfCounter()));
}
Code example #26
void JitBlockCache::DestroyBlock(int block_num, bool invalidate) {
    if (block_num < 0 || block_num >= num_blocks_) {
        ERROR_LOG_REPORT(JIT, "DestroyBlock: Invalid block number %d", block_num);
        return;
    }
    JitBlock *b = &blocks_[block_num];
    // No point it being in there anymore.
    RemoveBlockMap(block_num);

    // Pure proxy blocks always point directly at a real block; there should
    // be no chains of proxy-only blocks pointing to proxy-only blocks.
    // Destroy every block that transitively has this one as a proxy: such a
    // root block once inlined this block (or its 'parent'), so now that this
    // block has changed, the root block must be destroyed too.
    if (b->proxyFor) {
        for (size_t i = 0; i < b->proxyFor->size(); i++) {
            int proxied_blocknum = GetBlockNumberFromStartAddress((*b->proxyFor)[i], false);
            // If it was already cleared, we don't know which to destroy.
            if (proxied_blocknum != -1) {
                DestroyBlock(proxied_blocknum, invalidate);
            }
        }
        b->proxyFor->clear();
        delete b->proxyFor;
        b->proxyFor = 0;
    }
    auto range = proxyBlockMap_.equal_range(b->originalAddress);
    for (auto it = range.first; it != range.second; ++it) {
        if (it->second == block_num) {
            // Found it.  Delete and bail.
            proxyBlockMap_.erase(it);
            break;
        }
    }

    // TODO: Handle the case when there's a proxy block and a regular JIT block at the same location.
    // In this case we probably "leak" the proxy block currently (no memory leak but it'll stay enabled).

    if (b->invalid) {
        if (invalidate)
            ERROR_LOG(JIT, "Invalidating invalid block %d", block_num);
        return;
    }

    b->invalid = true;
    if (Memory::ReadUnchecked_U32(b->originalAddress) == GetEmuHackOpForBlock(block_num).encoding)
        Memory::Write_Opcode_JIT(b->originalAddress, b->originalFirstOpcode);

    // It's not safe to set normalEntry to 0 here, since we use a binary search
    // that looks at that later to find blocks. Marking it invalid is enough.

    UnlinkBlock(block_num);

#if defined(ARM)

    // Send anyone who tries to run this block back to the dispatcher.
    // Not entirely ideal, but .. pretty good.
    // I hope there's enough space...
    // checkedEntry is the only "linked" entrance so it's enough to overwrite that.
    ARMXEmitter emit((u8 *)b->checkedEntry);
    emit.MOVI2R(R0, b->originalAddress);
    emit.STR(R0, CTXREG, offsetof(MIPSState, pc));
    emit.B(MIPSComp::jit->dispatcher);
    emit.FlushIcache();

#elif defined(_M_IX86) || defined(_M_X64)

    // Send anyone who tries to run this block back to the dispatcher.
    // Not entirely ideal, but .. pretty good.
    // Spurious entrances from previously linked blocks can only come through checkedEntry
    XEmitter emit((u8 *)b->checkedEntry);
    emit.MOV(32, M(&mips_->pc), Imm32(b->originalAddress));
    emit.JMP(MIPSComp::jit->Asm().dispatcher, true);
#elif defined(PPC)
    PPCXEmitter emit((u8 *)b->checkedEntry);
    emit.MOVI2R(R3, b->originalAddress);
    emit.STW(R3, CTXREG, offsetof(MIPSState, pc));  // store the address just loaded
    emit.B(MIPSComp::jit->dispatcher);
    emit.FlushIcache();
#endif
}
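
The ReadUnchecked_U32 comparison near the end of DestroyBlock guards the opcode restore. A hedged sketch of the underlying scheme, with placeholder constants (the real emuhack encoding is defined elsewhere in PPSSPP):

#include <cstdint>

// Illustrative only: a compiled block replaces its first opcode in emulated
// RAM with a pseudo-op encoding the block number, so dispatch can find the
// block without a lookup. On destruction, the original opcode is restored
// only if the pseudo-op is still in place, since the game may have since
// overwritten that memory.
constexpr uint32_t kEmuHackBase = 0x68000000;  // placeholder, not the real encoding
constexpr uint32_t kEmuHackMask = 0xFF000000;  // placeholder

inline uint32_t MakeEmuHackOp(int blockNum) { return kEmuHackBase | static_cast<uint32_t>(blockNum); }
inline bool IsEmuHackOp(uint32_t op) { return (op & kEmuHackMask) == kEmuHackBase; }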
Code example #27
void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();


    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);

    // === Function footer code generation ===
    //
    // Generate code to perform the slow register file check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);
    
    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(Address(GPRInfo::callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))), GPRInfo::regT1);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);


    // === Link ===
    LinkBuffer linkBuffer(*m_globalData, this);
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    
    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
}
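
To summarize the control flow compileFunction lays out, here is a hypothetical C++ sketch (plain functions, not MacroAssembler code) of the two entry points it produces: the fast entry used when the caller's argument count is statically known to match, and the checking entry that normalizes the frame first.

#include <cstdio>

static int functionBody(int argc)  // corresponds to the code after fromArityCheck
{
    return argc * 2;
}

static int entryWithArityCheck(int argc, int expectedArgc)
{
    if (argc != expectedArgc) {
        // The real code calls out to cti_op_call_arityCheck (or the
        // construct variant), which rebuilds the call frame; this sketch
        // just normalizes the count.
        argc = expectedArgc;
    }
    return functionBody(argc);
}

int main()
{
    std::printf("%d\n", functionBody(3));             // fast entry
    std::printf("%d\n", entryWithArityCheck(5, 3));   // checking entry
    return 0;
}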