Example #1
    void Assembler::asm_load64(LIns *ins) {

        switch (ins->opcode()) {
            case LIR_ldd:
            CASE64(LIR_ldq:)
                // handled by mainline code below for now
                break;
            case LIR_ldf2d:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
                return;
        }

        LIns* base = ins->oprnd1();
    #ifdef NANOJIT_64BIT
        Register rr = ins->deprecated_getReg();
        if (deprecated_isKnownReg(rr) && (rmask(rr) & FpRegs)) {
            // FPR already assigned, fine, use it
            deprecated_freeRsrcOf(ins);
        } else {
            // use a GPR; it's okay to copy doubles with GPRs,
            // but *not* okay to copy non-doubles with FPRs
            rr = deprecated_prepResultReg(ins, GpRegs);
        }
    #else
        Register rr = deprecated_prepResultReg(ins, FpRegs);
    #endif

        int dr = ins->disp();
        Register ra = getBaseReg(base, dr, GpRegs);

    #ifdef NANOJIT_64BIT
        if (rmask(rr) & GpRegs) {
            #if !PEDANTIC
                if (isS16(dr)) {
                    LD(rr, dr, ra);
                    return;
                }
            #endif
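            // Note: nanojit emits machine code backwards, so the LDX below is
            // written out first but at run time executes after the li that
            // materializes the displacement in R0.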
            // general case: 64-bit GPR load
            LDX(rr, ra, R0);
            asm_li(R0, dr);
            return;
        }
    #endif

        // FPR
    #if !PEDANTIC
        if (isS16(dr)) {
            LFD(rr, dr, ra);
            return;
        }
    #endif

        // general case FPR load
        LFDX(rr, ra, R0);
        asm_li(R0, dr);
    }
Example #2
void write_code(uint32_t ** caret, uint32_t ** code, uint32_t ** patch, test_func_t patch_func)
{
    *caret = *code;
    emit(caret, PUSH(rmask(4) | rmask(lr)));
    *patch = *caret;  // Store the location of the code that we want to patch.

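    // Build the 32-bit address of patch_func in ip one byte at a time, using
    // ARM's rotated 8-bit immediates (OP2_BYTEn presumably supplies the
    // rotation bits for each byte position).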
    emit(caret, MOV(ip, OP2_BYTE0 | (((uint32_t)patch_func >>  0) & 0xff)));
    emit(caret, ORR(ip, OP2_BYTE1 | (((uint32_t)patch_func >>  8) & 0xff)));
    emit(caret, ORR(ip, OP2_BYTE2 | (((uint32_t)patch_func >> 16) & 0xff)));
    emit(caret, ORR(ip, OP2_BYTE3 | (((uint32_t)patch_func >> 24) & 0xff)));
    emit(caret, BLX(ip));
    emit(caret, POP(rmask(4) | rmask(pc)));
}
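A minimal sketch of how the stored *patch location could be used later. It assumes the standard ARM data-processing encoding, where each of the four instructions keeps its address byte in the instruction's low 8 bits; patch_call_target is a hypothetical helper, not part of the code above:

void patch_call_target(uint32_t *patch, test_func_t new_func)
{
    uint32_t addr = (uint32_t)new_func;
    // patch[0] is the MOV and patch[1..3] are the ORRs; each encodes one
    // byte of the address in its low 8 bits, so only those bits change.
    patch[0] = (patch[0] & ~0xffu) | ((addr >>  0) & 0xff);
    patch[1] = (patch[1] & ~0xffu) | ((addr >>  8) & 0xff);
    patch[2] = (patch[2] & ~0xffu) | ((addr >> 16) & 0xff);
    patch[3] = (patch[3] & ~0xffu) | ((addr >> 24) & 0xff);
    // On real hardware the instruction cache must be flushed before the
    // patched code runs.
}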
Example #3
	void RegAlloc::retire(Register r)
	{
		NanoAssert(r != UnknownReg);
		NanoAssert(active[r] != NULL);
		active[r] = NULL;
		free |= rmask(r);
	}
Example #4
	NIns* Assembler::genPrologue(RegisterMask needSaving)
	{
		/**
		 * Prologue
		 */
		uint32_t stackNeeded = STACK_GRANULARITY * _activation.highwatermark;
		uint32_t savingCount = 0;

		for(Register i=FirstReg; i <= LastReg; i = nextreg(i))
			if (needSaving&rmask(i)) 
				savingCount++;

		// After forcing alignment, we've pushed the pre-alignment SP
		// and savingCount registers.
		uint32_t stackPushed = STACK_GRANULARITY * (1+savingCount);
		uint32_t aligned = alignUp(stackNeeded + stackPushed, NJ_ALIGN_STACK);
		uint32_t amt = aligned - stackPushed;

		// Reserve stackNeeded bytes, padded
		// to preserve NJ_ALIGN_STACK-byte alignment.
		if (amt) 
		{
#if defined NANOJIT_IA32
			SUBi(SP, amt);
#elif defined NANOJIT_AMD64
			SUBQi(SP, amt);
#endif
		}

		verbose_only( verbose_outputf("        %p:",_nIns); )
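The prologue's alignment arithmetic hinges on alignUp(). A minimal sketch of the usual power-of-two idiom (the real nanojit helper may differ), with a worked example assuming NJ_ALIGN_STACK == 16 and STACK_GRANULARITY == 4:

	static inline uint32_t alignUp(uint32_t x, uint32_t align)
	{
		// align must be a power of two, e.g. NJ_ALIGN_STACK == 16
		return (x + align - 1) & ~(align - 1);
	}
	// Worked example: stackNeeded = 40, savingCount = 2:
	//   stackPushed = 4 * (1 + 2) = 12
	//   aligned     = alignUp(40 + 12, 16) = 64
	//   amt         = 64 - 12 = 52, so SUB(SP, 52) keeps SP 16-byte aligned.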
Example #5
void Assembler::asm_load64(LIns *ins) {
    LIns* base = ins->oprnd1();
#ifdef NANOJIT_64BIT
    Register rr = ins->getReg();
    if (isKnownReg(rr) && (rmask(rr) & FpRegs)) {
        // FPR already assigned, fine, use it
        freeRsrcOf(ins, false);
    } else {
        // use a GPR; it's okay to copy doubles with GPRs,
        // but *not* okay to copy non-doubles with FPRs
        rr = prepResultReg(ins, GpRegs);
    }
#else
    Register rr = prepResultReg(ins, FpRegs);
#endif

    int dr = ins->disp();
    Register ra = getBaseReg(ins->opcode(), base, dr, GpRegs);

#ifdef NANOJIT_64BIT
    if (rmask(rr) & GpRegs) {
#if !PEDANTIC
        if (isS16(dr)) {
            LD(rr, dr, ra);
            return;
        }
#endif
        // general case: 64-bit GPR load
        LDX(rr, ra, R0);
        asm_li(R0, dr);
        return;
    }
#endif

    // FPR
#if !PEDANTIC
    if (isS16(dr)) {
        LFD(rr, dr, ra);
        return;
    }
#endif

    // general case FPR load
    LFDX(rr, ra, R0);
    asm_li(R0, dr);
}
Example #6
	NIns* Assembler::genPrologue(RegisterMask needSaving)
	{
		/**
		 * Prologue
		 */

		// NJ_STACK_OFFSET is space at the top of the stack for us
		// to use for parameter passing (8 bytes at the moment)
		uint32_t stackNeeded = 4 * _activation.highwatermark + NJ_STACK_OFFSET;
		uint32_t savingCount = 0;

		uint32_t savingMask = 0;
		#if defined(NJ_THUMB_JIT)
		savingCount = 5; // R4-R7, LR
		savingMask = 0xF0;
		(void)needSaving;
		#else
		savingCount = 9; // R4-R10, R11, LR
		savingMask = SavedRegs | rmask(FRAME_PTR);
		(void)needSaving;
		#endif

		// So for alignment purposes we've pushed the return addr, fp, and savingCount registers.
		uint32_t stackPushed = 4 * (2+savingCount);
		uint32_t aligned = alignUp(stackNeeded + stackPushed, NJ_ALIGN_STACK);
		int32_t amt = aligned - stackPushed;

		// Make room on stack for what we are doing
		if (amt)
#ifdef NJ_THUMB_JIT
		{
			// largest single adjustment is 508 (7-bit immediate << 2)
			if (amt>508)
			{
				int size = 508;
				while (size>0)
				{
					SUBi(SP, size);
					amt -= size;
					size = amt;
					if (size>508)
						size=508;
				}
			}
			else
				SUBi(SP, amt); 

		}
#else
		{ 
			SUBi(SP, amt); 
		}
#endif
		verbose_only( verbose_outputf("         %p:",_nIns); )
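A standalone sketch of the Thumb chunking logic above; emit_sub_sp and the printf are illustrative stand-ins for the real SUBi macro, not nanojit code:

#include <cstdint>
#include <cstdio>

// Split a stack adjustment into Thumb-encodable pieces: SUB SP, #imm takes
// a 7-bit immediate scaled by 4, so at most 127 * 4 = 508 bytes per step.
static void emit_sub_sp(int32_t amt)
{
    while (amt > 0) {
        int32_t size = amt > 508 ? 508 : amt;
        std::printf("SUB SP, #%d\n", size);  // stands in for SUBi(SP, size)
        amt -= size;
    }
}

int main()
{
    emit_sub_sp(1200);  // three steps: 508 + 508 + 184 == 1200
    return 0;
}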
Example #7
	// Scan the table for the instruction with the lowest priority, meaning
	// it is used furthest in the future.
	LIns* Assembler::findVictim(RegAlloc &regs, RegisterMask allow)
	{
		NanoAssert(allow != 0);
		LIns *i, *a = 0;
		int allow_pri = 0x7fffffff;
		for (Register r = FirstReg; r <= LastReg; r = nextreg(r))
		{
			if ((allow & rmask(r)) && (i = regs.getActive(r)) != 0)
			{
				int pri = canRemat(i) ? 0 : regs.getPriority(r);
				if (!a || pri < allow_pri) {
					a = i;
					allow_pri = pri;
				}
			}
		}
		NanoAssert(a != 0);
		return a;
	}
Example #8
    void Assembler::asm_call(LInsp ins)
    {
        Register retReg = ( ins->isop(LIR_fcall) ? F0 : retRegs[0] );
        prepResultReg(ins, rmask(retReg));

        // Do this after we've handled the call result, so we don't
        // force the call result to be spilled unnecessarily.

        evictScratchRegs();

        const CallInfo* call = ins->callInfo();

        underrunProtect(8);
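        // The NOP fills the call's branch delay slot; because nanojit emits
        // code backwards, it is written first but lands after the call in
        // memory.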
        NOP();

        ArgSize sizes[MAXARGS];
        uint32_t argc = call->get_sizes(sizes);

        NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
        verbose_only(if (_logc->lcbits & LC_Assembly)
                     outputf("        %p:", _nIns);
                     )
Example #9
	void RegAlloc::addFree(Register r)
	{
		NanoAssert(!isFree(r));
		free |= rmask(r);
	}
Example #10
	bool RegAlloc::isFree(Register r) 
	{
		NanoAssert(r != UnknownReg);
		return (free & rmask(r)) != 0;
	}