Example #1
    void Assembler::asm_load64(LIns *ins) {

        switch (ins->opcode()) {
            case LIR_ldd:
            CASE64(LIR_ldq:)
                // handled by mainline code below for now
                break;
            case LIR_ldf2d:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
                return;
        }

        LIns* base = ins->oprnd1();
    #ifdef NANOJIT_64BIT
        Register rr = ins->deprecated_getReg();
        if (deprecated_isKnownReg(rr) && (rmask(rr) & FpRegs)) {
            // FPR already assigned, fine, use it
            deprecated_freeRsrcOf(ins);
        } else {
            // use a GPR register; it's okay to copy doubles with GPRs,
            // but *not* okay to copy non-doubles with FPRs
            rr = deprecated_prepResultReg(ins, GpRegs);
        }
    #else
        Register rr = deprecated_prepResultReg(ins, FpRegs);
    #endif

        int dr = ins->disp();
        Register ra = getBaseReg(base, dr, GpRegs);

    #ifdef NANOJIT_64BIT
        if (rmask(rr) & GpRegs) {
            #if !PEDANTIC
                if (isS16(dr)) {
                    LD(rr, dr, ra);
                    return;
                }
            #endif
            // general case: 64-bit GPR load with a wide offset; code is
            // emitted in reverse, so asm_li fills R0 before LDX executes
            LDX(rr, ra, R0);
            asm_li(R0, dr);
            return;
        }
    #endif

        // FPR
    #if !PEDANTIC
        if (isS16(dr)) {
            LFD(rr, dr, ra);
            return;
        }
    #endif

        // general case FPR load; emitted in reverse, so asm_li runs first
        LFDX(rr, ra, R0);
        asm_li(R0, dr);
    }
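
Both fast paths above hinge on isS16(dr): PPC D-form loads (LD, LFD) encode the displacement as a signed 16-bit immediate, so a wider offset must be materialized into R0 with asm_li and loaded through the indexed X-form (LDX, LFDX). A minimal sketch of the predicate, assuming the usual truncate-and-compare formulation (the exact NanoJIT definition may differ):

    #include <stdint.h>

    // A displacement fits a D-form instruction's signed 16-bit immediate
    // field iff truncating to int16_t and widening back is a no-op,
    // i.e. -32768 <= d <= 32767.
    static inline bool isS16(intptr_t d) {
        return d == (intptr_t)(int16_t)d;
    }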
Example #2
    void Assembler::asm_store32(LOpcode op, LIns *value, int32_t dr, LIns *base) {

        switch (op) {
            case LIR_sti:
                // handled by mainline code below for now
                break;
            case LIR_stb:
            case LIR_sts:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
                return;
        }

        Register rs = findRegFor(value, GpRegs);
        Register ra = value == base ? rs : getBaseReg(LIR_sti, base, dr, GpRegs & ~rmask(rs));

    #if !PEDANTIC
        if (isS16(dr)) {
            STW(rs, dr, ra);
            return;
        }
    #endif

        // general case store, any offset size; emitted in reverse,
        // so asm_li fills R0 before STWX executes
        STWX(rs, ra, R0);
        asm_li(R0, dr);
    }
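
Note the mask handed to getBaseReg above: GpRegs & ~rmask(rs) keeps the allocator from picking rs as the base register, except in the value == base case where sharing the register is exactly what is wanted. A sketch of the mask helper under the usual one-bit-per-register encoding (the concrete types here are assumptions):

    #include <stdint.h>

    typedef uint32_t RegisterMask;   // assumed width: one bit per register

    // Map a register number to its single-bit mask, so register sets such
    // as GpRegs and FpRegs compose with | and exclude members with & ~.
    static inline RegisterMask rmask(int r) {
        return RegisterMask(1) << r;
    }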
Example #3
    void Assembler::asm_load32(LIns *ins) {
        LIns* base = ins->oprnd1();
        int d = ins->disp();
        Register rr = prepResultReg(ins, GpRegs);
        Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

        switch(ins->opcode()) {
            case LIR_ldzb:
            case LIR_ldcb:
                if (isS16(d)) {
                    LBZ(rr, d, ra);
                } else {
                    LBZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0,d);
                }
                return;
            case LIR_ldzs:
            case LIR_ldcs:
                // these are expected to be 2- or 4-byte aligned
                if (isS16(d)) {
                    LHZ(rr, d, ra);
                } else {
                    LHZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0,d);
                }
                return;
            case LIR_ld:
            case LIR_ldc:
                // these are expected to be 4-byte aligned
                if (isS16(d)) {
                    LWZ(rr, d, ra);
                } else {
                    LWZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0,d);
                }
                return;
            case LIR_ldsb:
            case LIR_ldss:
            case LIR_ldcsb:
            case LIR_ldcss:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                return;
        }
    }
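
The unsupported sign-extending loads trace back to the ISA: PPC has zero-extending byte and halfword loads (lbz, lhz) and a sign-extending halfword load (lha), but no sign-extending byte load, so LIR_ldsb would need an explicit extsb after the load. A hypothetical case for the switch above, assuming an EXTSB emitter macro in the style of the others (not in the original; since the assembler emits code backwards, EXTSB is written first and executes last):

    case LIR_ldsb:
        EXTSB(rr, rr);          // executes last: sign-extend the loaded byte
        if (isS16(d)) {
            LBZ(rr, d, ra);     // zero-extending byte load
        } else {
            LBZX(rr, ra, R0);   // rr = [ra+R0]
            asm_li(R0, d);      // executes first: materialize the offset
        }
        return;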
    /* static */ void RegAlloc::formatRegisters(RegAlloc& regs, char* s, Fragment *frag)
    {
        if (!frag || !frag->lirbuf)
            return;
        LirNameMap *names = frag->lirbuf->names;
        for (int i = 0; i < (LastReg + 1); i++)
        {
            LIns* ins = regs.active[i];
            Register r = (Register)i;
            if (ins && regs.isFree(r))
                { NanoAssertMsg(0, "Coding error; register is both free and active! "); }
            //if (!ins && !regs.isFree(r))
            //    { NanoAssertMsg(0, "Coding error; register is not in the free list when it should be"); }
            if (!ins)
                continue;

            s += strlen(s);
            const char* rname = ins->isQuad() ? fpn(r) : gpn(r);
            sprintf(s, " %s(%s)", rname, names->formatRef(ins));
        }
    }
    void RegAlloc::formatRegisters(char* s, Fragment *frag)
    {
        if (!frag || !frag->lirbuf)
            return;
        LirNameMap *names = frag->lirbuf->names;
        for (Register r = FirstReg; r <= LastReg; r = nextreg(r))
        {
            LIns *ins = getActive(r);
            if (!ins)
                continue;
            NanoAssertMsg(!isFree(r), "Coding error; register is both free and active! " );

            if (ins->isop(LIR_param) && ins->paramKind()==1 && r == Assembler::savedRegs[ins->paramArg()]) {
                // don't print callee-saved regs that aren't used
                continue;
            }

            s += VMPI_strlen(s);
            const char* rname = ins->isQuad() ? fpn(r) : gpn(r);
            VMPI_sprintf(s, " %s(%s)", rname, names->formatRef(ins));
        }
    }
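
The rewritten loop walks Register values directly instead of casting an int index; nextreg is presumably the usual one-step advance through the register enum, along these lines (an assumption, not taken from the original):

    // Advance to the next register in enum order, so the loop visits
    // FirstReg..LastReg inclusive.
    static inline Register nextreg(Register r) {
        return (Register)(r + 1);
    }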