Example No. 1
    void Assembler::asm_load64(LIns *ins) {

        switch (ins->opcode()) {
            case LIR_ldd:
            CASE64(LIR_ldq:)
                // handled by mainline code below for now
                break;
            case LIR_ldf2d:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
                return;
        }

        LIns* base = ins->oprnd1();
    #ifdef NANOJIT_64BIT
        Register rr = ins->deprecated_getReg();
        if (deprecated_isKnownReg(rr) && (rmask(rr) & FpRegs)) {
            // FPR already assigned, fine, use it
            deprecated_freeRsrcOf(ins);
        } else {
            // use a GPR register; it's okay to copy doubles with GPRs,
            // but *not* okay to copy non-doubles with FPRs
            rr = deprecated_prepResultReg(ins, GpRegs);
        }
    #else
        Register rr = deprecated_prepResultReg(ins, FpRegs);
    #endif

        int dr = ins->disp();
        Register ra = getBaseReg(base, dr, GpRegs);

    #ifdef NANOJIT_64BIT
        if (rmask(rr) & GpRegs) {
            #if !PEDANTIC
                if (isS16(dr)) {
                    LD(rr, dr, ra);
                    return;
                }
            #endif
            // general case 64bit GPR load
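            // (nanojit assembles backwards, so the asm_li below lands at a lower
            // address than the LDX and R0 is therefore loaded first at run time)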
            LDX(rr, ra, R0);
            asm_li(R0, dr);
            return;
        }
    #endif

        // FPR
    #if !PEDANTIC
        if (isS16(dr)) {
            LFD(rr, dr, ra);
            return;
        }
    #endif

        // general case FPR load
        LFDX(rr, ra, R0);
        asm_li(R0, dr);
    }
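
Throughout these helpers, isS16 decides between the D-form encoding, whose signed 16-bit displacement is baked into the load itself, and the X-form indexed encoding, which first needs the offset materialized into R0 via asm_li. The predicate is not shown in these excerpts; a minimal sketch, assuming the conventional definition:

    #include <cstdint>

    // Assumed stand-in for nanojit's isS16: true when the displacement
    // survives a round trip through a signed 16-bit field.
    static inline bool isS16(int32_t d) {
        return d == int16_t(d);
    }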
Example No. 2
    void Assembler::asm_store32(LOpcode op, LIns *value, int32_t dr, LIns *base) {

        switch (op) {
            case LIR_sti:
                // handled by mainline code below for now
                break;
            case LIR_stb:
            case LIR_sts:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
                return;
        }

        Register rs = findRegFor(value, GpRegs);
        Register ra = value == base ? rs : getBaseReg(LIR_sti, base, dr, GpRegs & ~rmask(rs));

    #if !PEDANTIC
        if (isS16(dr)) {
            STW(rs, dr, ra);
            return;
        }
    #endif

        // general case store, any offset size
        STWX(rs, ra, R0);
        asm_li(R0, dr);
    }
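
Note the mask arithmetic in the base-register lookup: clearing rmask(rs) out of GpRegs guarantees the allocator cannot hand the value register back as the base when value and base differ. A self-contained illustration (the register numbers and mask value are made up for the example, not nanojit's actual definitions):

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t RegisterMask;                    // one bit per register
    static RegisterMask rmask(int r) { return RegisterMask(1) << r; }

    int main() {
        RegisterMask GpRegs = 0x1ff8;                 // say, r3..r12 are allocatable
        int rs = 5;                                   // GPR already holding the value
        RegisterMask allowed = GpRegs & ~rmask(rs);
        printf("%#x\n", allowed);                     // bit 5 cleared: ra can never alias rs
    }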
Example No. 3
    void Assembler::asm_load32(LIns *ins) {
        LIns* base = ins->oprnd1();
        int d = ins->disp();
        Register rr = prepResultReg(ins, GpRegs);
        Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

        switch(ins->opcode()) {
            case LIR_ldzb:
            case LIR_ldcb:
                if (isS16(d)) {
                    LBZ(rr, d, ra);
                } else {
                    LBZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0,d);
                }
                return;
            case LIR_ldzs:
            case LIR_ldcs:
                // these are expected to be 2- or 4-byte aligned
                if (isS16(d)) {
                    LHZ(rr, d, ra);
                } else {
                    LHZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0,d);
                }
                return;
            case LIR_ld:
            case LIR_ldc:
                // these are expected to be 4-byte aligned
                if (isS16(d)) {
                    LWZ(rr, d, ra);
                } else {
                    LWZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0,d);
                }
                return;
            case LIR_ldsb:
            case LIR_ldss:
            case LIR_ldcsb:
            case LIR_ldcss:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                return;
        }
    }
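
All three instructions used here (LBZ, LHZ, LWZ) zero-extend into the destination GPR, which is why only the zero-extending LIR opcodes are handled and the sign-extending loads (LIR_ldsb and friends) assert. A quick illustration of the difference:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint8_t byte = 0xff;
        int32_t zeroExt = byte;             // what LBZ leaves in rr: 0x000000ff
        int32_t signExt = int8_t(byte);     // what LIR_ldsb would need: 0xffffffff
        printf("%08x %08x\n", unsigned(zeroExt), unsigned(signExt));
    }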
Example No. 4
void Assembler::asm_load64(LIns *ins) {
    LIns* base = ins->oprnd1();
#ifdef NANOJIT_64BIT
    Register rr = ins->getReg();
    if (isKnownReg(rr) && (rmask(rr) & FpRegs)) {
        // FPR already assigned, fine, use it
        freeRsrcOf(ins, false);
    } else {
        // use a GPR register; it's okay to copy doubles with GPRs,
        // but *not* okay to copy non-doubles with FPRs
        rr = prepResultReg(ins, GpRegs);
    }
#else
    Register rr = prepResultReg(ins, FpRegs);
#endif

    int dr = ins->disp();
    Register ra = getBaseReg(ins->opcode(), base, dr, GpRegs);

#ifdef NANOJIT_64BIT
    if (rmask(rr) & GpRegs) {
#if !PEDANTIC
        if (isS16(dr)) {
            LD(rr, dr, ra);
            return;
        }
#endif
        // general case 64bit GPR load
        LDX(rr, ra, R0);
        asm_li(R0, dr);
        return;
    }
#endif

    // FPR
#if !PEDANTIC
    if (isS16(dr)) {
        LFD(rr, dr, ra);
        return;
    }
#endif

    // general case FPR load
    LFDX(rr, ra, R0);
    asm_li(R0, dr);
}
Example No. 5
 void Assembler::asm_li(Register r, int32_t imm) {
 #if !PEDANTIC
     if (isS16(imm)) {
         LI(r, imm);
         return;
     }
     if ((imm & 0xffff) == 0) {
         imm = uint32_t(imm) >> 16;
         LIS(r, imm);
         return;
     }
 #endif
     // general case: build the full 32-bit immediate
     asm_li32(r, imm);
 }
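
The fall-through has to synthesize a full 32-bit constant from two halfwords, conventionally lis for the upper half followed by ori for the lower. A standalone sketch of that split (illustrative only, not the asm_li32 body):

    #include <cstdint>
    #include <cstdio>

    // ori zero-extends its immediate, so the upper half needs no carry
    // adjustment (unlike addi, whose immediate is sign-extended).
    int main() {
        int32_t imm = 0x12345678;
        uint16_t hi = uint16_t(uint32_t(imm) >> 16);
        uint16_t lo = uint16_t(imm & 0xffff);
        printf("lis  r0, 0x%04x\n", unsigned(hi));
        printf("ori  r0, r0, 0x%04x\n", unsigned(lo));
    }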
Example No. 6
void Assembler::asm_store32(LIns *value, int32_t dr, LIns *base) {
    Register rs = findRegFor(value, GpRegs);
    Register ra = value == base ? rs : getBaseReg(LIR_sti, base, dr, GpRegs & ~rmask(rs));

#if !PEDANTIC
    if (isS16(dr)) {
        STW(rs, dr, ra);
        return;
    }
#endif

    // general case store, any offset size
    STWX(rs, ra, R0);
    asm_li(R0, dr);
}
Example No. 7
void Assembler::asm_ld(LIns *ins) {
    LIns* base = ins->oprnd1();
    int d = ins->disp();
    Register rr = prepResultReg(ins, GpRegs);
    Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

#if !PEDANTIC
    if (isS16(d)) {
        if (ins->isop(LIR_ldcb)) {
            LBZ(rr, d, ra);
        } else {
            LWZ(rr, d, ra);
        }
        return;
    }
#endif

    // general case
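    // reserve 12 bytes: LWZX plus a worst-case two-instruction asm_li (lis + ori)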
    underrunProtect(12);
    LWZX(rr, ra, R0); // rr = [ra+R0]
    asm_li(R0,d);
}