Example #1
void Assembler::asm_load64(LIns *ins) {
    LIns* base = ins->oprnd1();
#ifdef NANOJIT_64BIT
    Register rr = ins->getReg();
    if (isKnownReg(rr) && (rmask(rr) & FpRegs)) {
        // FPR already assigned, fine, use it
        freeRsrcOf(ins, false);
    } else {
        // use a GPR register; it's okay to copy doubles with GPRs,
        // but *not* okay to copy non-doubles with FPRs
        rr = prepResultReg(ins, GpRegs);
    }
#else
    Register rr = prepResultReg(ins, FpRegs);
#endif

    int dr = ins->disp();
    Register ra = getBaseReg(ins->opcode(), base, dr, GpRegs);

#ifdef NANOJIT_64BIT
    if (rmask(rr) & GpRegs) {
#if !PEDANTIC
        if (isS16(dr)) {
            LD(rr, dr, ra);
            return;
        }
#endif
        // general case: 64-bit GPR load with a large displacement.
        // NanoJIT emits code backwards, so asm_li is reached second
        // here but executes first: R0 = dr, then rr = [ra+R0].
        LDX(rr, ra, R0);
        asm_li(R0, dr);
        return;
    }
#endif

    // FPR
#if !PEDANTIC
    if (isS16(dr)) {
        LFD(rr, dr, ra);
        return;
    }
#endif

    // general case FPR load (large displacement)
    LFDX(rr, ra, R0);   // rr = [ra+R0]
    asm_li(R0, dr);     // R0 = dr (executes first)
}
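
The isS16 checks above pick between two encodings: if the displacement fits the signed 16-bit immediate field of the D-form loads (LD, LFD), one instruction suffices; otherwise the displacement is materialized into R0 and the indexed X-form (LDX, LFDX) is used. As a point of reference, here is a minimal sketch of the predicate; the exact definition in the backend's headers is an assumption, but it must be equivalent to this:

static inline bool isS16(intptr_t i) {
    // true iff i round-trips through a signed 16-bit integer,
    // i.e. -32768 <= i <= 32767
    return i == (intptr_t)(int16_t)i;
}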
Example #2
    void Assembler::asm_load32(LIns *ins) {
        LIns* base = ins->oprnd1();
        int d = ins->disp();
        Register rr = prepResultReg(ins, GpRegs);
        Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

        switch(ins->opcode()) {
            case LIR_ldzb:
            case LIR_ldcb:
                if (isS16(d)) {
                    LBZ(rr, d, ra);
                } else {
                    LBZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0, d);
                }
                return;
            case LIR_ldzs:
            case LIR_ldcs:
                // these are expected to be 2 or 4-byte aligned
                if (isS16(d)) {
                    LHZ(rr, d, ra);
                } else {
                    LHZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0, d);
                }
                return;
            case LIR_ld:
            case LIR_ldc:
                // these are expected to be 4-byte aligned
                if (isS16(d)) {
                    LWZ(rr, d, ra);
                } else {
                    LWZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0, d);
                }
                return;
            case LIR_ldsb:
            case LIR_ldss:
            case LIR_ldcsb:
            case LIR_ldcss:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                return;
        }
    }
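
The sign-extending cases that assert above could plausibly reuse the same displacement pattern. The sketch below is hypothetical: it assumes EXTSB, LHA, and LHAX emitters exist alongside LBZ/LHZ (all three are standard PPC opcodes), and relies on the fact that lha sign-extends halfwords on load, while bytes need a separate extsb after the zero-extending lbz. Since instructions are emitted backwards, EXTSB appears first but executes last:

            case LIR_ldsb:
            case LIR_ldcsb:
                EXTSB(rr, rr);        // sign-extend the byte; emitted first,
                                      // executes *after* the load below
                if (isS16(d)) {
                    LBZ(rr, d, ra);
                } else {
                    LBZX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0, d);
                }
                return;
            case LIR_ldss:
            case LIR_ldcss:
                if (isS16(d)) {
                    LHA(rr, d, ra);   // lha sign-extends the halfword
                } else {
                    LHAX(rr, ra, R0); // rr = [ra+R0]
                    asm_li(R0, d);
                }
                return;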
Example #3
    void Assembler::asm_call(LInsp ins)
    {
        Register retReg = ( ins->isop(LIR_fcall) ? F0 : retRegs[0] );
        prepResultReg(ins, rmask(retReg));

        // Do this after we've handled the call result, so we don't
        // force the call result to be spilled unnecessarily.
        evictScratchRegs();

        const CallInfo* call = ins->callInfo();

        underrunProtect(8);
        NOP();

        ArgSize sizes[MAXARGS];
        uint32_t argc = call->get_sizes(sizes);

        NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
        verbose_only(if (_logc->lcbits & LC_Assembly)
                     outputf("        %p:", _nIns);
                     )
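
This snippet breaks off before the arguments are marshalled, but the underrunProtect(8)/NOP() pair deserves a note. The register names (F0, retRegs) suggest the SPARC backend, and on that reading the following is a plausible account, not a confirmed one: the assembler emits backwards, so the 8 reserved bytes cover the CALL plus the NOP that fills its branch delay slot, giving a runtime sequence like:

        // runtime order (sketch; the CALL itself would be emitted
        // further down in asm_call, past where the snippet ends):
        //   call <target>
        //   nop            <- the NOP() above, in the delay slot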
Example #4
void Assembler::asm_ld(LIns *ins) {
    LIns* base = ins->oprnd1();
    int d = ins->disp();
    Register rr = prepResultReg(ins, GpRegs);
    Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

#if !PEDANTIC
    if (isS16(d)) {
        if (ins->isop(LIR_ldcb)) {
            LBZ(rr, d, ra);
        } else {
            LWZ(rr, d, ra);
        }
        return;
    }
#endif

    // general case: the displacement doesn't fit in 16 bits, so
    // reserve room for LWZX plus the 1-2 instructions of asm_li
    underrunProtect(12);
    LWZX(rr, ra, R0); // rr = [ra+R0]
    asm_li(R0, d);    // R0 = d (executes first)
}
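
The underrunProtect(12) above sizes the reservation for the worst case: LWZX (4 bytes) plus up to two immediate-materialization instructions (8 bytes). Below is a minimal sketch of what asm_li plausibly emits for a 32-bit immediate, assuming the usual PPC lis/ori idiom; the LI/LIS/ORI emitter names and exact structure are assumptions, though the backwards emission order (ORI written first, executed last) matches how the rest of these examples work:

void Assembler::asm_li(Register r, int32_t imm) {
    if (imm >= -32768 && imm < 32768) {
        LI(r, imm);                   // one instruction: addi r, 0, imm
        return;
    }
    // two instructions, runtime order: lis r, hi16 ; ori r, r, lo16
    if (uint16_t(imm) != 0)
        ORI(r, r, uint16_t(imm));     // low half (executes second)
    LIS(r, imm >> 16);                // high half (executes first)
}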
Example #5
    void Assembler::asm_load64(LIns *ins) {

        switch (ins->opcode()) {
            case LIR_ldq:
            case LIR_ldqc:
                // handled by mainline code below for now
                break;
            case LIR_ld32f:
            case LIR_ldc32f:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
                return;
        }

        LIns* base = ins->oprnd1();
    #ifdef NANOJIT_64BIT
        Register rr = ins->getReg();
        if (isKnownReg(rr) && (rmask(rr) & FpRegs)) {
            // FPR already assigned, fine, use it
            freeRsrcOf(ins, false);
        } else {
            // use a GPR register; it's okay to copy doubles with GPRs,
            // but *not* okay to copy non-doubles with FPRs
            rr = prepResultReg(ins, GpRegs);
        }
    #else
        Register rr = prepResultReg(ins, FpRegs);
    #endif

        int dr = ins->disp();
        Register ra = getBaseReg(ins->opcode(), base, dr, GpRegs);

    #ifdef NANOJIT_64BIT
        if (rmask(rr) & GpRegs) {
            #if !PEDANTIC
                if (isS16(dr)) {
                    LD(rr, dr, ra);
                    return;
                }
            #endif
            // general case 64bit GPR load
            LDX(rr, ra, R0);
            asm_li(R0, dr);
            return;
        }
    #endif

        // FPR
    #if !PEDANTIC
        if (isS16(dr)) {
            LFD(rr, dr, ra);
            return;
        }
    #endif

        // general case FPR load (large displacement)
        LFDX(rr, ra, R0);   // rr = [ra+R0]
        asm_li(R0, dr);     // R0 = dr (executes first)
    }
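
The LIR_ld32f/LIR_ldc32f cases rejected at the top of this function have a natural PPC implementation, because lfs widens single precision to double as part of the load. The helper below is hypothetical, assuming LFS/LFSX emitters exist; it simply transplants the displacement pattern from the double-precision path:

    // hypothetical helper for the missing LIR_ld32f/LIR_ldc32f cases
    void Assembler::asm_load32f(LIns *ins) {
        Register rr = prepResultReg(ins, FpRegs);
        int dr = ins->disp();
        Register ra = getBaseReg(ins->opcode(), ins->oprnd1(), dr, GpRegs);
        if (isS16(dr)) {
            LFS(rr, dr, ra);     // lfs converts single to double on load
        } else {
            LFSX(rr, ra, R0);    // indexed form for large displacements
            asm_li(R0, dr);      // executes first (backwards emission)
        }
    }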