void Assembler::asm_load64(LIns *ins) {

    switch (ins->opcode()) {
        case LIR_ldq:
        case LIR_ldqc:
            // handled by mainline code below for now
            break;
        case LIR_ld32f:
        case LIR_ldc32f:
            NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
            return;
        default:
            NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
            return;
    }

    LIns* base = ins->oprnd1();
#ifdef NANOJIT_64BIT
    Register rr = ins->getReg();
    if (isKnownReg(rr) && (rmask(rr) & FpRegs)) {
        // FPR already assigned, fine, use it
        freeRsrcOf(ins, false);
    } else {
        // Use a GPR register; it's okay to copy doubles with GPRs,
        // but *not* okay to copy non-doubles with FPRs.
        rr = prepResultReg(ins, GpRegs);
    }
#else
    Register rr = prepResultReg(ins, FpRegs);
#endif

    int dr = ins->disp();
    Register ra = getBaseReg(ins->opcode(), base, dr, GpRegs);

#ifdef NANOJIT_64BIT
    if (rmask(rr) & GpRegs) {
        #if !PEDANTIC
        if (isS16(dr)) {
            LD(rr, dr, ra);
            return;
        }
        #endif
        // General case: 64-bit GPR load. The assembler emits code
        // backwards, so although LDX appears first here, it lands *after*
        // asm_li in the final instruction stream: R0 is loaded with the
        // displacement first, then used as the index register.
        LDX(rr, ra, R0);
        asm_li(R0, dr);
        return;
    }
#endif

    // FPR
    #if !PEDANTIC
    if (isS16(dr)) {
        LFD(rr, dr, ra);
        return;
    }
    #endif

    // General case FPR load; same backwards-emission pattern as above.
    LFDX(rr, ra, R0);
    asm_li(R0, dr);
}
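// For context (a sketch, not part of this file): the fast paths above rely
// on isS16() to decide whether the displacement fits the signed 16-bit
// immediate field of the PPC D-form load encodings used by LD/LFD; any wider
// offset must be materialized in R0 and loaded via the indexed X-form
// (LDX/LFDX). A minimal stand-in with the assumed semantics would be:
//
//     static inline bool isS16_sketch(intptr_t d) {
//         // true iff d survives truncation to a signed 16-bit value,
//         // i.e. d is in [-32768, 32767]
//         return int16_t(d) == d;
//     }
//
// PEDANTIC builds compile the fast paths out entirely, so every load takes
// the general X-form route regardless of displacement size.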