// Emit a 64-bit load for `ins`.
//
// Handles LIR_ldd (and LIR_ldq on 64-bit builds) via the mainline path
// below; LIR_ldf2d (float -> double widening load) is asserted as
// unimplemented for this architecture.  Note this assembler emits code
// backwards, so the LD/LFD appears before the asm_li that materializes
// a large displacement.
void Assembler::asm_load64(LIns *ins) {
    // Screen the opcode first: only plain 64-bit loads reach the
    // code generation below.
    switch (ins->opcode()) {
        case LIR_ldd:
        CASE64(LIR_ldq:)
            // handled by mainline code below for now
            break;
        case LIR_ldf2d:
            NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
            return;
        default:
            NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
            return;
    }

    LIns* baseIns = ins->oprnd1();

#ifdef NANOJIT_64BIT
    Register destReg = ins->deprecated_getReg();
    if (deprecated_isKnownReg(destReg) && (rmask(destReg) & FpRegs)) {
        // An FPR was already assigned to this value -- keep it.
        deprecated_freeRsrcOf(ins);
    } else {
        // Otherwise pick a GPR: doubles may be copied through GPRs,
        // but non-double data must *not* travel through FPRs.
        destReg = deprecated_prepResultReg(ins, GpRegs);
    }
#else
    // 32-bit build: the 64-bit value must land in an FPR.
    Register destReg = deprecated_prepResultReg(ins, FpRegs);
#endif

    int disp = ins->disp();
    Register baseReg = getBaseReg(baseIns, disp, GpRegs);

#ifdef NANOJIT_64BIT
    if (rmask(destReg) & GpRegs) {
#if !PEDANTIC
        // Fast path: displacement fits in the 16-bit signed immediate.
        if (isS16(disp)) {
            LD(destReg, disp, baseReg);
            return;
        }
#endif
        // General case: load the displacement into R0 and use the
        // indexed form.  (Remember: emitted backwards -- LDX then asm_li.)
        LDX(destReg, baseReg, R0);
        asm_li(R0, disp);
        return;
    }
#endif

    // FPR destination.
#if !PEDANTIC
    if (isS16(disp)) {
        LFD(destReg, disp, baseReg);
        return;
    }
#endif
    // General-case FPR load with the displacement materialized in R0.
    LFDX(destReg, baseReg, R0);
    asm_li(R0, disp);
}
void PPCXEmitter::Epilogue() { u32 regSize = 8; // 4 in 32bit system u32 stackFrameSize = 0x1F0; //Break(); // Write Epilogue (restore stack frame, return) // free stack ADDI(R1, R1, stackFrameSize); #if 0 ADDI(R12, R1, -0x98); // Restore fpr for(int i = 14; i < 32; i ++) { LFD((PPCReg)i, R1, -((32 - i) * regSize)); } #endif // Restore gpr for(int i = 14; i < 32; i ++) { LD((PPCReg)i, R1, -((33 - i) * regSize)); } // recover r12 (LR saved register) LWZ (R12, R1, -0x8); // Restore Lr MTLR(R12); #if 1 // load fpr buff MOVI2R(R5, (u32)&_fprTmp); // Load fpr for(int i = 14; i < 32; i ++) { LFD((PPCReg)i, R5, i * regSize); } #endif }
// Emit a 64-bit load for `ins`.
//
// On 64-bit builds the result may live in either a GPR or an FPR; on
// 32-bit builds it must be an FPR.  Displacements that fit in a signed
// 16-bit immediate use the D-form load; otherwise the displacement is
// materialized in R0 and the indexed (X-form) load is used.  This
// assembler emits backwards, so the load instruction is written before
// the asm_li that fills R0.
void Assembler::asm_load64(LIns *ins) {
    LIns* baseIns = ins->oprnd1();

#ifdef NANOJIT_64BIT
    Register destReg = ins->getReg();
    if (isKnownReg(destReg) && (rmask(destReg) & FpRegs)) {
        // An FPR is already assigned to this value -- keep using it.
        freeRsrcOf(ins, false);
    } else {
        // Otherwise take a GPR: doubles may be copied through GPRs,
        // but non-double data must *not* travel through FPRs.
        destReg = prepResultReg(ins, GpRegs);
    }
#else
    // 32-bit build: the 64-bit value must land in an FPR.
    Register destReg = prepResultReg(ins, FpRegs);
#endif

    int disp = ins->disp();
    Register baseReg = getBaseReg(ins->opcode(), baseIns, disp, GpRegs);

#ifdef NANOJIT_64BIT
    if (rmask(destReg) & GpRegs) {
#if !PEDANTIC
        // Fast path: displacement fits the 16-bit signed immediate.
        if (isS16(disp)) {
            LD(destReg, disp, baseReg);
            return;
        }
#endif
        // General case 64-bit GPR load (indexed form; R0 holds disp).
        LDX(destReg, baseReg, R0);
        asm_li(R0, disp);
        return;
    }
#endif

    // FPR destination.
#if !PEDANTIC
    if (isS16(disp)) {
        LFD(destReg, disp, baseReg);
        return;
    }
#endif
    // General-case FPR load with the displacement materialized in R0.
    LFDX(destReg, baseReg, R0);
    asm_li(R0, disp);
}