void Assembler::asm_load64(LIns *ins) {
    switch (ins->opcode()) {
        case LIR_ldd:
        CASE64(LIR_ldq:)
            // handled by mainline code below for now
            break;
        case LIR_ldf2d:
            NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
            return;
        default:
            NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
            return;
    }

    LIns* base = ins->oprnd1();
#ifdef NANOJIT_64BIT
    Register rr = ins->deprecated_getReg();
    if (deprecated_isKnownReg(rr) && (rmask(rr) & FpRegs)) {
        // FPR already assigned, fine, use it
        deprecated_freeRsrcOf(ins);
    } else {
        // use a GPR register; it's okay to copy doubles with GPRs,
        // but *not* okay to copy non-doubles with FPRs
        rr = deprecated_prepResultReg(ins, GpRegs);
    }
#else
    Register rr = deprecated_prepResultReg(ins, FpRegs);
#endif

    int dr = ins->disp();
    Register ra = getBaseReg(base, dr, GpRegs);

#ifdef NANOJIT_64BIT
    if (rmask(rr) & GpRegs) {
    #if !PEDANTIC
        if (isS16(dr)) {
            // displacement fits in a signed 16-bit immediate
            LD(rr, dr, ra);
            return;
        }
    #endif
        // general case 64bit GPR load
        // (code is emitted in reverse, so R0 holds dr before the LDX executes)
        LDX(rr, ra, R0);
        asm_li(R0, dr);
        return;
    }
#endif

    // FPR
#if !PEDANTIC
    if (isS16(dr)) {
        LFD(rr, dr, ra);
        return;
    }
#endif

    // general case FPR load
    LFDX(rr, ra, R0);
    asm_li(R0, dr);
}
void Assembler::asm_call(LInsp ins) {
    // PPC ABI: floating-point results come back in f1
    Register retReg = ( ins->isop(LIR_fcall) ? F1 : retRegs[0] );
    deprecated_prepResultReg(ins, rmask(retReg));

    // Do this after we've handled the call result, so we don't
    // force the call result to be spilled unnecessarily.
    evictScratchRegs();

    const CallInfo* call = ins->callInfo();

    underrunProtect(8);
    NOP();

    ArgSize sizes[MAXARGS];
    uint32_t argc = call->get_sizes(sizes);

    NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));

    verbose_only(if (_logc->lcbits & LC_Assembly)
        outputf(" %p:", _nIns);
    )