// Emit the SPARC function epilogue and return a pointer to the start of the
// code emitted so far.
//
// NB: nanojit emits machine code backwards (each macro writes the instruction
// at a lower address and `return _nIns` hands back the lowest one), so the
// runtime order is the REVERSE of the source order below:
//   or   %o0, 0, %i0        ; copy the result into %i0 -- after `restore`
//                           ;   unwinds the register window the caller sees
//                           ;   this value in its %o0
//   jmpl %i7+8, %g0         ; return to caller
//   restore %g0, %g0, %g0   ; pop the register window (executes in the
//                           ;   branch delay slot of the jmpl)
NIns *Assembler::genEpilogue() {
    underrunProtect(12);    // reserve room for the 3 instructions (12 bytes)
    RESTORE(G0, G0, G0);    //restore
    JMPLI(I7, 8, G0);       //ret
    ORI(O0, 0, I0);         // move call result from %o0 into %i0
    return _nIns;
}
// Emit the call sequence for a LIR_pcall / LIR_fcall instruction.
//
// NOTE(review): this chunk is truncated -- the function has no closing brace
// here; the remainder of asm_call (argument marshalling and the call emission
// itself) lives outside the visible portion of this file.
void Assembler::asm_call(LInsp ins) {
    // Float calls return in F0; pointer/integer calls in the first GP
    // return register.
    Register retReg = ( ins->isop(LIR_fcall) ? F0 : retRegs[0] );
    prepResultReg(ins, rmask(retReg));
    // Do this after we've handled the call result, so we don't
    // force the call result to be spilled unnecessarily.
    evictScratchRegs();
    const CallInfo* call = ins->callInfo();
    underrunProtect(8);
    // Code is emitted backwards, so this NOP lands after the call in memory --
    // presumably it fills the call's delay slot; confirm against the part of
    // asm_call not visible in this chunk.
    NOP();
    ArgSize sizes[MAXARGS];
    uint32_t argc = call->get_sizes(sizes);
    // Only these two call opcodes are expected to reach this backend.
    NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
    verbose_only(if (_logc->lcbits & LC_Assembly) outputf(" %p:", _nIns); )
// Emit a load for `ins`: a 32-bit word load, or -- for LIR_ldcb -- a
// zero-extending byte load, from [base + disp] into the result register.
//
// BUGFIX: the general (large-displacement) case previously emitted LWZX
// unconditionally, so a LIR_ldcb whose displacement did not fit in 16 bits
// loaded a full word instead of a zero-extended byte. It now mirrors the
// fast path and emits the indexed byte load (lbzx) for LIR_ldcb.
void Assembler::asm_ld(LIns *ins) {
    LIns* base = ins->oprnd1();
    int d = ins->disp();
    Register rr = prepResultReg(ins, GpRegs);
    Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

#if !PEDANTIC
    // Fast path: displacement fits the signed 16-bit immediate field of the
    // d-form load instructions.
    if (isS16(d)) {
        if (ins->isop(LIR_ldcb)) {
            LBZ(rr, d, ra);     // zero-extending byte load
        } else {
            LWZ(rr, d, ra);     // word load
        }
        return;
    }
#endif

    // General case: materialize the displacement in R0 and use an indexed
    // (x-form) load. Code is emitted backwards, so at runtime asm_li's
    // sequence executes before the load.
    underrunProtect(12);
    if (ins->isop(LIR_ldcb)) {
        LBZX(rr, ra, R0);       // rr = zero-extended byte at [ra+R0]
    } else {
        LWZX(rr, ra, R0);       // rr = word at [ra+R0]
    }
    asm_li(R0, d);
}