void Assembler::asm_load32(LIns *ins) {
    LIns* base = ins->oprnd1();
    int d = ins->disp();
    Register rr = prepResultReg(ins, GpRegs);
    Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

    switch (ins->opcode()) {
        case LIR_ldzb:
        case LIR_ldcb:
            if (isS16(d)) {
                LBZ(rr, d, ra);
            } else {
                LBZX(rr, ra, R0); // rr = [ra+R0]
                asm_li(R0, d);
            }
            return;
        case LIR_ldzs:
        case LIR_ldcs:
            // these are expected to be 2 or 4-byte aligned
            if (isS16(d)) {
                LHZ(rr, d, ra);
            } else {
                LHZX(rr, ra, R0); // rr = [ra+R0]
                asm_li(R0, d);
            }
            return;
        case LIR_ld:
        case LIR_ldc:
            // these are expected to be 4-byte aligned
            if (isS16(d)) {
                LWZ(rr, d, ra);
            } else {
                LWZX(rr, ra, R0); // rr = [ra+R0]
                asm_li(R0, d);
            }
            return;
        case LIR_ldsb:
        case LIR_ldss:
        case LIR_ldcsb:
        case LIR_ldcss:
            NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
            return;
        default:
            NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
            return;
    }
}
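// Note (added for illustration; not part of the original backend): nanojit
// emits machine code backwards, so the asm_li(R0, d) calls above appear after
// the indexed loads in source order but execute before them. For a
// displacement that does not fit the signed 16-bit D-field, the LIR_ld path
// therefore produces, in execution order, something along the lines of:
//
//     lis   r0, hi16(d)      ; asm_li(R0, d): build the 32-bit displacement
//     ori   r0, r0, lo16(d)  ; (one or two instructions, depending on d)
//     lwzx  rr, ra, r0       ; rr = [ra + d]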
void Assembler::asm_ld(LIns *ins) {
    LIns* base = ins->oprnd1();
    int d = ins->disp();
    Register rr = prepResultReg(ins, GpRegs);
    Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);

#if !PEDANTIC
    if (isS16(d)) {
        if (ins->isop(LIR_ldcb)) {
            LBZ(rr, d, ra);
        } else {
            LWZ(rr, d, ra);
        }
        return;
    }
#endif

    // general case: reserve 12 bytes so the worst-case sequence
    // (lwzx plus up to two instructions from asm_li) stays contiguous
    underrunProtect(12);
    LWZX(rr, ra, R0); // rr = [ra+R0]
    asm_li(R0, d);
}