Encoding *RegisterAllocator::movq(OperandMMREG r64, OperandMM64 r_m64)
{
	if(r_m64.isSubtypeOf(Operand::OPERAND_MMREG))
	{
		return movq(r64, (OperandMMREG)r_m64);
	}
	else
	{
		return movq(r64, (OperandMEM64)r_m64);
	}
}
template<class _mm> void TimgFilterNoiseMplayer::Tprocess::lineNoise_simd(uint8_t *dst, const uint8_t *src, int8_t *noise, int len, int shift)
{
    if (_mm::align && (intptr_t(src)&15 || intptr_t(dst)&15)) {
        lineNoise_simd<typename _mm::T64>(dst, src, noise, len, shift);
        return;
    }
    int mmx_len = len & (~(_mm::size-1));
    noise += shift;
    typename _mm::__m mm7;
    pcmpeqb(mm7, mm7);              // mm7 = all ones
    psllw(mm7, 15);                 // each word = 0x8000
    packsswb(mm7, mm7);             // each byte = 0x80 (sign-bias constant)
    for (int x = -mmx_len; x < 0; x += _mm::size) {
        //".balign 16 \n\t"
        typename _mm::__m mm0, mm1;
        movq(mm0, src+mmx_len+x);
        movdqu(mm1, noise+mmx_len+x); //SSE3
        pxor(mm0, mm7);             // bias pixels into the signed domain
        paddsb(mm0, mm1);           // add noise with signed saturation
        pxor(mm0, mm7);             // bias back to unsigned
        _mm::movntq(dst+mmx_len+x, mm0);
    }
    if (mmx_len != len) {
        lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
    }
}
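// The loop above uses the classic bias/saturate/unbias trick: pixels are XORed with 0x80 into
// the signed domain, added to the noise with paddsb's signed saturation, then XORed back.
// Below is a scalar sketch of the same computation, roughly what the lineNoise_C fallback is
// assumed to compute (illustration only, not the ffdshow implementation).
static void lineNoise_scalar(uint8_t *dst, const uint8_t *src, const int8_t *noise, int len, int shift)
{
    noise += shift;
    for (int i = 0; i < len; i++) {
        int v = int(int8_t(src[i] ^ 0x80)) + noise[i]; // bias into the signed domain
        if (v < -128) v = -128;                        // paddsb saturation
        if (v >  127) v =  127;
        dst[i] = uint8_t(v) ^ 0x80;                    // bias back to unsigned
    }
}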
Encoding *RegisterAllocator::spill64(int i)
{
	// Register loaded but not used, eliminate load and don't spill
	if(MMX[i].loadInstruction && loadElimination)
	{
		MMX[i].loadInstruction->reserve();
		MMX[i].loadInstruction = 0;
		MMX[i].reference = 0;
		MMX[i].priority = 0;
		MMX[i].partial = 0;
		MMX[i].copyInstruction = 0;
	//	MMX[i].spillInstruction = 0;   // NOTE: Keep previous spill info

		return 0;
	}

	Encoding *spillInstruction = 0;

	if(MMX[i].reference != 0 && (MMX[i].modified || !dropUnmodified))
	{
		spillInstruction = movq(qword_ptr [MMX[i].reference], OperandMMREG(i));
	}

	MMX[i].free();

	return spillInstruction;
}
template<class _mm> void TimgFilterNoiseMplayer::Tprocess::lineNoiseAvg_simd(uint8_t *dst, const uint8_t *src, int len, int8_t **shift_)
{
    if (_mm::align && (intptr_t(src)&15 || intptr_t(dst)&15)) {
        lineNoiseAvg_simd<typename _mm::T64>(dst, src, len, shift_);
        return;
    }
    const int mmx_len = len & (~(_mm::size-1));
    int8_t *shift2[3] = {shift_[0]+mmx_len, shift_[1]+mmx_len, shift_[2]+mmx_len};
    for (int x = -mmx_len; x < 0; x += _mm::size) {
        //".balign 16 \n\t"
        typename _mm::__m mm0, mm1, mm2, mm3;
        movdqu(mm1, shift2[0]+mmx_len+x);
        movq(mm0, src+mmx_len+x);
        typename _mm::__m shift1_8;
        movVqu(shift1_8, shift2[1]+mmx_len+x);
        paddb(mm1, shift1_8);
        typename _mm::__m shift2_8;
        movVqu(shift2_8, shift2[2]+mmx_len+x);
        paddb(mm1, shift2_8);
        movq(mm2, mm0);
        movq(mm3, mm1);
        punpcklbw(mm0, mm0);
        punpckhbw(mm2, mm2);
        punpcklbw(mm1, mm1);
        punpckhbw(mm3, mm3);
        pmulhw(mm1, mm0);
        pmulhw(mm3, mm2);
        paddw(mm1, mm1);
        paddw(mm3, mm3);
        paddw(mm1, mm0);
        paddw(mm3, mm2);
        psrlw(mm1, 8);
        psrlw(mm3, 8);
        packuswb(mm1, mm3);
        movq(dst+mmx_len+x, mm1);
    }
    if (mmx_len != len) {
        lineNoiseAvg_C(dst+mmx_len, src+mmx_len, len-mmx_len, shift2);
    }
}
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
  // generate code to handle arguments
  iterate(fingerprint);

  // return result handler
  __ movq(rax, (int64_t) AbstractInterpreter::result_handler(method()->result_type()));
  __ ret(0);

  __ flush();
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
  const Address src(from(), -(offset() + 1) * wordSize);

#ifdef _WIN64
  if (_num_args < Argument::n_float_register_parameters-1) {
    __ movlpd(as_FloatRegister(++_num_args), src);
  } else {
    __ movq(rax, src);
    __ movq(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#else
  if (_num_fp_args < 8) {
    __ movlpd(as_FloatRegister(_num_fp_args++), src);
  } else {
    __ movq(rax, src);
    __ movq(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#endif
}
void GSDrawScanlineCodeGenerator::WritePixel(const Xmm& src, const Xmm& temp, const Reg32& addr, const Reg8& mask, bool fast, int psm) { if(fast) { // if(fzm & 0x0f) GSVector4i::storel(&vm16[addr + 0], fs); // if(fzm & 0xf0) GSVector4i::storeh(&vm16[addr + 8], fs); test(mask, 0x0f); je("@f"); movq(qword[addr * 2 + (size_t)m_env.vm], src); L("@@"); test(mask, 0xf0); je("@f"); movhps(qword[addr * 2 + (size_t)m_env.vm + 8 * 2], src); L("@@"); } else { // if(fzm & 0x03) WritePixel(fpsm, &vm16[addr + 0], fs.extract32<0>()); // if(fzm & 0x0c) WritePixel(fpsm, &vm16[addr + 2], fs.extract32<1>()); // if(fzm & 0x30) WritePixel(fpsm, &vm16[addr + 8], fs.extract32<2>()); // if(fzm & 0xc0) WritePixel(fpsm, &vm16[addr + 10], fs.extract32<3>()); test(mask, 0x03); je("@f"); WritePixel(src, temp, addr, 0, psm); L("@@"); test(mask, 0x0c); je("@f"); WritePixel(src, temp, addr, 1, psm); L("@@"); test(mask, 0x30); je("@f"); WritePixel(src, temp, addr, 2, psm); L("@@"); test(mask, 0xc0); je("@f"); WritePixel(src, temp, addr, 3, psm); L("@@"); } }
// void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
address generate_getPsrInfo() {
  StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
# define __ _masm->

  address start = __ pc();

  // rbx is callee-save on both unix and windows
  // rcx and rdx are first and second argument registers on windows
  __ pushq(rbx);
  __ movq(r8, rarg0);

  __ xorl(rax, rax);
  __ cpuid();
  __ leaq(r9, Address(r8, in_bytes(VM_Version::std_cpuid0_offset())));
  __ movl(Address(r9, 0), rax);
  __ movl(Address(r9, 4), rbx);
  __ movl(Address(r9, 8), rcx);
  __ movl(Address(r9, 12), rdx);

  __ movl(rax, 1);
  __ cpuid();
  __ leaq(r9, Address(r8, in_bytes(VM_Version::std_cpuid1_offset())));
  __ movl(Address(r9, 0), rax);
  __ movl(Address(r9, 4), rbx);
  __ movl(Address(r9, 8), rcx);
  __ movl(Address(r9, 12), rdx);

  __ movl(rax, 0x80000001);
  __ cpuid();
  __ leaq(r9, Address(r8, in_bytes(VM_Version::ext_cpuid1_offset())));
  __ movl(Address(r9, 0), rax);
  __ movl(Address(r9, 4), rbx);
  __ movl(Address(r9, 8), rcx);
  __ movl(Address(r9, 12), rdx);

  __ popq(rbx);
  __ ret(0);

  return start;
}
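// The stub above captures three CPUID leaves into the CpuidInfo block handed in through rarg0:
// standard leaf 0 (vendor string), standard leaf 1 (family/model plus feature flags in ecx/edx)
// and extended leaf 0x80000001. Outside generated code the same data can be read with compiler
// intrinsics; this is a hedged, standalone sketch (hypothetical helper, not HotSpot code).
#if defined(_MSC_VER)
#include <intrin.h>
#else
#include <cpuid.h>
#endif

struct CpuidLeaf { unsigned int eax, ebx, ecx, edx; };

static CpuidLeaf query_cpuid_leaf(unsigned int leaf)
{
    CpuidLeaf r = { 0, 0, 0, 0 };
#if defined(_MSC_VER)
    int regs[4];
    __cpuid(regs, (int)leaf);
    r.eax = (unsigned int)regs[0]; r.ebx = (unsigned int)regs[1];
    r.ecx = (unsigned int)regs[2]; r.edx = (unsigned int)regs[3];
#else
    __get_cpuid(leaf, &r.eax, &r.ebx, &r.ecx, &r.edx);
#endif
    return r;
}
// Usage: query_cpuid_leaf(0), query_cpuid_leaf(1) and query_cpuid_leaf(0x80000001u) mirror the
// three cpuid invocations emitted by the stub.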
void Assembler::emitBatchPop(int scratch_rbp_offset, int scratch_size, const std::vector<GenericRegister>& to_push) {
    int offset = 0;
    for (const GenericRegister& r : to_push) {
        assert(scratch_size >= offset + 8);

        Indirect next_slot(RBP, offset + scratch_rbp_offset);

        if (r.type == GenericRegister::GP) {
            Register gp = r.gp;
            assert(gp.regnum >= 0 && gp.regnum < 16);
            movq(next_slot, gp);
            offset += 8;
        } else if (r.type == GenericRegister::XMM) {
            XMMRegister reg = r.xmm;
            movsd(next_slot, reg);
            offset += 8;
        } else {
            RELEASE_ASSERT(0, "%d", r.type);
        }
    }
}
OperandMMREG RegisterAllocator::allocate64(int i, const OperandREF &ref, bool copy)
{
	MMX[i].reference = ref;
	prioritize64(i);

	Encoding *loadInstruction = 0;
	Encoding *spillInstruction = MMX[i].spillInstruction;
	AllocationData spillAllocation = MMX[i].spill;

	if(copy)
	{
		loadInstruction = movq(OperandMMREG(i), qword_ptr [ref]);
	}

	MMX[i].loadInstruction = loadInstruction;
	MMX[i].spillInstruction = spillInstruction;
	MMX[i].spill = spillAllocation;
	MMX[i].modified = false;

	return OperandMMREG(i);
}
void GPUDrawScanlineCodeGenerator::Generate() { push(esi); push(edi); Init(); align(16); L("loop"); // GSVector4i test = m_test[7 + (steps & (steps >> 31))]; mov(edx, ecx); sar(edx, 31); and(edx, ecx); shl(edx, 4); movdqa(xmm7, ptr[edx + (size_t)&m_test[7]]); // movdqu(xmm1, ptr[edi]); movq(xmm1, qword[edi]); movhps(xmm1, qword[edi + 8]); // ecx = steps // esi = tex (tme) // edi = fb // xmm1 = fd // xmm2 = s // xmm3 = t // xmm4 = r // xmm5 = g // xmm6 = b // xmm7 = test TestMask(); SampleTexture(); // xmm1 = fd // xmm3 = a // xmm4 = r // xmm5 = g // xmm6 = b // xmm7 = test // xmm0, xmm2 = free ColorTFX(); AlphaBlend(); Dither(); WriteFrame(); L("step"); // if(steps <= 0) break; test(ecx, ecx); jle("exit", T_NEAR); Step(); jmp("loop", T_NEAR); L("exit"); pop(edi); pop(esi); ret(8); }
void MacroAssembler::fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register eax, Register ecx, Register edx, Register tmp1, Register tmp2) { Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2; Label L_2TAG_PACKET_8_0_2; Label L_2TAG_PACKET_12_0_2, L_2TAG_PACKET_13_0_2, B1_3, B1_5, start; assert_different_registers(tmp1, tmp2, eax, ecx, edx); jmp(start); address L_tbl = (address)_L_tbl; address log2 = (address)_log2; address coeff = (address)_coeff; bind(start); subq(rsp, 24); movsd(Address(rsp, 0), xmm0); mov64(rax, 0x3ff0000000000000); movdq(xmm2, rax); mov64(rdx, 0x77f0000000000000); movdq(xmm3, rdx); movl(ecx, 32768); movdl(xmm4, rcx); mov64(tmp1, 0xffffe00000000000); movdq(xmm5, tmp1); movdqu(xmm1, xmm0); pextrw(eax, xmm0, 3); por(xmm0, xmm2); movl(ecx, 16352); psrlq(xmm0, 27); lea(tmp2, ExternalAddress(L_tbl)); psrld(xmm0, 2); rcpps(xmm0, xmm0); psllq(xmm1, 12); pshufd(xmm6, xmm5, 228); psrlq(xmm1, 12); subl(eax, 16); cmpl(eax, 32736); jcc(Assembler::aboveEqual, L_2TAG_PACKET_0_0_2); bind(L_2TAG_PACKET_1_0_2); paddd(xmm0, xmm4); por(xmm1, xmm3); movdl(edx, xmm0); psllq(xmm0, 29); pand(xmm5, xmm1); pand(xmm0, xmm6); subsd(xmm1, xmm5); mulpd(xmm5, xmm0); andl(eax, 32752); subl(eax, ecx); cvtsi2sdl(xmm7, eax); mulsd(xmm1, xmm0); movq(xmm6, ExternalAddress(log2)); // 0xfefa3800UL, 0x3fa62e42UL movdqu(xmm3, ExternalAddress(coeff)); // 0x92492492UL, 0x3fc24924UL, 0x00000000UL, 0xbfd00000UL subsd(xmm5, xmm2); andl(edx, 16711680); shrl(edx, 12); movdqu(xmm0, Address(tmp2, edx)); movdqu(xmm4, ExternalAddress(16 + coeff)); // 0x3d6fb175UL, 0xbfc5555eUL, 0x55555555UL, 0x3fd55555UL addsd(xmm1, xmm5); movdqu(xmm2, ExternalAddress(32 + coeff)); // 0x9999999aUL, 0x3fc99999UL, 0x00000000UL, 0xbfe00000UL mulsd(xmm6, xmm7); movddup(xmm5, xmm1); mulsd(xmm7, ExternalAddress(8 + log2)); // 0x93c76730UL, 0x3ceef357UL mulsd(xmm3, xmm1); addsd(xmm0, xmm6); mulpd(xmm4, xmm5); mulpd(xmm5, xmm5); movddup(xmm6, xmm0); addsd(xmm0, xmm1); addpd(xmm4, xmm2); mulpd(xmm3, xmm5); subsd(xmm6, xmm0); mulsd(xmm4, xmm1); pshufd(xmm2, xmm0, 238); addsd(xmm1, xmm6); mulsd(xmm5, xmm5); addsd(xmm7, xmm2); addpd(xmm4, xmm3); addsd(xmm1, xmm7); mulpd(xmm4, xmm5); addsd(xmm1, xmm4); pshufd(xmm5, xmm4, 238); addsd(xmm1, xmm5); addsd(xmm0, xmm1); jmp(B1_5); bind(L_2TAG_PACKET_0_0_2); movq(xmm0, Address(rsp, 0)); movq(xmm1, Address(rsp, 0)); addl(eax, 16); cmpl(eax, 32768); jcc(Assembler::aboveEqual, L_2TAG_PACKET_2_0_2); cmpl(eax, 16); jcc(Assembler::below, L_2TAG_PACKET_3_0_2); bind(L_2TAG_PACKET_4_0_2); addsd(xmm0, xmm0); jmp(B1_5); bind(L_2TAG_PACKET_5_0_2); jcc(Assembler::above, L_2TAG_PACKET_4_0_2); cmpl(edx, 0); jcc(Assembler::above, L_2TAG_PACKET_4_0_2); jmp(L_2TAG_PACKET_6_0_2); bind(L_2TAG_PACKET_3_0_2); xorpd(xmm1, xmm1); addsd(xmm1, xmm0); movdl(edx, xmm1); psrlq(xmm1, 32); movdl(ecx, xmm1); orl(edx, ecx); cmpl(edx, 0); jcc(Assembler::equal, L_2TAG_PACKET_7_0_2); xorpd(xmm1, xmm1); movl(eax, 18416); pinsrw(xmm1, eax, 3); mulsd(xmm0, xmm1); movdqu(xmm1, xmm0); pextrw(eax, xmm0, 3); por(xmm0, xmm2); psrlq(xmm0, 27); movl(ecx, 18416); psrld(xmm0, 2); rcpps(xmm0, xmm0); psllq(xmm1, 12); pshufd(xmm6, xmm5, 228); psrlq(xmm1, 12); jmp(L_2TAG_PACKET_1_0_2); bind(L_2TAG_PACKET_2_0_2); movdl(edx, xmm1); psrlq(xmm1, 32); movdl(ecx, xmm1); addl(ecx, ecx); cmpl(ecx, -2097152); 
jcc(Assembler::aboveEqual, L_2TAG_PACKET_5_0_2); orl(edx, ecx); cmpl(edx, 0); jcc(Assembler::equal, L_2TAG_PACKET_7_0_2); bind(L_2TAG_PACKET_6_0_2); xorpd(xmm1, xmm1); xorpd(xmm0, xmm0); movl(eax, 32752); pinsrw(xmm1, eax, 3); mulsd(xmm0, xmm1); movl(Address(rsp, 16), 3); jmp(L_2TAG_PACKET_8_0_2); bind(L_2TAG_PACKET_7_0_2); xorpd(xmm1, xmm1); xorpd(xmm0, xmm0); movl(eax, 49136); pinsrw(xmm0, eax, 3); divsd(xmm0, xmm1); movl(Address(rsp, 16), 2); bind(L_2TAG_PACKET_8_0_2); movq(Address(rsp, 8), xmm0); bind(B1_3); movq(xmm0, Address(rsp, 8)); bind(B1_5); addq(rsp, 24); }
void MacroAssembler::fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register eax, Register ecx, Register edx, Register tmp) { Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2; Label L_2TAG_PACKET_8_0_2, L_2TAG_PACKET_9_0_2, L_2TAG_PACKET_10_0_2, L_2TAG_PACKET_11_0_2; Label L_2TAG_PACKET_12_0_2, B1_3, B1_5, start; assert_different_registers(tmp, eax, ecx, edx); jmp(start); address cv = (address)_cv; address Shifter = (address)_shifter; address mmask = (address)_mmask; address bias = (address)_bias; address Tbl_addr = (address)_Tbl_addr; address ALLONES = (address)_ALLONES; address ebias = (address)_ebias; address XMAX = (address)_XMAX; address XMIN = (address)_XMIN; address INF = (address)_INF; address ZERO = (address)_ZERO; address ONE_val = (address)_ONE_val; bind(start); subq(rsp, 24); movsd(Address(rsp, 8), xmm0); unpcklpd(xmm0, xmm0); movdqu(xmm1, ExternalAddress(cv)); // 0x652b82feUL, 0x40571547UL, 0x652b82feUL, 0x40571547UL movdqu(xmm6, ExternalAddress(Shifter)); // 0x00000000UL, 0x43380000UL, 0x00000000UL, 0x43380000UL movdqu(xmm2, ExternalAddress(16+cv)); // 0xfefa0000UL, 0x3f862e42UL, 0xfefa0000UL, 0x3f862e42UL movdqu(xmm3, ExternalAddress(32+cv)); // 0xbc9e3b3aUL, 0x3d1cf79aUL, 0xbc9e3b3aUL, 0x3d1cf79aUL pextrw(eax, xmm0, 3); andl(eax, 32767); movl(edx, 16527); subl(edx, eax); subl(eax, 15504); orl(edx, eax); cmpl(edx, INT_MIN); jcc(Assembler::aboveEqual, L_2TAG_PACKET_0_0_2); mulpd(xmm1, xmm0); addpd(xmm1, xmm6); movapd(xmm7, xmm1); subpd(xmm1, xmm6); mulpd(xmm2, xmm1); movdqu(xmm4, ExternalAddress(64+cv)); // 0xe3289860UL, 0x3f56c15cUL, 0x555b9e25UL, 0x3fa55555UL mulpd(xmm3, xmm1); movdqu(xmm5, ExternalAddress(80+cv)); // 0xc090cf0fUL, 0x3f811115UL, 0x55548ba1UL, 0x3fc55555UL subpd(xmm0, xmm2); movdl(eax, xmm7); movl(ecx, eax); andl(ecx, 63); shll(ecx, 4); sarl(eax, 6); movl(edx, eax); movdqu(xmm6, ExternalAddress(mmask)); // 0xffffffc0UL, 0x00000000UL, 0xffffffc0UL, 0x00000000UL pand(xmm7, xmm6); movdqu(xmm6, ExternalAddress(bias)); // 0x0000ffc0UL, 0x00000000UL, 0x0000ffc0UL, 0x00000000UL paddq(xmm7, xmm6); psllq(xmm7, 46); subpd(xmm0, xmm3); lea(tmp, ExternalAddress(Tbl_addr)); movdqu(xmm2, Address(ecx,tmp)); mulpd(xmm4, xmm0); movapd(xmm6, xmm0); movapd(xmm1, xmm0); mulpd(xmm6, xmm6); mulpd(xmm0, xmm6); addpd(xmm5, xmm4); mulsd(xmm0, xmm6); mulpd(xmm6, ExternalAddress(48+cv)); // 0xfffffffeUL, 0x3fdfffffUL, 0xfffffffeUL, 0x3fdfffffUL addsd(xmm1, xmm2); unpckhpd(xmm2, xmm2); mulpd(xmm0, xmm5); addsd(xmm1, xmm0); por(xmm2, xmm7); unpckhpd(xmm0, xmm0); addsd(xmm0, xmm1); addsd(xmm0, xmm6); addl(edx, 894); cmpl(edx, 1916); jcc (Assembler::above, L_2TAG_PACKET_1_0_2); mulsd(xmm0, xmm2); addsd(xmm0, xmm2); jmp (B1_5); bind(L_2TAG_PACKET_1_0_2); xorpd(xmm3, xmm3); movdqu(xmm4, ExternalAddress(ALLONES)); // 0xffffffffUL, 0xffffffffUL, 0xffffffffUL, 0xffffffffUL movl(edx, -1022); subl(edx, eax); movdl(xmm5, edx); psllq(xmm4, xmm5); movl(ecx, eax); sarl(eax, 1); pinsrw(xmm3, eax, 3); movdqu(xmm6, ExternalAddress(ebias)); // 0x00000000UL, 0x3ff00000UL, 0x00000000UL, 0x3ff00000UL psllq(xmm3, 4); psubd(xmm2, xmm3); mulsd(xmm0, xmm2); cmpl(edx, 52); jcc(Assembler::greater, L_2TAG_PACKET_2_0_2); pand(xmm4, xmm2); paddd(xmm3, xmm6); subsd(xmm2, xmm4); addsd(xmm0, xmm2); cmpl(ecx, 1023); jcc(Assembler::greaterEqual, L_2TAG_PACKET_3_0_2); pextrw(ecx, xmm0, 3); 
andl(ecx, 32768); orl(edx, ecx); cmpl(edx, 0); jcc(Assembler::equal, L_2TAG_PACKET_4_0_2); movapd(xmm6, xmm0); addsd(xmm0, xmm4); mulsd(xmm0, xmm3); pextrw(ecx, xmm0, 3); andl(ecx, 32752); cmpl(ecx, 0); jcc(Assembler::equal, L_2TAG_PACKET_5_0_2); jmp(B1_5); bind(L_2TAG_PACKET_5_0_2); mulsd(xmm6, xmm3); mulsd(xmm4, xmm3); movdqu(xmm0, xmm6); pxor(xmm6, xmm4); psrad(xmm6, 31); pshufd(xmm6, xmm6, 85); psllq(xmm0, 1); psrlq(xmm0, 1); pxor(xmm0, xmm6); psrlq(xmm6, 63); paddq(xmm0, xmm6); paddq(xmm0, xmm4); movl(Address(rsp,0), 15); jmp(L_2TAG_PACKET_6_0_2); bind(L_2TAG_PACKET_4_0_2); addsd(xmm0, xmm4); mulsd(xmm0, xmm3); jmp(B1_5); bind(L_2TAG_PACKET_3_0_2); addsd(xmm0, xmm4); mulsd(xmm0, xmm3); pextrw(ecx, xmm0, 3); andl(ecx, 32752); cmpl(ecx, 32752); jcc(Assembler::aboveEqual, L_2TAG_PACKET_7_0_2); jmp(B1_5); bind(L_2TAG_PACKET_2_0_2); paddd(xmm3, xmm6); addpd(xmm0, xmm2); mulsd(xmm0, xmm3); movl(Address(rsp,0), 15); jmp(L_2TAG_PACKET_6_0_2); bind(L_2TAG_PACKET_8_0_2); cmpl(eax, 2146435072); jcc(Assembler::aboveEqual, L_2TAG_PACKET_9_0_2); movl(eax, Address(rsp,12)); cmpl(eax, INT_MIN); jcc(Assembler::aboveEqual, L_2TAG_PACKET_10_0_2); movsd(xmm0, ExternalAddress(XMAX)); // 0xffffffffUL, 0x7fefffffUL mulsd(xmm0, xmm0); bind(L_2TAG_PACKET_7_0_2); movl(Address(rsp,0), 14); jmp(L_2TAG_PACKET_6_0_2); bind(L_2TAG_PACKET_10_0_2); movsd(xmm0, ExternalAddress(XMIN)); // 0x00000000UL, 0x00100000UL mulsd(xmm0, xmm0); movl(Address(rsp,0), 15); jmp(L_2TAG_PACKET_6_0_2); bind(L_2TAG_PACKET_9_0_2); movl(edx, Address(rsp,8)); cmpl(eax, 2146435072); jcc(Assembler::above, L_2TAG_PACKET_11_0_2); cmpl(edx, 0); jcc(Assembler::notEqual, L_2TAG_PACKET_11_0_2); movl(eax, Address(rsp,12)); cmpl(eax, 2146435072); jcc(Assembler::notEqual, L_2TAG_PACKET_12_0_2); movsd(xmm0, ExternalAddress(INF)); // 0x00000000UL, 0x7ff00000UL jmp(B1_5); bind(L_2TAG_PACKET_12_0_2); movsd(xmm0, ExternalAddress(ZERO)); // 0x00000000UL, 0x00000000UL jmp(B1_5); bind(L_2TAG_PACKET_11_0_2); movsd(xmm0, Address(rsp, 8)); addsd(xmm0, xmm0); jmp(B1_5); bind(L_2TAG_PACKET_0_0_2); movl(eax, Address(rsp, 12)); andl(eax, 2147483647); cmpl(eax, 1083179008); jcc(Assembler::aboveEqual, L_2TAG_PACKET_8_0_2); movsd(Address(rsp, 8), xmm0); addsd(xmm0, ExternalAddress(ONE_val)); // 0x00000000UL, 0x3ff00000UL jmp(B1_5); bind(L_2TAG_PACKET_6_0_2); movq(Address(rsp, 16), xmm0); bind(B1_3); movq(xmm0, Address(rsp, 16)); bind(B1_5); addq(rsp, 24); }
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
  const Address src(from(), -(offset() + 1) * wordSize);

#ifdef _WIN64
  switch (_num_args) {
  case 0:
    __ movq(rarg1, src);
    _num_args++;
    break;
  case 1:
    __ movq(rarg2, src);
    _num_args++;
    break;
  case 2:
    __ movq(rarg3, src);
    _num_args++;
    break;
  case 3:
  default:
    __ movq(rax, src);
    __ movq(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
    break;
  }
#else
  switch (_num_int_args) {
  case 0:
    __ movq(rarg1, src);
    _num_int_args++;
    break;
  case 1:
    __ movq(rarg2, src);
    _num_int_args++;
    break;
  case 2:
    __ movq(rarg3, src);
    _num_int_args++;
    break;
  case 3:
    __ movq(rarg4, src);
    _num_int_args++;
    break;
  case 4:
    __ movq(rarg5, src);
    _num_int_args++;
    break;
  default:
    __ movq(rax, src);
    __ movq(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
    break;
  }
#endif
}
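// The two branches above mirror the integer-argument register budgets of the two 64-bit calling
// conventions: Windows x64 has four integer argument registers, the System V AMD64 ABI has six,
// and rarg0 is already taken (it carries the JNIEnv for the native call), so Java-visible
// arguments are assigned from rarg1 onward before spilling to the outgoing stack area. A small
// standalone sketch of that placement rule (hypothetical helper, not HotSpot code):
#include <cstddef>
#include <string>
#include <vector>

static std::vector<std::string> place_long_args(size_t count, bool win64)
{
    static const char* win_regs[]  = { "rdx", "r8", "r9" };                // after rcx = JNIEnv
    static const char* sysv_regs[] = { "rsi", "rdx", "rcx", "r8", "r9" };  // after rdi = JNIEnv
    const char** regs  = win64 ? win_regs : sysv_regs;
    size_t       nregs = win64 ? 3 : 5;

    std::vector<std::string> placement;
    for (size_t i = 0; i < count; i++) {
        if (i < nregs) placement.push_back(regs[i]);
        else           placement.push_back("stack+" + std::to_string((i - nregs) * 8));
    }
    return placement;
}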
//------------------------------------------------------------------------------ // MethodHandles::generate_method_handle_stub // // Generate an "entry" field for a method handle. // This determines how the method handle will respond to calls. void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { // Here is the register state during an interpreted call, // as set up by generate_method_handle_interpreter_entry(): // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused) // - rcx: receiver method handle // - rax: method handle type (only used by the check_mtype entry point) // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) // - rdx: garbage temp, can blow away const Register rcx_recv = rcx; const Register rax_argslot = rax; const Register rbx_temp = rbx; const Register rdx_temp = rdx; // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls) // and gen_c2i_adapter (from compiled calls): const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi); // Argument registers for _raise_exception. // 32-bit: Pass first two oop/int args in registers ECX and EDX. const Register rarg0_code = LP64_ONLY(j_rarg0) NOT_LP64(rcx); const Register rarg1_actual = LP64_ONLY(j_rarg1) NOT_LP64(rdx); const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi); assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp); guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); // some handy addresses Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() ); Address rbx_method_fce( rbx, methodOopDesc::from_compiled_offset() ); Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() ); Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() ); Address rcx_bmh_vmargslot( rcx_recv, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes() ); Address rcx_bmh_argument( rcx_recv, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes() ); Address rcx_amh_vmargslot( rcx_recv, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes() ); Address rcx_amh_argument( rcx_recv, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes() ); Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() ); Address vmarg; // __ argument_address(vmargslot) const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); if (have_entry(ek)) { __ nop(); // empty stubs make SG sick return; } address interp_entry = __ pc(); trace_method_handle(_masm, entry_name(ek)); BLOCK_COMMENT(entry_name(ek)); switch ((int) ek) { case _raise_exception: { // Not a real MH entry, but rather shared code for raising an // exception. Since we use the compiled entry, arguments are // expected in compiler argument registers. 
assert(raise_exception_method(), "must be set"); assert(raise_exception_method()->from_compiled_entry(), "method must be linked"); const Register rdi_pc = rax; __ pop(rdi_pc); // caller PC __ mov(rsp, saved_last_sp); // cut the stack back to where the caller started Register rbx_method = rbx_temp; Label L_no_method; // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method)); __ testptr(rbx_method, rbx_method); __ jccb(Assembler::zero, L_no_method); const int jobject_oop_offset = 0; __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject __ testptr(rbx_method, rbx_method); __ jccb(Assembler::zero, L_no_method); __ verify_oop(rbx_method); NOT_LP64(__ push(rarg2_required)); __ push(rdi_pc); // restore caller PC __ jmp(rbx_method_fce); // jump to compiled entry // Do something that is at least causes a valid throw from the interpreter. __ bind(L_no_method); __ push(rarg2_required); __ push(rarg1_actual); __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); } break; case _invokestatic_mh: case _invokespecial_mh: { Register rbx_method = rbx_temp; __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop __ verify_oop(rbx_method); // same as TemplateTable::invokestatic or invokespecial, // minus the CP setup and profiling: if (ek == _invokespecial_mh) { // Must load & check the first argument before entering the target method. __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); __ null_check(rcx_recv); __ verify_oop(rcx_recv); } __ jmp(rbx_method_fie); } break; case _invokevirtual_mh: { // same as TemplateTable::invokevirtual, // minus the CP setup and profiling: // pick out the vtable index and receiver offset from the MH, // and then we can discard it: __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); Register rbx_index = rbx_temp; __ movl(rbx_index, rcx_dmh_vmindex); // Note: The verifier allows us to ignore rcx_mh_vmtarget. __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); // get receiver klass Register rax_klass = rax_argslot; __ load_klass(rax_klass, rcx_recv); __ verify_oop(rax_klass); // get target methodOop & entry point const int base = instanceKlass::vtable_start_offset() * wordSize; assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); Address vtable_entry_addr(rax_klass, rbx_index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()); Register rbx_method = rbx_temp; __ movptr(rbx_method, vtable_entry_addr); __ verify_oop(rbx_method); __ jmp(rbx_method_fie); } break; case _invokeinterface_mh: { // same as TemplateTable::invokeinterface, // minus the CP setup and profiling: // pick out the interface and itable index from the MH. 
__ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); Register rdx_intf = rdx_temp; Register rbx_index = rbx_temp; __ load_heap_oop(rdx_intf, rcx_mh_vmtarget); __ movl(rbx_index, rcx_dmh_vmindex); __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); // get receiver klass Register rax_klass = rax_argslot; __ load_klass(rax_klass, rcx_recv); __ verify_oop(rax_klass); Register rdi_temp = rdi; Register rbx_method = rbx_index; // get interface klass Label no_such_interface; __ verify_oop(rdx_intf); __ lookup_interface_method(rax_klass, rdx_intf, // note: next two args must be the same: rbx_index, rbx_method, rdi_temp, no_such_interface); __ verify_oop(rbx_method); __ jmp(rbx_method_fie); __ hlt(); __ bind(no_such_interface); // Throw an exception. // For historical reasons, it will be IncompatibleClassChangeError. __ mov(rbx_temp, rcx_recv); // rarg2_required might be RCX assert_different_registers(rarg2_required, rbx_temp); __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset)); // required interface __ mov( rarg1_actual, rbx_temp); // bad receiver __ movl( rarg0_code, (int) Bytecodes::_invokeinterface); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); } break; case _bound_ref_mh: case _bound_int_mh: case _bound_long_mh: case _bound_ref_direct_mh: case _bound_int_direct_mh: case _bound_long_direct_mh: { bool direct_to_method = (ek >= _bound_ref_direct_mh); BasicType arg_type = T_ILLEGAL; int arg_mask = _INSERT_NO_MASK; int arg_slots = -1; get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots); // make room for the new argument: __ movl(rax_argslot, rcx_bmh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp); // store bound argument into the new stack slot: __ load_heap_oop(rbx_temp, rcx_bmh_argument); if (arg_type == T_OBJECT) { __ movptr(Address(rax_argslot, 0), rbx_temp); } else { Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); const int arg_size = type2aelembytes(arg_type); __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp); __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp); } if (direct_to_method) { Register rbx_method = rbx_temp; __ load_heap_oop(rbx_method, rcx_mh_vmtarget); __ verify_oop(rbx_method); __ jmp(rbx_method_fie); } else { __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ verify_oop(rcx_recv); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } } break; case _adapter_retype_only: case _adapter_retype_raw: // immediately jump to the next MH layer: __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ verify_oop(rcx_recv); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); // This is OK when all parameter types widen. // It is also OK when a return type narrows. break; case _adapter_check_cast: { // temps: Register rbx_klass = rbx_temp; // interesting AMH data // check a reference argument before jumping to the next layer of MH: __ movl(rax_argslot, rcx_amh_vmargslot); vmarg = __ argument_address(rax_argslot); // What class are we casting to? __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! 
__ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); Label done; __ movptr(rdx_temp, vmarg); __ testptr(rdx_temp, rdx_temp); __ jcc(Assembler::zero, done); // no cast if null __ load_klass(rdx_temp, rdx_temp); // live at this point: // - rbx_klass: klass required by the target method // - rdx_temp: argument klass to test // - rcx_recv: adapter method handle __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done); // If we get here, the type check failed! // Call the wrong_method_type stub, passing the failing argument type in rax. Register rax_mtype = rax_argslot; __ movl(rax_argslot, rcx_amh_vmargslot); // reload argslot field __ movptr(rdx_temp, vmarg); assert_different_registers(rarg2_required, rdx_temp); __ load_heap_oop(rarg2_required, rcx_amh_argument); // required class __ mov( rarg1_actual, rdx_temp); // bad object __ movl( rarg0_code, (int) Bytecodes::_checkcast); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); __ bind(done); // get the new MH: __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_prim_to_prim: case _adapter_ref_to_prim: // handled completely by optimized cases __ stop("init_AdapterMethodHandle should not issue this"); break; case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim { // perform an in-place conversion to int or an int subword __ movl(rax_argslot, rcx_amh_vmargslot); vmarg = __ argument_address(rax_argslot); switch (ek) { case _adapter_opt_i2i: __ movl(rdx_temp, vmarg); break; case _adapter_opt_l2i: { // just delete the extra slot; on a little-endian machine we keep the first __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); remove_arg_slots(_masm, -stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); vmarg = Address(rax_argslot, -Interpreter::stackElementSize); __ movl(rdx_temp, vmarg); } break; case _adapter_opt_unboxi: { // Load the value up from the heap. __ movptr(rdx_temp, vmarg); int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); #ifdef ASSERT for (int bt = T_BOOLEAN; bt < T_INT; bt++) { if (is_subword_type(BasicType(bt))) assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), ""); } #endif __ null_check(rdx_temp, value_offset); __ movl(rdx_temp, Address(rdx_temp, value_offset)); // We load this as a word. Because we are little-endian, // the low bits will be correct, but the high bits may need cleaning. // The vminfo will guide us to clean those bits. } break; default: ShouldNotReachHere(); } // Do the requested conversion and store the value. 
Register rbx_vminfo = rbx_temp; __ movl(rbx_vminfo, rcx_amh_conversion); assert(CONV_VMINFO_SHIFT == 0, "preshifted"); // get the new MH: __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); // (now we are done with the old MH) // original 32-bit vmdata word must be of this form: // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | __ xchgptr(rcx, rbx_vminfo); // free rcx for shifts __ shll(rdx_temp /*, rcx*/); Label zero_extend, done; __ testl(rcx, CONV_VMINFO_SIGN_FLAG); __ jccb(Assembler::zero, zero_extend); // this path is taken for int->byte, int->short __ sarl(rdx_temp /*, rcx*/); __ jmpb(done); __ bind(zero_extend); // this is taken for int->char __ shrl(rdx_temp /*, rcx*/); __ bind(done); __ movl(vmarg, rdx_temp); // Store the value. __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim { // perform an in-place int-to-long or ref-to-long conversion __ movl(rax_argslot, rcx_amh_vmargslot); // on a little-endian machine we keep the first slot and add another after __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, rax_argslot, rbx_temp, rdx_temp); Address vmarg1(rax_argslot, -Interpreter::stackElementSize); Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize); switch (ek) { case _adapter_opt_i2l: { #ifdef _LP64 __ movslq(rdx_temp, vmarg1); // Load sign-extended __ movq(vmarg1, rdx_temp); // Store into first slot #else __ movl(rdx_temp, vmarg1); __ sarl(rdx_temp, BitsPerInt - 1); // __ extend_sign() __ movl(vmarg2, rdx_temp); // store second word #endif } break; case _adapter_opt_unboxl: { // Load the value up from the heap. 
__ movptr(rdx_temp, vmarg1); int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); __ null_check(rdx_temp, value_offset); #ifdef _LP64 __ movq(rbx_temp, Address(rdx_temp, value_offset)); __ movq(vmarg1, rbx_temp); #else __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt)); __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt)); __ movl(vmarg1, rbx_temp); __ movl(vmarg2, rdx_temp); #endif } break; default: ShouldNotReachHere(); } __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim { // perform an in-place floating primitive conversion __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); if (ek == _adapter_opt_f2d) { insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, rax_argslot, rbx_temp, rdx_temp); } Address vmarg(rax_argslot, -Interpreter::stackElementSize); #ifdef _LP64 if (ek == _adapter_opt_f2d) { __ movflt(xmm0, vmarg); __ cvtss2sd(xmm0, xmm0); __ movdbl(vmarg, xmm0); } else { __ movdbl(xmm0, vmarg); __ cvtsd2ss(xmm0, xmm0); __ movflt(vmarg, xmm0); } #else //_LP64 if (ek == _adapter_opt_f2d) { __ fld_s(vmarg); // load float to ST0 __ fstp_s(vmarg); // store single } else { __ fld_d(vmarg); // load double to ST0 __ fstp_s(vmarg); // store single } #endif //_LP64 if (ek == _adapter_opt_d2f) { remove_arg_slots(_masm, -stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); } __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_prim_to_ref: __ unimplemented(entry_name(ek)); // %%% FIXME: NYI break; case _adapter_swap_args: case _adapter_rot_args: // handled completely by optimized cases __ stop("init_AdapterMethodHandle should not issue this"); break; case _adapter_opt_swap_1: case _adapter_opt_swap_2: case _adapter_opt_rot_1_up: case _adapter_opt_rot_1_down: case _adapter_opt_rot_2_up: case _adapter_opt_rot_2_down: { int swap_bytes = 0, rotate = 0; get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate); // 'argslot' is the position of the first argument to swap __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); // 'vminfo' is the second Register rbx_destslot = rbx_temp; __ movl(rbx_destslot, rcx_amh_conversion); assert(CONV_VMINFO_SHIFT == 0, "preshifted"); __ andl(rbx_destslot, CONV_VMINFO_MASK); __ lea(rbx_destslot, __ argument_address(rbx_destslot)); DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame")); if (!rotate) { for (int i = 0; i < swap_bytes; i += wordSize) { __ movptr(rdx_temp, Address(rax_argslot , i)); __ push(rdx_temp); __ movptr(rdx_temp, Address(rbx_destslot, i)); __ movptr(Address(rax_argslot, i), rdx_temp); __ pop(rdx_temp); __ movptr(Address(rbx_destslot, i), rdx_temp); } } else { // push the first chunk, which is going to get overwritten for (int i = swap_bytes; (i -= wordSize) >= 0; ) { __ movptr(rdx_temp, Address(rax_argslot, i)); __ push(rdx_temp); } if (rotate > 0) { // rotate upward __ subptr(rax_argslot, swap_bytes); #ifdef ASSERT { // Verify that argslot > destslot, by at least swap_bytes. 
Label L_ok; __ cmpptr(rax_argslot, rbx_destslot); __ jccb(Assembler::aboveEqual, L_ok); __ stop("source must be above destination (upward rotation)"); __ bind(L_ok); } #endif // work argslot down to destslot, copying contiguous data upwards // pseudo-code: // rax = src_addr - swap_bytes // rbx = dest_addr // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--; Label loop; __ bind(loop); __ movptr(rdx_temp, Address(rax_argslot, 0)); __ movptr(Address(rax_argslot, swap_bytes), rdx_temp); __ addptr(rax_argslot, -wordSize); __ cmpptr(rax_argslot, rbx_destslot); __ jccb(Assembler::aboveEqual, loop); } else { __ addptr(rax_argslot, swap_bytes); #ifdef ASSERT { // Verify that argslot < destslot, by at least swap_bytes. Label L_ok; __ cmpptr(rax_argslot, rbx_destslot); __ jccb(Assembler::belowEqual, L_ok); __ stop("source must be below destination (downward rotation)"); __ bind(L_ok); } #endif // work argslot up to destslot, copying contiguous data downwards // pseudo-code: // rax = src_addr + swap_bytes // rbx = dest_addr // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; Label loop; __ bind(loop); __ movptr(rdx_temp, Address(rax_argslot, 0)); __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp); __ addptr(rax_argslot, wordSize); __ cmpptr(rax_argslot, rbx_destslot); __ jccb(Assembler::belowEqual, loop); } // pop the original first chunk into the destination slot, now free for (int i = 0; i < swap_bytes; i += wordSize) { __ pop(rdx_temp); __ movptr(Address(rbx_destslot, i), rdx_temp); } } __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_dup_args: { // 'argslot' is the position of the first argument to duplicate __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); // 'stack_move' is negative number of words to duplicate Register rdx_stack_move = rdx_temp; __ movl2ptr(rdx_stack_move, rcx_amh_conversion); __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT); int argslot0_num = 0; Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num)); assert(argslot0.base() == rsp, ""); int pre_arg_size = argslot0.disp(); assert(pre_arg_size % wordSize == 0, ""); assert(pre_arg_size > 0, "must include PC"); // remember the old rsp+1 (argslot[0]) Register rbx_oldarg = rbx_temp; __ lea(rbx_oldarg, argslot0); // move rsp down to make room for dups __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr)); // compute the new rsp+1 (argslot[0]) Register rdx_newarg = rdx_temp; __ lea(rdx_newarg, argslot0); __ push(rdi); // need a temp // (preceding push must be done after arg addresses are taken!) // pull down the pre_arg_size data (PC) for (int i = -pre_arg_size; i < 0; i += wordSize) { __ movptr(rdi, Address(rbx_oldarg, i)); __ movptr(Address(rdx_newarg, i), rdi); } // copy from rax_argslot[0...] down to new_rsp[1...] 
// pseudo-code: // rbx = old_rsp+1 // rdx = new_rsp+1 // rax = argslot // while (rdx < rbx) *rdx++ = *rax++ Label loop; __ bind(loop); __ movptr(rdi, Address(rax_argslot, 0)); __ movptr(Address(rdx_newarg, 0), rdi); __ addptr(rax_argslot, wordSize); __ addptr(rdx_newarg, wordSize); __ cmpptr(rdx_newarg, rbx_oldarg); __ jccb(Assembler::less, loop); __ pop(rdi); // restore temp __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_drop_args: { // 'argslot' is the position of the first argument to nuke __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); __ push(rdi); // need a temp // (must do previous push after argslot address is taken) // 'stack_move' is number of words to drop Register rdi_stack_move = rdi; __ movl2ptr(rdi_stack_move, rcx_amh_conversion); __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT); remove_arg_slots(_masm, rdi_stack_move, rax_argslot, rbx_temp, rdx_temp); __ pop(rdi); // restore temp __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_collect_args: __ unimplemented(entry_name(ek)); // %%% FIXME: NYI break; case _adapter_spread_args: // handled completely by optimized cases __ stop("init_AdapterMethodHandle should not issue this"); break; case _adapter_opt_spread_0: case _adapter_opt_spread_1: case _adapter_opt_spread_more: { // spread an array out into a group of arguments int length_constant = get_ek_adapter_opt_spread_info(ek); // find the address of the array argument __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); // grab some temps { __ push(rsi); __ push(rdi); } // (preceding pushes must be done after argslot address is taken!) #define UNPUSH_RSI_RDI \ { __ pop(rdi); __ pop(rsi); } // arx_argslot points both to the array and to the first output arg vmarg = Address(rax_argslot, 0); // Get the array value. Register rsi_array = rsi; Register rdx_array_klass = rdx_temp; BasicType elem_type = T_OBJECT; int length_offset = arrayOopDesc::length_offset_in_bytes(); int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); __ movptr(rsi_array, vmarg); Label skip_array_check; if (length_constant == 0) { __ testptr(rsi_array, rsi_array); __ jcc(Assembler::zero, skip_array_check); } __ null_check(rsi_array, oopDesc::klass_offset_in_bytes()); __ load_klass(rdx_array_klass, rsi_array); // Check the array type. Register rbx_klass = rbx_temp; __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); Label ok_array_klass, bad_array_klass, bad_array_length; __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass); // If we get here, the type check failed! __ jmp(bad_array_klass); __ bind(ok_array_klass); // Check length. if (length_constant >= 0) { __ cmpl(Address(rsi_array, length_offset), length_constant); } else { Register rbx_vminfo = rbx_temp; __ movl(rbx_vminfo, rcx_amh_conversion); assert(CONV_VMINFO_SHIFT == 0, "preshifted"); __ andl(rbx_vminfo, CONV_VMINFO_MASK); __ cmpl(rbx_vminfo, Address(rsi_array, length_offset)); } __ jcc(Assembler::notEqual, bad_array_length); Register rdx_argslot_limit = rdx_temp; // Array length checks out. Now insert any required stack slots. if (length_constant == -1) { // Form a pointer to the end of the affected region. 
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize)); // 'stack_move' is negative number of words to insert Register rdi_stack_move = rdi; __ movl2ptr(rdi_stack_move, rcx_amh_conversion); __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT); Register rsi_temp = rsi_array; // spill this insert_arg_slots(_masm, rdi_stack_move, -1, rax_argslot, rbx_temp, rsi_temp); // reload the array (since rsi was killed) __ movptr(rsi_array, vmarg); } else if (length_constant > 1) { int arg_mask = 0; int new_slots = (length_constant - 1); for (int i = 0; i < new_slots; i++) { arg_mask <<= 1; arg_mask |= _INSERT_REF_MASK; } insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp); } else if (length_constant == 1) { // no stack resizing required } else if (length_constant == 0) { remove_arg_slots(_masm, -stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); } // Copy from the array to the new slots. // Note: Stack change code preserves integrity of rax_argslot pointer. // So even after slot insertions, rax_argslot still points to first argument. if (length_constant == -1) { // [rax_argslot, rdx_argslot_limit) is the area we are inserting into. Register rsi_source = rsi_array; __ lea(rsi_source, Address(rsi_array, elem0_offset)); Label loop; __ bind(loop); __ movptr(rbx_temp, Address(rsi_source, 0)); __ movptr(Address(rax_argslot, 0), rbx_temp); __ addptr(rsi_source, type2aelembytes(elem_type)); __ addptr(rax_argslot, Interpreter::stackElementSize); __ cmpptr(rax_argslot, rdx_argslot_limit); __ jccb(Assembler::less, loop); } else if (length_constant == 0) { __ bind(skip_array_check); // nothing to copy } else { int elem_offset = elem0_offset; int slot_offset = 0; for (int index = 0; index < length_constant; index++) { __ movptr(rbx_temp, Address(rsi_array, elem_offset)); __ movptr(Address(rax_argslot, slot_offset), rbx_temp); elem_offset += type2aelembytes(elem_type); slot_offset += Interpreter::stackElementSize; } } // Arguments are spread. Move to next method handle. UNPUSH_RSI_RDI; __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); __ bind(bad_array_klass); UNPUSH_RSI_RDI; assert(!vmarg.uses(rarg2_required), "must be different registers"); __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type __ movptr(rarg1_actual, vmarg); // bad array __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); __ bind(bad_array_length); UNPUSH_RSI_RDI; assert(!vmarg.uses(rarg2_required), "must be different registers"); __ mov (rarg2_required, rcx_recv); // AMH requiring a certain length __ movptr(rarg1_actual, vmarg); // bad array __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); #undef UNPUSH_RSI_RDI } break; case _adapter_flyby: case _adapter_ricochet: __ unimplemented(entry_name(ek)); // %%% FIXME: NYI break; default: ShouldNotReachHere(); } __ hlt(); address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); __ unimplemented(entry_name(ek)); // %%% FIXME: NYI init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie)); }
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { const Address src(from(), -offset() * wordSize); #ifdef _WIN64 switch (_num_args) { case 0: assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); __ leaq(rarg1, src); _num_args++; break; case 1: __ leaq(rax, src); __ xorl(rarg2, rarg2); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, rarg2, rax); _num_args++; break; case 2: __ leaq(rax, src); __ xorl(rarg3, rarg3); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, rarg3, rax); _num_args++; break; default: __ leaq(rax, src); __ xorl(temp(), temp()); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, temp(), rax); __ movq(Address(to(), _stack_offset), temp()); _stack_offset += wordSize; break; } #else switch (_num_int_args) { case 0: assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); __ leaq(rarg1, src); _num_int_args++; break; case 1: __ leaq(rax, src); __ xorl(rarg2, rarg2); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, rarg2, rax); _num_int_args++; break; case 2: __ leaq(rax, src); __ xorl(rarg3, rarg3); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, rarg3, rax); _num_int_args++; break; case 3: __ leaq(rax, src); __ xorl(rarg4, rarg4); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, rarg4, rax); _num_int_args++; break; case 4: __ leaq(rax, src); __ xorl(rarg5, rarg5); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, rarg5, rax); _num_int_args++; break; default: __ leaq(rax, src); __ xorl(temp(), temp()); __ cmpq(src, 0); __ cmovq(Assembler::notEqual, temp(), rax); __ movq(Address(to(), _stack_offset), temp()); _stack_offset += wordSize; break; } #endif }
void GPUDrawScanlineCodeGenerator::WriteFrame()
{
	// GSVector4i fs = r | g | b | (m_sel.md ? GSVector4i(0x80008000) : m_sel.tme ? a : 0);

	pcmpeqd(xmm0, xmm0);

	if(m_sel.md || m_sel.tme)
	{
		movdqa(xmm2, xmm0);
		psllw(xmm2, 15);
	}

	psrlw(xmm0, 11);
	psllw(xmm0, 3);

	// xmm0 = 0x00f8
	// xmm2 = 0x8000 (md)

	// GSVector4i r = (c[0] & 0x00f800f8) >> 3;

	pand(xmm4, xmm0);
	psrlw(xmm4, 3);

	// GSVector4i g = (c[1] & 0x00f800f8) << 2;

	pand(xmm5, xmm0);
	psllw(xmm5, 2);
	por(xmm4, xmm5);

	// GSVector4i b = (c[2] & 0x00f800f8) << 7;

	pand(xmm6, xmm0);
	psllw(xmm6, 7);
	por(xmm4, xmm6);

	if(m_sel.md)
	{
		// GSVector4i a = GSVector4i(0x80008000);

		por(xmm4, xmm2);
	}
	else if(m_sel.tme)
	{
		// GSVector4i a = (c[3] << 8) & 0x80008000;

		psllw(xmm3, 8);
		pand(xmm3, xmm2);
		por(xmm4, xmm3);
	}

	// fs = fs.blend8(fd, test);

	movdqa(xmm0, xmm7);
	blend8(xmm4, xmm1);

	// GSVector4i::store<false>(fb, fs);

	// movdqu(ptr[edi], xmm4);

	movq(qword[edi], xmm4);
	movhps(qword[edi + 8], xmm4);
}
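// The shift/or sequence above assembles the 16-bit frame-buffer word one channel at a time:
// five bits each of red, green and blue taken from the top bits of the 8-bit channels, plus the
// mask/alpha bit in bit 15. A scalar sketch of that packing (hypothetical helper, assuming
// xmm4/xmm5/xmm6/xmm3 hold 8-bit r/g/b/a values per 16-bit lane):
static inline uint16_t pack_frame_5551(uint8_t r, uint8_t g, uint8_t b, bool msb)
{
    return uint16_t(((r & 0xf8) >> 3) |   // red   -> bits 0..4
                    ((g & 0xf8) << 2) |   // green -> bits 5..9
                    ((b & 0xf8) << 7) |   // blue  -> bits 10..14
                    (msb ? 0x8000 : 0));  // mask bit (m_sel.md) or alpha MSB (m_sel.tme)
}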
void GSDrawScanlineCodeGenerator::ReadPixel(const Xmm& dst, const Reg32& addr)
{
	movq(dst, qword[addr * 2 + (size_t)m_env.vm]);
	movhps(dst, qword[addr * 2 + (size_t)m_env.vm + 8 * 2]);
}
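// ReadPixel gathers one 128-bit register from two 64-bit halves that sit 16 bytes apart in
// local memory. A rough intrinsics equivalent of the generated movq/movhps pair, assuming the
// same vm base pointer and addressing (movhpd stands in for movhps; hypothetical helper, not
// part of GSdx):
#include <cstdint>
#include <emmintrin.h>

static inline __m128i read_pixel_pair(const uint8_t* vm, uint32_t addr)
{
    __m128i lo = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(vm + addr * 2));         // movq
    __m128d hi = _mm_loadh_pd(_mm_castsi128_pd(lo),
                              reinterpret_cast<const double*>(vm + addr * 2 + 8 * 2));     // movhps
    return _mm_castpd_si128(hi);
}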
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { const char *name; switch (type) { case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break; case T_BYTE: name = "jni_fast_GetByteField"; break; case T_CHAR: name = "jni_fast_GetCharField"; break; case T_SHORT: name = "jni_fast_GetShortField"; break; case T_INT: name = "jni_fast_GetIntField"; break; case T_LONG: name = "jni_fast_GetLongField"; break; default: ShouldNotReachHere(); } ResourceMark rm; BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE); address fast_entry = b->instructions_begin(); CodeBuffer* cbuf = new CodeBuffer(fast_entry, b->instructions_size()); MacroAssembler* masm = new MacroAssembler(cbuf); Label slow; address counter_addr = SafepointSynchronize::safepoint_counter_addr(); Address ca(counter_addr, relocInfo::none); __ movl (rcounter, ca); __ movq (robj, rarg1); __ testb (rcounter, 1); __ jcc (Assembler::notZero, slow); if (os::is_MP()) { __ xorq (robj, rcounter); __ xorq (robj, rcounter); // obj, since // robj ^ rcounter ^ rcounter == robj // robj is data dependent on rcounter. } __ movq (robj, Address(robj)); // *obj __ movq (roffset, rarg2); __ shrq (roffset, 2); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); switch (type) { case T_BOOLEAN: __ movzbl (rax, Address(robj, roffset, Address::times_1)); break; case T_BYTE: __ movsbl (rax, Address(robj, roffset, Address::times_1)); break; case T_CHAR: __ movzwl (rax, Address(robj, roffset, Address::times_1)); break; case T_SHORT: __ movswl (rax, Address(robj, roffset, Address::times_1)); break; case T_INT: __ movl (rax, Address(robj, roffset, Address::times_1)); break; case T_LONG: __ movq (rax, Address(robj, roffset, Address::times_1)); break; default: ShouldNotReachHere(); } __ movq (rcounter_addr, (int64_t)counter_addr); ca = Address(rcounter_addr); if (os::is_MP()) { __ xorq (rcounter_addr, rax); __ xorq (rcounter_addr, rax); // ca is data dependent on rax. } __ cmpl (rcounter, ca); __ jcc (Assembler::notEqual, slow); __ ret (0); slowcase_entry_pclist[count++] = __ pc(); __ bind (slow); address slow_case_addr; switch (type) { case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break; case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break; case T_CHAR: slow_case_addr = jni_GetCharField_addr(); break; case T_SHORT: slow_case_addr = jni_GetShortField_addr(); break; case T_INT: slow_case_addr = jni_GetIntField_addr(); break; case T_LONG: slow_case_addr = jni_GetLongField_addr(); } // tail call __ jmp (slow_case_addr, relocInfo::none); __ flush (); return fast_entry; }
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { const char *name = NULL; switch (type) { case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break; case T_BYTE: name = "jni_fast_GetByteField"; break; case T_CHAR: name = "jni_fast_GetCharField"; break; case T_SHORT: name = "jni_fast_GetShortField"; break; case T_INT: name = "jni_fast_GetIntField"; break; case T_LONG: name = "jni_fast_GetLongField"; break; default: ShouldNotReachHere(); } ResourceMark rm; BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); CodeBuffer cbuf(blob); MacroAssembler* masm = new MacroAssembler(&cbuf); address fast_entry = __ pc(); Label slow; ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr()); __ mov32 (rcounter, counter); __ mov (robj, c_rarg1); __ testb (rcounter, 1); __ jcc (Assembler::notZero, slow); if (os::is_MP()) { __ xorptr(robj, rcounter); __ xorptr(robj, rcounter); // obj, since // robj ^ rcounter ^ rcounter == robj // robj is data dependent on rcounter. } __ clear_jweak_tag(robj); __ movptr(robj, Address(robj, 0)); // *obj __ mov (roffset, c_rarg2); __ shrptr(roffset, 2); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); switch (type) { case T_BOOLEAN: __ movzbl (rax, Address(robj, roffset, Address::times_1)); break; case T_BYTE: __ movsbl (rax, Address(robj, roffset, Address::times_1)); break; case T_CHAR: __ movzwl (rax, Address(robj, roffset, Address::times_1)); break; case T_SHORT: __ movswl (rax, Address(robj, roffset, Address::times_1)); break; case T_INT: __ movl (rax, Address(robj, roffset, Address::times_1)); break; case T_LONG: __ movq (rax, Address(robj, roffset, Address::times_1)); break; default: ShouldNotReachHere(); } if (os::is_MP()) { __ lea(rcounter_addr, counter); // ca is data dependent on rax. __ xorptr(rcounter_addr, rax); __ xorptr(rcounter_addr, rax); __ cmpl (rcounter, Address(rcounter_addr, 0)); } else { __ cmp32 (rcounter, counter); } __ jcc (Assembler::notEqual, slow); __ ret (0); slowcase_entry_pclist[count++] = __ pc(); __ bind (slow); address slow_case_addr = NULL; switch (type) { case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break; case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break; case T_CHAR: slow_case_addr = jni_GetCharField_addr(); break; case T_SHORT: slow_case_addr = jni_GetShortField_addr(); break; case T_INT: slow_case_addr = jni_GetIntField_addr(); break; case T_LONG: slow_case_addr = jni_GetLongField_addr(); } // tail call __ jump (ExternalAddress(slow_case_addr)); __ flush (); return fast_entry; }
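// Both generate_fast_get_int_field0 variants implement the same speculative protocol: read the
// safepoint counter, bail out to the slow JNI accessor if a safepoint is in progress (low bit
// set), load the field, then re-read the counter and retry slowly if it changed in between.
// A minimal sketch of that retry protocol in plain C++ (hypothetical names; atomics stand in
// for the hand-built data dependencies and the registered speculative-load PC):
#include <atomic>
#include <cstdint>

extern std::atomic<uint32_t> safepoint_counter;                    // assumed global
extern int64_t slow_get_long_field(void* obj, long byte_offset);   // assumed slow path

static int64_t fast_get_long_field(void* obj, long byte_offset)
{
    uint32_t before = safepoint_counter.load(std::memory_order_acquire);
    if (before & 1)                                      // safepoint in progress
        return slow_get_long_field(obj, byte_offset);

    // Speculative load; the real stub records this PC so a fault here is rerouted to the slow path.
    int64_t value = *reinterpret_cast<const int64_t*>(static_cast<const char*>(obj) + byte_offset);

    uint32_t after = safepoint_counter.load(std::memory_order_acquire);
    if (before != after)                                 // counter moved: the field may be stale
        return slow_get_long_field(obj, byte_offset);

    return value;
}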