Example #1
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[2].u.operand;
        int registerOffset = instruction[3].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
            storePtr(regT0, instruction[5].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
        store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    } // regT1 holds newCallFrame with ArgumentCount initialized.

    store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
    store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
    move(regT1, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval();
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    addSlowCase(slowCase);

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
}
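
A reading aid for the stores above: the code addresses call-frame header slots purely by JSStack offsets. The sketch below models the layout those offsets imply; the slot order and names are illustrative, not copied from JSC's JSStack.h.

#include <cstdint>

// One Register per header slot, 8 bytes on a 64-bit build. ArgumentCount is
// a split slot: 32-bit payload = argument count, 32-bit tag = bytecode
// offset of the call site (see the two 32-bit stores above).
union Register {
    uint64_t encodedJSValue;
    struct { int32_t payload; int32_t tag; } asBits;  // little-endian
};

enum HeaderSlot {          // hypothetical ordering, for illustration only
    CodeBlockSlot,         // filled in by a JS callee
    ScopeChainSlot,        // filled in by the caller (JSStack::ScopeChain)
    CallerFrameSlot,       // caller's callFrameRegister (JSStack::CallerFrame)
    ReturnPCSlot,          // filled in by a JS callee
    ArgumentCountSlot,     // payload = argCount, tag = bytecode offset
    CalleeSlot             // the function object (JSStack::Callee)
};
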
Example #2
char* StubRoutines::generate_megamorphic_ic(MacroAssembler* masm) {
// Called from within a MIC (megamorphic inline cache), the special
// variant of PICs for compiled code (see compiledPIC.hpp/cpp).
// The MIC layout is as follows:
//
// call <this stub routine>
// selector			<--- return address (tos)
//
// Note: Don't use this for megamorphic super sends!

    Label is_smi, probe_primary_cache, probe_secondary_cache, call_method, is_methodOop, do_lookup;

    masm->bind(is_smi);				// smi case (assumed to be infrequent)
    masm->movl(ecx, Address((int)&smiKlassObj, relocInfo::external_word_type));
    masm->jmp(probe_primary_cache);

    // eax    : receiver
    // tos    : return address pointing to selector in MIC
    // tos + 4: return address of megamorphic send in compiled code
    // tos + 8: last argument/receiver
    char* entry_point = masm->pc();
    masm->popl(ebx);				// get return address (MIC cache)
    masm->test(eax, Mem_Tag);			// check if smi
    masm->jcc(Assembler::zero, is_smi);		// if so, get smi class directly
    masm->movl(ecx, Address(eax, memOopDesc::klass_byte_offset()));	// otherwise, load receiver class

    // probe primary cache
    //
    // eax: receiver
    // ebx: MIC cache pointer
    // ecx: receiver klass
    // tos: return address of megamorphic send in compiled code (ic)
    masm->bind(probe_primary_cache);		// compute hash value
    masm->movl(edx, Address(ebx));		// get selector
    // compute hash value
    masm->movl(edi, ecx);
    masm->xorl(edi, edx);
    masm->andl(edi, (primary_cache_size - 1) << 4);
    // probe cache
    masm->cmpl(ecx, Address(edi, lookupCache::primary_cache_address() + 0*oopSize));
    masm->jcc(Assembler::notEqual, probe_secondary_cache);
    masm->cmpl(edx, Address(edi, lookupCache::primary_cache_address() + 1*oopSize));
    masm->jcc(Assembler::notEqual, probe_secondary_cache);
    masm->movl(ecx, Address(edi, lookupCache::primary_cache_address() + 2*oopSize));

    // call method
    //
    // eax: receiver
    // ecx: methodOop/nmethod
    // tos: return address of megamorphic send in compiled code (ic)
    masm->bind(call_method);
    masm->test(ecx, Mem_Tag);			// check if methodOop
    masm->jcc(Assembler::notZero, is_methodOop);	// if methodOop, enter via the interpreter
    masm->jmp(ecx);				// call nmethod

    // call methodOop - setup registers
    masm->bind(is_methodOop);
    masm->xorl(ebx, ebx);				// clear ebx for interpreter
    masm->movl(edx, Address(int(&method_entry_point), relocInfo::external_word_type));
    // (Note: cannot use value in method_entry_point directly since interpreter is generated afterwards)
    //
    // eax: receiver
    // ebx: 00000000
    // ecx: methodOop
    // edx: entry point
    // tos: return address of megamorphic send in compiled code (ic)
    masm->jmp(edx);				// call method_entry

    // probe secondary cache
    //
    // eax: receiver
    // ebx: MIC cache pointer
    // ecx: receiver klass
    // edx: selector
    // edi: primary cache index
    // tos: return address of megamorphic send in compiled code (ic)
    masm->bind(probe_secondary_cache);		// compute hash value
    masm->andl(edi, (secondary_cache_size - 1) << 4);
    // probe cache
    masm->cmpl(ecx, Address(edi, lookupCache::secondary_cache_address() + 0*oopSize));
    masm->jcc(Assembler::notEqual, do_lookup);
    masm->cmpl(edx, Address(edi, lookupCache::secondary_cache_address() + 1*oopSize));
    masm->jcc(Assembler::notEqual, do_lookup);
    masm->movl(ecx, Address(edi, lookupCache::secondary_cache_address() + 2*oopSize));
    masm->jmp(call_method);

    // do lookup
    //
    // eax: receiver
    // ebx: MIC cache pointer
    // ecx: receiver klass
    // edx: selector
    // edi: secondary cache index
    // tos: return address of megamorphic send in compiled code (ic)
    masm->bind(do_lookup);
    masm->set_last_Delta_frame_after_call();
    masm->pushl(eax);				// save receiver
    masm->pushl(edx);				// pass 2nd argument: selector
    masm->pushl(ecx);				// pass 1st argument: receiver klass
    masm->call((char*)lookupCache::normal_lookup, relocInfo::runtime_call_type);
    masm->movl(ecx, eax);				// ecx: method
    masm->popl(ebx);				// pop 1st argument
    masm->popl(ebx);				// pop 2nd argument
    masm->popl(eax);				// restore receiver
    masm->reset_last_Delta_frame();
    masm->testl(ecx, ecx);			// test if method has been found in lookup cache
    masm->jcc(Assembler::notZero, call_method);

    // method not found in the lookup cache - full lookup needed (message not understood may happen)
    // eax: receiver
    // ebx: points to MIC cache
    // tos: return address of megamorphic send in compiled code
    //
    // Note: This should not happen right now, since normal_lookup always returns a value
    //       if the method exists (and 'message not understood' is not yet supported in
    //       compiled code). However, this should change at some point, and normal_lookup_cache_probe
    //       should be used instead of normal_lookup.
    masm->hlt();

    return entry_point;
}
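
The probe sequence above is a two-level lookup cache keyed on (receiver klass, selector). A hedged C++ sketch of the same logic follows; the cache sizes, the struct behind the << 4 scaling, and the function names are assumptions, not Strongtalk's declarations:

#include <cstdint>

// Each stub cache entry is four 32-bit words (hence the << 4 byte scaling):
// klass, selector, method, one unused word. Modeled as a portable struct here.
struct CacheEntry { void* klass; void* selector; void* method; };

const int kPrimarySize   = 16384;  // stands in for primary_cache_size
const int kSecondarySize = 1024;   // stands in for secondary_cache_size

CacheEntry primary_cache[kPrimarySize];
CacheEntry secondary_cache[kSecondarySize];

void* slow_lookup(void* klass, void* selector);  // lookupCache::normal_lookup stand-in

void* megamorphic_probe(void* klass, void* selector) {
    // Hash exactly as the stub does: xor receiver klass and selector; the
    // stub masks to an entry-aligned byte offset, indexed equivalently here.
    uintptr_t h = reinterpret_cast<uintptr_t>(klass) ^ reinterpret_cast<uintptr_t>(selector);
    CacheEntry& p = primary_cache[(h >> 4) & (kPrimarySize - 1)];
    if (p.klass == klass && p.selector == selector)
        return p.method;                         // methodOop or nmethod entry
    CacheEntry& s = secondary_cache[(h >> 4) & (kSecondarySize - 1)];
    if (s.klass == klass && s.selector == selector)
        return s.method;
    return slow_lookup(klass, selector);         // runtime call; may fail
}
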
Example #3
char* StubRoutines::generate_PIC_stub(MacroAssembler* masm, int pic_size) {
// Called from within a PIC (polymorphic inline cache).
// The stub interprets the methodOop section of compiled PICs.
// The methodOop section layout is as follows:
//
// call <this stub routine>
// cached klass 1	<--- return address (tos)
// cached methodOop 1
// cached klass 2
// cached methodOop2
// ...
//
// cached klass n
// cached methodOop n
//
// Note: Don't use this for polymorphic super sends!

    Label found, loop;

    // entry found at index
    //
    // eax: receiver
    // ebx: PIC table pointer
    // ecx: methodOop
    // edx: receiver klass
    // tos: return address of polymorphic send in compiled code
    masm->bind(found);
    masm->movl(edx, Address(int(&method_entry_point), relocInfo::external_word_type));
    // (Note: cannot use value in method_entry_point directly since interpreter is generated afterwards)
    masm->xorl(ebx, ebx);
    // eax: receiver
    // ebx: 00000000
    // ecx: methodOop
    masm->jmp(edx);

    // eax    : receiver
    // tos    : return address pointing to table in PIC
    // tos + 4: return address of polymorphic send in compiled code
    // tos + 8: last argument/receiver
    char* entry_point = masm->pc();
    masm->popl(ebx);				// get return address (PIC table pointer)
    masm->movl(edx, Address((int)&smiKlassObj, relocInfo::external_word_type));
    masm->test(eax, Mem_Tag);			// check if smi
    masm->jcc(Assembler::zero, loop);		// if so, class is already in edx
    masm->movl(edx, Address(eax, memOopDesc::klass_byte_offset()));	// otherwise, load receiver class

    // eax: receiver
    // ebx: PIC table pointer
    // edx: receiver klass
    // tos: return address of polymorphic send in compiled code
    masm->bind(loop);
    for (int i = 0; i < pic_size; i++) {
        // compare receiver klass with klass in PIC table at index
        masm->cmpl(edx, Address(ebx, i * PIC::PIC_methodOop_entry_size + PIC::PIC_methodOop_klass_offset));
        masm->movl(ecx, Address(ebx, i * PIC::PIC_methodOop_entry_size + PIC::PIC_methodOop_offset));
        masm->jcc(Assembler::equal, found);
    }
    assert(ic_normal_lookup_entry() != NULL, "ic_normal_lookup_entry must be generated before");
    masm->jmp(ic_normal_lookup_entry(), relocInfo::runtime_call_type);

    return entry_point;
}
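
What the unrolled compare/branch sequence emits, restated as a hedged C++ sketch; PICEntry and normal_lookup are assumed names. Note the stub's ordering: the cached methodOop is loaded before the compare result is branched on, which is safe because mov does not touch the flags, so each table entry costs exactly one conditional branch.

struct PICEntry { void* klass; void* methodOop; };  // assumed 2-word entry

void* pic_dispatch(const PICEntry* table, int pic_size, void* receiver_klass,
                   void* (*normal_lookup)(void* klass)) {
    for (int i = 0; i < pic_size; i++) {
        void* method = table[i].methodOop;  // loaded before the flags are used
        if (table[i].klass == receiver_klass)
            return method;                  // reaches "found" with the method loaded
    }
    return normal_lookup(receiver_klass);   // ic_normal_lookup_entry() stand-in
}
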
Example #4
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // rbx: methodOop
  // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rdx, rdi: garbage temp, blown away

  Register rbx_method = rbx;
  Register rcx_recv   = rcx;
  Register rax_mtype  = rax;
  Register rdx_temp   = rdx;
  Register rdi_temp   = rdi;

  // emit WrongMethodType path first, to enable jccb back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  Label invoke_generic_slow_path;
  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
  __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
  __ jcc(Assembler::notEqual, invoke_generic_slow_path);
  __ push(rax_mtype);       // required mtype
  __ push(rcx_recv);        // bad mh (1st stacked argument)
  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into rax (the 'check' register)
  {
    Register tem = rbx_method;
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ movptr(rax_mtype, Address(tem, *pchase));
      tem = rax_mtype;          // in case there is another indirection
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
  Register rdx_vmslots = rdx_temp;
  __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
  __ movptr(rcx_recv, __ argument_address(rdx_vmslots));

  trace_method_handle(_masm, "invokeExact");

  __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
  __ jump_to_method_handle_entry(rcx_recv, rdi_temp);

  // for invokeGeneric (only), apply argument and result conversions on the fly
  __ bind(invoke_generic_slow_path);
#ifdef ASSERT
  { Label L;
    __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
    __ jcc(Assembler::equal, L);
    __ stop("bad methodOop::intrinsic_id");
    __ bind(L);
  }
#endif //ASSERT
  Register rbx_temp = rbx_method;  // don't need it now

  // make room on the stack for another pointer:
  Register rcx_argslot = rcx_recv;
  __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK,
                   rcx_argslot, rbx_temp, rdx_temp);

  // load up an adapter from the calling type (Java weaves this)
  __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
  Register rdx_adapter = rdx_temp;
  // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes()));
  // deal with old JDK versions:
  __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
  __ cmpptr(rdi_temp, rdx_temp);
  Label sorry_no_invoke_generic;
  __ jcc(Assembler::below, sorry_no_invoke_generic);

  __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0));
  __ testptr(rdx_adapter, rdx_adapter);
  __ jcc(Assembler::zero, sorry_no_invoke_generic);
  __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
  // As a trusted first argument, pass the type being called, so the adapter knows
  // the actual types of the arguments and return values.
  // (Generic invokers are shared among form-families of method-type.)
  __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype);
  // FIXME: assert that rdx_adapter is of the right method-type.
  __ mov(rcx, rdx_adapter);
  trace_method_handle(_masm, "invokeGeneric");
  __ jump_to_method_handle_entry(rcx, rdi_temp);

  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
  __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize));  // recover original MH
  __ push(rax_mtype);       // required mtype
  __ push(rcx_recv);        // bad mh (1st stacked argument)
  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));

  return entry_point;
}
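
The loop that fetches the MethodType walks a -1-terminated chain of field offsets, re-dereferencing the previous value at each step. A hedged standalone sketch of that pointer chase (the function name is made up; the chain is what methodOopDesc::method_type_offsets_chain() returns):

#include <cstdint>

void* walk_offsets_chain(void* start, const int32_t* chain /* -1 terminated */) {
    void* p = start;  // the methodOop (rbx_method above)
    for (const int32_t* pchase = chain; *pchase != -1; pchase++)
        p = *reinterpret_cast<void**>(static_cast<char*>(p) + *pchase);
    return p;         // ends up holding the MethodType (rax_mtype above)
}
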
Example #5
 Address operator&(uintptr_t mask) const {
   return Address(reinterpret_cast<void*>(address_ & mask));
 }
Example #6
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    CallLinkInfo* info;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileSetupVarargsFrame(instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;
        
        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
            storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }
    
        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
    
    uint32_t locationBits = CallSiteIndex(instruction).bits();
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));

    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    checkStackPointerAlignment();
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
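
Unlike the 64-bit variant in Example #1, this version stores the callee as two 32-bit halves (payload at +0, tag at +4, below the CallerFrameAndPC adjustment). That matches JSC's 32-bit value representation, sketched minimally here; this is a little-endian rendering, not the full EncodedValueDescriptor union:

#include <cstdint>

struct EncodedValueDescriptor32 {  // minimal sketch of the 32-bit layout
    int32_t payload;               // PayloadOffset: cell pointer / int32 / bool
    int32_t tag;                   // TagOffset: JSValue::CellTag, Int32Tag, ...
};
// The split stores above write regT0 into .payload and regT1 into .tag of
// the JSStack::Callee slot of the frame being assembled under SP.
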
Example #7
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register rax_argslot,
                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
  assert(temp3_reg == noreg, "temp3 not required");
  assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                             (!arg_slots.is_register() ? rsp : arg_slots.as_register()));

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (rsp, rbp).
  __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
  verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ jccb(Assembler::less, L_bad);
    __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
    __ jccb(Assembler::zero, L_ok);
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif //ASSERT

#ifdef _LP64
  if (false) {                  // not needed, since register is positive
    // clean high bits of stack motion register (was loaded as an int)
    if (arg_slots.is_register())
      __ movslq(arg_slots.as_register(), arg_slots.as_register());
  }
#endif

  BLOCK_COMMENT("remove_arg_slots {");
  // Pull up everything shallower than rax_argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [rsp, argslot) upward by size words.  In pseudo-code:
  //   for (rdx = argslot-1; rdx >= rsp; --rdx)
  //     rdx[size] = rdx[0]
  //   argslot += size;
  //   rsp += size;
  __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
  {
    Label loop;
    __ BIND(loop);
    // pull one word up each time through the loop
    __ movptr(rbx_temp, Address(rdx_temp, 0));
    __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
    __ addptr(rdx_temp, -wordSize);
    __ cmpptr(rdx_temp, rsp);
    __ jccb(Assembler::greaterEqual, loop);
  }

  // Now move the argslot up, to point to the just-copied block.
  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
  // And adjust the argslot address to point at the deletion point.
  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
  BLOCK_COMMENT("} remove_arg_slots");
}
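
The pseudocode in the block comment above, expanded into a hedged C++ sketch of the same compaction (the stack is modeled as an array of words; the function name is made up):

#include <cstdint>

// Copy every word shallower than argslot up by arg_slots words - the stacked
// return address included - then shrink the stack. Mirrors the emitted loop.
intptr_t* remove_slots(intptr_t* rsp, intptr_t* argslot, int arg_slots) {
    for (intptr_t* p = argslot - 1; p >= rsp; --p)
        p[arg_slots] = p[0];          // pull one word up per iteration
    return rsp + arg_slots;           // the stub does this with a lea on rsp
}
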
Example #8
 Address operator-(int change) const {
     return Address(reinterpret_cast<void*>(address_ - change));
 }
AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType(
    JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor, TagRegistersMode mode)
{
    AssemblyHelpers::JumpList result;

    switch (descriptor.kind()) {
    case InferredType::Bottom:
        result.append(jump());
        break;

    case InferredType::Boolean:
        result.append(branchIfNotBoolean(regs, tempGPR));
        break;

    case InferredType::Other:
        result.append(branchIfNotOther(regs, tempGPR));
        break;

    case InferredType::Int32:
        result.append(branchIfNotInt32(regs, mode));
        break;

    case InferredType::Number:
        result.append(branchIfNotNumber(regs, tempGPR, mode));
        break;

    case InferredType::String:
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotString(regs.payloadGPR()));
        break;

    case InferredType::ObjectWithStructure:
        result.append(branchIfNotCell(regs, mode));
        result.append(
            branchStructure(
                NotEqual,
                Address(regs.payloadGPR(), JSCell::structureIDOffset()),
                descriptor.structure()));
        break;

    case InferredType::ObjectWithStructureOrOther: {
        Jump ok = branchIfOther(regs, tempGPR);
        result.append(branchIfNotCell(regs, mode));
        result.append(
            branchStructure(
                NotEqual,
                Address(regs.payloadGPR(), JSCell::structureIDOffset()),
                descriptor.structure()));
        ok.link(this);
        break;
    }

    case InferredType::Object:
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotObject(regs.payloadGPR()));
        break;

    case InferredType::ObjectOrOther: {
        Jump ok = branchIfOther(regs, tempGPR);
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotObject(regs.payloadGPR()));
        ok.link(this);
        break;
    }

    case InferredType::Top:
        break;
    }

    return result;
}
Example #10
	Address get(T offset) const {
		return Address(reinterpret_cast<uintptr_t>(m_ptr)+offset);
	}
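
Examples #5, #8 (its first fragment), and #10 are pieces of the same kind of Address wrapper. A hedged usage sketch, with a minimal stand-in class so the operator calls are concrete; none of this is the original declaration:

#include <cstdint>

class Address {  // stand-in only
public:
    explicit Address(const void* p = nullptr) : address_(reinterpret_cast<uintptr_t>(p)) {}
    Address operator&(uintptr_t mask) const { return Address(reinterpret_cast<void*>(address_ & mask)); }
    Address operator-(int change) const { return Address(reinterpret_cast<void*>(address_ - change)); }
private:
    uintptr_t address_;
};

void example(const void* p) {
    Address base(p);
    Address page = base & ~uintptr_t(0xFFF);  // align down to a 4 KiB boundary
    Address prev = base - 16;                 // step back 16 bytes
    (void)page; (void)prev;
}
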
Example #11
int main(int argc, const char * argv[])
{
    Buffer buffer;
    Storage disk;
    Record::buffer=&buffer;
    Bptree_node::buffer=&buffer;
    Bptree::buffer=&buffer;
    Record record;
    Table_info table;
    table.table_name="friendg";
    table.database="zyh";
    Attribute attribute;
    Tuple_data tuple_data(90);
    Tuple_info tuple;
    table.tuple_size=21;
    attribute.type=SQL_INT;
    attribute.size=4;
    table.attribute_list.push_back(attribute);
    attribute.type=SQL_STRING;
    attribute.size=6;
    table.attribute_list.push_back(attribute);
    attribute.type=SQL_STRING;
    attribute.size=4;
    table.attribute_list.push_back(attribute);
    attribute.type=SQL_STRING;
    attribute.size=3;
    table.attribute_list.push_back(attribute);
    attribute.attribute_name="result";
    attribute.type=SQL_FLOAT;
    attribute.size=4;
    table.attribute_list.push_back(attribute);
    tuple.info.push_back("44");
    tuple.info.push_back("abcde");
    tuple.info.push_back("ac");
    tuple.info.push_back("ac");
    tuple.info.push_back("44.4");
    Tuple_info tuple_unpack;
    for (int i=0;i<5;i++)
        tuple_unpack.info.push_back("");
    try
    {
        record.pack(table,tuple,&tuple_data);
    }
    catch (Error& error)
    {
        error.print_error();
    }
    printf("before unpack\n");
      printf("\n");
    record.unpack(table,&tuple_unpack,&tuple_data);
    for (int i=0;i<5;i++)
        std::cout<<tuple_unpack.info[i]<<std::endl;
    for (int i=0;i<30;i++)
        printf("%X  ",tuple_data.data[i]);
    printf("\n");
    Storage storage;
    record.create_table(table);
    record.insert_tuple(table, tuple_unpack);
    record.insert_tuple(table, tuple_unpack);
    record.insert_tuple(table, tuple_unpack);
    Address del_address;
    del_address=record.int_to_address(table, 12);
    record.delete_tuple(table, del_address);
    del_address=record.int_to_address(table, 41);
    record.delete_tuple(table, del_address);
    record.insert_tuple(table, tuple_unpack);
    Tuple_info new_tuple;
    new_tuple.info.push_back("55");
    new_tuple.info.push_back("qqqq");
    new_tuple.info.push_back("ac");
    new_tuple.info.push_back("ac");
    new_tuple.info.push_back("32.2");
    record.insert_tuple(table, new_tuple);
    Tuple_info get_tuple(5);
    Address next_address;
    record.get_first_tuple(table, &get_tuple, &next_address);
    while (!(next_address.block_offset==0 && next_address.file_offset==0))
    {
        for (int i=0;i<5;i++)
        {
            std::cout<<get_tuple.info[i]<<std::endl;
        }
        std::cout<<"next"<<std::endl;
        Address address=next_address;
        record.get_tuple(table, address,&get_tuple, &next_address);
    }
    for (int i=0;i<5;i++)
    {
        std::cout<<get_tuple.info[i]<<std::endl;
    }
    Bptree bptree;
    Table_info table2;
    table2.table_name="friendindex";
    table2.database="zyh";
    table2.tuple_size=2400;
    attribute.type=SQL_STRING;
    attribute.size=2400;
    table2.attribute_list.push_back(attribute);
//    Tuple_info tuple2;
//    Address add;
//    tuple2.info.push_back("a");
//    tuple2.info[0]="b";
//    Address finded;
//    Address nnext_address;
//    Tuple_info finded_tuple;
//    finded=bptree.search(table2, attribute, "b");
//    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
//    std::cout<<finded_tuple.info[0]<<std::endl;
    //1
    Tuple_info tuple2;
    tuple2.info.push_back("a");
    record.create_table(table2);
    Address add;
    add=record.insert_tuple(table2, tuple2);
    bptree.drop(table2, attribute);
    bptree.create(table2, attribute);
    bptree.insert(table2, attribute, "a", add);
    Address finded=bptree.search(table2, attribute, "a");
    Tuple_info finded_tuple;
    Address nnext_address;
    //2
    tuple2.info[0]="b";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "b", add);
    finded=bptree.search(table2, attribute, "b");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    //3
    tuple2.info[0]="c";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "c", add);
    finded=bptree.search(table2, attribute, "c");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.print(table2,attribute);

    std::cout<<finded_tuple.info[0]<<std::endl;
    //4
    tuple2.info[0]="d";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "d", add);
    finded=bptree.search(table2, attribute, "d");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    //5
    tuple2.info[0]="f";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "f", add);
    finded=bptree.search(table2, attribute, "f");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    //6
    tuple2.info[0]="e";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "e", add);
    finded=bptree.search(table2, attribute, "e");
//    record.get_tuple(table2, add, &finded_tuple, &nnext_address);
//    std::cout<<finded_tuple.info[0]<<std::endl;
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    //7
    tuple2.info[0]="g";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "g", add);
    finded=bptree.search(table2, attribute, "g");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    //8
    tuple2.info[0]="q";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "q", add);
    finded=bptree.search(table2, attribute, "q");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    //9
    tuple2.info[0]="h";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "h", add);
    finded=bptree.search(table2, attribute, "h");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    //10
    tuple2.info[0]="z";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "z", add);
//    bptree.print(table2,attribute);

    finded=bptree.search(table2, attribute, "z");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    //11
    tuple2.info[0]="y";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "y", add);
    //    bptree.print(table2,attribute);
    
    finded=bptree.search(table2, attribute, "y");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
//12
    tuple2.info[0]="w";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "w", add);
    //    bptree.print(table2,attribute);
    
    finded=bptree.search(table2, attribute, "w");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
//13
    tuple2.info[0]="u";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "u", add);
    //    bptree.print(table2,attribute);
    
    finded=bptree.search(table2, attribute, "u");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);

//14
    tuple2.info[0]="v";
    add=record.insert_tuple(table2, tuple2);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    bptree.insert(table2, attribute, "v", add);
    //    bptree.print(table2,attribute);
    
    finded=bptree.search(table2, attribute, "v");
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    bptree.print(table2,attribute);
    record.delete_tuple(table2, finded);
    bptree.deletion(table2, attribute, "v");
    add=bptree.search(table2, attribute, "v");
    add=record.insert_tuple(table2, tuple2);
    bptree.insert(table2, attribute, "v", add);
    record.get_tuple(table2, finded, &finded_tuple, &nnext_address);
    std::cout<<finded_tuple.info[0]<<std::endl;
    std::cout<<add.address_int()<<std::endl;
    Address begin,end;
    bptree.search_section(table2, attribute, false, "inf", false, "inf", &begin, &end);
    int loop_address=begin.address_int();
    for (int i=loop_address;i<end.address_int();i+=ADDRESS_SIZE)
    {
        Address now(begin.database_name,begin.file_name,i);
        Address_byte record_address;
        Block block;
        buffer.read_data(now,&block);
        block.get_block_data(now.block_offset, ADDRESS_SIZE, record_address.byte);
        Address record_add=Address(table2.database,table2.table_name,record_address.address);
        record.get_tuple(table2, record_add, &finded_tuple, &nnext_address);
        std::cout<<finded_tuple.info[0]<<std::endl;
    }
    if (end.address_int()>loop_address)
    {
        buffer.remove_file(begin);
    }
    
//        bptree.test(table,attribute);
//    record.drop_table(table);
//    uuid_t uu;
//    int i;
//    uuid_generate( uu );
//    
//    for(i=0;i<16;i++)
//    {
//        printf("%02X-",uu[i]);
//    }
//    printf("\n");
//    uuid_string_t strc;
//    uuid_unparse_upper(uu,strc);
//    std::cout<<strc<<std::endl;
    return 0;
}
Example #12
 Address Address::rootAddress()
 {
     return Address();
 }
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

  // rbx: Method*
  // rcx: scratch
  // rsi: sender sp

  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  address entry_point = __ pc();

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack: [ ret adr ] <-- rsp
  //        [ lo(arg) ]
  //        [ hi(arg) ]
  //
  if (kind == Interpreter::java_lang_math_fmaD) {
    __ movdbl(xmm2, Address(rsp, 5 * wordSize));
    __ movdbl(xmm1, Address(rsp, 3 * wordSize));
    __ movdbl(xmm0, Address(rsp, 1 * wordSize));
    __ fmad(xmm0, xmm1, xmm2, xmm0);
    __ pop(rdi);                               // get return address
    __ mov(rsp, rsi);                          // set sp to sender sp
    __ jmp(rdi);

    return entry_point;
  } else if (kind == Interpreter::java_lang_math_fmaF) {
    __ movflt(xmm2, Address(rsp, 3 * wordSize));
    __ movflt(xmm1, Address(rsp, 2 * wordSize));
    __ movflt(xmm0, Address(rsp, 1 * wordSize));
    __ fmaf(xmm0, xmm1, xmm2, xmm0);
    __ pop(rdi);                               // get return address
    __ mov(rsp, rsi);                          // set sp to sender sp
    __ jmp(rdi);

    return entry_point;
  }

  __ fld_d(Address(rsp, 1*wordSize));
  switch (kind) {
    case Interpreter::java_lang_math_sin :
        __ subptr(rsp, 2 * wordSize);
        __ fstp_d(Address(rsp, 0));
        if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) {
          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsin())));
        } else {
          __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dsin));
        }
        __ addptr(rsp, 2 * wordSize);
        break;
    case Interpreter::java_lang_math_cos :
        __ subptr(rsp, 2 * wordSize);
        __ fstp_d(Address(rsp, 0));
        if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) {
          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcos())));
        } else {
          __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dcos));
        }
        __ addptr(rsp, 2 * wordSize);
        break;
    case Interpreter::java_lang_math_tan :
        __ subptr(rsp, 2 * wordSize);
        __ fstp_d(Address(rsp, 0));
        if (StubRoutines::dtan() != NULL) {
          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtan())));
        } else {
          __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
        }
        __ addptr(rsp, 2 * wordSize);
        break;
    case Interpreter::java_lang_math_sqrt:
        __ fsqrt();
        break;
    case Interpreter::java_lang_math_abs:
        __ fabs();
        break;
    case Interpreter::java_lang_math_log:
        __ subptr(rsp, 2 * wordSize);
        __ fstp_d(Address(rsp, 0));
        if (StubRoutines::dlog() != NULL) {
          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
        } else {
          __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog));
        }
        __ addptr(rsp, 2 * wordSize);
        break;
    case Interpreter::java_lang_math_log10:
        __ subptr(rsp, 2 * wordSize);
        __ fstp_d(Address(rsp, 0));
        if (StubRoutines::dlog10() != NULL) {
          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog10())));
        } else {
          __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10));
        }
        __ addptr(rsp, 2 * wordSize);
        break;
    case Interpreter::java_lang_math_pow:
      __ fld_d(Address(rsp, 3*wordSize)); // second argument
      __ subptr(rsp, 4 * wordSize);
      __ fstp_d(Address(rsp, 0));
      __ fstp_d(Address(rsp, 2 * wordSize));
      if (StubRoutines::dpow() != NULL) {
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
      } else {
        __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dpow));
      }
      __ addptr(rsp, 4 * wordSize);
      break;
    case Interpreter::java_lang_math_exp:
      __ subptr(rsp, 2*wordSize);
      __ fstp_d(Address(rsp, 0));
      if (StubRoutines::dexp() != NULL) {
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
      } else {
        __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dexp));
      }
      __ addptr(rsp, 2*wordSize);
      break;
    default                              :
        ShouldNotReachHere();
  }

  // return double result in xmm0 for interpreter and compilers.
  if (UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ fstp_d(Address(rsp, 0));
    __ movdbl(xmm0, Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  // done, result in FPU ST(0) or XMM0
  __ pop(rdi);                               // get return address
  __ mov(rsp, rsi);                          // set sp to sender sp
  __ jmp(rdi);

  return entry_point;
}
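
A note on the fmaD offsets above: on this 32-bit entry each double argument occupies two stack words and the call pushed a return address, so the argument that is i doubles down from the top of the operand stack starts at rsp + (1 + 2*i) * wordSize. A small sketch of that arithmetic (wordSize = 4 is an assumption of the 32-bit port):

const int wordSize = 4;  // 32-bit interpreter stack words (assumed)

// Byte offset of the i-th double above the return address, i = 0 at the top.
int double_arg_offset(int i) { return (1 + 2 * i) * wordSize; }
// i = 0, 1, 2 gives 1, 3, 5 * wordSize: exactly the three movdbl loads above.
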
Example #14
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    killLastResultRegister();

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
                       && arguments == m_codeBlock->argumentsRegister()
                       && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        lshift32(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        neg32(regT0);
        signExtend32ToPtr(regT0, regT0);
        end.append(branchAdd64(Zero, TrustedImm32(1), regT0));
        // regT0: -argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue, regT0);
    stubCall.addArgument(arguments, regT0);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT1);

    if (canOptimize)
        end.link(this);
}
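
The copy loop above uses a negative-index idiom: after neg32 and the first branchAdd64, regT0 runs from -(argCount - 1) up to -1, so one register serves both as the scaled BaseIndex and as the loop terminator. A hedged C++ restatement ('this' was already stored before the loop, so only the real arguments are copied):

#include <cstdint>

// src/dst point at the old and new call frames; thisOffset plays the role of
// CallFrame::thisArgumentOffset(). Copies argCount - 1 argument slots.
void copy_varargs(int64_t* dst, const int64_t* src,
                  int64_t argCountIncludingThis, int64_t thisOffset) {
    for (int64_t i = -(argCountIncludingThis - 1); i != 0; ++i)
        dst[thisOffset + i] = src[thisOffset + i];
}
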
Example #15
TObject *luaA_Address(lua_Object o) {
	return Address(o);
}
Example #16
int32 lua_isnil(lua_Object o) {
	return (o == LUA_NOOBJECT) || (ttype(Address(o)) == LUA_T_NIL);
}
Example #17
int32 lua_currentline(lua_Function func) {
	TObject *f = Address(func);
	return (f + 1 < lua_state->stack.top && (f + 1)->ttype == LUA_T_LINE) ? (f + 1)->value.i : -1;
}
Example #18
int32 lua_istable(lua_Object o) {
	return (o != LUA_NOOBJECT) && (ttype(Address(o)) == LUA_T_ARRAY);
}
Example #19
void OptoRuntime::generate_exception_blob() {

  // Capture info about frame layout
  enum layout {
    thread_off,                 // last_java_sp
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off,
    return_off,                 // slot for return address
    framesize
  };

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  CodeBuffer   buffer("exception_blob", 512, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  OopMapSet *oop_maps = new OopMapSet();

  address start = __ pc();

  __ push(rdx);
  __ subptr(rsp, return_off * wordSize);   // Prolog!

  // rbp, location is implicitly known
  __ movptr(Address(rsp, rbp_off * wordSize), rbp);

  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame where the exception happened in.
  __ get_thread(rcx);
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), rax);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()),  rdx);

  // This call does all the hard work.  It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
  __ set_last_Java_frame(rcx, noreg, noreg, NULL);

  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));

  // No registers to map, rbp is known implicitly
  oop_maps->add_gc_map( __ pc() - start,  new OopMap( framesize, 0 ));
  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false, false);

  // Restore callee-saved registers
  __ movptr(rbp, Address(rsp, rbp_off * wordSize));

  __ addptr(rsp, return_off * wordSize);   // Epilog!
  __ pop(rdx); // Exception pc

  // rax: exception handler for given <exception oop/exception pc>

  // Restore SP from BP if the exception PC is a MethodHandle call.
  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp);

  // We have a handler in rax, (could be deopt blob)
  // rdx - throwing pc, deopt blob will need it.

  __ push(rax);

  // Get the exception
  __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), NULL_WORD);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);

  __ pop(rcx);

  // rax: exception oop
  // rcx: exception handler
  // rdx: exception pc
  __ jmp (rcx);

  // -------------
  // make sure all code is generated
  masm->flush();

  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize);
}
Example #20
int32 lua_isuserdata(lua_Object o) {
	return (o != LUA_NOOBJECT) && (ttype(Address(o)) == LUA_T_USERDATA);
}
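
Examples #15 through #18 and #20 all follow the same shape: guard against LUA_NOOBJECT, then inspect the tag of the stack slot behind the handle via Address(). A hedged usage sketch combining two of them; check_table_arg is a made-up helper, and lua_error is the Lua 3.x-era error call:

static void check_table_arg(lua_Object o) {
	if (lua_isnil(o) || !lua_istable(o))
		lua_error("table expected");
}
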
Example #21
//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - rcx: receiver method handle
  // - rax: method handle type (only used by the check_mtype entry point)
  // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // - rdx: garbage temp, can blow away

  const Register rcx_recv    = rcx;
  const Register rax_argslot = rax;
  const Register rbx_temp    = rbx;
  const Register rdx_temp    = rdx;

  // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
  // and gen_c2i_adapter (from compiled calls):
  const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);

  // Argument registers for _raise_exception.
  // 32-bit: Pass first two oop/int args in registers ECX and EDX.
  const Register rarg0_code     = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  const Register rarg1_actual   = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
  const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
  assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp);

  guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // some handy addresses
  Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );
  Address rbx_method_fce(     rbx,      methodOopDesc::from_compiled_offset() );

  Address rcx_mh_vmtarget(    rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
  Address rcx_dmh_vmindex(    rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );

  Address rcx_bmh_vmargslot(  rcx_recv, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_bmh_argument(   rcx_recv, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes() );

  Address rcx_amh_vmargslot(  rcx_recv, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_amh_argument(   rcx_recv, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes() );
  Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
  Address vmarg;                // __ argument_address(vmargslot)

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();                   // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

  trace_method_handle(_masm, entry_name(ek));

  BLOCK_COMMENT(entry_name(ek));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Since we use the compiled entry, arguments are
      // expected in compiler argument registers.
      assert(raise_exception_method(), "must be set");
      assert(raise_exception_method()->from_compiled_entry(), "method must be linked");

      const Register rdi_pc = rax;
      __ pop(rdi_pc);  // caller PC
      __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started

      Register rbx_method = rbx_temp;
      Label L_no_method;
      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
      __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, L_no_method);

      const int jobject_oop_offset = 0;
      __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, L_no_method);
      __ verify_oop(rbx_method);

      NOT_LP64(__ push(rarg2_required));
      __ push(rdi_pc);         // restore caller PC
      __ jmp(rbx_method_fce);  // jump to compiled entry

      // Do something that at least causes a valid throw from the interpreter.
      __ bind(L_no_method);
      __ push(rarg2_required);
      __ push(rarg1_actual);
      __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      Register rbx_method = rbx_temp;
      __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop
      __ verify_oop(rbx_method);
      // same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
        __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
        __ null_check(rcx_recv);
        __ verify_oop(rcx_recv);
      }
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokevirtual_mh:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rbx_index = rbx_temp;
      __ movl(rbx_index, rcx_dmh_vmindex);
      // Note:  The verifier allows us to ignore rcx_mh_vmtarget.
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      // get target methodOop & entry point
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
      Address vtable_entry_addr(rax_klass,
                                rbx_index, Address::times_ptr,
                                base + vtableEntry::method_offset_in_bytes());
      Register rbx_method = rbx_temp;
      __ movptr(rbx_method, vtable_entry_addr);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokeinterface_mh:
    {
      // same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:

      // pick out the interface and itable index from the MH.
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rdx_intf  = rdx_temp;
      Register rbx_index = rbx_temp;
      __ load_heap_oop(rdx_intf, rcx_mh_vmtarget);
      __ movl(rbx_index, rcx_dmh_vmindex);
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      Register rdi_temp   = rdi;
      Register rbx_method = rbx_index;

      // get interface klass
      Label no_such_interface;
      __ verify_oop(rdx_intf);
      __ lookup_interface_method(rax_klass, rdx_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 rdi_temp,
                                 no_such_interface);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
      __ hlt();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ mov(rbx_temp, rcx_recv);  // rarg2_required might be RCX
      assert_different_registers(rarg2_required, rbx_temp);
      __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset));  // required interface
      __ mov(   rarg1_actual,   rbx_temp);                               // bad receiver
      __ movl(  rarg0_code,     (int) Bytecodes::_invokeinterface);      // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // make room for the new argument:
      __ movl(rax_argslot, rcx_bmh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp);

      // store bound argument into the new stack slot:
      __ load_heap_oop(rbx_temp, rcx_bmh_argument);
      if (arg_type == T_OBJECT) {
        __ movptr(Address(rax_argslot, 0), rbx_temp);
      } else {
        Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        const int arg_size = type2aelembytes(arg_type);
        __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp);
        __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp);
      }

      if (direct_to_method) {
        Register rbx_method = rbx_temp;
        __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
        __ verify_oop(rbx_method);
        __ jmp(rbx_method_fie);
      } else {
        __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
        __ verify_oop(rcx_recv);
        __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // immediately jump to the next MH layer:
    __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    __ verify_oop(rcx_recv);
    __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // temps:
      Register rbx_klass = rbx_temp; // interesting AMH data

      // check a reference argument before jumping to the next layer of MH:
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      // What class are we casting to?
      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label done;
      __ movptr(rdx_temp, vmarg);
      __ testptr(rdx_temp, rdx_temp);
      __ jcc(Assembler::zero, done);         // no cast if null
      __ load_klass(rdx_temp, rdx_temp);

      // live at this point:
      // - rbx_klass:  klass required by the target method
      // - rdx_temp:   argument klass to test
      // - rcx_recv:   adapter method handle
      __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);

      // If we get here, the type check failed!
      // Call the wrong_method_type stub, passing the failing argument type in rax.
      Register rax_mtype = rax_argslot;
      __ movl(rax_argslot, rcx_amh_vmargslot);  // reload argslot field
      __ movptr(rdx_temp, vmarg);

      assert_different_registers(rarg2_required, rdx_temp);
      __ load_heap_oop(rarg2_required, rcx_amh_argument);             // required class
      __ mov(          rarg1_actual,   rdx_temp);                     // bad object
      __ movl(         rarg0_code,     (int) Bytecodes::_checkcast);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(done);
      // get the new MH:
      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place conversion to int or an int subword
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      switch (ek) {
      case _adapter_opt_i2i:
        __ movl(rdx_temp, vmarg);
        break;
      case _adapter_opt_l2i:
        {
          // just delete the extra slot; on a little-endian machine we keep the first
          __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
          remove_arg_slots(_masm, -stack_move_unit(),
                           rax_argslot, rbx_temp, rdx_temp);
          vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
          __ movl(rdx_temp, vmarg);
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(rdx_temp, value_offset);
          __ movl(rdx_temp, Address(rdx_temp, value_offset));
          // We load this as a word.  Because we are little-endian,
          // the low bits will be correct, but the high bits may need cleaning.
          // The vminfo will guide us to clean those bits.
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // Do the requested conversion and store the value.
      Register rbx_vminfo = rbx_temp;
      __ movl(rbx_vminfo, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // get the new MH:
      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      // (now we are done with the old MH)

      // original 32-bit vmdata word must be of this form:
      //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
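      // (Sketch of the shift sequence below, in C for orientation, assuming
      // the shift count rides in the low bits of vminfo and the sign flag
      // sits outside the 5 bits x86 actually takes from CL:
      //   n = vminfo & 31;
      //   x <<= n;
      //   x = is_signed ? (x >> n) : (int)((unsigned)x >> n);
      // i.e. strip the high n bits and re-extend, which both truncates to the
      // subword and cleans the high bits left by the raw 32-bit load.)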
      __ xchgptr(rcx, rbx_vminfo);                // free rcx for shifts
      __ shll(rdx_temp /*, rcx*/);
      Label zero_extend, done;
      __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
      __ jccb(Assembler::zero, zero_extend);

      // this path is taken for int->byte, int->short
      __ sarl(rdx_temp /*, rcx*/);
      __ jmpb(done);

      __ bind(zero_extend);
      // this is taken for int->char
      __ shrl(rdx_temp /*, rcx*/);

      __ bind(done);
      __ movl(vmarg, rdx_temp);  // Store the value.
      __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv

      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place int-to-long or ref-to-long conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);

      // on a little-endian machine we keep the first slot and add another after
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                       rax_argslot, rbx_temp, rdx_temp);
      Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
#ifdef _LP64
          __ movslq(rdx_temp, vmarg1);  // Load sign-extended
          __ movq(vmarg1, rdx_temp);    // Store into first slot
#else
          __ movl(rdx_temp, vmarg1);
          __ sarl(rdx_temp, BitsPerInt - 1);  // __ extend_sign()
          __ movl(vmarg2, rdx_temp); // store second word
#endif
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg1);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(rdx_temp, value_offset);
#ifdef _LP64
          __ movq(rbx_temp, Address(rdx_temp, value_offset));
          __ movq(vmarg1, rbx_temp);
#else
          __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
          __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
          __ movl(vmarg1, rbx_temp);
          __ movl(vmarg2, rdx_temp);
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      if (ek == _adapter_opt_f2d) {
        insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                         rax_argslot, rbx_temp, rdx_temp);
      }
      Address vmarg(rax_argslot, -Interpreter::stackElementSize);
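
      // (Note, sketch-level: for f2d the insert above just widened the arg to
      // two slots, so vmarg now addresses room for a double; for d2f the
      // narrowing conversion runs first and the surplus slot is removed below.)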

#ifdef _LP64
      if (ek == _adapter_opt_f2d) {
        __ movflt(xmm0, vmarg);
        __ cvtss2sd(xmm0, xmm0);
        __ movdbl(vmarg, xmm0);
      } else {
        __ movdbl(xmm0, vmarg);
        __ cvtsd2ss(xmm0, xmm0);
        __ movflt(vmarg, xmm0);
      }
#else //_LP64
      if (ek == _adapter_opt_f2d) {
        __ fld_s(vmarg);        // load float to ST0
        __ fstp_d(vmarg);       // store double
      } else {
        __ fld_d(vmarg);        // load double to ST0
        __ fstp_s(vmarg);       // store single
      }
#endif //_LP64

      if (ek == _adapter_opt_d2f) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'vminfo' is the second
      Register rbx_destslot = rbx_temp;
      __ movl(rbx_destslot, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ andl(rbx_destslot, CONV_VMINFO_MASK);
      __ lea(rbx_destslot, __ argument_address(rbx_destslot));
      DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));
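
      // (C model of the three shapes below, for orientation only; src is
      // rax_argslot, dest is rbx_destslot, tmp is the pushed chunk:
      //   swap:     for (i = 0; i < swap_bytes; i += wordSize)
      //               exchange src[i] and dest[i];
      //   rot up:   memcpy(tmp, src, swap_bytes);                 // src > dest
      //             memmove(dest + swap_bytes, dest, src - dest);
      //             memcpy(dest, tmp, swap_bytes);
      //   rot down: memcpy(tmp, src, swap_bytes);                 // src < dest
      //             memmove(src, src + swap_bytes, dest - src);
      //             memcpy(dest, tmp, swap_bytes);)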

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ movptr(rdx_temp, Address(rax_argslot , i));
          __ push(rdx_temp);
          __ movptr(rdx_temp, Address(rbx_destslot, i));
          __ movptr(Address(rax_argslot, i), rdx_temp);
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      } else {
        // push the first chunk, which is going to get overwritten
        for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
          __ movptr(rdx_temp, Address(rax_argslot, i));
          __ push(rdx_temp);
        }

        if (rotate > 0) {
          // rotate upward
          __ subptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::aboveEqual, L_ok);
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot down to destslot, copying contiguous data upwards
          // pseudo-code:
          //   rax = src_addr - swap_bytes
          //   rbx = dest_addr
          //   while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
          __ addptr(rax_argslot, -wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::aboveEqual, loop);
        } else {
          __ addptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::belowEqual, L_ok);
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot up to destslot, copying contiguous data downwards
          // pseudo-code:
          //   rax = src_addr + swap_bytes
          //   rbx = dest_addr
          //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
          __ addptr(rax_argslot, wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::belowEqual, loop);
        }

        // pop the original first chunk into the destination slot, now free
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      }

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'stack_move' is negative number of words to duplicate
      Register rdx_stack_move = rdx_temp;
      __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
      __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);

      int argslot0_num = 0;
      Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
      assert(argslot0.base() == rsp, "");
      int pre_arg_size = argslot0.disp();
      assert(pre_arg_size % wordSize == 0, "");
      assert(pre_arg_size > 0, "must include PC");

      // remember the old rsp+1 (argslot[0])
      Register rbx_oldarg = rbx_temp;
      __ lea(rbx_oldarg, argslot0);

      // move rsp down to make room for dups
      __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));

      // compute the new rsp+1 (argslot[0])
      Register rdx_newarg = rdx_temp;
      __ lea(rdx_newarg, argslot0);

      __ push(rdi);             // need a temp
      // (preceding push must be done after arg addresses are taken!)

      // pull down the pre_arg_size data (PC)
      for (int i = -pre_arg_size; i < 0; i += wordSize) {
        __ movptr(rdi, Address(rbx_oldarg, i));
        __ movptr(Address(rdx_newarg, i), rdi);
      }

      // copy from rax_argslot[0...] down to new_rsp[1...]
      // pseudo-code:
      //   rbx = old_rsp+1
      //   rdx = new_rsp+1
      //   rax = argslot
      //   while (rdx < rbx) *rdx++ = *rax++
      Label loop;
      __ bind(loop);
      __ movptr(rdi, Address(rax_argslot, 0));
      __ movptr(Address(rdx_newarg, 0), rdi);
      __ addptr(rax_argslot, wordSize);
      __ addptr(rdx_newarg, wordSize);
      __ cmpptr(rdx_newarg, rbx_oldarg);
      __ jccb(Assembler::less, loop);

      __ pop(rdi);              // restore temp

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      __ push(rdi);             // need a temp
      // (must do previous push after argslot address is taken)

      // 'stack_move' is number of words to drop
      Register rdi_stack_move = rdi;
      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
      remove_arg_slots(_masm, rdi_stack_move,
                       rax_argslot, rbx_temp, rdx_temp);

      __ pop(rdi);              // restore temp

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      int length_constant = get_ek_adapter_opt_spread_info(ek);

      // find the address of the array argument
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // grab some temps
      { __ push(rsi); __ push(rdi); }
      // (preceding pushes must be done after argslot address is taken!)
#define UNPUSH_RSI_RDI \
      { __ pop(rdi); __ pop(rsi); }

      // rax_argslot points both to the array and to the first output arg
      vmarg = Address(rax_argslot, 0);

      // Get the array value.
      Register  rsi_array       = rsi;
      Register  rdx_array_klass = rdx_temp;
      BasicType elem_type       = T_OBJECT;
      int       length_offset   = arrayOopDesc::length_offset_in_bytes();
      int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
      __ movptr(rsi_array, vmarg);
      Label skip_array_check;
      if (length_constant == 0) {
        __ testptr(rsi_array, rsi_array);
        __ jcc(Assembler::zero, skip_array_check);
      }
      __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
      __ load_klass(rdx_array_klass, rsi_array);

      // Check the array type.
      Register rbx_klass = rbx_temp;
      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label ok_array_klass, bad_array_klass, bad_array_length;
      __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
      // If we get here, the type check failed!
      __ jmp(bad_array_klass);
      __ bind(ok_array_klass);

      // Check length.
      if (length_constant >= 0) {
        __ cmpl(Address(rsi_array, length_offset), length_constant);
      } else {
        Register rbx_vminfo = rbx_temp;
        __ movl(rbx_vminfo, rcx_amh_conversion);
        assert(CONV_VMINFO_SHIFT == 0, "preshifted");
        __ andl(rbx_vminfo, CONV_VMINFO_MASK);
        __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
      }
      __ jcc(Assembler::notEqual, bad_array_length);

      Register rdx_argslot_limit = rdx_temp;

      // Array length checks out.  Now insert any required stack slots.
      if (length_constant == -1) {
        // Form a pointer to the end of the affected region.
        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
        // 'stack_move' is negative number of words to insert
        Register rdi_stack_move = rdi;
        __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
        __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
        Register rsi_temp = rsi_array;  // spill this
        insert_arg_slots(_masm, rdi_stack_move, -1,
                         rax_argslot, rbx_temp, rsi_temp);
        // reload the array (since rsi was killed)
        __ movptr(rsi_array, vmarg);
      } else if (length_constant > 1) {
        int arg_mask = 0;
        int new_slots = (length_constant - 1);
        for (int i = 0; i < new_slots; i++) {
          arg_mask <<= 1;
          arg_mask |= _INSERT_REF_MASK;
        }
        insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
                         rax_argslot, rbx_temp, rdx_temp);
      } else if (length_constant == 1) {
        // no stack resizing required
      } else if (length_constant == 0) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      // Copy from the array to the new slots.
      // Note: Stack change code preserves integrity of rax_argslot pointer.
      // So even after slot insertions, rax_argslot still points to first argument.
      if (length_constant == -1) {
        // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
        Register rsi_source = rsi_array;
        __ lea(rsi_source, Address(rsi_array, elem0_offset));
        Label loop;
        __ bind(loop);
        __ movptr(rbx_temp, Address(rsi_source, 0));
        __ movptr(Address(rax_argslot, 0), rbx_temp);
        __ addptr(rsi_source, type2aelembytes(elem_type));
        __ addptr(rax_argslot, Interpreter::stackElementSize);
        __ cmpptr(rax_argslot, rdx_argslot_limit);
        __ jccb(Assembler::less, loop);
      } else if (length_constant == 0) {
        __ bind(skip_array_check);
        // nothing to copy
      } else {
        int elem_offset = elem0_offset;
        int slot_offset = 0;
        for (int index = 0; index < length_constant; index++) {
          __ movptr(rbx_temp, Address(rsi_array, elem_offset));
          __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
          elem_offset += type2aelembytes(elem_type);
          slot_offset += Interpreter::stackElementSize;
        }
      }

      // Arguments are spread.  Move to next method handle.
      UNPUSH_RSI_RDI;
      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);

      __ bind(bad_array_klass);
      UNPUSH_RSI_RDI;
      assert(!vmarg.uses(rarg2_required), "must be different registers");
      __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset));  // required type
      __ movptr(rarg1_actual,   vmarg);                                         // bad array
      __ movl(  rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(bad_array_length);
      UNPUSH_RSI_RDI;
      assert(!vmarg.uses(rarg2_required), "must be different registers");
      __ mov   (rarg2_required, rcx_recv);                       // AMH requiring a certain length
      __ movptr(rarg1_actual,   vmarg);                          // bad array
      __ movl(  rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

#undef UNPUSH_RSI_RDI
    }
    break;
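
    // (C-level model of the spread above, orientation only:
    //   array = *(oop*) argslot;                // type-checked against amh.argument
    //   if (array->length != expected)          // constant or vminfo length
    //     goto bad_array_length;
    //   resize the stack so length slots replace the single array slot;
    //   for (i = 0; i < length; i++) argslot[i] = array->elem[i];
    // the stack-change helpers keep rax_argslot pointing at the first slot.)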

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:  ShouldNotReachHere();
  }
  __ hlt();

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}
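
For reference, a hedged sketch of the conversion-word decoding that several of
the adapter cases above perform inline (CONV_VMINFO_SHIFT == 0 is asserted in
the code itself; the mask and stack-move shift values below are illustrative
assumptions, not copied from methodHandles.hpp):

#include <cstdint>

constexpr int32_t CONV_VMINFO_MASK      = 0xFF; // assumed: vminfo is the low byte
constexpr int     CONV_STACK_MOVE_SHIFT = 20;   // assumed: signed field in the top bits

inline int conv_vminfo(int32_t conv) {
    return conv & CONV_VMINFO_MASK;             // "preshifted": no shift needed
}
inline intptr_t conv_stack_move(int32_t conv) {
    // arithmetic right shift keeps the sign, so negative moves (insertions)
    // survive, mirroring movl2ptr + sarptr in the stub
    return (intptr_t)(conv >> CONV_STACK_MOVE_SHIFT);
}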
Beispiel #22
0
int32 lua_isnumber(lua_Object o) {
	return (o != LUA_NOOBJECT) && (tonumber(Address(o)) == 0);
}
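
A usage sketch for the snippet above (hedged: this is the classic pre-5.x Lua
C API these examples come from, where lua_getparam fetches a call argument and
tonumber() returns 0 on success -- hence the "== 0" test):

#include <stdio.h>
#include "lua.h"  /* assumed old-API header providing lua_getparam/lua_getnumber */

static void print_if_number(void) {
	lua_Object o = lua_getparam(1);
	if (lua_isnumber(o))
		printf("%g\n", (double) lua_getnumber(o));
}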
Beispiel #23
0
 Address operator-(int change) const {
   change = RBX_MEMORY_ALIGN(change);
   return Address(reinterpret_cast<void*>(address_ - change));
 }
Beispiel #24
0
const char *lua_getstring (lua_Object object) {
	if (object == LUA_NOOBJECT || tostring(Address(object)))
		return NULL;
	else
		return (svalue(Address(object)));
}
Beispiel #25
0
 static Address null() {
   return Address(0);
 }
Beispiel #26
0
void *lua_getuserdata(lua_Object object) {
	if (object == LUA_NOOBJECT || ttype(Address(object)) != LUA_T_USERDATA)
		return NULL;
	else
		return tsvalue(Address(object))->globalval.value.ts;
}
Beispiel #27
0
char* StubRoutines::generate_call_DLL(MacroAssembler* masm, bool async) {
// The following routine provides the extra frame for DLL calls.
// Note: 1. Its code has to be *outside* the interpreters code! (see also: DLL calls in interpreter)
//       2. This routine is also used by the compiler! Make sure to adjust the parameter
//          passing in the compiler as well (x86_node.cpp, codeGenerator.cpp), when changing this code!
//
// Stack layout immediately after calling the DLL:
// (The DLL state word is used for asynchronous DLL/interrupted DLL calls)
//
//	  ...					DLL land
// esp->[ return addr	] ------------------------------------------------------------------------
//	[ unboxed arg 1	]			C land
//	  ...
// 	[ unboxed arg n	]
// 	[ ptr to itself	] <----	ptr to itself	to check that the right no. of arguments is used
// 	[ DLL state	]			used for DLL/interrupted DLL calls
// 	[ return addr	] ------------------------------------------------------------------------
// 	[ argument n	] <----	last_Delta_sp	Delta land
// 	[ argument n-1	]
// 	  ...
// 	[ argument 1	]
//	[ return proxy	]
//	  ...
// ebp->[ previous ebp	] <----	last_Delta_fp
//
// The routine expects 3 arguments to be passed in registers as follows:
//
// ebx: number of arguments
// ecx: address of last argument
// edx: DLL function entry point
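//
// (C model of the conversion loop below, orientation only; Mem_Tag, Tag_Size
// and pointer_offset are the Strongtalk tagging constants:
//   for (int i = 0; i < ebx; i++, ecx += oopSize) {
//     int v = *(int*) ecx;
//     push((v & Mem_Tag) ? *(int*)(v + pointer_offset)  // proxy -> unboxed value
//                        : (v >> Tag_Size));            // smi -> C int
//   }
// arguments are thus pushed last-to-first, leaving unboxed arg 1 on top.)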

    Label loop_entry, no_arguments, smi_argument, next_argument, wrong_call;

    char* entry_point = masm->pc();
    masm->set_last_Delta_frame_after_call();
    masm->pushl(0);				// initial value for DLL state
    masm->movl(esi, esp);				// save DLL state address
    masm->pushl(esp);				// to check that the right no. of arguments is used
    if (TraceDLLCalls) {				// call trace routine (C to C call, no special setup required)
        masm->pushl(esi);				// save DLL state address
        masm->pushl(ebx);				// pass arguments in reverse order
        masm->pushl(ecx);
        masm->pushl(edx);
        masm->call((char*)trace_DLL_call_1, relocInfo::runtime_call_type);
        masm->popl(edx);				// restore registers
        masm->popl(ecx);
        masm->popl(ebx);
        masm->popl(esi);				// restore DLL state address
    }
    masm->testl(ebx, ebx);			// if number of arguments != 0 then
    masm->jcc(MacroAssembler::notZero, loop_entry);	// convert arguments

    // done with all the arguments
    masm->bind(no_arguments);
    if (async) {
        masm->pushl(edx);
        masm->pushl(esi);				// pass DLL state address
        masm->call((char*)DLLs::enter_async_call, relocInfo::runtime_call_type);
        masm->popl(esi);				// discard argument
        masm->popl(edx);				// restore registers
    }

    // do DLL call
    masm->call(edx);				// eax := dll call (pops arguments itself)

    // check no. of arguments
    masm->popl(ebx);				// must be the same as esp after popping
    masm->cmpl(ebx, esp);
    masm->jcc(Assembler::notEqual, wrong_call);

    // top of stack contains DLL state
    masm->movl(ebx, esp);				// get DLL state address
    masm->pushl(eax);				// save result
    masm->pushl(ebx);				// pass DLL state address
    char* exit_dll = (char*)(async ? DLLs::exit_async_call : DLLs::exit_sync_call);
    masm->call(exit_dll, relocInfo::runtime_call_type);
    masm->popl(ebx);				// discard argument
    masm->popl(eax);				// restore result

    if (TraceDLLCalls) {				// call trace routine (C to C call, no special setup required)
        masm->pushl(eax);				// pass result
        masm->call((char*)trace_DLL_call_2, relocInfo::runtime_call_type);
        masm->popl(eax);				// restore result
    }
    masm->popl(ebx);				// discard DLL state word
    masm->reset_last_Delta_frame();
    masm->ret(0);

    // wrong DLL has been called (no. of popped arguments is incorrect)
    masm->bind(wrong_call);
    masm->call((char*)wrong_DLL_call, relocInfo::runtime_call_type);
    masm->hlt();					// should never reach here

    // smi argument -> convert it to int
    masm->bind(smi_argument);
    masm->sarl(eax, Tag_Size);			// convert smi into C int

    // next argument
    masm->bind(next_argument);
    masm->pushl(eax);				// push converted argument
    masm->addl(ecx, oopSize);			// go to previous argument
    masm->decl(ebx);				// decrement argument counter
    masm->jcc(MacroAssembler::zero, no_arguments);	// continue until no arguments

    // loop
    masm->bind(loop_entry);
    // ebx: argument count
    // ecx: current argument address
    masm->movl(eax, Address(ecx));		// get argument
    masm->testb(eax, Mem_Tag);			// check if smi or proxy
    masm->jcc(MacroAssembler::zero, smi_argument);

    // boxed argument -> unbox it
    masm->movl(eax, Address(eax, pointer_offset));// unbox proxy
    masm->jmp(next_argument);

    return entry_point;
}
Beispiel #28
0
lua_CFunction lua_getcfunction(lua_Object object) {
	if (!lua_iscfunction(object))
		return NULL;
	else
		return fvalue(luaA_protovalue(Address(object)));
}
Beispiel #29
0
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset + STACK_BIAS));
}
Beispiel #30
0
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
}
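
A note on the final alignment step (hedged sketch; right_n_bits(n) is the mask
of the low n bits, so TwoWordAlignmentMask == 2*wordSize - 1): the and-not
rounds the byte offset down to a multiple of 2*wordSize, honoring the "keep
the stack pointer 2*wordSize aligned" comment above.

#include <cstdint>

constexpr intptr_t wordSize = sizeof(intptr_t);
constexpr intptr_t TwoWordAlignmentMask = 2 * wordSize - 1;

inline intptr_t masked_sp_move(intptr_t offset_bytes) {
    // equivalent of __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg)
    return offset_bytes & ~TwoWordAlignmentMask;
}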