// Return the vector version of a scalar operation node.
VectorNode* VectorNode::make(Compile* C, int opc, Node* n1, Node* n2, uint vlen, BasicType bt) {
  const TypeVect* vt = TypeVect::make(bt, vlen);
  int vopc = VectorNode::opcode(opc, bt);
  // This method should not be called for unimplemented vectors.
  guarantee(vopc > 0, err_msg_res("Vector for '%s' is not implemented", NodeClassNames[opc]));

  switch (vopc) {
  case Op_AddVB: return new (C) AddVBNode(n1, n2, vt);
  case Op_AddVS: return new (C) AddVSNode(n1, n2, vt);
  case Op_AddVI: return new (C) AddVINode(n1, n2, vt);
  case Op_AddVL: return new (C) AddVLNode(n1, n2, vt);
  case Op_AddVF: return new (C) AddVFNode(n1, n2, vt);
  case Op_AddVD: return new (C) AddVDNode(n1, n2, vt);

  case Op_SubVB: return new (C) SubVBNode(n1, n2, vt);
  case Op_SubVS: return new (C) SubVSNode(n1, n2, vt);
  case Op_SubVI: return new (C) SubVINode(n1, n2, vt);
  case Op_SubVL: return new (C) SubVLNode(n1, n2, vt);
  case Op_SubVF: return new (C) SubVFNode(n1, n2, vt);
  case Op_SubVD: return new (C) SubVDNode(n1, n2, vt);

  case Op_MulVS: return new (C) MulVSNode(n1, n2, vt);
  case Op_MulVI: return new (C) MulVINode(n1, n2, vt);
  case Op_MulVF: return new (C) MulVFNode(n1, n2, vt);
  case Op_MulVD: return new (C) MulVDNode(n1, n2, vt);

  case Op_DivVF: return new (C) DivVFNode(n1, n2, vt);
  case Op_DivVD: return new (C) DivVDNode(n1, n2, vt);

  case Op_LShiftVB: return new (C) LShiftVBNode(n1, n2, vt);
  case Op_LShiftVS: return new (C) LShiftVSNode(n1, n2, vt);
  case Op_LShiftVI: return new (C) LShiftVINode(n1, n2, vt);
  case Op_LShiftVL: return new (C) LShiftVLNode(n1, n2, vt);

  case Op_RShiftVB: return new (C) RShiftVBNode(n1, n2, vt);
  case Op_RShiftVS: return new (C) RShiftVSNode(n1, n2, vt);
  case Op_RShiftVI: return new (C) RShiftVINode(n1, n2, vt);
  case Op_RShiftVL: return new (C) RShiftVLNode(n1, n2, vt);

  case Op_URShiftVB: return new (C) URShiftVBNode(n1, n2, vt);
  case Op_URShiftVS: return new (C) URShiftVSNode(n1, n2, vt);
  case Op_URShiftVI: return new (C) URShiftVINode(n1, n2, vt);
  case Op_URShiftVL: return new (C) URShiftVLNode(n1, n2, vt);

  case Op_AndV: return new (C) AndVNode(n1, n2, vt);
  case Op_OrV:  return new (C) OrVNode (n1, n2, vt);
  case Op_XorV: return new (C) XorVNode(n1, n2, vt);
  }
  fatal(err_msg_res("Missed vector creation for '%s'", NodeClassNames[vopc]));
  return NULL;
}
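A minimal usage sketch (modeled on SuperWord-style vectorization; Compile* C, the pack's first scalar node p0, its vectorized operands in1/in2, and the lane count vlen are all assumed to be in scope, and velt_basic_type is an assumed helper returning the pack's element type):

  // Hypothetical call site: emit the vector form of the scalar op in p0.
  int opc = p0->Opcode();
  BasicType bt = velt_basic_type(p0);   // assumed helper: element type of the pack
  VectorNode* vn = VectorNode::make(C, opc, in1, in2, vlen, bt);
  _igvn.register_new_node_with_optimizer(vn);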
Example #2
void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
  if (!reverse) {
    int cp_index = Bytes::get_Java_u2(p);
    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
    int cpc2 = add_secondary_cp_cache_entry(cpc);
    // The second secondary entry is required to store the MethodType and
    // must be the next entry.
    int cpc3 = add_secondary_cp_cache_entry(cpc);
    assert(cpc2 + 1 == cpc3, err_msg_res("must be consecutive: %d + 1 == %d", cpc2, cpc3));

    // Replace the trailing four bytes with a CPC index for the dynamic
    // call site.  Unlike other CPC entries, there is one per bytecode,
    // not just one per distinct CP entry.  In other words, the
    // CPC-to-CP relation is many-to-one for invokedynamic entries.
    // This means we must use a larger index size than u2 to address
    // all these entries.  That is the main reason invokedynamic
    // must have a five-byte instruction format.  (Of course, other JVM
    // implementations can use the bytes for other purposes.)
    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
    // Note: We use native_u4 format exclusively for 4-byte indexes.
  } else {
    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
                        Bytes::get_native_u4(p));
    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
    int pool_index = cp_cache_entry_pool_index(secondary_index);
    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
    // zero out 4 bytes
    Bytes::put_Java_u4(p, 0);
    Bytes::put_Java_u2(p, pool_index);
  }
}
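For reference, the byte layout the comment above depends on (per the JVM specification; here p == bcp + offset points one past the opcode):

  // bcp[0]    : 0xba, Bytecodes::_invokedynamic
  // bcp[1..2] : u2 constant pool index, big-endian, as loaded from the class file
  // bcp[3..4] : two zero bytes reserved by the specification
  // After rewriting, bcp[1..4] hold a single native-endian u4 cache index.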
Example #3
// Create a binary-tree form for Packs over the half-open range [lo, hi).
PackNode* PackNode::binary_tree_pack(Compile* C, int lo, int hi) {
  int ct = hi - lo;
  assert(is_power_of_2(ct), "power of 2");
  if (ct == 2) {
    PackNode* pk = PackNode::make(C, in(lo), 2, vect_type()->element_basic_type());
    pk->add_opd(in(lo+1));
    return pk;

  } else {
    int mid = lo + ct/2;
    PackNode* n1 = binary_tree_pack(C, lo,  mid);
    PackNode* n2 = binary_tree_pack(C, mid, hi );

    BasicType bt = n1->vect_type()->element_basic_type();
    assert(bt == n2->vect_type()->element_basic_type(), "should be the same");
    switch (bt) {
    case T_BOOLEAN:
    case T_BYTE:
      return new (C) PackSNode(n1, n2, TypeVect::make(T_SHORT, 2));
    case T_CHAR:
    case T_SHORT:
      return new (C) PackINode(n1, n2, TypeVect::make(T_INT, 2));
    case T_INT:
      return new (C) PackLNode(n1, n2, TypeVect::make(T_LONG, 2));
    case T_LONG:
      return new (C) Pack2LNode(n1, n2, TypeVect::make(T_LONG, 2));
    case T_FLOAT:
      return new (C) PackDNode(n1, n2, TypeVect::make(T_DOUBLE, 2));
    case T_DOUBLE:
      return new (C) Pack2DNode(n1, n2, TypeVect::make(T_DOUBLE, 2));
    }
    fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
  }
  return NULL;
}
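A worked trace of the recursion, assuming four T_INT inputs in(lo)..in(lo+3): each half of the range becomes a two-lane PackINode, and the two halves are then joined by a PackLNode that treats each pair of ints as one 64-bit lane:

  // [lo, lo+4), T_INT elements (hypothetical shapes):
  //   [lo, lo+2)   -> PackINode{in(lo), in(lo+1)}     : TypeVect(T_INT, 2)
  //   [lo+2, lo+4) -> PackINode{in(lo+2), in(lo+3)}   : TypeVect(T_INT, 2)
  //   combine      -> PackLNode(n1, n2)               : TypeVect(T_LONG, 2)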
Example #4
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b,
  AbstractRegister c,
  AbstractRegister d,
  AbstractRegister e,
  AbstractRegister f,
  AbstractRegister g,
  AbstractRegister h,
  AbstractRegister i
) {
  assert(
    a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i
           && b != c && b != d && b != e && b != f && b != g && b != h && b != i
                     && c != d && c != e && c != f && c != g && c != h && c != i
                               && d != e && d != f && d != g && d != h && d != i
                                         && e != f && e != g && e != h && e != i
                                                   && f != g && f != h && f != i
                                                             && g != h && g != i
                                                                       && h != i,
    err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
                ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
                ", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
                ", i=" INTPTR_FORMAT "",
                p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i))
  );
}
Example #5
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b
) {
  assert(
    a != b,
    err_msg_res("registers must be different: a=%d, b=%d",
                a, b)
  );
}
Example #6
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b
) {
  assert(
    a != b,
    err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT "",
                p2i(a), p2i(b))
  );
}
Example #7
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b,
  AbstractRegister c
) {
  assert(
    a != b && a != c
           && b != c,
    err_msg_res("registers must be different: a=%d, b=%d, c=%d",
                a, b, c)
  );
}
Example #8
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}
Example #9
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b,
  AbstractRegister c,
  AbstractRegister d
) {
  assert(
    a != b && a != c && a != d
           && b != c && b != d
                     && c != d,
    err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
                ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT "",
                p2i(a), p2i(b), p2i(c), p2i(d))
  );
}
Example #10
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b,
  AbstractRegister c,
  AbstractRegister d,
  AbstractRegister e
) {
  assert(
    a != b && a != c && a != d && a != e
           && b != c && b != d && b != e
                     && c != d && c != e
                               && d != e,
    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d",
                a, b, c, d, e)
  );
}
Example #11
VectorNode* VectorNode::shift_count(Compile* C, Node* shift, Node* cnt, uint vlen, BasicType bt) {
  assert(VectorNode::is_shift(shift) && !cnt->is_Con(), "only variable shift count");
  // Match shift count type with shift vector type.
  const TypeVect* vt = TypeVect::make(bt, vlen);
  switch (shift->Opcode()) {
  case Op_LShiftI:
  case Op_LShiftL:
    return new (C) LShiftCntVNode(cnt, vt);
  case Op_RShiftI:
  case Op_RShiftL:
  case Op_URShiftI:
  case Op_URShiftL:
    return new (C) RShiftCntVNode(cnt, vt);
  }
  fatal(err_msg_res("Missed vector creation for '%s'", NodeClassNames[shift->Opcode()]));
  return NULL;
}
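A hedged usage sketch based only on the signature above (shift is the scalar shift node, cnt its non-constant count input, both assumed to exist):

  // Wrap the variable count once; every lane then shifts by the same amount.
  VectorNode* cnt_v = VectorNode::shift_count(C, shift, cnt, vlen, T_INT);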
Example #12
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b,
  AbstractRegister c,
  AbstractRegister d,
  AbstractRegister e,
  AbstractRegister f
) {
  assert(
    a != b && a != c && a != d && a != e && a != f
           && b != c && b != d && b != e && b != f
                     && c != d && c != e && c != f
                               && d != e && d != f
                                         && e != f,
    err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
                ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
                ", f=" INTPTR_FORMAT "",
                p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f))
  );
}
Example #13
inline void assert_different_registers(
  AbstractRegister a,
  AbstractRegister b,
  AbstractRegister c,
  AbstractRegister d,
  AbstractRegister e,
  AbstractRegister f,
  AbstractRegister g
) {
  assert(
    a != b && a != c && a != d && a != e && a != f && a != g
           && b != c && b != d && b != e && b != f && b != g
                     && c != d && c != e && c != f && c != g
                               && d != e && d != f && d != g
                                         && e != f && e != g
                                                   && f != g,
    err_msg_res("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d",
                a, b, c, d, e, f, g)
  );
}
Example #14
// Return initial Pack node. Additional operands added with add_opd() calls.
PackNode* PackNode::make(Compile* C, Node* s, uint vlen, BasicType bt) {
  const TypeVect* vt = TypeVect::make(bt, vlen);
  switch (bt) {
  case T_BOOLEAN:
  case T_BYTE:
    return new (C) PackBNode(s, vt);
  case T_CHAR:
  case T_SHORT:
    return new (C) PackSNode(s, vt);
  case T_INT:
    return new (C) PackINode(s, vt);
  case T_LONG:
    return new (C) PackLNode(s, vt);
  case T_FLOAT:
    return new (C) PackFNode(s, vt);
  case T_DOUBLE:
    return new (C) PackDNode(s, vt);
  }
  fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
  return NULL;
}
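Usage follows the pattern already visible in binary_tree_pack: make() seeds the pack with one scalar and add_opd() appends the rest (s0..s3 are hypothetical scalar int nodes):

  PackNode* pk = PackNode::make(C, s0, 4, T_INT);   // PackINode, TypeVect(T_INT, 4)
  pk->add_opd(s1);
  pk->add_opd(s2);
  pk->add_opd(s3);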
Example #15
// Scalar promotion
VectorNode* VectorNode::scalar2vector(Compile* C, Node* s, uint vlen, const Type* opd_t) {
  BasicType bt = opd_t->array_element_basic_type();
  const TypeVect* vt = opd_t->singleton() ? TypeVect::make(opd_t, vlen)
                                          : TypeVect::make(bt, vlen);
  switch (bt) {
  case T_BOOLEAN:
  case T_BYTE:
    return new (C) ReplicateBNode(s, vt);
  case T_CHAR:
  case T_SHORT:
    return new (C) ReplicateSNode(s, vt);
  case T_INT:
    return new (C) ReplicateINode(s, vt);
  case T_LONG:
    return new (C) ReplicateLNode(s, vt);
  case T_FLOAT:
    return new (C) ReplicateFNode(s, vt);
  case T_DOUBLE:
    return new (C) ReplicateDNode(s, vt);
  }
  fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
  return NULL;
}
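A minimal sketch of the promotion (s is a hypothetical scalar int node; a non-singleton type such as TypeInt::INT takes the TypeVect::make(bt, vlen) branch):

  // Broadcast one int into all four lanes: ReplicateINode, TypeVect(T_INT, 4).
  VectorNode* rep = VectorNode::scalar2vector(C, s, 4, TypeInt::INT);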
Example #16
// Extract a scalar element of a vector.
Node* ExtractNode::make(Compile* C, Node* v, uint position, BasicType bt) {
  assert((int)position < Matcher::max_vector_size(bt), "pos in range");
  ConINode* pos = ConINode::make(C, (int)position);
  switch (bt) {
  case T_BOOLEAN:
    return new (C) ExtractUBNode(v, pos);
  case T_BYTE:
    return new (C) ExtractBNode(v, pos);
  case T_CHAR:
    return new (C) ExtractCNode(v, pos);
  case T_SHORT:
    return new (C) ExtractSNode(v, pos);
  case T_INT:
    return new (C) ExtractINode(v, pos);
  case T_LONG:
    return new (C) ExtractLNode(v, pos);
  case T_FLOAT:
    return new (C) ExtractFNode(v, pos);
  case T_DOUBLE:
    return new (C) ExtractDNode(v, pos);
  }
  fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
  return NULL;
}
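And the inverse direction, pulling one lane back out as a scalar (vec is a hypothetical 4-lane int vector node):

  Node* lane = ExtractNode::make(C, vec, 3, T_INT);  // ExtractINode of lane 3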
Example #17
const Type* OverflowNode::sub(const Type* t1, const Type* t2) const {
  fatal(err_msg_res("sub() should not be called for '%s'", NodeClassNames[this->Opcode()]));
  return TypeInt::CC;
}
Example #18
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = rbx;   // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register temp1 = rscratch1;
  Register temp2 = rscratch2;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1,        rcx, rdx);
    assert_different_registers(temp2,        rcx, rdx);
    assert_different_registers(temp3,        rcx, rdx);
  }
#endif
  else {
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
  }
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ movptr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpl(temp2_index, 0);
        __ jcc(Assembler::greaterEqual, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
      __ movptr(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
        __ jcc(Assembler::greaterEqual, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 temp2,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // Live at this point:
    //   rbx_method
    //   rsi/r13 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
    __ verify_method_ptr(rbx_method);
    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }
}
Example #19
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolution issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
                                            is_virtual,
                                            call_does_dispatch, vtable_index);  // out-parameters
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = receiver_type->speculative_type();
        }

        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}
Example #20
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
    GraphKit kit(jvms);
    PhaseGVN& gvn = kit.gvn();
    Compile* C = kit.C;
    vmIntrinsics::ID iid = callee->intrinsic_id();
    switch (iid) {
    case vmIntrinsics::_invokeBasic:
    {
        // get MethodHandle receiver
        Node* receiver = kit.argument(0);
        if (receiver->Opcode() == Op_ConP) {
            const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
            ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
            guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
            const int vtable_index = methodOopDesc::invalid_vtable_index;
            CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS);
            if (cg != NULL && cg->is_inline())
                return cg;
        } else {
            if (PrintInlining)  CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
        }
    }
    break;

    case vmIntrinsics::_linkToVirtual:
    case vmIntrinsics::_linkToStatic:
    case vmIntrinsics::_linkToSpecial:
    case vmIntrinsics::_linkToInterface:
    {
        // pop MemberName argument
        Node* member_name = kit.argument(callee->arg_size() - 1);
        if (member_name->Opcode() == Op_ConP) {
            const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
            ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

            // In lambda forms we erase signature types to avoid resolution issues
            // involving class loaders.  When we optimize a method handle invoke
            // to a direct call we must cast the receiver and arguments to their
            // actual types.
            ciSignature* signature = target->signature();
            const int receiver_skip = target->is_static() ? 0 : 1;
            // Cast receiver to its type.
            if (!target->is_static()) {
                Node* arg = kit.argument(0);
                const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
                const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
                if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
                    Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
                    kit.set_argument(0, cast_obj);
                }
            }
            // Cast reference arguments to their types.
            for (int i = 0; i < signature->count(); i++) {
                ciType* t = signature->type_at(i);
                if (t->is_klass()) {
                    Node* arg = kit.argument(receiver_skip + i);
                    const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
                    const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
                    if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
                        Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
                        kit.set_argument(receiver_skip + i, cast_obj);
                    }
                }
            }
            const int vtable_index = methodOopDesc::invalid_vtable_index;
            const bool call_is_virtual = target->is_abstract();  // FIXME workaround
            CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS);
            if (cg != NULL && cg->is_inline())
                return cg;
        }
    }
    break;

    default:
        fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
        break;
    }
    return NULL;
}
Example #21
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register temp1 = (for_compiler_entry ? R25_tmp5 : R7);
  Register temp2 = (for_compiler_entry ? R22_tmp2 : R8);
  Register temp3 = (for_compiler_entry ? R23_tmp3 : R9);
  Register temp4 = (for_compiler_entry ? R24_tmp4 : R10);
  if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
  if (member_reg   != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, R19_method, temp1, temp2, for_compiler_entry);
  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
                   temp1, temp2,
                   "MemberName required for invokeVirtual etc.");
    }

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check_throw(receiver_reg, -1, temp1, EXCEPTION_ENTRY);
      } else {
        // load receiver klass itself
        __ null_check_throw(receiver_reg, oopDesc::klass_offset_in_bytes(), temp1, EXCEPTION_ENTRY);
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3);
        load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
        // If we get here, the type check failed!
        __ stop("receiver class disagrees with MemberName.clazz");
        __ BIND(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  O5_savedSP - interpreter linkage (if interpreted)
    //  O0..O5 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
      }
      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
      }
      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ ld(temp2_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpdi(CCR1, temp2_index, 0);
        __ bge(CCR1, L_index_ok);
        __ stop("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, R19_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
      }

      Register temp2_intf = temp2;
      __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3);
      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
      __ verify_klass_ptr(temp2_intf);

      Register vtable_index = R19_method;
      __ ld(vtable_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);
      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpdi(CCR1, vtable_index, 0);
        __ bge(CCR1, L_index_ok);
        __ stop("invalid vtable index for MH.invokeInterface");
        __ BIND(L_index_ok);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp2_intf,
                                 // note: next two args must be the same:
                                 vtable_index, R19_method,
                                 temp3, temp4,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // Live at this point:
    //   R19_method
    //   O5_savedSP (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
    __ verify_method_ptr(R19_method);
    jump_from_method_handle(_masm, R19_method, temp1, temp2, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ BIND(L_incompatible_class_change_error);
      __ load_const_optimized(temp1, StubRoutines::throw_IncompatibleClassChangeError_entry());
      __ mtctr(temp1);
      __ bctr();
    }
  }
}
Example #22
// Constructor: output goes either to a file or to the network visualizer.
IdealGraphPrinter::IdealGraphPrinter() {

  // By default dump both ins and outs since dead or unreachable code
  // needs to appear in the graph.  There are also some special cases
  // in the mach where kill projections have no users but should
  // appear in the dump.
  _traverse_outs = true;
  _should_send_method = true;
  _output = NULL;
  buffer[0] = 0;
  _depth = 0;
  _current_method = NULL;
  assert(!_current_method, "current method must be initialized to NULL");
  _stream = NULL;

  if (PrintIdealGraphFile != NULL) {
    ThreadCritical tc;
    // User wants all output to go to files
    if (_file_count != 0) {
      ResourceMark rm;
      stringStream st;
      const char* dot = strrchr(PrintIdealGraphFile, '.');
      if (dot) {
        st.write(PrintIdealGraphFile, dot - PrintIdealGraphFile);
        st.print("%d%s", _file_count, dot);
      } else {
        st.print("%s%d", PrintIdealGraphFile, _file_count);
      }
      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(st.as_string());
      _output = stream;
    } else {
      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(PrintIdealGraphFile);
      _output = stream;
    }
    _file_count++;
  } else {
    _stream = new (ResourceObj::C_HEAP, mtCompiler) networkStream();

    // Try to connect to visualizer
    if (_stream->connect(PrintIdealGraphAddress, PrintIdealGraphPort)) {
      char c = 0;
      _stream->read(&c, 1);
      if (c != 'y') {
        tty->print_cr("Client available, but does not want to receive data!");
        _stream->close();
        delete _stream;
        _stream = NULL;
        return;
      }
      _output = _stream;
    } else {
      // It would be nice if we could shut down cleanly but it should
      // be an error if we can't connect to the visualizer.
      fatal(err_msg_res("Couldn't connect to visualizer at %s:" INTX_FORMAT,
                        PrintIdealGraphAddress, PrintIdealGraphPort));
    }
  }

  _xml = new (ResourceObj::C_HEAP, mtCompiler) xmlStream(_output);

  head(TOP_ELEMENT);
}
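As a concrete reading of the file-name logic above: with -XX:PrintIdealGraphFile=graph.xml, the first printer instance writes graph.xml, and the second writes graph1.xml, because _file_count is spliced in just before the extension.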