  // Report a late-inlining decision for this call: emit the message at the
  // call's inlining depth and bci, then reposition the print-inlining output
  // at this generator's entry and record it as delayed.
  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }
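  // Insert a fresh print-inlining entry for this call generator, then emit
  // the late-inlining message at the call's inlining depth and bci.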
  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_insert(this);
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
  }
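
// Try to replace a MethodHandle intrinsic call (invokeBasic or one of the
// linkTo* linkers) with a direct call generator for its target method.
// Returns NULL if no generator could be produced; input_not_const reports
// whether the MethodHandle/MemberName argument was a compile-time constant.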
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
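  // input_not_const is an out-parameter: it is cleared below once the
  // MethodHandle (or MemberName) argument is known to be a compile-time constant.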
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
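        // The MethodHandle is a compile-time constant: resolve the method it
        // targets and generate a direct (potentially inlined) call to it.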
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, /*call_does_dispatch=*/false, jvms,
                                              /*allow_inline=*/true, PROB_ALWAYS, /*speculative_receiver_type=*/NULL,
                                              /*allow_intrinsics=*/true, /*delayed_forbidden=*/true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        return cg;
      } else {
        const char* msg = "receiver not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
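        // The MemberName is a compile-time constant, so this linkTo* call can
        // be bound directly to the target method it names.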
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            /*check_access=*/false);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              /*allow_inline=*/true, PROB_ALWAYS, speculative_receiver_type,
                                              /*allow_intrinsics=*/true, /*delayed_forbidden=*/true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        return cg;
      } else {
        const char* msg = "member_name not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}
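
// A minimal sketch of how this helper is typically driven (simplified; the
// actual caller, CallGenerator::for_method_handle_call(), also consults
// profiling and incremental-inlining state before deciding):
//
//   bool input_not_const;
//   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
//   if (cg == NULL) {
//     // Input not constant (or no generator): fall back to a late-inline
//     // generator that retries once incremental inlining makes it constant.
//     cg = CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
//   }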