Example #1
// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(methodHandle method, int size, TRAPS) {
  No_Safepoint_Verifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm;
  // Set the method back-pointer.
  _method = method();

  if (TieredCompilation) {
    _invocation_counter.init();
    _backedge_counter.init();
    _invocation_counter_start = 0;
    _backedge_counter_start = 0;
    _num_loops = 0;
    _num_blocks = 0;
    _highest_comp_level = 0;
    _highest_osr_comp_level = 0;
    _would_profile = true;
  }
  set_creation_mileage(mileage_of(method()));

  // Initialize flags and trap history.
  _nof_decompiles = 0;
  _nof_overflow_recompiles = 0;
  _nof_overflow_traps = 0;
  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  Copy::zero_to_words((HeapWord*) &_trap_hist,
                      sizeof(_trap_hist) / sizeof(HeapWord));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  BytecodeStream stream(method);
  Bytecodes::Code c;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0)  empty_bc_count += 1;
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  object_size += extra_size + DataLayout::compute_size_in_bytes(arg_size+1);

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but it is
  // at least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  set_size(object_size);
}
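// Editor's sketch (not part of the original source): the object size computed
// above decomposes into the fixed header, the per-bytecode profile cells, the
// extra DataLayout cells for stray traps, and one trailing arg-info cell.
// The parameter names are hypothetical stand-ins for the values used in the
// constructor (e.g. header_bytes ~ in_bytes(data_offset())).
static int sketch_method_data_object_size(int header_bytes, int data_size,
                                          int extra_data_count, int extra_cell_bytes,
                                          int arg_info_cell_bytes) {
  int extra_size = extra_data_count * extra_cell_bytes;   // stray-trap cells
  return header_bytes + data_size + extra_size + arg_info_cell_bytes;
}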
// Print an event.
void SimpleThresholdPolicy::print_event(EventType type, methodHandle mh, methodHandle imh,
                                        int bci, CompLevel level) {
  bool inlinee_event = mh() != imh();

  ttyLocker tty_lock;
  tty->print("%lf: [", os::elapsedTime());

  switch(type) {
  case CALL:
    tty->print("call");
    break;
  case LOOP:
    tty->print("loop");
    break;
  case COMPILE:
    tty->print("compile");
    break;
  case REMOVE_FROM_QUEUE:
    tty->print("remove-from-queue");
    break;
  case UPDATE_IN_QUEUE:
    tty->print("update-in-queue");
    break;
  case REPROFILE:
    tty->print("reprofile");
    break;
  case MAKE_NOT_ENTRANT:
    tty->print("make-not-entrant");
    break;
  default:
    tty->print("unknown");
  }

  tty->print(" level=%d ", level);

  ResourceMark rm;
  char *method_name = mh->name_and_sig_as_C_string();
  tty->print("[%s", method_name);
  if (inlinee_event) {
    char *inlinee_name = imh->name_and_sig_as_C_string();
    tty->print(" [%s]] ", inlinee_name);
  }
  else tty->print("] ");
  tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
                                      CompileBroker::queue_size(CompLevel_full_optimization));

  print_specific(type, mh, imh, bci, level);

  if (type != COMPILE) {
    print_counters("", mh);
    if (inlinee_event) {
      print_counters("inlinee ", imh);
    }
    tty->print(" compilable=");
    bool need_comma = false;
    if (!mh->is_not_compilable(CompLevel_full_profile)) {
      tty->print("c1");
      need_comma = true;
    }
    if (!mh->is_not_osr_compilable(CompLevel_full_profile)) {
      if (need_comma) tty->print(",");
      tty->print("c1-osr");
      need_comma = true;
    }
    if (!mh->is_not_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2");
      need_comma = true;
    }
    if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2-osr");
    }
    tty->print(" status=");
    if (mh->queued_for_compilation()) {
      tty->print("in-queue");
    } else tty->print("idle");
  }
  tty->print_cr("]");
}
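// Illustrative output only (assembled from the format strings above; names and
// numbers are invented): a non-COMPILE event for an inlinee might print as
//   1.234: [call level=3 [Outer.run()V [Inner.work()I]] @17 queues=2,1 ... compilable=c1,c1-osr,c2 status=idle]
// where "..." stands for the print_specific() and print_counters() output.
// COMPILE events omit the counters, "compilable=" and "status=" fields.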
Example #3
 // Construction
 BaseBytecodeStream(methodHandle method) : _method(method) {
   set_interval(0, _method->code_size());
   _is_raw = false;
 }
CompilationScope::CompilationScope(methodHandle method, Scope* caller, int callerBCI)
    : Scope(method, caller, callerBCI) {
  assert(!method->is_abstract(), "cannot generate code for abstract method");
  assert(!method->is_native(), "no scopes for native methods");
}
// Tell the broker to compile the method
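// (bci == InvocationEntryBci marks a standard entry compile, which reports the
// invocation counter as the hot count; any other bci is an OSR request at a
// loop backedge, which reports the backedge counter instead.)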
void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}
void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& cpool,
                                                      Bytecodes::Code invoke_code,
                                                      const CallInfo &call_info) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  objArrayHandle resolved_references = cpool->resolved_references();
  // Use the resolved_references() lock for this cpCache entry.
  // resolved_references are created for all classes with Invokedynamic, MethodHandle
  // or MethodType constant pool cache entries.
  assert(resolved_references() != NULL,
         "a resolved_references array should have been created for this class");
  ObjectLocker ol(resolved_references, Thread::current());
  if (!is_f1_null()) {
    return;
  }

  const methodHandle adapter = call_info.resolved_method();
  const Handle appendix      = call_info.resolved_appendix();
  const Handle method_type   = call_info.resolved_method_type();
  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_final_shift       ),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    ttyLocker ttyl;
    tty->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method_type=" PTR_FORMAT "%s method=" PTR_FORMAT " ",
                  invoke_code,
                  p2i(appendix()),    (has_appendix    ? "" : " (unused)"),
                  p2i(method_type()), (has_method_type ? "" : " (unused)"),
                  p2i(adapter()));
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.invoke.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer Methods, while keeping type safety.
  //

  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  // Store MethodType, if any.
  if (has_method_type) {
    const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
    assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
    resolved_references->obj_at_put(method_type_index, method_type());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    ttyLocker ttyl;
    this->print(tty, 0);
  }
}
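// Editor's sketch of the flag packing performed by set_method_flags() above.
// The shift values are passed in as hypothetical placeholders; only the
// packing pattern is taken from the code.
static int sketch_mh_flag_word(bool has_appendix, bool has_method_type,
                               int has_appendix_shift, int has_method_type_shift,
                               int is_final_shift) {
  // Method handle / invokedynamic adapters are always marked final.
  return ((has_appendix    ? 1 : 0) << has_appendix_shift) |
         ((has_method_type ? 1 : 0) << has_method_type_shift) |
         (                   1      << is_final_shift);
}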
Example #7
AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Method handle primitive?
  if (m->is_method_handle_intrinsic()) {
    vmIntrinsics::ID id = m->intrinsic_id();
    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
    return kind;
  }

#ifndef CC_INTERP
  if (UseCRC32Intrinsics && m->is_native()) {
    // Use optimized stub code for CRC32 native methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateCRC32            : return java_util_zip_CRC32_update;
      case vmIntrinsics::_updateBytesCRC32       : return java_util_zip_CRC32_updateBytes;
      case vmIntrinsics::_updateByteBufferCRC32  : return java_util_zip_CRC32_updateByteBuffer;
    }
  }
  if (UseCRC32CIntrinsics) {
    // Use optimized stub code for CRC32C methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateBytesCRC32C             : return java_util_zip_CRC32C_updateBytes;
      case vmIntrinsics::_updateDirectByteBufferCRC32C  : return java_util_zip_CRC32C_updateDirectByteBuffer;
    }
  }

  switch(m->intrinsic_id()) {
  case vmIntrinsics::_intBitsToFloat:      return java_lang_Float_intBitsToFloat;
  case vmIntrinsics::_floatToRawIntBits:   return java_lang_Float_floatToRawIntBits;
  case vmIntrinsics::_longBitsToDouble:    return java_lang_Double_longBitsToDouble;
  case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits;
  }

#endif // CC_INTERP

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  //       methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  //       otherwise we will run into problems with JDK 1.2, see also
  //       InterpreterGenerator::generate_method_entry()
  //       for details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;
    case vmIntrinsics::_dpow  : return java_lang_math_pow  ;
    case vmIntrinsics::_dexp  : return java_lang_math_exp  ;

    case vmIntrinsics::_Reference_get:
                                return java_lang_ref_reference_get;
  }

  // Accessor method?
  if (m->is_getter()) {
    // TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters.
    // See CppInterpreter::accessor_entry in cppInterpreter_zero.cpp. This should be fixed in Zero,
    // then the call above updated to ::is_accessor
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now: zero locals for all non-empty methods
  return zerolocals;
}
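// Editor's sketch of the "parallel enum ranges" mapping used above for the
// signature-polymorphic method handle intrinsics.  The concrete values are
// hypothetical; only the offset arithmetic mirrors the code.
static int sketch_mh_method_kind(int intrinsic_id,
                                 int first_mh_sig_poly_id,       // ~ vmIntrinsics::FIRST_MH_SIG_POLY
                                 int method_handle_invoke_first) { // ~ method_handle_invoke_FIRST
  // Each signature-polymorphic intrinsic maps to the MethodKind at the same
  // offset inside the method_handle_invoke_FIRST..LAST range.
  return method_handle_invoke_first + (intrinsic_id - first_mh_sig_poly_id);
}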
void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
  // either the method is a miranda or its holder should accept the given index
  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
  set_direct_or_vtable_call(invoke_code, method, index);
}
void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
                                                       methodHandle method,
                                                       int vtable_index) {
  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;

  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
      // instruction somehow links to a non-interface method (in Object).
      // In that case, the method has no itable index and must be invoked as a virtual.
      // Set a flag to keep track of this corner case.
      change_to_virtual = true;

      // ...and fall through as if we were handling invokevirtual:
    case Bytecodes::_invokevirtual:
      {
        if (!is_vtable_call) {
          assert(method->can_be_statically_bound(), "");
          // set_f2_as_vfinal_method checks if is_vfinal flag is true.
          set_method_flags(as_TosState(method->result_type()),
                           (                             1      << is_vfinal_shift) |
                           ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                           ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2_as_vfinal_method(method());
        } else {
          assert(!method->can_be_statically_bound(), "");
          assert(vtable_index >= 0, "valid index");
          assert(!method->is_final_method(), "sanity");
          set_method_flags(as_TosState(method->result_type()),
                           ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
      }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      assert(!is_vtable_call, "");
      // Note:  Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual, "");
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
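// Editor's sketch of how the two encodings of f2 written above are consumed at
// dispatch time (pseudocode; accessor names are illustrative, the surrounding
// dispatch sequence is hypothetical):
//
//   if (entry->is_vfinal()) {
//     callee = entry->f2_as_vfinal_method();            // f2 holds the Method* directly
//   } else {
//     callee = lookup_in_vtable(receiver_klass, entry->f2());  // f2 holds a vtable index
//   }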
// is_optimized:  Compiler has generated an optimized call (i.e., no inline cache).
// static_bound:  The call can be statically bound (i.e., no need to use an inline cache).
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry      = method_code->verified_entry_point();
    } else {
      entry      = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call).  Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}
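// Editor's sketch of the entry selection performed above (illustration only;
// the returned string labels are descriptive, not real symbols):
static const char* sketch_monomorphic_entry_kind(bool has_usable_code,
                                                 bool static_bound,
                                                 bool is_optimized) {
  if (has_usable_code) {
    return (static_bound || is_optimized) ? "compiled verified_entry_point"
                                          : "compiled entry_point (inline cache check)";
  }
  return is_optimized ? "interpreter via c2i entry"
                      : "interpreter via c2i unverified entry + CompiledICHolder";
}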
bool Compiler::is_intrinsic_supported(const methodHandle& method) {
  vmIntrinsics::ID id = method->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (method->is_synchronized()) {
    // C1 does not support intrinsification of synchronized methods.
    return false;
  }

  switch (id) {
  case vmIntrinsics::_compareAndSwapLong:
    if (!VM_Version::supports_cx8()) return false;
    break;
  case vmIntrinsics::_getAndAddInt:
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
  case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also to prevent commoning reads from this field across safepoints,
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_currentThread:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_getObject:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putObject:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  case vmIntrinsics::_getObjectVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putObjectVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  case vmIntrinsics::_getByte_raw:
  case vmIntrinsics::_getShort_raw:
  case vmIntrinsics::_getChar_raw:
  case vmIntrinsics::_getInt_raw:
  case vmIntrinsics::_getLong_raw:
  case vmIntrinsics::_getFloat_raw:
  case vmIntrinsics::_getDouble_raw:
  case vmIntrinsics::_putByte_raw:
  case vmIntrinsics::_putShort_raw:
  case vmIntrinsics::_putChar_raw:
  case vmIntrinsics::_putInt_raw:
  case vmIntrinsics::_putLong_raw:
  case vmIntrinsics::_putFloat_raw:
  case vmIntrinsics::_putDouble_raw:
  case vmIntrinsics::_putOrderedObject:
  case vmIntrinsics::_putOrderedInt:
  case vmIntrinsics::_putOrderedLong:
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_checkIndex:
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
  case vmIntrinsics::_compareAndSwapInt:
  case vmIntrinsics::_compareAndSwapObject:
  case vmIntrinsics::_getCharStringU:
  case vmIntrinsics::_putCharStringU:
#ifdef TRACE_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:
#endif
    break;
  default:
    return false; // Intrinsics not on the previous list are not available.
  }

  return true;
}
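// Hypothetical usage sketch: a C1 front end would typically gate intrinsic
// expansion on this predicate, e.g.
//
//   vmIntrinsics::ID id = method->intrinsic_id();
//   if (id != vmIntrinsics::_none && is_intrinsic_supported(method)) {
//     // expand the intrinsic inline instead of emitting a regular call
//   }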
Example #12
// Fill StackFrameInfo with declaringClass and bci and initialize memberName
void StackWalk::fill_stackframe(Handle stackFrame, const methodHandle& method, int bci) {
  java_lang_StackFrameInfo::set_declaringClass(stackFrame(), method->method_holder()->java_mirror());
  java_lang_StackFrameInfo::set_method_and_bci(stackFrame(), method, bci);
}