Example #1
// Check all the formats of native implementation name to see if there is one
// for the specified method.
address NativeLookup::lookup_entry(methodHandle method, bool& in_base_library, TRAPS) {
  address entry = NULL;
  in_base_library = false;

  // Compute pure name
  char* pure_name = pure_jni_name(method);

  // Compute the argument size of the specified native method
  int args_size = 1                             // JNIEnv
                + (method->is_static() ? 1 : 0) // class for static methods
                + method->size_of_parameters(); // actual parameters


  // 1) Try JNI short style
  entry = lookup_style(method, pure_name, "",        args_size, true,  in_base_library, CHECK_NULL);
  if (entry != NULL) return entry;

  // Compute long name
  char* long_name = long_jni_name(method);

  // 2) Try JNI long style
  entry = lookup_style(method, pure_name, long_name, args_size, true,  in_base_library, CHECK_NULL);
  if (entry != NULL) return entry;

  // 3) Try JNI short style without os prefix/suffix
  entry = lookup_style(method, pure_name, "",        args_size, false, in_base_library, CHECK_NULL);
  if (entry != NULL) return entry;

  // 4) Try JNI long style without os prefix/suffix
  entry = lookup_style(method, pure_name, long_name, args_size, false, in_base_library, CHECK_NULL);

  return entry; // NULL indicates not found
}
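
For reference, below is a minimal standalone sketch (not HotSpot source) of the args_size arithmetic above, assuming size_of_parameters() counts Java argument words: long and double take two slots, everything else one, plus the receiver slot for non-static methods. The method pkg.Cls.foo used here is hypothetical.

#include <cstdio>

int main() {
  // Hypothetical method: static native int foo(long a, int b) in class pkg.Cls.
  const bool is_static          = true;
  const int  size_of_parameters = 2 /* long a */ + 1 /* int b */;  // 3 words
  const int  args_size = 1                          // JNIEnv*
                       + (is_static ? 1 : 0)        // jclass for static methods
                       + size_of_parameters;        // declared argument words
  std::printf("args_size = %d\n", args_size);       // prints: args_size = 5
  return 0;
}
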
Example #2
AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Invoker for method handles?
  if (m->is_method_handle_invoke())  return method_handle;

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  //       methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_invoke(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  //       otherwise we will run into problems with JDK 1.2, see also
  //       AbstractInterpreterGenerator::generate_method_entry() for
  //       details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;

    case vmIntrinsics::_Reference_get:
                                return java_lang_ref_reference_get;
  }

  // Accessor method?
  if (m->is_accessor()) {
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now: zero locals for all non-empty methods
  return zerolocals;
}
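
The kind returned by method_kind() is typically used to index a table of pre-generated interpreter entry points, so every method of the same shape shares one entry. The sketch below is a standalone illustration of that idea, not HotSpot source; entry_for_kind and entry_table are assumed names.

#include <cassert>
#include <cstdio>

enum MethodKind { zerolocals, zerolocals_synchronized, native, native_synchronized,
                  empty, accessor, java_lang_math_sin, number_of_method_entries };

typedef const char* address;                          // stand-in for a code address
static address entry_table[number_of_method_entries];

static address entry_for_kind(MethodKind kind) {
  assert(kind < number_of_method_entries);
  return entry_table[kind];
}

int main() {
  entry_table[zerolocals]         = "generic entry, locals zeroed";
  entry_table[java_lang_math_sin] = "intrinsified Math.sin entry";
  std::printf("%s\n", entry_for_kind(java_lang_math_sin));
  return 0;
}
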
Example #3
// Initialize the methodDataOop corresponding to a given method.
void methodDataOopDesc::initialize(methodHandle method) {
  ResourceMark rm;

  // Set the method back-pointer.
  _method = method();
  set_creation_mileage(mileage_of(method()));

  // Initialize flags and trap history.
  _nof_decompiles = 0;
  _nof_overflow_recompiles = 0;
  _nof_overflow_traps = 0;
  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  Copy::zero_to_words((HeapWord*) &_trap_hist,
                      sizeof(_trap_hist) / sizeof(HeapWord));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  BytecodeStream stream(method);
  Bytecodes::Code c;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0)  empty_bc_count += 1;
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  object_size += extra_size + DataLayout::compute_size_in_bytes(arg_size+1);
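
  // Resulting layout (informal sketch added for orientation, not a verbatim
  // HotSpot diagram):
  //   +------------------------------+  <- data_offset()
  //   | per-bytecode ProfileData     |  data_size bytes
  //   +------------------------------+
  //   | extra DataLayout trap cells  |  extra_size bytes
  //   +------------------------------+
  //   | ArgInfoData (arg_size + 1)   |
  //   +------------------------------+  <- object_size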

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  set_object_is_parsable(object_size);
}
Example #4
// Check all the formats of native implementation name to see if there is one
// for the specified method.
address NativeLookup::lookup_critical_entry(methodHandle method) {
  if (!CriticalJNINatives) return NULL;

  if (method->is_synchronized() ||
      !method->is_static()) {
    // Only static non-synchronized methods are allowed
    return NULL;
  }

  ResourceMark rm;
  address entry = NULL;

  Symbol* signature = method->signature();
  for (int end = 0; end < signature->utf8_length(); end++) {
    if (signature->byte_at(end) == 'L') {
      // Don't allow object types
      return NULL;
    }
  }

  // Compute critical name
  char* critical_name = critical_jni_name(method);

  // Compute argument size
  int args_size = 1                             // JNIEnv
                + (method->is_static() ? 1 : 0) // class for static methods
                + method->size_of_parameters(); // actual parameters


  // 1) Try JNI short style
  entry = lookup_critical_style(method, critical_name, "",        args_size, true);
  if (entry != NULL) return entry;

  // Compute long name
  char* long_name = long_jni_name(method);

  // 2) Try JNI long style
  entry = lookup_critical_style(method, critical_name, long_name, args_size, true);
  if (entry != NULL) return entry;

  // 3) Try JNI short style without os prefix/suffix
  entry = lookup_critical_style(method, critical_name, "",        args_size, false);
  if (entry != NULL) return entry;

  // 4) Try JNI long style without os prefix/suffix
  entry = lookup_critical_style(method, critical_name, long_name, args_size, false);

  return entry; // NULL indicates not found
}
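
To illustrate what this lookup is searching for, here is a hedged sketch of the CriticalJNINatives convention it serves (pkg.Cls.sum is a hypothetical class and method; consult the HotSpot sources for the authoritative rules): for a static native such as `static native int sum(int[] a)`, the VM first tries a critical variant that skips JNIEnv and jclass and receives arrays as a length plus a raw element pointer.

#include <jni.h>

extern "C" {

// Standard JNI implementation, found by lookup_entry().
JNIEXPORT jint JNICALL
Java_pkg_Cls_sum(JNIEnv* env, jclass, jintArray a) {
  jint  n = env->GetArrayLength(a);
  jint* p = env->GetIntArrayElements(a, NULL);
  jint  s = 0;
  for (jint i = 0; i < n; i++) s += p[i];
  env->ReleaseIntArrayElements(a, p, JNI_ABORT);
  return s;
}

// Critical variant, found by lookup_critical_entry(): no JNIEnv, no jclass,
// the array arrives as (length, pointer to its elements).
JNIEXPORT jint JNICALL
JavaCritical_pkg_Cls_sum(jint n, jint* p) {
  jint s = 0;
  for (jint i = 0; i < n; i++) s += p[i];
  return s;
}

}  // extern "C"
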
Example #5
// Compute the size of the methodDataOop necessary to store
// profiling information about a given method.  Size is in bytes.
int methodDataOopDesc::compute_allocation_size_in_bytes(methodHandle method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0)  empty_bc_count += 1;
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);
  return object_size;
}
Example #6
  uint64_t fingerprint() {
    // See if we fingerprinted this method already
    if (mh->constMethod()->fingerprint() != CONST64(0)) {
      return mh->constMethod()->fingerprint();
    }

    if (mh->size_of_parameters() > max_size_of_parameters ) {
      _fingerprint = UCONST64(-1);
      mh->constMethod()->set_fingerprint(_fingerprint);
      return _fingerprint;
    }

    assert( (int)mh->result_type() <= (int)result_feature_mask, "bad result type");
    _fingerprint = mh->result_type();
    _fingerprint <<= static_feature_size;
    if (mh->is_static())  _fingerprint |= 1;
    _shift_count = result_feature_size + static_feature_size;
    iterate_parameters();
    _fingerprint |= ((uint64_t)done_parm) << _shift_count;// mark end of sig
    mh->constMethod()->set_fingerprint(_fingerprint);
    return _fingerprint;
  }
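
For orientation, a standalone sketch of the bit layout fingerprint() builds, with illustrative (assumed) field widths and type codes; the real constants live in HotSpot's signature code. The static flag occupies the lowest bit, the result type sits in the field above it, then one small code per parameter, closed by a done_parm marker so a decoder knows where the signature ends.

#include <cstdint>
#include <cstdio>

// Assumed field widths and codes for this sketch only.
enum { result_feature_size = 4, static_feature_size = 1,
       parameter_feature_size = 4 };
enum { int_parm = 0x3, done_parm = 0xF };             // illustrative codes
const uint64_t result_code = 0x5;                     // illustrative result type

int main() {
  uint64_t fp = result_code;                   // result type in the low field
  fp <<= static_feature_size;                  // make room for the static bit
  fp |= 1;                                     // method is static in this sketch
  int shift = result_feature_size + static_feature_size;
  fp |= (uint64_t)int_parm << shift;           // one int parameter
  shift += parameter_feature_size;
  fp |= (uint64_t)done_parm << shift;          // end-of-signature marker
  std::printf("fingerprint = 0x%llx\n", (unsigned long long)fp);
  return 0;
}
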
Example #7
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                      Bytecodes::Code invoke_code,
                                                      const CallInfo &call_info) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  MonitorLockerEx ml(cpool->lock());
  if (!is_f1_null()) {
    return;
  }

  const methodHandle adapter = call_info.resolved_method();
  const Handle appendix      = call_info.resolved_appendix();
  const Handle method_type   = call_info.resolved_method_type();
  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_final_shift       ),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                  invoke_code,
                  (void *)appendix(),    (has_appendix    ? "" : " (unused)"),
                  (void *)method_type(), (has_method_type ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer method oops, while keeping type safety.
  //

  objArrayHandle resolved_references = cpool->resolved_references();
  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  // Store MethodType, if any.
  if (has_method_type) {
    const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
    assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
    resolved_references->obj_at_put(method_type_index, method_type());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}
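
The NOTE at the top of this function describes a one-shot publication protocol: write every other field first, publish f1 last, and have readers test f1 before trusting anything else. Below is a minimal standalone sketch of that pattern in plain C++11 atomics (HotSpot itself uses its own OrderAccess/release_set_f1 primitives); the type and member names are assumptions for illustration.

#include <atomic>
#include <cassert>

struct CacheEntryLike {
  int                flags = 0;
  void*              ref   = nullptr;                  // stands in for refs[f2]
  std::atomic<void*> f1{nullptr};                      // published last

  void publish(int new_flags, void* new_ref, void* adapter) {
    flags = new_flags;                                 // 1) ordinary stores first
    ref   = new_ref;
    f1.store(adapter, std::memory_order_release);      // 2) release-store f1 last
  }

  bool read(int& out_flags, void*& out_ref) const {
    void* a = f1.load(std::memory_order_acquire);      // readers test f1 first
    if (a == nullptr) return false;                    // not linked yet
    out_flags = flags;                                 // safe: ordered after f1 load
    out_ref   = ref;
    return true;
  }
};

int main() {
  CacheEntryLike e;
  int flags; void* ref;
  assert(!e.read(flags, ref));                         // unlinked entry
  int dummy = 42;
  e.publish(/*flags*/ 7, /*ref*/ &dummy, /*adapter*/ &dummy);
  assert(e.read(flags, ref) && flags == 7);            // fully published
  return 0;
}
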
Example #8
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                      Bytecodes::Code invoke_code,
                                                      methodHandle adapter,
                                                      Handle appendix, Handle method_type) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, f2, f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  Thread* THREAD = Thread::current();
  ObjectLocker ol(cpool, THREAD);
  if (!is_f1_null()) {
    return;
  }

  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  if (!has_appendix) {
    // The extra argument is not used, but we need a non-null value to signify linkage state.
    // Set it to something benign that will never leak memory.
    appendix = Universe::void_mirror();
  }

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift)    |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_vfinal_shift)       |
                   (                   1      << is_final_shift),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                  invoke_code,
                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // f1, if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f2 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f2, as if for invokevfinal, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // In principle this means that the method (with appendix) could take up to 256 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f2 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in f1.
  // This allows us to create fewer method oops, while keeping type safety.
  //

  set_f2_as_vfinal_method(adapter());

  // Store MethodType, if any.
  if (has_method_type) {
    ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(this);

    // Write the flags.
    e2->set_method_flags(as_TosState(adapter->result_type()),
                     ((has_method_type ? 1 : 0) << has_method_type_shift) |
                     (                   1      << is_vfinal_shift)       |
                     (                   1      << is_final_shift),
                     adapter->size_of_parameters());
    e2->release_set_f1(method_type());
  }

  assert(appendix.not_null(), "needed for linkage state");
  release_set_f1(appendix());  // This must be the last one to set (see NOTE above)!

  if (!is_secondary_entry()) {
    // The interpreter assembly code does not check byte_2,
    // but it is used by is_resolved, method_if_resolved, etc.
    set_bytecode_2(invoke_code);
  }

  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}
Example #9
AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Method handle primitive?
  if (m->is_method_handle_intrinsic()) {
    vmIntrinsics::ID id = m->intrinsic_id();
    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
    return kind;
  }

#ifndef CC_INTERP
  if (UseCRC32Intrinsics && m->is_native()) {
    // Use optimized stub code for CRC32 native methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateCRC32            : return java_util_zip_CRC32_update;
      case vmIntrinsics::_updateBytesCRC32       : return java_util_zip_CRC32_updateBytes;
      case vmIntrinsics::_updateByteBufferCRC32  : return java_util_zip_CRC32_updateByteBuffer;
    }
  }
#endif

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  //       methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  //       otherwise we will run into problems with JDK 1.2, see also
  //       InterpreterGenerator::generate_method_entry() for
  //       details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;
    case vmIntrinsics::_dpow  : return java_lang_math_pow  ;
    case vmIntrinsics::_dexp  : return java_lang_math_exp  ;

    case vmIntrinsics::_Reference_get:
                                return java_lang_ref_reference_get;
  }

  // Accessor method?
  if (m->is_accessor()) {
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now: zero locals for all non-empty methods
  return zerolocals;
}