Example #1
 // add a new CP cache entry beyond the normal cache for the special case of
 // invokespecial with InterfaceMethodref as cpool operand.
 int add_invokespecial_cp_cache_entry(int cp_index) {
   assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
   // Don't add InterfaceMethodref if it already exists at the end.
   for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
     if (cp_cache_entry_pool_index(i) == cp_index) {
       return i;
     }
   }
   int cache_index = _cp_cache_map.append(cp_index);
   assert(cache_index >= _first_iteration_cp_cache_limit, "");
   // do not update _cp_map, since the mapping is one-to-many
   assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
   return cache_index;
 }
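The entry added here lives past _first_iteration_cp_cache_limit, and the linear scan over the tail keeps duplicates out without touching _cp_map. Below is a minimal standalone sketch of that dedup-append pattern using plain C++ containers rather than HotSpot types; CacheMap and add_special_entry are hypothetical names, not HotSpot API.

#include <cassert>
#include <vector>

// A cache map in the spirit of the Rewriter fields used above.
struct CacheMap {
  std::vector<int> cache_to_pool;   // cache index -> constant pool index
  int first_limit = -1;             // end of the "normal" first-pass entries

  // Append cp_index past the normal entries unless it is already present.
  int add_special_entry(int cp_index) {
    assert(first_limit >= 0 && "only after the first pass has run");
    for (int i = first_limit; i < (int)cache_to_pool.size(); i++) {
      if (cache_to_pool[i] == cp_index) {
        return i;                   // reuse the existing special entry
      }
    }
    cache_to_pool.push_back(cp_index);
    return (int)cache_to_pool.size() - 1;
  }
};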
Example #2
// Creates a constant pool cache given a CPC map
void Rewriter::make_constant_pool_cache(TRAPS) {
  const int length = _cp_cache_map.length();
  constantPoolCacheOop cache =
      oopFactory::new_constantPoolCache(length, CHECK);
  No_Safepoint_Verifier nsv;
  cache->initialize(_cp_cache_map);

  // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic.
  if (_have_invoke_dynamic) {
    for (int i = 0; i < length; i++) {
      int pool_index = cp_cache_entry_pool_index(i);
      if (pool_index >= 0 &&
          _pool->tag_at(pool_index).is_invoke_dynamic()) {
        int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index);
        assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant");
        // There is a CP cache entry holding the BSM for these calls.
        int bsm_cache_index = cp_entry_to_cp_cache(bsm_index);
        cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index);
      }
    }
  }

  _pool->set_cache(cache);
  cache->set_constant_pool(_pool());
}
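The function above fills the cache from _cp_cache_map and then makes a second pass that links each InvokeDynamic entry to the cache slot holding its bootstrap method handle. A rough standalone sketch of that two-pass shape follows, with hypothetical stand-in types (PoolTag, CacheEntry) instead of the HotSpot oop machinery:

#include <unordered_map>
#include <vector>

enum class PoolTag { Other, InvokeDynamic, MethodHandle };

struct CacheEntry {
  int pool_index = -1;
  int bsm_cache_index = -1;         // linked up in the second pass
};

std::vector<CacheEntry> make_cache(const std::vector<int>& cp_cache_map,
                                   const std::vector<PoolTag>& pool_tags,
                                   const std::vector<int>& bsm_ref_index,
                                   const std::unordered_map<int, int>& cp_to_cache) {
  // First pass: one cache entry per map slot, remembering its pool index.
  std::vector<CacheEntry> cache(cp_cache_map.size());
  for (size_t i = 0; i < cache.size(); i++) {
    cache[i].pool_index = cp_cache_map[i];
  }
  // Second pass: only InvokeDynamic entries need a bootstrap-method link.
  for (size_t i = 0; i < cache.size(); i++) {
    int pi = cache[i].pool_index;
    if (pi >= 0 && pool_tags[pi] == PoolTag::InvokeDynamic) {
      int bsm_index = bsm_ref_index[pi];        // CP index of the MethodHandle
      cache[i].bsm_cache_index = cp_to_cache.at(bsm_index);
    }
  }
  return cache;
}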
Example #3
void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
  if (!reverse) {
    int cp_index = Bytes::get_Java_u2(p);
    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
    int cpc2 = add_secondary_cp_cache_entry(cpc);

    // Replace the trailing four bytes with a CPC index for the dynamic
    // call site.  Unlike other CPC entries, there is one per bytecode,
    // not just one per distinct CP entry.  In other words, the
    // CPC-to-CP relation is many-to-one for invokedynamic entries.
    // This means we must use a larger index size than u2 to address
    // all these entries.  That is the main reason invokedynamic
    // must have a five-byte instruction format.  (Of course, other JVM
    // implementations can use the bytes for other purposes.)
    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
    // Note: We use native_u4 format exclusively for 4-byte indexes.
  } else {
    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
                        Bytes::get_native_u4(p));
    int main_index = cp_cache_secondary_entry_main_index(cache_index);
    int pool_index = cp_cache_entry_pool_index(main_index);
    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
    // zero out 4 bytes
    Bytes::put_Java_u4(p, 0);
    Bytes::put_Java_u2(p, pool_index);
  }
}
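The comment above explains why the operand grows from a u2 constant-pool index to a four-byte, native-order cache index. Here is a byte-level sketch of that forward and reverse operand rewrite using plain helper functions rather than the Bytes and constantPoolCacheOopDesc APIs; the "+1" encoding merely stands in for the real secondary-index encoding and is purely illustrative.

#include <cstdint>
#include <cstring>

// Write/read a u4 in host (native) byte order, as the interpreter will read it.
static void put_native_u4(uint8_t* p, uint32_t v) { std::memcpy(p, &v, sizeof v); }
static uint32_t get_native_u4(const uint8_t* p) { uint32_t v; std::memcpy(&v, p, sizeof v); return v; }

// Forward: the two big-endian operand bytes plus the two zero padding bytes
// of invokedynamic are overwritten with one encoded 4-byte cache index.
void rewrite_indy_operand(uint8_t* operand, uint32_t cache_index) {
  put_native_u4(operand, cache_index + 1);     // "+1" stands in for the real encoding
}

// Reverse: drop the encoding, zero all four bytes, and restore the original
// big-endian u2 constant-pool index (recovered elsewhere via the cache).
uint32_t unrewrite_indy_operand(uint8_t* operand, uint16_t original_cp_index) {
  uint32_t cache_index = get_native_u4(operand) - 1;
  put_native_u4(operand, 0);
  operand[0] = (uint8_t)(original_cp_index >> 8);
  operand[1] = (uint8_t)original_cp_index;
  return cache_index;
}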
Example #4
 int add_cp_cache_entry(int cp_index) {
   assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
   assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
   int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
   assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
   assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
   return cache_index;
 }
Example #5
// Rewrite a classfile-order CP index into a native-order CPC index.
void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  if (!reverse) {
    int  cp_index    = Bytes::get_Java_u2(p);
    int  cache_index = cp_entry_to_cp_cache(cp_index);
    Bytes::put_native_u2(p, cache_index);
  } else {
    int cache_index = Bytes::get_native_u2(p);
    int pool_index = cp_cache_entry_pool_index(cache_index);
    Bytes::put_Java_u2(p, pool_index);
  }
}
Example #6
// Rewrite a classfile-order CP index into a native-order CPC index.
void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  if (!reverse) {
    int  cp_index    = Bytes::get_Java_u2(p);
    int  cache_index = cp_entry_to_cp_cache(cp_index);
    Bytes::put_native_u2(p, cache_index);
    if (!_method_handle_invokers.is_empty())
      maybe_rewrite_invokehandle(p - 1, cp_index, reverse);
  } else {
    int cache_index = Bytes::get_native_u2(p);
    int pool_index = cp_cache_entry_pool_index(cache_index);
    Bytes::put_Java_u2(p, pool_index);
    if (!_method_handle_invokers.is_empty())
      maybe_rewrite_invokehandle(p - 1, pool_index, reverse);
  }
}
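Both versions rely on the distinction between the classfile's big-endian ("Java order") u2 and the host's native byte order, which lets the interpreter load the rewritten index with a plain 16-bit read. A tiny self-contained demo of that difference (plain C++, not HotSpot code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint16_t index = 0x0102;                                             // some cache index
  uint8_t java_order[2] = { (uint8_t)(index >> 8), (uint8_t)index };   // big-endian
  uint8_t native_order[2];
  std::memcpy(native_order, &index, sizeof index);                     // host order

  std::printf("Java order:   %02x %02x\n", java_order[0], java_order[1]);
  std::printf("native order: %02x %02x\n", native_order[0], native_order[1]);
  // On a little-endian host the two differ, which is why the reverse branch
  // has to convert back before the bytecodes can be treated as classfile data.
  return 0;
}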
Example #7
// Rewrite some ldc bytecodes to _fast_aldc
void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
                                 bool reverse) {
  if (!reverse) {
    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
    address p = bcp + offset;
    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
    constantTag tag = _pool->tag_at(cp_index).value();
    if (tag.is_method_handle() || tag.is_method_type()) {
      int cache_index = cp_entry_to_cp_cache(cp_index);
      if (is_wide) {
        (*bcp) = Bytecodes::_fast_aldc_w;
        assert(cache_index == (u2)cache_index, "index overflow");
        Bytes::put_native_u2(p, cache_index);
      } else {
        (*bcp) = Bytecodes::_fast_aldc;
        assert(cache_index == (u1)cache_index, "index overflow");
        (*p) = (u1)cache_index;
      }
    }
  } else {
    Bytecodes::Code rewritten_bc =
              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
    if ((*bcp) == rewritten_bc) {
      address p = bcp + offset;
      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
      int pool_index = cp_cache_entry_pool_index(cache_index);
      if (is_wide) {
        (*bcp) = Bytecodes::_ldc_w;
        assert(pool_index == (u2)pool_index, "index overflow");
        Bytes::put_Java_u2(p, pool_index);
      } else {
        (*bcp) = Bytecodes::_ldc;
        assert(pool_index == (u1)pool_index, "index overflow");
        (*p) = (u1)pool_index;
      }
    }
  }
}
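The overflow asserts above exist because the narrow _fast_aldc form only has a one-byte operand, while _fast_aldc_w carries a u2. A standalone sketch of that width check follows; write_aldc_operand is a hypothetical helper, not HotSpot API.

#include <cassert>
#include <cstdint>
#include <cstring>

// Write cache_index into the operand of a (possibly wide) fast_aldc bytecode.
void write_aldc_operand(uint8_t* operand, int cache_index, bool is_wide) {
  if (is_wide) {
    assert(cache_index == (uint16_t)cache_index && "index overflow for the u2 operand");
    uint16_t v = (uint16_t)cache_index;
    std::memcpy(operand, &v, sizeof v);         // native order, like other rewritten operands
  } else {
    assert(cache_index == (uint8_t)cache_index && "index overflow for the u1 operand");
    operand[0] = (uint8_t)cache_index;
  }
}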