const juint   max_juint   = (juint)-1;   // 0xFFFFFFFF largest juint
const julong  max_julong  = (julong)-1;  // 0xFF....FF largest julong
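
// Side note on the "(unsigned)-1" idiom above: converting -1 to an unsigned integral type is
// defined to wrap modulo 2^N, yielding the all-ones bit pattern, i.e. the type's maximum value.
// A minimal standalone sketch (plain C++ with standard <cstdint>/<limits> types, not the
// HotSpot j* typedefs):
//
//   #include <cstdint>
//   #include <limits>
//   static_assert((uint32_t)-1 == std::numeric_limits<uint32_t>::max(), "all-ones is max");
//   static_assert((uint64_t)-1 == std::numeric_limits<uint64_t>::max(), "all-ones is max");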

typedef jbyte  s1;
typedef jshort s2;
typedef jint   s4;
typedef jlong  s8;

//----------------------------------------------------------------------------------------------------
// JVM spec restrictions

const int max_method_code_size = 64*K - 1;  // JVM spec, 2nd ed. section 4.8.1 (p.134)
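
// Hypothetical illustration (not HotSpot code; the helper name is made up): a class-file checker
// enforcing this rule would bound the Code attribute's code_length by this constant, e.g.
//
//   inline bool is_valid_code_length(int code_length) {
//     return code_length > 0 && code_length <= max_method_code_size;  // 1 .. 65535 bytes
//   }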

// Default ProtectionDomainCacheSize values

const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017);

//----------------------------------------------------------------------------------------------------
// Default and minimum StringTableSize values

const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013);
const int minimumStringTableSize = 1009;

const int defaultSymbolTableSize = 20011;
const int minimumSymbolTableSize = 1009;


//----------------------------------------------------------------------------------------------------
// HotSwap - for JVMTI   aka Class File Replacement and PopFrame
//
// Determines whether on-the-fly class replacement and frame popping are enabled.

#define HOTSWAP

int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
  else {
    const int slop = 2*BytesPerInstWord; // sethi;add  (needed for long offsets)
    if (is_vtable_stub) {
      // ld;ld;ld,jmp,nop
      const int basic = 5*BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedClassPointers ?
                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
      return basic + slop;
    } else {
      const int basic = (48 LP64_ONLY(+ 6)) * BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedClassPointers ?
                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
      return (basic + slop);
    }
  }

  // In order to tune these parameters, run the JVM with VM options
  // +PrintMiscellaneous and +WizardMode to see information about
  // actual itable stubs.  Look for lines like this:
  //   itable #1 at 0x5551212[116] left over: 8
  // Reduce the constants so that the "left over" number stays small (8 in this example).
  // Do not aim at a left-over number of zero, because a very
  // large vtable or itable offset (> 4K) will require an extra
  // sethi/or pair of instructions.
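  // For example (PrintMiscellaneous and WizardMode are develop flags, so this assumes a
  // debug/fastdebug build):
  //   java -XX:+PrintMiscellaneous -XX:+WizardMode ... 2>&1 | grep "left over"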
  //
  // The JVM98 app. _202_jess has a megamorphic interface call.
  // The itable code looks like this:
  // Decoding VtableStub itbl[1]@16
  //   ld  [ %o0 + 4 ], %g3
  //   save  %sp, -64, %sp
  //   ld  [ %g3 + 0xe8 ], %l2
  //   sll  %l2, 2, %l2
  //   add  %l2, 0x134, %l2
  //   and  %l2, -8, %l2        ! NOT_LP64 only
  //   add  %g3, %l2, %l2
  //   add  %g3, 4, %g3
  //   ld  [ %l2 ], %l5
  //   brz,pn   %l5, throw_icce
  //   cmp  %l5, %g5
  //   be  %icc, success
  //   add  %l2, 8, %l2
  // loop:
  //   ld  [ %l2 ], %l5
  //   brz,pn   %l5, throw_icce
  //   cmp  %l5, %g5
  //   bne,pn   %icc, loop
  //   add  %l2, 8, %l2
  // success:
  //   ld  [ %l2 + -4 ], %l2
  //   ld  [ %g3 + %l2 ], %l5
  //   restore  %l5, 0, %g5
  //   ld  [ %g5 + 0x44 ], %g3
  //   jmp  %g3
  //   nop
  // throw_icce:
  //   sethi  %hi(throw_ICCE_entry), %g3
  //   ! 5 more instructions here, LP64_ONLY
  //   jmp  %g3 + %lo(throw_ICCE_entry)
  //   restore
}
Example #3
int CompiledStaticCall::to_interp_stub_size() {
  return NOT_LP64(10)    // movl; jmp
         LP64_ONLY(15);  // movq (1+1+8); jmp (1+4)
}
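
// Working out the LP64 arithmetic in the comment above: movq is REX prefix (1) + opcode (1)
// + 64-bit immediate (8) = 10 bytes, and jmp is opcode (1) + rel32 displacement (4) = 5 bytes,
// which is where the 15-byte stub size comes from (the 32-bit movl; jmp case is 5 + 5 = 10).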

// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.

// Adapters
enum /* platform_dependent_constants */ {
  adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
};
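
// For reference: DEBUG_ONLY(x) expands to x only in debug (ASSERT) builds, so a 64-bit debug
// build reserves 32000 + 150000 bytes of adapter code space, while a 64-bit product build
// reserves just 32000 (and analogously for the 32-bit values).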

// Additional helper methods for MethodHandles code generation:
public:
  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);

  static void verify_klass(MacroAssembler* _masm,
                           Register obj, KlassHandle klass,
                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
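
  // Note: NOT_DEBUG_RETURN gives verify_klass an empty inline body in product builds, so the
  // check above only has a real (out-of-line) implementation in debug builds.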

  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
    verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
                 "reference is a MH");
  }
Example #5

// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.

  // Adapters
  enum /* platform_dependent_constants */ {
    adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000))
  };

  // Additional helper methods for MethodHandles code generation:
 public:
  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);

  static void verify_klass(MacroAssembler* _masm,
                           Register obj_reg, SystemDictionary::WKID klass_id,
                           Register temp_reg, Register temp2_reg,
                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;

  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
                                   Register temp_reg, Register temp2_reg) {
    verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle),
                 temp_reg, temp2_reg,
                 "reference is a MH");
  }
Example #6
void FrameMap::init() {
  if (_init_done) return;

  assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
  map_register(0, rsi);  rsi_opr = LIR_OprFact::single_cpu(0);
  map_register(1, rdi);  rdi_opr = LIR_OprFact::single_cpu(1);
  map_register(2, rbx);  rbx_opr = LIR_OprFact::single_cpu(2);
  map_register(3, rax);  rax_opr = LIR_OprFact::single_cpu(3);
  map_register(4, rdx);  rdx_opr = LIR_OprFact::single_cpu(4);
  map_register(5, rcx);  rcx_opr = LIR_OprFact::single_cpu(5);

#ifndef _LP64
  // The unallocatable registers are at the end
  map_register(6, rsp);
  map_register(7, rbp);
#else
  map_register( 6, r8);    r8_opr = LIR_OprFact::single_cpu(6);
  map_register( 7, r9);    r9_opr = LIR_OprFact::single_cpu(7);
  map_register( 8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
  map_register( 9, r12);  r12_opr = LIR_OprFact::single_cpu(9);
  map_register(10, r13);  r13_opr = LIR_OprFact::single_cpu(10);
  map_register(11, r14);  r14_opr = LIR_OprFact::single_cpu(11);
  // The unallocatable registers are at the end
  map_register(12, r10);  r10_opr = LIR_OprFact::single_cpu(12);
  map_register(13, r15);  r15_opr = LIR_OprFact::single_cpu(13);
  map_register(14, rsp);
  map_register(15, rbp);
#endif // _LP64
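
  // Net effect of the mapping above: indices 0..5 (32-bit) resp. 0..11 (64-bit) are the
  // allocatable registers, while rsp/rbp (plus r10/r15 on 64-bit) are deliberately mapped
  // last so the register allocator never hands them out.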

#ifdef _LP64
  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/);
  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/);
#else
  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
#endif // _LP64
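  // On 64-bit a jlong fits in one register, so both halves of the double_cpu operand refer to
  // the same register; on 32-bit the long is split across a lo/hi register pair (eax:edx and
  // ebx:ecx here).
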
  fpu0_float_opr   = LIR_OprFact::single_fpu(0);
  fpu0_double_opr  = LIR_OprFact::double_fpu(0);
  xmm0_float_opr   = LIR_OprFact::single_xmm(0);
  xmm0_double_opr  = LIR_OprFact::double_xmm(0);

  _caller_save_cpu_regs[0] = rsi_opr;
  _caller_save_cpu_regs[1] = rdi_opr;
  _caller_save_cpu_regs[2] = rbx_opr;
  _caller_save_cpu_regs[3] = rax_opr;
  _caller_save_cpu_regs[4] = rdx_opr;
  _caller_save_cpu_regs[5] = rcx_opr;

#ifdef _LP64
  _caller_save_cpu_regs[6]  = r8_opr;
  _caller_save_cpu_regs[7]  = r9_opr;
  _caller_save_cpu_regs[8]  = r11_opr;
  _caller_save_cpu_regs[9]  = r12_opr;
  _caller_save_cpu_regs[10] = r13_opr;
  _caller_save_cpu_regs[11] = r14_opr;
#endif // _LP64


  _xmm_regs[0] = xmm0;
  _xmm_regs[1] = xmm1;
  _xmm_regs[2] = xmm2;
  _xmm_regs[3] = xmm3;
  _xmm_regs[4] = xmm4;
  _xmm_regs[5] = xmm5;
  _xmm_regs[6] = xmm6;
  _xmm_regs[7] = xmm7;

#ifdef _LP64
  _xmm_regs[8]   = xmm8;
  _xmm_regs[9]   = xmm9;
  _xmm_regs[10]  = xmm10;
  _xmm_regs[11]  = xmm11;
  _xmm_regs[12]  = xmm12;
  _xmm_regs[13]  = xmm13;
  _xmm_regs[14]  = xmm14;
  _xmm_regs[15]  = xmm15;
#endif // _LP64

  for (int i = 0; i < 8; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }

  for (int i = 0; i < nof_caller_save_xmm_regs ; i++) {
    _caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i);
  }

  _init_done = true;

  rsi_oop_opr = as_oop_opr(rsi);
  rdi_oop_opr = as_oop_opr(rdi);
  rbx_oop_opr = as_oop_opr(rbx);
  rax_oop_opr = as_oop_opr(rax);
  rdx_oop_opr = as_oop_opr(rdx);
  rcx_oop_opr = as_oop_opr(rcx);

  rsp_opr = as_pointer_opr(rsp);
  rbp_opr = as_pointer_opr(rbp);

#ifdef _LP64
  r8_oop_opr = as_oop_opr(r8);
  r9_oop_opr = as_oop_opr(r9);
  r11_oop_opr = as_oop_opr(r11);
  r12_oop_opr = as_oop_opr(r12);
  r13_oop_opr = as_oop_opr(r13);
  r14_oop_opr = as_oop_opr(r14);
#endif // _LP64

  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  SharedRuntime::java_calling_convention(&sig_bt, &regs, 1, true);
  receiver_opr = as_oop_opr(regs.first()->as_Register());
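  // The calling-convention query above avoids hard-coding the receiver register: it asks the
  // shared runtime where a single T_OBJECT argument (the receiver) is passed under the Java
  // calling convention and wraps that register as receiver_opr.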

}
Example #7
#ifdef _LP64
  #define UNALLOCATED 4    // rsp, rbp, r15, r10
#else
  #define UNALLOCATED 2    // rsp, rbp
#endif // LP64

  pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - UNALLOCATED,  // number of registers killed by calls
  pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map,  // number of registers killed by calls
  pd_nof_caller_save_xmm_regs_frame_map = pd_nof_xmm_regs_frame_map,  // number of registers killed by calls

  pd_nof_cpu_regs_reg_alloc = pd_nof_caller_save_cpu_regs_frame_map,  // number of registers that are visible to register allocator
  pd_nof_fpu_regs_reg_alloc = 6,  // number of registers that are visible to register allocator

  pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map, // number of registers visible to linear scan
  pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of registers visible to linear scan
  pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map, // number of registers visible to linear scan
  pd_first_cpu_reg = 0,
  pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
  pd_first_byte_reg = 2,
  pd_last_byte_reg = 5,
  pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
  pd_last_fpu_reg =  pd_first_fpu_reg + 7,
  pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
  pd_last_xmm_reg =  pd_first_xmm_reg + pd_nof_xmm_regs_frame_map - 1
};
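
// Worked 64-bit example for the constants above (assuming pd_nof_cpu_regs_frame_map is 16,
// matching the assert in FrameMap::init earlier): 16 - UNALLOCATED(4) = 12 caller-save and
// allocatable CPU registers, which agrees with pd_last_cpu_reg = 11 (registers 0..11).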


// encoding of float value in debug info:
enum {
  pd_float_saved_as_double = true
};
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}
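
// The bailout above gives up on the compilation early when the code buffer is nearly full,
// rather than letting a later instruction emit overflow it; the 64-bit threshold is larger,
// presumably because 64-bit instruction sequences tend to be longer.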
Example #9
 const llvm::IntegerType* intptr_type() const {
   return LP64_ONLY(jlong_type()) NOT_LP64(jint_type());
 }
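 // i.e. pick the LLVM integer type whose width matches the platform pointer size: the 64-bit
 // (jlong) type on LP64 targets and the 32-bit (jint) type otherwise.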