address NativeGeneralJump::jump_destination() const {
  int op_code = char_at(0) & 0xFF;
  bool is_long = (op_code == 0xE9 || op_code == 0x0F);
  int  offset  = (op_code == 0x0F) ? 2 : 1;
  int  length  = offset + (is_long ? 4 : 1);

  if (is_long)
    return addr_at(0) + length + long_at(offset);
  else
    // The 8-bit displacement of a short jump is signed; sign-extend it
    // rather than masking with 0xFF, or backward jumps decode wrong.
    return addr_at(0) + length + (int)(signed char)char_at(offset);
}
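To make the displacement arithmetic concrete, here is a hypothetical standalone decoder with the same logic; the function name and raw-buffer interface are illustrative, not part of the VM:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Decode the target of an x86 jmp: EB rel8 (short), E9 rel32 (near),
// or 0F 8x rel32 (two-byte conditional). Destination = start + length + disp.
static const uint8_t* decode_jump_destination(const uint8_t* code) {
  int op_code  = code[0];
  bool is_long = (op_code == 0xE9 || op_code == 0x0F);
  int offset   = (op_code == 0x0F) ? 2 : 1;
  int length   = offset + (is_long ? 4 : 1);
  int32_t disp;
  if (is_long) {
    memcpy(&disp, code + offset, 4);     // little-endian rel32
  } else {
    disp = (int8_t)code[offset];         // sign-extend rel8
  }
  return code + length + disp;
}

int main() {
  uint8_t self_loop[] = { 0xEB, 0xFE };  // "jmp ." : disp8 = -2
  printf("%td\n", decode_jump_destination(self_loop) - self_loop); // prints 0
  return 0;
}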
Example #2
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(),  "monitor end should be strictly below the frame pointer");
  return result;
}
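All of these snippets lean on the addr_at(offset) idiom, but it comes in two flavors: frame::addr_at indexes machine words relative to the frame pointer, while NativeInstruction::addr_at indexes bytes from the instruction start. A minimal mock, with all types and names illustrative:

#include <cstdint>

typedef unsigned char* address;

struct MockFrame {
  intptr_t* _fp;
  // Frame flavor: index counts machine words relative to fp
  // (negative indices reach slots below the frame pointer).
  address addr_at(int index) const { return (address)(_fp + index); }
};

struct MockInstruction {
  address _start;
  // Instruction flavor: offset counts bytes from the instruction start.
  address addr_at(int offset) const { return _start + offset; }
};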
Example #3
// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {

  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); 
  // Get the address of the bundle containing the call
  IPF_Bundle *bundle = (IPF_Bundle*)addr_at(0);

  // Generate the bits for a "chk.a.nc GR0, .+0", which always branches to self
  M22 check(4 | Assembler::keep, GR0, 0, PR0);

  // Loop until the change is accomplished
  while (true) {
    uint41_t new_X, new_L;

    // verify that this is a movl
    guarantee( Assembler::is_movl( bundle->get_template(), bundle->get_slot2() ), "not a movl instruction");
  
    // Save the old bundle, and make an image that is updated
    IPF_Bundle old_bundle = *bundle;
    IPF_Bundle mid_bundle = old_bundle;
    IPF_Bundle new_bundle = old_bundle;
  
    // Change the middle bundle so that the slot 0 instruction branches to self
    mid_bundle.set_slot0( check.bits() );
  
    // Update the new image
    X2::set_imm((uint64_t)dest, new_bundle.get_slot2(), new_X, new_L);
    new_bundle.set_slot1( new_L );
    new_bundle.set_slot2( new_X );
  
    // Now the synchronous work begins: get the halves
    uint64_t old_half0 = old_bundle.get_half0();
  
    // Exchange the low order half, verify it was unchanged, and retry if it was different
    int64_t cur_half0 = atomic::compare_and_exchange_long(
        (jlong)mid_bundle.get_half0(), (jlong*)bundle->addr_half0(), (jlong)old_half0);

    if( cur_half0 == old_half0 ) {
  
      // Force a memory barrier
      atomic::membar();
  
      // Write the upper half with the changed bits
      bundle->set_half1(new_bundle.get_half1());
  
      // Force a memory barrier
      atomic::membar();
  
      // Write the lower half
      bundle->set_half0(new_bundle.get_half0());
  
      // Final memory barrier
      atomic::membar();
  
      break;
    }
  }

  ICache::invalidate_range((address)bundle, sizeof(*bundle));  // flush the bundle, not the pointer
}
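The protocol above (CAS the low half to a self-branch image, rewrite the high half behind a barrier, then publish the final low half) can be modeled with C++11 atomics. A minimal sketch under assumed names (Bundle128, patch_bundle), not the VM's atomic API:

#include <atomic>
#include <cstdint>

struct Bundle128 { std::atomic<uint64_t> half0, half1; };

// spin0 is a low-half image whose slot 0 branches to self, so any thread
// executing the bundle mid-patch just loops until the patch completes.
bool patch_bundle(Bundle128* b, uint64_t old0, uint64_t spin0,
                  uint64_t new0, uint64_t new1) {
  uint64_t expected = old0;
  if (!b->half0.compare_exchange_strong(expected, spin0))
    return false;                                        // raced; caller retries with a fresh image
  std::atomic_thread_fence(std::memory_order_seq_cst);   // membar
  b->half1.store(new1, std::memory_order_relaxed);       // upper half with the changed bits
  std::atomic_thread_fence(std::memory_order_seq_cst);   // membar
  b->half0.store(new0, std::memory_order_relaxed);       // publish the lower half
  std::atomic_thread_fence(std::memory_order_seq_cst);   // final membar
  return true;
}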
Example #4
void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}
Example #5
void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
Example #6
void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}
Example #7
int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}
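For context, data32 reverses the classic SPARC two-instruction constant load: sethi fills bits 31..10 of the register and the add supplies the low 10 bits. A hypothetical round-trip check (compose32 is illustrative, not the VM helper):

#include <cstdint>
#include <cassert>

// Rebuild a 32-bit constant from a sethi imm22 and the low bits of an add simm13.
int32_t compose32(uint32_t sethi_imm22, int32_t add_simm13) {
  return (int32_t)((sethi_imm22 << 10) | (add_simm13 & 0x3ff));
}

int main() {
  uint32_t x = 0xDEADBEEF;
  // Splitting into high 22 bits and low 10 bits round-trips the value.
  assert((uint32_t)compose32(x >> 10, x & 0x3ff) == x);
  return 0;
}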
Example #8
bool NativeInstruction::is_call() const {

  // Must start with a "movl" to load the address
  IPF_Bundle *bundle0 = (IPF_Bundle*)addr_at(0);
  if( !Assembler::is_movl( bundle0->get_template(), bundle0->get_slot2() ) )
    return false;

// More accurate test required later
#if 0
  IPF_Bundle *bundle1 = (IPF_Bundle*)addr_at(sizeof(IPF_Bundle));
  if( !Assembler::is_call_indirect( bundle1->get_template(), bundle1->get_slot2() ) )
    return false;
#endif

  return true;
}
Example #9
void NativeMovConstReg::set_data(int64_t src)  {
  verify();
  uint41_t new_X;
  uint41_t new_L;
  IPF_Bundle *bundle = (IPF_Bundle *)addr_at(0);
  X2::set_imm((uint64_t)src, bundle->get_slot2(), new_X, new_L);
  bundle->set_slot1( new_L );
  bundle->set_slot2( new_X );

  ICache::invalidate_range((address)bundle, sizeof(*bundle));  // flush the bundle, not the pointer

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)src;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}
Example #10
bool NativeInstruction::is_movl() const {

  IPF_Bundle *bundle0 = (IPF_Bundle*)addr_at(0);
  if( !Assembler::is_movl( bundle0->get_template(), bundle0->get_slot2() ) )
    return false;

  return true;
}
Example #11
// The instruction at "bci", whose size is "ilen", is changing size by
// "delta".  Reallocate, move code, recalculate jumps, and enqueue
// change items as necessary.
bool Relocator::relocate_code(int bci, int ilen, int delta) {
  int next_bci = bci + ilen;
  if (delta > 0 && code_length() + delta > code_array_length())  {
    // Expand allocated code space, if necessary.
    if (!expand_code_array(delta)) {
      return false;
    }
  }

  // We require 4-byte alignment of code arrays.
  assert(((intptr_t)code_array() & 3) == 0, "check code alignment");
  // Change jumps before doing the copying; this routine requires aligned switches.
  change_jumps(bci, delta);

  // In case we have shrunk a tableswitch/lookupswitch statement, we store the last
  // bytes that get overwritten. We have to copy the bytes after the change_jumps method
  // has been called, since it is likely to update the last offset in a tableswitch/lookupswitch.
  if (delta < 0) {
    assert(delta>=-3, "we cannot overwrite more than 3 bytes");
    memcpy(_overwrite, addr_at(bci + ilen + delta), -delta);
  }

  memmove(addr_at(next_bci + delta), addr_at(next_bci), code_length() - next_bci);
  set_code_length(code_length() + delta);
  // Also adjust exception tables...
  adjust_exception_table(bci, delta);
  // Line number tables...
  adjust_line_no_table(bci, delta);
  // And local variable table...
  adjust_local_var_table(bci, delta);

  // Adjust stack maps
  adjust_stack_map_table(bci, delta);

  // Relocate the pending change stack...
  for (int j = 0; j < _changes->length(); j++) {
    ChangeItem* ci = _changes->at(j);
    ci->relocate(bci, delta);
  }

  // Notify any listeners about code relocation
  notify(bci, delta, code_length());

  return true;
}
Example #12
inline intptr_t*    frame::sender_sp()        const {
  // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames?
  if (is_interpreted_frame()) {
    assert(false, "should never happen");
    return get_interpreterState()->sender_sp();
  } else {
    return            addr_at(sender_sp_offset);
  }
}
Example #13
// Handle lookup/table switch instructions. Called by the ChangeSwitchPad class.
bool Relocator::handle_switch_pad(int bci, int old_pad, bool is_lookup_switch) {  
  int ilen = rc_instr_len(bci);
  int new_pad = align(bci+1) - (bci+1);
  int pad_delta = new_pad - old_pad;
  if (pad_delta != 0) {
    int len;    
    if (!is_lookup_switch) {    
      int low  = int_at(bci+1+old_pad+4);
      int high = int_at(bci+1+old_pad+8);
      len = high-low+1 + 3; // 3 for default, hi, lo.
    } else {      
      int npairs = int_at(bci+1+old_pad+4);
      len = npairs*2 + 2; // 2 for default, npairs.
    }
    // Because "relocateCode" does a "changeJumps" loop,
    // which parses instructions to determine their length,
    // we need to call that before messing with the current
    // instruction.  Since it may also overwrite the current
    // instruction when moving down, remember the possibly
    // overwritten part. 
    
    // Move the code following the instruction...
    if (!relocate_code(bci, ilen, pad_delta)) return false;
    
    if (pad_delta < 0) {
      // Move the shrunken instruction down.
      memmove(addr_at(bci + 1 + new_pad),
              addr_at(bci + 1 + old_pad),
              len * 4 + pad_delta);
      memmove(addr_at(bci + 1 + new_pad + len*4 + pad_delta),
              _overwrite, -pad_delta);
    } else {
      assert(pad_delta > 0, "check");
      // Move the expanded instruction up.
      memmove(addr_at(bci + 1 + new_pad),
              addr_at(bci + 1 + old_pad),
              len * 4);
    }
  }
  return true;
}
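The pad arithmetic deserves a worked example: switch operands must start on a 4-byte boundary after the opcode at bci, so the pad is align(bci+1) - (bci+1). A small illustrative check, with align4 standing in for the relocator's align:

#include <cstdio>

static int align4(int n) { return (n + 3) & ~3; }  // round up to a multiple of 4

int main() {
  // Operands start at bci+1; the pad fills up to the next 4-byte boundary.
  for (int bci = 0; bci < 4; bci++)
    printf("bci=%d pad=%d\n", bci, align4(bci + 1) - (bci + 1));
  // Prints: bci=0 pad=3, bci=1 pad=2, bci=2 pad=1, bci=3 pad=0
  return 0;
}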
Example #14
void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}
Example #15
bool frame::oop_iterate_interpreted_float_frame(OopClosure* blk) {
  methodOop m =  methodOopDesc::methodOop_from_hcode(hp());
  // Return if this activation has no floats (the marker is conservative)
  if (!m->has_float_temporaries()) return false;
        
  // Iterate from the stack pointer to the end of the float section
  oop* end = (oop*) addr_at(m->float_section_start_offset() - m->float_section_size());
  for (oop* p = sp(); p <= end; p++) {
    blk->do_oop(p);
  }

  // Skip the float section and magic_value

  // Iterate from just before the float section to the first temp
  for (oop* q = (oop*) addr_at(m->float_section_start_offset() + 2); q <= temp_addr(0); q++) {
    blk->do_oop(q);
  }

  // The receiver
  blk->do_oop(receiver_addr());

  return true;
}
Example #16
bool frame::follow_roots_interpreted_float_frame() {
  methodOop m = methodOop(hp());
  assert(m->is_method(), "must be method");
  // Return if this activation has no floats (the marker is conservative)
  if (!m->has_float_temporaries()) return false;

  // Iterate from the stack pointer to the end of the float section
  oop* end = (oop*) addr_at(m->float_section_start_offset() - m->float_section_size());
  for (oop* p = sp(); p <= end; p++) {
    MarkSweep::follow_root(p);
  }

  // Skip the float section and magic_value

  // Iterate from just before the float section to the first temp
  for (oop* q = (oop*) addr_at(m->float_section_start_offset() + 2); q <= temp_addr(0); q++) {
    MarkSweep::follow_root(q);
  }

  // The receiver
  MarkSweep::follow_root(receiver_addr());

  return true;
}
Example #17
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#ifdef COMPILER2
  if (map->update_map()) {
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
Example #18
void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
Example #19
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}
Example #20
void AbstractAssembler::flush() {
  ICache::invalidate_range(addr_at(0), offset());
}
Example #21
 // We use an illtrap for marking a method as not_entrant or zombie
 // iff !UseSIGTRAP.
 bool is_sigill_zombie_not_entrant() {
   assert(!UseSIGTRAP, "precondition");
   // Work around a C++ compiler bug which changes 'this'.
   return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
 }
Example #22
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdx_offset);
}
Example #23
inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}
Example #24
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
#define CPU_X86_VM_FRAME_X86_INLINE_HPP

#include "code/codeCache.hpp"

// Inline functions for Intel frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
  // unlucky the junk value could point to a zombied method and we'll die on the
  // find_blob call. This is also why we can have no asserts on the validity
  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Relationals on frames, based on frame id
// Return true if the frame is younger (more recent activation) than the frame represented by id
inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() < id ; }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }



inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline void      frame::set_link(intptr_t* addr)  { *(intptr_t **)addr_at(link_offset) = addr; }


inline intptr_t* frame::unextended_sp() const     { return _unextended_sp; }

// Return address:

inline address* frame::sender_pc_addr()      const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

// return address of param, zero origin index.
inline address* frame::native_param_addr(int idx) const { return (address*) addr_at( native_frame_initial_param_offset+idx); }

#ifdef CC_INTERP

inline interpreterState frame::get_interpreterState() const {
  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
}

inline intptr_t*    frame::sender_sp()        const {
  // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames?
  if (is_interpreted_frame()) {
    assert(false, "should never happen");
    return get_interpreterState()->sender_sp();
  } else {
    return            addr_at(sender_sp_offset);
  }
}

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_locals);
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_bcp);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_constants);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_method);
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_mdx);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  assert(is_interpreted_frame(), "wrong frame type");
  return get_interpreterState()->_stack + 1;
}

#else /* asm interpreter */
inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }
Example #25
void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}
Example #26
inline interpreterState frame::get_interpreterState() const {
  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
}
Example #27
// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
    *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
}
Example #28
inline address* frame::sender_pc_addr()      const { return (address*) addr_at( return_addr_offset); }
Example #29
bool frame::safe_for_sender(JavaThread *thread) {
  address   sp = (address)_sp;
  address   fp = (address)_fp;
  address   unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;

  // sp must be within the usable part of the stack (not in guards)
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);


  if (!sp_safe) {
    return false;
  }

  // unextended sp must be within the stack and above or equal sp
  bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
                            (unextended_sp >= sp);

  if (!unextended_sp_safe) {
    return false;
  }

  // an fp must be within the stack and above (but not equal to) sp
  // the second check on fp + return_addr_offset handles the situation where fp is -1
  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));

  // We know sp/unextended_sp are safe; only fp is questionable here

  // If the current frame is known to the code cache then we can attempt
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != NULL ) {

    // First check if the frame is complete and the tester is reliable.
    // Unfortunately we can only check frame completeness for runtime stubs and
    // nmethods; other generic buffer blobs are more problematic so we just assume
    // they are ok. Adapter blobs never have a complete frame and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.

      if (!fp_safe) return false;
      // Validate the JavaCallWrapper an entry frame must have

      address jcw = (address)entry_frame_call_wrapper();
      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
      return jcw_safe;
    }

    intptr_t* sender_sp = NULL;
    address   sender_pc = NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      sender_pc = (address) this->fp()[return_addr_offset];
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be checked for c1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      // On Intel the return_address is always the word on the stack
      sender_pc = (address) *(sender_sp-1);
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // ebp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp
      // is really a frame pointer.

      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL ||  sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());

      return jcw_safe;
    }

    if (sender_blob->is_nmethod()) {
      nmethod* nm = sender_blob->as_nmethod_or_null();
      if (nm != NULL) {
        if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
          return false;
        }
      }
    }

    // If the frame size is 0 (or less) something is bad, because every nmethod
    // has a non-zero frame size: the return address counts against the callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_nmethod()) {
        return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be a native-compiled frame. Since the sender will try to use fp to
  // find linkages, it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ( (address) this->fp()[return_addr_offset] == NULL) return false;


  // could try and do some more potential verification of native frame if we could think of some...

  return true;

}
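As a sanity check on the guard-page bookkeeping at the top of this function, here is a hypothetical distilled version of the sp bounds test alone; names are illustrative, and the stack is assumed to grow downward from stack_base:

#include <cstddef>

// sp is "safe" when it lies below the stack base and above the guard pages.
bool sp_in_usable_stack(const char* sp, const char* stack_base,
                        size_t stack_size, size_t guard_size) {
  size_t usable = stack_size - guard_size;        // exclude yellow/red pages
  return sp < stack_base && sp >= stack_base - usable;
}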
Example #30
// return address of param, zero origin index.
inline address* frame::native_param_addr(int idx) const { return (address*) addr_at( native_frame_initial_param_offset+idx); }