inline void Devirtualizer<true>::do_oop(OopClosureType* closure, T* p) {
  debug_only(closure->verify(p));
  closure->do_oop_nv(p);
}
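The macro itself never appears in these snippets. For reference, here is a minimal sketch of the usual HotSpot definition (per the convention in utilities/macros.hpp; the exact form may vary by release): in debug (ASSERT) builds the wrapped statement is compiled in, in product builds it vanishes, so nothing passed to debug_only() may have side effects that release code relies on.

#ifdef ASSERT
  #define debug_only(code) code
#else
  #define debug_only(code)
#endif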
Example #2
 // Add an exception, using the given JVM state, without commoning.
 void push_exception_state(SafePointNode* ex_map) {
     debug_only(verify_exception_state(ex_map));
     ex_map->set_next_exception(_exceptions);
     _exceptions = ex_map;
 }
Example #3
//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
    debug_only(JVMState* jvmroot = of_depth(1));
    assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
    return endoff();
}
Example #4
 virtual void pass_double() {
   *_to++ = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
   debug_only(verify_tag(frame::TagValue));
   _from -= 2*Interpreter::stackElementSize();
   add_signature( double_sig );
  }
Example #5
 void set_map(SafePointNode* m)      {
     _map = m;
     debug_only(verify_map());
 }
Example #6
/**
 * Allocate a block of the requested size according to the generational
 * allocation policy (a retry loop):
 *   1. (Lock-free) fast-path allocation from the young generation.
 *   2. (Under the heap lock)
 *      1) Acquire the global Heap_lock.
 *      2) If the request is larger than the young generation's capacity, or a GC
 *         has been triggered but cannot run yet, or an incremental GC would fail,
 *         try the generations in turn (young, then old); otherwise allocate from
 *         the young generation only.
 *      3) If a GC has been triggered but cannot run yet:
 *         a) If some generation can still expand its capacity, try an
 *            expand-and-allocate on the generations.
 *         b) Otherwise release the heap lock and wait for the pending GC to finish.
 *      4) Release the heap lock, submit a GC request, and wait for it to be
 *         executed or abandoned.
 *      5) If the GC was abandoned, or could not run because of the GC locker,
 *         go back to step 1.
 *      6) If the GC overhead limit was exceeded, return NULL; otherwise return
 *         the allocated block.
 *
 * @param size     requested size of the memory block
 * @param is_tlab  false: allocate the object directly from the heap
 *                 true:  allocate a new TLAB for the current thread
 * @param gc_overhead_limit_was_exceeded  out parameter: whether the GC overhead
 *                 (time) limit was exceeded
 */
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {

  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());

  // Make sure the JVM is not currently performing a GC
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // The young generation must support lock-free (inline contiguous) allocation
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(), "Otherwise, must do alloc within heap lock");

    // Should we try to allocate from the young generation first?
    if (gen0->should_allocate(size, is_tlab)) {
      // Fast path: try a lock-free (parallel) allocation from the young generation
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }

    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }

      // Note that only large objects get a shot at being
      // allocated in later generations.
      // Should the allocation be attempted in the young generation only?
      bool first_only = ! should_try_older_generation_allocation(size);

      // Try each generation of the heap in turn
      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {  // Another thread has already triggered a GC
        if (is_tlab) {
          // This thread is allocating a new TLAB (objects would then be allocated from it).
          // Return NULL so the caller falls back to allocating the object directly from the heap.
          return NULL;
        }

        if (!gch->is_maximal_no_gc()) {  // Some generation can still expand its capacity
          // Try expanding the generations and allocating from them
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for the JNI critical sections to clear and the pending GC to complete
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while in jni critical section");
          }
          return NULL;
        }
      }

      // Allocation failed; read the GC count (under the heap lock) before triggering a collection
      gc_count_before = Universe::heap()->total_collections();
    }

    // Trigger a GC: enqueue a GC VM operation on the VMThread's queue.
    // The collection itself is carried out by the VMThread (or dedicated GC threads).
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);

    if (op.prologue_succeeded()) {  // The GC operation has completed
      result = op.result();
      if (op.gc_locked()) {  // This thread did not trigger the GC (another thread may just have); retry the allocation
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      // Did this GC exceed the configured GC overhead (time) limit?
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      // If the limit was exceeded, soft references must already have been cleared
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");

      // GC overhead limit exceeded
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        // Overhead limit exceeded: return NULL so the caller throws an OutOfMemoryError
        return NULL;
      }

      // On success, the returned block must lie within the heap
      assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) && (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }

  }// for

}
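As a usage note, the gc_overhead_limit_was_exceeded flag only matters when the returned pointer is NULL. A hedged, hypothetical caller sketch (the policy variable, word_size, and the error handling are illustrative, not taken from the source):

  bool gc_overhead_limit_was_exceeded = false;
  HeapWord* obj = policy->mem_allocate_work(word_size,
                                            false /* is_tlab */,
                                            &gc_overhead_limit_was_exceeded);
  if (obj == NULL) {
    if (gc_overhead_limit_was_exceeded) {
      // report java.lang.OutOfMemoryError: "GC overhead limit exceeded"
    } else {
      // report java.lang.OutOfMemoryError: "Java heap space"
    }
  }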
Example #7
 virtual void pass_float()  {
   *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
   debug_only(verify_tag(frame::TagValue));
   _from -= Interpreter::stackElementSize();
   add_signature( float_sig );
  }
Example #8
JavaCallWrapper::JavaCallWrapper(methodHandle callee_method, Handle receiver, JavaValue* result, TRAPS) {
    JavaThread* thread = (JavaThread *)THREAD;
    bool clear_pending_exception = true;

    guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code");
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler");
    _result   = result;

    // Allocate handle block for Java code. This must be done before we change thread_state to _thread_in_Java_or_stub,
    // since it can potentially block.
    JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(thread);

    // After this, we are officially in Java code. This needs to be done before we change any of the thread local
    // info, since we cannot find oops before the new information is set up completely.
    ThreadStateTransition::transition(thread, _thread_in_vm, _thread_in_Java);

    // Make sure that we handle asynchronous stops and suspends _before_ we clear all thread state
    // in JavaCallWrapper::JavaCallWrapper(). This way, we can decide if we need to do any pd actions
    // to prepare for stop/suspend (flush register windows on sparcs, cache sp, or other state).
    if (thread->has_special_runtime_exit_condition()) {
        thread->handle_special_runtime_exit_condition();
        if (HAS_PENDING_EXCEPTION) {
            clear_pending_exception = false;
        }
    }


    // Make sure to set the oops after the thread transition - since we can block there. No one is GC'ing
    // the JavaCallWrapper before the entry frame is on the stack.
    _callee_method = callee_method();
    _receiver = receiver();

#ifdef CHECK_UNHANDLED_OOPS
    THREAD->allow_unhandled_oop(&_receiver);
#endif // CHECK_UNHANDLED_OOPS

    _thread       = (JavaThread *)thread;
    _handles      = _thread->active_handles();    // save previous handle block & Java frame linkage

    // For the profiler, the last_Java_frame information in thread must always be in
    // legal state. We have no last Java frame if last_Java_sp == NULL so
    // the valid transition is to clear _last_Java_sp and then reset the rest of
    // the (platform specific) state.

    _anchor.copy(_thread->frame_anchor());
    _thread->frame_anchor()->clear();

    debug_only(_thread->inc_java_call_counter());
    _thread->set_active_handles(new_handles);     // install new handle block and reset Java frame linkage

    assert (_thread->thread_state() != _thread_in_native, "cannot set native pc to NULL");

    // clear any pending exception in thread (native calls start with no exception pending)
    if(clear_pending_exception) {
        _thread->clear_pending_exception();
    }

    if (_anchor.last_Java_sp() == NULL) {
        _thread->record_base_of_stack_pointer();
    }
}
示例#9
0
HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table.
  size_t index = _array->index_for(addr);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(),
           err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                   q, _sp->bottom()));
    assert(q < _sp->end(),
           err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                   q, _sp->end()));
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  index--;
  q -= offset;
  assert(q >= _sp->bottom(),
         err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                 q, _sp->bottom()));
  assert(q < _sp->end(),
         err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                 q, _sp->end()));
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
    assert(n > q,
           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
                   " while querying blk_start(" PTR_FORMAT ")"
                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
                   n, last, addr, _sp->bottom(), _sp->end()));
  }
  assert(q <= addr,
         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
                 q, addr));
  assert(addr <= n,
         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
                 addr, n));
  return q;
}
示例#10
0
inline void HandleMark::push() {
  // This is intentionally a NOP. pop_and_restore will reset
  // values to the HandleMark further down the stack, typically
  // in JavaCalls::call_helper.
  debug_only(_area->_handle_mark_nesting++);
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
}
// ------------------------------------------------------------------
// Record the failure reason and decide if we should recompile the nmethod.
bool Deoptimization::policy_for_recompile( JavaThread *thread, const CodeBlob *cb, int deopt_index ) {
  // Get the CPData where failure happened
  vframe vf(thread);       // The basic Java frame being deopt'd
  CodeProfile *cp;         // CodeProfile holding some C1 inlined data
  CPData *cpd = NULL;      // Place to record failures
  // For JVMTI operation, continuous deopts are the expected norm, so we don't
  // do the 'find_nested_cpdata' and don't do the 'did_deopt' assert that
  // entails.  For static fields in <clinit> - it requires a thread-check
  // which C2 will not emit.  Hence we continuously deopt & recompile until the
  // class is finally initialized and we can emit the static-field ref code.
  // For Reason_deopt, the code has already been patched due to non-optimization
  // related reasons like stack overflow or class loading so there is nothing
  // interesting to record here.
  if( deopt_index != Reason_unhandled &&
      deopt_index != Reason_jvmti &&
      deopt_index != Reason_static_in_clinit &&
      deopt_index != Reason_deopt )
    // Record a did_deopt attempt, mostly for debugging continuous deopt bugs.
    // For *selected* deopt flavors we also record other interesting bits further below.
      cpd = CodeProfile::find_nested_cpdata(thread, vf, cb->owner().as_methodCodeOop(), &cp, true);

  if( deopt_index < 0 )         // Convert unloaded-classes indices
    deopt_index = Reason_unloaded; // Do not bother to load the class, the interpreter will do that.

  // Count flavors of deopt
  counters[deopt_index]++;

  // ---------------
  // Make the nmethod not entrant, so next time it is called it gets recompiled.
  bool make_not_entrant = true;
  switch( deopt_index ) {
  case Reason_deopt: // A real deopt in progress; containing nmethod already marked not-entrant
    break;
  case Reason_static_in_clinit:  // Plough ahead in the interpreter
  case Reason_uninitialized:    
  case Reason_unloaded_array_class:
  case Reason_unloaded:          // These are all fine once the interpreter 
    break;                      // loads the class and then recompile.
  case Reason_unhandled:  // Always must uncommon-trap for this (big multi anewarray)
  case Reason_jvmti:             // Cannot compile for this
    make_not_entrant = false;   // Suffer along in the interpreter
    break;                      
  case Reason_stuck_in_loop:
    make_not_entrant = false;
    break;                      
  case Reason_unexpected_klass: { // Had a TypeProfile failure after profiling in C1; record the polymorphic-inlining failure.
    CPData_Invoke* cpdi = (CPData_Invoke*)cpd;
    debug_only( assert(cpdi->is_Invoke(), "Not an invoke cpdata structure!") );
    cpdi->_poly_inlining_fail = 1;
    break;                      
  }
  case Reason_unreached: {
    assert(cpd, "Unexpected cpd=null.  Are we deopt'ing on an unprofiled bci?");
    if (cpd) {
      CPData_Branch* cpdb = (CPData_Branch*)cpd;
      debug_only( assert(cpdb->is_Branch(), "Not a branch cpdata structure!") );
      if (cpdb->_taken==0) cpdb->_taken++;
      if (cpdb->_nottaken==0) cpdb->_nottaken++;
    }
    break;
  }

  case Reason_null_check:
    // Saw a null, e.g., as an array base.
  case Reason_div0_check: 
  case Reason_unexpected_null_cast:
    // If we see an unexpected null at a check-cast we record it and force a
    // recompile; the offending check-cast will be compiled to handle NULLs.
    ((CPData_Null*)cpd)->_null = 1;
    break;

  case Reason_array_store_check:
    // We come here when we tried to cast an oop array to its declared type and
    // that cast failed.  Flag the bytecode to not attempt the heroic opt
    // again.
  case Reason_intrinsic_check:
    // Math.pow intrinsic returned a NaN, which requires StrictMath.pow to
    // handle.  Recompile without intrinsifying Math.pow.  Or maybe
    // System.arraycopy must go slow; do not intrinsify call at this bci.
    // Intrinsic ops come from calls, which use CPData_Invoke which includes
    // CPData_Null.
  case Reason_athrow:     
    // Actually throwing.  Recompile catching/throwing as needed.
  case Reason_cast_check: 
    // Cast is actually failing.  Recompile catching/throwing as needed.
    ((CPData_Null*)cpd)->_fail = 1;
    break;

  case Reason_range_check: {
    // Method in question is actually throwing range check exceptions.
    // Recompile catching them.
    methodOop moop = vf.method();
    Bytecodes::Code bc = (Bytecodes::Code)*moop->bcp_from(vf.bci());
    if (cpd->is_Null(bc)) {
      ((CPData_Null*)cpd)->_rchk = 1;
    }
    if (cpd->is_Branch(bc)) {
      Untested();
      CPData_Branch* cpdb = (CPData_Branch*)cpd;
      debug_only( assert(cpdb->is_Branch(), "Not a branch cpdata structure!") );
      if (cpdb->_taken==0) cpdb->_taken++;
      if (cpdb->_nottaken==0) cpdb->_nottaken++;
    }
    break;
  }

  case Reason_range_check_widened: {
    // We might deopt for a range check 'speculatively', if we've widened some
    // check in the method.  Recompile without the optimization
    methodOop moop = vf.method();
    Bytecodes::Code bc = (Bytecodes::Code)*moop->bcp_from(vf.bci());
    if (cpd->is_Null(bc)) {
      ((CPData_Null*)cpd)->_rchk_wide = 1;
    }
    if (cpd->is_Branch(bc)) {
      Untested();
      CPData_Branch* cpdb = (CPData_Branch*)cpd;
      debug_only( assert(cpdb->is_Branch(), "Not a branch cpdata structure!") );
      if (cpdb->_taken==0) cpdb->_taken++;
      if (cpdb->_nottaken==0) cpdb->_nottaken++;
    }
    break;
  }

  default:
    ShouldNotReachHere();
  }
  // Recompile if needed
  return make_not_entrant;
}
void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord*       q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      debug_only(prev_q = q);
      q += size;
    }
#endif

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) {
    space()->mangle_unused_area();
  }
}
//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method   = iter().get_method(will_link);
  ciInstanceKlass* holder_klass  = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int   nargs    = dest_method->arg_size();
  // See if the receiver (if any) is NULL, hence we always throw BEFORE
  // attempting to resolve the call or initialize the holder class.  Doing so
  // out of order opens a window where we can endlessly deopt because the call
  // holder is not initialized, but the call never actually happens (forcing
  // class initialization) because we only see NULL receivers.
  CPData_Invoke *caller_cpdi = cpdata()->as_Invoke(bc());
  debug_only( assert(caller_cpdi->is_Invoke(), "Not invoke!") );
  if( is_virtual_or_interface &&
      _gvn.type(stack(sp() - nargs))->higher_equal(TypePtr::NULL_PTR) ) {
    builtin_throw( Deoptimization::Reason_null_check, "null receiver", caller_cpdi, caller_cpdi->saw_null(), /*must_throw=*/true );
    return;
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
    return;
  }
  assert(FAM || holder_klass->is_loaded(), "");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeInstPtr* inst_type = _gvn.type(receiver_node)->isa_instptr();
    if( inst_type ) {
      ciInstanceKlass* ikl = inst_type->klass()->as_instance_klass();
      // If the receiver is not yet linked then: (1) we never can make this
      // call because no objects can be created until linkage, and (2) CHA
      // reports incorrect answers... so do not bother with making the call
      // until after the klass gets linked.
      ciInstanceKlass *ikl2 = ikl->is_subtype_of(klass) ? ikl : klass;
      if (!ikl->is_linked()) {
        uncommon_trap(Deoptimization::Reason_uninitialized, klass, "call site where receiver is not linked", false);
        return;
      }
    }
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer a virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (false) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors) &&
                    (!C->method()->should_disable_inlining()) &&
                    (call_method->number_of_breakpoints() == 0);

  // Get profile data for the *callee*.  First see if we have precise
  // CodeProfile for this exact inline because C1 inlined it already.
  CodeProfile *callee_cp;
  int callee_cp_inloff;

  if( caller_cpdi->inlined_method_oid() == call_method->objectId() ) {
    callee_cp = c1_cp();        // Use same CodeProfile as current
    callee_cp_inloff = caller_cpdi->cpd_offset(); // But use inlined portion
  } else {
    // If callee has a cp, clone it and use
    callee_cp = call_method->codeprofile(true);
    callee_cp_inloff = 0;

    if (callee_cp || FAM) {
      // The cloned cp needs to be freed later
      Compile* C = Compile::current();
      C->record_cloned_cp(callee_cp);
    } else { // Had profile info at top level, but not for this call site?
      // callee_cp will hold the just created cp, or whatever cp allocated by
      // other thread which wins the race in set_codeprofile
      callee_cp = call_method->set_codeprofile(CodeProfile::make(call_method));
    }
  }

  CPData_Invoke *c2_caller_cpdi = UseC1 ? c2cpdata()->as_Invoke(bc()) : NULL;

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                                        try_inline, prof_factor(), callee_cp, callee_cp_inloff,
                                        c2_caller_cpdi, caller_cpdi);

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  JVMState* new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
  if( new_jvms == NULL ) {      // Did it work?
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
    if (PrintOpto || PrintInlining || PrintC2Inlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        C2OUT->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(C2OUT); C2OUT->cr();
      }
    }
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline,
                           prof_factor(), c1_cp(), c1_cp_inloff(), c2_caller_cpdi, caller_cpdi);
    new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
    assert(new_jvms != NULL, "call failed to generate: calls should work");
    if (c2_caller_cpdi) c2_caller_cpdi->_inlining_failure_id = IF_GENERALFAILURE;
  }

  if (cg->is_inline()) {
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver, true);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    } else {
      assert0( call_method->return_type()->is_loaded() );
      BasicType result_type = dest_method->return_type()->basic_type();
      if (result_type == T_OBJECT || result_type == T_ARRAY) {
        const Type *t = peek()->bottom_type();
        assert0( t == TypePtr::NULL_PTR || t->is_oopptr()->klass()->is_loaded() );
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}
void Devirtualizer<false>::do_oop(OopClosureType* closure, T* p) {
  debug_only(closure->verify(p));
  closure->do_oop(p);
}
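Taken together with the <true> specialization at the top of this section, these two overloads implement compile-time devirtualization: when the concrete closure type is statically known, the non-virtual do_oop_nv() is called and can be inlined into the GC iteration loop; otherwise the call falls back to the virtual do_oop(). A hedged, self-contained sketch of the idea (the template shape and the name DevirtualizerSketch are illustrative, not the source's exact declaration):

template <bool nv, class OopClosureType, typename T>
struct DevirtualizerSketch;

template <class OopClosureType, typename T>
struct DevirtualizerSketch<true, OopClosureType, T> {
  // Statically bound: the compiler can inline do_oop_nv at the call site.
  static void do_oop(OopClosureType* closure, T* p) { closure->do_oop_nv(p); }
};

template <class OopClosureType, typename T>
struct DevirtualizerSketch<false, OopClosureType, T> {
  // Falls back to the ordinary virtual call.
  static void do_oop(OopClosureType* closure, T* p) { closure->do_oop(p); }
};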
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
Example #17
int DebugInformationRecorder::data_size() {
  debug_only(mark_recorders_frozen());  // mark it "frozen" for asserts
  return _stream->position();
}
int DebugInformationRecorder::pcs_size() {
  debug_only(_oop_recorder->oop_size());  // mark it "frozen" for asserts
  if (last_pc()->pc_offset() != PcDesc::upper_offset_limit)
    add_new_pc_offset(PcDesc::upper_offset_limit);
  return _pcs_length * sizeof(PcDesc);
}