void LoopFinder::gather_loop_blocks(LoopList* loops) {
  int lng = loops->length();
  BitMap blocks_in_loop(max_blocks());
  for (int i = 0; i < lng; i++) {
    // for each loop do the following
    blocks_in_loop.clear();
    Loop* loop = loops->at(i);
    BlockList* ends = loop->ends();
    if (!loop->is_end(loop->start())) {
      GrowableArray<BlockBegin*>* stack = new GrowableArray<BlockBegin*>();
      blocks_in_loop.at_put(loop->start()->block_id(), true);
      
      // insert all the ends into the list
      for (int n = 0; n < ends->length(); n++) {
        blocks_in_loop.at_put(ends->at(n)->block_id(), true);
        stack->push(ends->at(n));
      }
      
      while (!stack->is_empty()) {
        BlockBegin* bb = stack->pop();
        BlockLoopInfo* bli = get_block_info(bb);
        // push all predecessors that are not yet in loop
        int npreds = bli->nof_preds();
        for (int m = 0; m < npreds; m++) {
          BlockBegin* pred = bli->pred_no(m);
          if (!blocks_in_loop.at(pred->block_id())) {
            blocks_in_loop.at_put(pred->block_id(), true);
            loop->append_node(pred);
            stack->push(pred);
          }
        }
      }
      loop->append_node(loop->start());
    }
    // insert all the ends into the loop
    for (int n = 0; n < ends->length(); n++) {
      loop->append_node(ends->at(n));
    }
  }
}
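// A minimal standalone sketch (not HotSpot code) of the same backward-walk
// technique, using std::vector and std::vector<bool> in place of GrowableArray
// and BitMap; the Block type and its pred list are hypothetical stand-ins.
#include <vector>

struct Block {
  int id;
  std::vector<Block*> preds;  // predecessor blocks in the CFG
};

// Collect every block of a natural loop given its header and the sources of
// its back edges ("ends"): walk predecessors backward from each end, marking
// visited blocks, and stop the walk at the already-marked header.
std::vector<Block*> gather_loop_blocks(Block* header,
                                       const std::vector<Block*>& ends,
                                       int max_blocks) {
  std::vector<bool> in_loop(max_blocks, false);
  std::vector<Block*> loop_blocks;
  std::vector<Block*> stack;

  in_loop[header->id] = true;  // the header bounds the backward walk
  for (Block* end : ends) {
    if (!in_loop[end->id]) {
      in_loop[end->id] = true;
      stack.push_back(end);
    }
  }
  while (!stack.empty()) {
    Block* b = stack.back();
    stack.pop_back();
    loop_blocks.push_back(b);
    for (Block* pred : b->preds) {
      if (!in_loop[pred->id]) {  // predecessor not yet known to be in the loop
        in_loop[pred->id] = true;
        stack.push_back(pred);
      }
    }
  }
  loop_blocks.push_back(header);
  return loop_blocks;
}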
Example #2
GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
  // Natives have no scope
  if (scope() == NULL) {
    CompiledMethod* nm = code();
    Method* method = nm->method();
    assert(method->is_native() || nm->is_aot(), "Expect a native method or precompiled method");
    if (!method->is_synchronized()) {
      return new GrowableArray<MonitorInfo*>(0);
    }
    // This monitor is really only needed for UseBiasedLocking, but
    // return it in all cases for now as it might be useful for stack
    // traces and tools as well
    GrowableArray<MonitorInfo*> *monitors = new GrowableArray<MonitorInfo*>(1);
    // Casting away const
    frame& fr = (frame&) _fr;
    MonitorInfo* info = new MonitorInfo(
        fr.get_native_receiver(), fr.get_native_monitor(), false, false);
    monitors->push(info);
    return monitors;
  }
  GrowableArray<MonitorValue*>* monitors = scope()->monitors();
  if (monitors == NULL) {
    return new GrowableArray<MonitorInfo*>(0);
  }
  GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(monitors->length());
  for (int index = 0; index < monitors->length(); index++) {
    MonitorValue* mv = monitors->at(index);
    ScopeValue*   ov = mv->owner();
    StackValue *owner_sv = create_stack_value(ov); // it is an oop
    if (ov->is_object() && owner_sv->obj_is_scalar_replaced()) { // The owner object was scalar replaced
      assert(mv->eliminated(), "monitor should be eliminated for scalar replaced object");
      // Put klass for scalar replaced object.
      ScopeValue* kv = ((ObjectValue *)ov)->klass();
      assert(kv->is_constant_oop(), "klass should be oop constant for scalar replaced object");
      Handle k(((ConstantOopReadValue*)kv)->value()());
      assert(java_lang_Class::is_instance(k()), "must be");
      result->push(new MonitorInfo(k(), resolve_monitor_lock(mv->basic_lock()),
                                   mv->eliminated(), true));
    } else {
      result->push(new MonitorInfo(owner_sv->get_obj()(), resolve_monitor_lock(mv->basic_lock()),
                                   mv->eliminated(), false));
    }
  }
  return result;
}
Example #3
  void do_it(InlinedScope* s) {
    GrowableArray<NonTrivialNode*>* tests = s->typeTests();
    int len = tests->length();
    for (int i = 0; i < len; i++) {
      NonTrivialNode* n = tests->at(i);
      assert(n->doesTypeTests(), "shouldn't be in list");
      if (n->deleted) continue;
      if (n->hasUnknownCode()) continue;    // can't optimize - expects other klasses, so would get uncommon trap at run-time
      if (!theLoop->isInLoop(n)) continue;  // not in this loop
      GrowableArray<PReg*> regs(4);
      GrowableArray<GrowableArray<klassOop>*> klasses(4);
      n->collectTypeTests(regs, klasses);
      for (int j = 0; j < regs.length(); j++) {
        PReg* r = regs.at(j);
        if (theLoop->defsInLoop(r) == 0) {
          // this test can be hoisted
          if (CompilerDebug || PrintLoopOpts) cout(PrintLoopOpts)->print("*moving type test of %s at N%d out of loop\n", r->name(), n->id());
          hoistableTests->append(new HoistedTypeTest(n, r, klasses.at(j)));
        }
      }
    }
  }
//
// Fabricate heavyweight monitor information for each lightweight monitor
// found in the Java VFrame.
//
void javaVFrame::jvmpi_fab_heavy_monitors(bool query, int* fab_index, int frame_count, GrowableArray<ObjectMonitor*>* fab_list) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ResourceMark rm;

  GrowableArray<MonitorInfo*>* mons = monitors();
  if (mons->is_empty()) return;

  bool found_first_monitor = false;
  for (int index = (mons->length()-1); index >= 0; index--) {
    MonitorInfo* monitor = mons->at(index);
    if (monitor->owner() == NULL) continue; // skip unowned monitor
    //
    // If we haven't found a monitor before, this is the first frame, and
    // the thread is blocked, then we are trying to enter this monitor.
    // We skip it because we have already seen it before from the monitor 
    // cache walk.
    //
    if (!found_first_monitor && frame_count == 0) {
      switch (thread()->thread_state()) {
      case _thread_blocked:
      case _thread_blocked_trans:
        continue;
      default:
        break;
      }
    }
    found_first_monitor = true;

    markOop mark = monitor->owner()->mark();
    if (mark->has_locker()) {
      if (!query) {   // not just counting so create and store at the current element
        // fabricate the heavyweight monitor from lightweight info
        ObjectMonitor *heavy = new ObjectMonitor();
        heavy->set_object(monitor->owner());  // use the owning object
        heavy->set_owner(thread());           // use thread instead of stack address for speed
        fab_list->at_put(*fab_index, heavy);
      }
      (*fab_index)++;
    }
  }
}
void CodeBlobCollector::do_blob(CodeBlob* cb) {

    // ignore nmethods
    if (cb->is_nmethod()) {
        return;
    }

    // check if this starting address has been seen already - the
    // assumption is that stubs are inserted into the list before the
    // enclosing BufferBlobs.
    address addr = cb->code_begin();
    for (int i=0; i<_global_code_blobs->length(); i++) {
        JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
        if (addr == scb->code_begin()) {
            return;
        }
    }

    // record the CodeBlob details as a JvmtiCodeBlobDesc
    JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->code_begin(), cb->code_end());
    _global_code_blobs->append(scb);
}
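// A hedged sketch of the de-duplication step above, outside HotSpot: append a
// descriptor only if no recorded blob starts at the same address. BlobDesc is
// a hypothetical stand-in for JvmtiCodeBlobDesc.
#include <vector>

struct BlobDesc {
  const char* name;
  const void* code_begin;
  const void* code_end;
};

void append_unique(std::vector<BlobDesc>& blobs, const BlobDesc& d) {
  for (const BlobDesc& existing : blobs) {
    if (existing.code_begin == d.code_begin) {
      return;  // a blob with this start address was already recorded
    }
  }
  blobs.push_back(d);  // first time we see this start address
}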
void CodeBlobCollector::do_blob(CodeBlob* cb) {

  if (cb->is_methodCode()) {
    return;
  }

  // check if this starting address has been seen already - the
  // assumption is that stubs are inserted into the list before the
  // enclosing BufferBlobs.
  address addr = cb->code_begins();
  for (int i = 0; i < _global_code_blobs->length(); i++) {
    JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
    if (addr == scb->code_begin()) {
      return;
    }
  }

  // we must name the CodeBlob - some CodeBlobs already have names :-
  // - stubs used by compiled code to call a (static) C++ runtime routine
  // - non-relocatable machine code such as the interpreter, stubroutines, etc.
  // - various singleton blobs
  //
  // others are unnamed so we create a name :-
  // - OSR adapter (interpreter frame that has been on-stack replaced)
  // - I2C and C2I adapters
  //
  // CodeBlob::name() should return any defined name string, first.
  const char* name = cb->methodname();  // returns NULL or methodname+signature
  if (!name) {
    name = cb->name();  // returns generic name
  } else {
    ShouldNotReachHere();  // methodCode blobs were filtered out above
  }

  // record the CodeBlob details as a JvmtiCodeBlobDesc
  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(name, cb->code_begins(),
                                                 cb->code_ends());
  _global_code_blobs->append(scb);
}
Example #7
// Fill LiveStackFrameInfo with locals, monitors, and expressions
void LiveFrameStream::fill_live_stackframe(Handle stackFrame,
        const methodHandle& method, TRAPS) {
    fill_stackframe(stackFrame, method);
    if (_jvf != NULL) {
        StackValueCollection* locals = _jvf->locals();
        StackValueCollection* expressions = _jvf->expressions();
        GrowableArray<MonitorInfo*>* monitors = _jvf->monitors();

        if (!locals->is_empty()) {
            objArrayHandle locals_h = values_to_object_array(locals, CHECK);
            java_lang_LiveStackFrameInfo::set_locals(stackFrame(), locals_h());
        }
        if (!expressions->is_empty()) {
            objArrayHandle expressions_h = values_to_object_array(expressions, CHECK);
            java_lang_LiveStackFrameInfo::set_operands(stackFrame(), expressions_h());
        }
        if (monitors->length() > 0) {
            objArrayHandle monitors_h = monitors_to_object_array(monitors, CHECK);
            java_lang_LiveStackFrameInfo::set_monitors(stackFrame(), monitors_h());
        }
    }
}
void javaVFrame::print() {
  ResourceMark rm;
  vframe::print();
  tty->print("\t");
  method()->print_value();
  tty->cr();
  tty->print_cr("\tbci:    %d", bci());

  print_stack_values("locals",      locals());
  print_stack_values("expressions", expressions());

  GrowableArray<MonitorInfo*>* list = monitors();
  if (list->is_empty()) return;
  tty->print_cr("\tmonitor list:");
  for (int index = (list->length()-1); index >= 0; index--) {
    MonitorInfo* monitor = list->at(index);
    tty->print("\t  obj\t");
    if (monitor->owner_is_scalar_replaced()) {
      Klass* k = java_lang_Class::as_Klass(monitor->owner_klass());
      tty->print("( is scalar replaced %s)", k->external_name());
    } else if (monitor->owner() == NULL) {
      tty->print("( null )");
    } else {
      monitor->owner()->print_value();
      tty->print("(owner=" INTPTR_FORMAT ")", (address)monitor->owner());
    }
    if (monitor->eliminated()) {
      if(is_compiled_frame()) {
        tty->print(" ( lock is eliminated in compiled frame )");
      } else {
        tty->print(" ( lock is eliminated, frame not compiled )");
      }
    }
    tty->cr();
    tty->print("\t  ");
    monitor->lock()->print_on(tty);
    tty->cr();
  }
}
GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
  assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?");

  GrowableArray<ClassLoaderData*>* array = new GrowableArray<ClassLoaderData*>();

  // The CLDs in [_head, _saved_head] were all added during the last call to remember_new_clds(true).
  ClassLoaderData* curr = _head;
  while (curr != _saved_head) {
    if (!curr->claimed()) {
      array->push(curr);

      if (TraceClassLoaderData) {
        tty->print("[ClassLoaderData] found new CLD: ");
        curr->print_value_on(tty);
        tty->cr();
      }
    }

    curr = curr->_next;
  }

  return array;
}
Example #10
 // Put node on worklist if it is (or was) not there.
 inline void add_to_worklist(PointsToNode* pt) {
   PointsToNode* ptf = pt;
   uint pidx_bias = 0;
   if (PointsToNode::is_base_use(pt)) {
     // Create a separate entry in _in_worklist for a marked base edge
     // because _worklist may have an entry for a normal edge pointing
     // to the same node. To separate them use _next_pidx as bias.
     ptf = PointsToNode::get_use_node(pt)->as_Field();
     pidx_bias = _next_pidx;
   }
   if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
     _worklist.append(pt);
   }
 }
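// The pairing of _worklist with the _in_worklist bitmap above is a classic
// duplicate-free worklist. A minimal sketch under that assumption, with dense
// integer ids standing in for PointsToNode pointers:
#include <cstddef>
#include <vector>

class Worklist {
 public:
  explicit Worklist(size_t capacity) : _in_worklist(capacity, false) {}

  // Append idx only if it is (or was) not there: the bitmap remembers every
  // id ever queued, so nothing is enqueued twice.
  void add(size_t idx) {
    if (!_in_worklist[idx]) {
      _in_worklist[idx] = true;
      _worklist.push_back(idx);
    }
  }

  bool is_empty() const { return _worklist.empty(); }

  size_t pop() {
    size_t idx = _worklist.back();
    _worklist.pop_back();
    return idx;  // the bit stays set, matching the "(or was) not there" rule
  }

 private:
  std::vector<bool>   _in_worklist;  // ids ever placed on the worklist
  std::vector<size_t> _worklist;     // pending ids
};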
void javaVFrame::print() {
  ResourceMark rm;
  vframe::print();
  tty->print("\t"); 
  method()->print_value();
  tty->cr();
  tty->print_cr("\tbci:    %d", bci());

  print_stack_values("locals",      locals());
  print_stack_values("expressions", expressions());

  GrowableArray<MonitorInfo*>* list = monitors();
  if (list->is_empty()) return;
  tty->print_cr("\tmonitor list:");
  for (int index = (list->length()-1); index >= 0; index--) {
    MonitorInfo* monitor = list->at(index);
    tty->print("\t  obj\t"); monitor->owner()->print_value(); 
    tty->print("(" INTPTR_FORMAT ")", monitor->owner());
    tty->cr();
    tty->print("\t  ");
    monitor->lock()->print_on(tty);
    tty->cr(); 
  }
}
Example #12
void CompiledLoop::hoistTypeTests() {
  // collect all type tests that can be hoisted out of the loop
  _loopHeader->_tests = _hoistableTests = new GrowableArray<HoistedTypeTest*>(10);
  TTHoister tth(this, _hoistableTests);
  _scope->subScopesDo(&tth);

  // add type tests to loop header for testing (avoid duplicates)
  // (currently quadratic algorithm but there should be very few)
  GrowableArray<HoistedTypeTest*>* headerTests = new GrowableArray<HoistedTypeTest*>(_hoistableTests->length());
  for (int i = _hoistableTests->length() - 1; i >= 0; i--) {
    HoistedTypeTest* t = _hoistableTests->at(i);
    PReg* tested = t->testedPR;
    for (int j = headerTests->length() - 1; j >= 0; j--) {
      if (headerTests->at(j)->testedPR == tested) {
        // already testing this PReg
        if (isEquivalentType(headerTests->at(j)->klasses, t->klasses)) {
          // nothing to do
        } else {
          // Whoa!  The same PReg is tested for different types in different places.
          // Possible but rare.
          headerTests->at(j)->invalid = t->invalid = true;
          if (WizardMode) {
            compiler_warning("CompiledLoop::hoistTypeTests: PReg tested for different types\n");
            t->print();
            headerTests->at(j)->print();
          }
        }
        tested = NULL;    // don't add it to the list
        break;
      }
    }
    if (tested) headerTests->append(t);
  }

  // now delete all hoisted type tests from loop body
  for (int i = _hoistableTests->length() - 1; i >= 0; i--) {
    HoistedTypeTest* t = _hoistableTests->at(i);
    if (!t->invalid) {
      t->node->assert_preg_type(t->testedPR, t->klasses, _loopHeader);
    }
  }
  if (!_loopHeader->isActivated()) _loopHeader->activate();
}
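// The header-merging loop above is a small conflict-aware de-duplication. A
// standalone sketch, with int ids as hypothetical stand-ins for PRegs and
// klass lists: keep one test per register, and if the same register is tested
// against a different type set, invalidate both tests rather than pick one.
#include <vector>

struct Test {
  int  reg;        // tested pseudo-register
  int  klass_set;  // id of the expected type set
  bool invalid;
};

void merge_header_tests(std::vector<Test*>& header,
                        const std::vector<Test*>& hoistable) {
  for (Test* t : hoistable) {
    bool add = true;
    for (Test* h : header) {
      if (h->reg == t->reg) {              // this register already has a test
        if (h->klass_set != t->klass_set) {
          h->invalid = t->invalid = true;  // conflicting expectations
        }
        add = false;
        break;
      }
    }
    if (add) header.push_back(t);
  }
}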
void nmethodCollector::do_nmethod(nmethod* nm) {
  // ignore zombies
  if (!nm->is_alive()) {
    return;
  }

  assert(nm->method() != NULL, "checking");

  // create the location map for the nmethod.
  jvmtiAddrLocationMap* map;
  jint map_length;
  JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);

  // record the nmethod details
  methodHandle mh(nm->method());
  nmethodDesc* snm = new nmethodDesc(mh,
                                     nm->code_begin(),
                                     nm->code_end(),
                                     map,
                                     map_length);
  _global_nmethods->append(snm);
}
void nmethodCollector::do_nmethod(CodeBlob* /* nmethod* */ nm) {
  Unimplemented();  // FIXME - if nmethod/methodCodeOop is not alive, return....

  // if (nm->is_methodCode()) {
  //   return;
  // }
  // // verify that there is code...
  // assert(nm->code_size() != 0, "checking");

  // create the location map for the nmethod.
  jvmtiAddrLocationMap* map;
  jint map_length;
  JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);

  // record the nmethod details
  methodHandle mh(nm->method());
  nmethodDesc* snm = new nmethodDesc(mh,
                                     nm->code_begins(),
                                     nm->code_ends(),
                                     map,
                                     map_length);
  _global_nmethods->append(snm);
}
void CodeBlobCollector::collect() {
    assert_locked_or_safepoint(CodeCache_lock);
    assert(_global_code_blobs == NULL, "checking");

    // create the global list
    _global_code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(50,true);

    // iterate over the stub code descriptors and put them in the list first.
    int index = 0;
    StubCodeDesc* desc;
    while ((desc = StubCodeDesc::desc_for_index(++index)) != NULL) {
        _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
    }

    // next iterate over all the non-nmethod code blobs and add them to
    // the list - as noted above this will filter out duplicates and
    // enclosing blobs.
    CodeCache::blobs_do(do_blob);

    // make the global list the instance list so that it can be used
    // for other iterations.
    _code_blobs = _global_code_blobs;
    _global_code_blobs = NULL;
}
 static CommandLineFlagConstraint* at(int i) { return (_constraints != NULL) ? _constraints->at(i) : NULL; }
 static int length() { return (_constraints != NULL) ? _constraints->length() : 0; }
Example #18
StackValueCollection* compiledVFrame::locals() const {
  // Natives have no scope
  if (scope() == NULL) return new StackValueCollection(0);
  GrowableArray<ScopeValue*>*  scv_list = scope()->locals();
  if (scv_list == NULL) return new StackValueCollection(0);

  // scv_list is the list of ScopeValues describing the JVM stack state.
  // There is one scv_list entry for every JVM stack state in use.
  int length = scv_list->length();
  StackValueCollection* result = new StackValueCollection(length);
  // In rare instances set_locals may have occurred, in which case
  // there are local values that are no longer described by the ScopeValues
  GrowableArray<jvmtiDeferredLocalVariable*>* deferred = NULL;
  GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread()->deferred_locals();
  if (list != NULL ) {
    // In real life this never happens, or is typically a single-element search
    for (int i = 0; i < list->length(); i++) {
      if (list->at(i)->matches((vframe*)this)) {
        deferred = list->at(i)->locals();
        break;
      }
    }
  }

  for( int i = 0; i < length; i++ ) {
    result->add( create_stack_value(scv_list->at(i)) );
  }

  // Replace specified locals with any deferred writes that are present
  if (deferred != NULL) {
    for ( int l = 0;  l < deferred->length() ; l ++) {
      jvmtiDeferredLocalVariable* val = deferred->at(l);
      switch (val->type()) {
      case T_BOOLEAN:
        result->set_int_at(val->index(), val->value().z);
        break;
      case T_CHAR:
        result->set_int_at(val->index(), val->value().c);
        break;
      case T_FLOAT:
        result->set_float_at(val->index(), val->value().f);
        break;
      case T_DOUBLE:
        result->set_double_at(val->index(), val->value().d);
        break;
      case T_BYTE:
        result->set_int_at(val->index(), val->value().b);
        break;
      case T_SHORT:
        result->set_int_at(val->index(), val->value().s);
        break;
      case T_INT:
        result->set_int_at(val->index(), val->value().i);
        break;
      case T_LONG:
        result->set_long_at(val->index(), val->value().j);
        break;
      case T_OBJECT:
        {
          Handle obj((oop)val->value().l);
          result->set_obj_at(val->index(), obj);
        }
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  return result;
}
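// The method above is a two-phase reconstruction: materialize every local
// from the scope description, then overlay any deferred writes keyed by slot
// index. A minimal sketch of that pattern, with a plain integer standing in
// for the typed StackValue:
#include <cstddef>
#include <unordered_map>
#include <vector>

using Value = long long;  // hypothetical stand-in for a tagged stack value

std::vector<Value> reconstruct_locals(
    const std::vector<Value>& scope_values,
    const std::unordered_map<std::size_t, Value>& deferred_writes) {
  std::vector<Value> result(scope_values);  // phase 1: the scope's view
  for (const auto& entry : deferred_writes) {
    result.at(entry.first) = entry.second;  // phase 2: overlay deferred writes
  }
  return result;
}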
Example #19
void compiledVFrame::update_local(BasicType type, int index, jvalue value) {

#ifdef ASSERT

  assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization");
#endif /* ASSERT */
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = thread()->deferred_locals();
  if (deferred != NULL ) {
    // See if this vframe has already had locals with deferred writes
    int f;
    for ( f = 0 ; f < deferred->length() ; f++ ) {
      if (deferred->at(f)->matches(this)) {
        // Matching vframe; now see if the local already had a deferred write
        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
        int l;
        for (l = 0 ; l < locals->length() ; l++ ) {
          if (locals->at(l)->index() == index) {
            locals->at(l)->set_value(value);
            return;
          }
        }
        // No matching local already present. Push a new value onto the deferred collection
        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
        return;
      }
    }
    // No matching vframe found; fall through and push a new one
  } else {
    // No deferred updates pending for this thread.
    // allocate in C heap
    deferred =  new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
    thread()->set_deferred_locals(deferred);
  }
  deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr().id()));
  assert(deferred->top()->id() == fr().id(), "Huh? Must match");
  deferred->top()->set_local_at(index, type, value);
}
 JvmtiEnvThreadState *env_thread_state(int index) {
     return _env_thread_states->at(index);
 }
 int env_count() {
     return _env_thread_states->length();
 }
void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, GrowableArray<ciBlock *> &successors) {

  blk->set_processed();
  ciBytecodeStream s(method());
  int limit_bci = blk->limit_bci();
  bool fall_through = false;
  ArgumentMap allocated_obj;
  allocated_obj.add_allocated();
  ArgumentMap unknown_obj;
  unknown_obj.add_unknown();
  ArgumentMap empty_map;

  s.reset_to_bci(blk->start_bci());
  while (s.next() != ciBytecodeStream::EOBC() && s.cur_bci() < limit_bci) {
    fall_through = true;
    switch (s.cur_bc()) {
      case Bytecodes::_nop:
        break;
      case Bytecodes::_aconst_null:
        state.apush(empty_map);
        break;
      case Bytecodes::_iconst_m1:
      case Bytecodes::_iconst_0:
      case Bytecodes::_iconst_1:
      case Bytecodes::_iconst_2:
      case Bytecodes::_iconst_3:
      case Bytecodes::_iconst_4:
      case Bytecodes::_iconst_5:
      case Bytecodes::_fconst_0:
      case Bytecodes::_fconst_1:
      case Bytecodes::_fconst_2:
      case Bytecodes::_bipush:
      case Bytecodes::_sipush:
        state.spush();
        break;
      case Bytecodes::_lconst_0:
      case Bytecodes::_lconst_1:
      case Bytecodes::_dconst_0:
      case Bytecodes::_dconst_1:
        state.lpush();
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        if (type2size[s.get_constant().basic_type()] == 1) {
          state.spush();
        } else {
          state.lpush();
        }
        break;
      case Bytecodes::_aload:
        state.apush(state._vars[s.get_index()]);
        break;
      case Bytecodes::_iload:
      case Bytecodes::_fload:
      case Bytecodes::_iload_0:
      case Bytecodes::_iload_1:
      case Bytecodes::_iload_2:
      case Bytecodes::_iload_3:
      case Bytecodes::_fload_0:
      case Bytecodes::_fload_1:
      case Bytecodes::_fload_2:
      case Bytecodes::_fload_3:
        state.spush();
        break;
      case Bytecodes::_lload:
      case Bytecodes::_dload:
      case Bytecodes::_lload_0:
      case Bytecodes::_lload_1:
      case Bytecodes::_lload_2:
      case Bytecodes::_lload_3:
      case Bytecodes::_dload_0:
      case Bytecodes::_dload_1:
      case Bytecodes::_dload_2:
      case Bytecodes::_dload_3:
        state.lpush();
        break;
      case Bytecodes::_aload_0:
        state.apush(state._vars[0]);
        break;
      case Bytecodes::_aload_1:
        state.apush(state._vars[1]);
        break;
      case Bytecodes::_aload_2:
        state.apush(state._vars[2]);
        break;
      case Bytecodes::_aload_3:
        state.apush(state._vars[3]);
        break;
      case Bytecodes::_iaload:
      case Bytecodes::_faload:
      case Bytecodes::_baload:
      case Bytecodes::_caload:
      case Bytecodes::_saload:
        state.spop();
        set_method_escape(state.apop());
        state.spush();
        break;
      case Bytecodes::_laload:
      case Bytecodes::_daload:
        state.spop();
        set_method_escape(state.apop());
        state.lpush();
        break;
      case Bytecodes::_aaload:
        { state.spop();
          ArgumentMap array = state.apop();
          set_method_escape(array);
          state.apush(unknown_obj);
          set_dirty(array);
        }
        break;
      case Bytecodes::_istore:
      case Bytecodes::_fstore:
      case Bytecodes::_istore_0:
      case Bytecodes::_istore_1:
      case Bytecodes::_istore_2:
      case Bytecodes::_istore_3:
      case Bytecodes::_fstore_0:
      case Bytecodes::_fstore_1:
      case Bytecodes::_fstore_2:
      case Bytecodes::_fstore_3:
        state.spop();
        break;
      case Bytecodes::_lstore:
      case Bytecodes::_dstore:
      case Bytecodes::_lstore_0:
      case Bytecodes::_lstore_1:
      case Bytecodes::_lstore_2:
      case Bytecodes::_lstore_3:
      case Bytecodes::_dstore_0:
      case Bytecodes::_dstore_1:
      case Bytecodes::_dstore_2:
      case Bytecodes::_dstore_3:
        state.lpop();
        break;
      case Bytecodes::_astore:
        state._vars[s.get_index()] = state.apop();
        break;
      case Bytecodes::_astore_0:
        state._vars[0] = state.apop();
        break;
      case Bytecodes::_astore_1:
        state._vars[1] = state.apop();
        break;
      case Bytecodes::_astore_2:
        state._vars[2] = state.apop();
        break;
      case Bytecodes::_astore_3:
        state._vars[3] = state.apop();
        break;
      case Bytecodes::_iastore:
      case Bytecodes::_fastore:
      case Bytecodes::_bastore:
      case Bytecodes::_castore:
      case Bytecodes::_sastore:
      {
        state.spop();
        state.spop();
        ArgumentMap arr = state.apop();
        set_method_escape(arr);
        break;
      }
      case Bytecodes::_lastore:
      case Bytecodes::_dastore:
      {
        state.lpop();
        state.spop();
        ArgumentMap arr = state.apop();
        set_method_escape(arr);
        break;
      }
      case Bytecodes::_aastore:
      {
        set_global_escape(state.apop());
        state.spop();
        ArgumentMap arr = state.apop();
        break;
      }
      case Bytecodes::_pop:
        state.raw_pop();
        break;
      case Bytecodes::_pop2:
        state.raw_pop();
        state.raw_pop();
        break;
      case Bytecodes::_dup:
        { ArgumentMap w1 = state.raw_pop();
          state.raw_push(w1);
          state.raw_push(w1);
        }
        break;
      case Bytecodes::_dup_x1:
        { ArgumentMap w1 = state.raw_pop();
          ArgumentMap w2 = state.raw_pop();
          state.raw_push(w1);
          state.raw_push(w2);
          state.raw_push(w1);
        }
        break;
      case Bytecodes::_dup_x2:
        { ArgumentMap w1 = state.raw_pop();
          ArgumentMap w2 = state.raw_pop();
          ArgumentMap w3 = state.raw_pop();
          state.raw_push(w1);
          state.raw_push(w3);
          state.raw_push(w2);
          state.raw_push(w1);
        }
        break;
      case Bytecodes::_dup2:
        { ArgumentMap w1 = state.raw_pop();
          ArgumentMap w2 = state.raw_pop();
          state.raw_push(w2);
          state.raw_push(w1);
          state.raw_push(w2);
          state.raw_push(w1);
        }
        break;
      case Bytecodes::_dup2_x1:
        { ArgumentMap w1 = state.raw_pop();
          ArgumentMap w2 = state.raw_pop();
          ArgumentMap w3 = state.raw_pop();
          state.raw_push(w2);
          state.raw_push(w1);
          state.raw_push(w3);
          state.raw_push(w2);
          state.raw_push(w1);
        }
        break;
      case Bytecodes::_dup2_x2:
        { ArgumentMap w1 = state.raw_pop();
          ArgumentMap w2 = state.raw_pop();
          ArgumentMap w3 = state.raw_pop();
          ArgumentMap w4 = state.raw_pop();
          state.raw_push(w2);
          state.raw_push(w1);
          state.raw_push(w4);
          state.raw_push(w3);
          state.raw_push(w2);
          state.raw_push(w1);
        }
        break;
      case Bytecodes::_swap:
        { ArgumentMap w1 = state.raw_pop();
          ArgumentMap w2 = state.raw_pop();
          state.raw_push(w1);
          state.raw_push(w2);
        }
        break;
      case Bytecodes::_iadd:
      case Bytecodes::_fadd:
      case Bytecodes::_isub:
      case Bytecodes::_fsub:
      case Bytecodes::_imul:
      case Bytecodes::_fmul:
      case Bytecodes::_idiv:
      case Bytecodes::_fdiv:
      case Bytecodes::_irem:
      case Bytecodes::_frem:
      case Bytecodes::_iand:
      case Bytecodes::_ior:
      case Bytecodes::_ixor:
        state.spop();
        state.spop();
        state.spush();
        break;
      case Bytecodes::_ladd:
      case Bytecodes::_dadd:
      case Bytecodes::_lsub:
      case Bytecodes::_dsub:
      case Bytecodes::_lmul:
      case Bytecodes::_dmul:
      case Bytecodes::_ldiv:
      case Bytecodes::_ddiv:
      case Bytecodes::_lrem:
      case Bytecodes::_drem:
      case Bytecodes::_land:
      case Bytecodes::_lor:
      case Bytecodes::_lxor:
        state.lpop();
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_ishl:
      case Bytecodes::_ishr:
      case Bytecodes::_iushr:
        state.spop();
        state.spop();
        state.spush();
        break;
      case Bytecodes::_lshl:
      case Bytecodes::_lshr:
      case Bytecodes::_lushr:
        state.spop();
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_ineg:
      case Bytecodes::_fneg:
        state.spop();
        state.spush();
        break;
      case Bytecodes::_lneg:
      case Bytecodes::_dneg:
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_iinc:
        break;
      case Bytecodes::_i2l:
      case Bytecodes::_i2d:
      case Bytecodes::_f2l:
      case Bytecodes::_f2d:
        state.spop();
        state.lpush();
        break;
      case Bytecodes::_i2f:
      case Bytecodes::_f2i:
        state.spop();
        state.spush();
        break;
      case Bytecodes::_l2i:
      case Bytecodes::_l2f:
      case Bytecodes::_d2i:
      case Bytecodes::_d2f:
        state.lpop();
        state.spush();
        break;
      case Bytecodes::_l2d:
      case Bytecodes::_d2l:
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_i2b:
      case Bytecodes::_i2c:
      case Bytecodes::_i2s:
        state.spop();
        state.spush();
        break;
      case Bytecodes::_lcmp:
      case Bytecodes::_dcmpl:
      case Bytecodes::_dcmpg:
        state.lpop();
        state.lpop();
        state.spush();
        break;
      case Bytecodes::_fcmpl:
      case Bytecodes::_fcmpg:
        state.spop();
        state.spop();
        state.spush();
        break;
      case Bytecodes::_ifeq:
      case Bytecodes::_ifne:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifgt:
      case Bytecodes::_ifle:
      {
        state.spop();
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_if_icmpeq:
      case Bytecodes::_if_icmpne:
      case Bytecodes::_if_icmplt:
      case Bytecodes::_if_icmpge:
      case Bytecodes::_if_icmpgt:
      case Bytecodes::_if_icmple:
      {
        state.spop();
        state.spop();
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_if_acmpeq:
      case Bytecodes::_if_acmpne:
      {
        set_method_escape(state.apop());
        set_method_escape(state.apop());
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_goto:
      {
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_jsr:
      {
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        state.apush(empty_map);
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_ret:
        // we don't track the destination of a "ret" instruction
        assert(s.next_bci() == limit_bci, "branch must end block");
        fall_through = false;
        break;
      case Bytecodes::_return:
        assert(s.next_bci() == limit_bci, "return must end block");
        fall_through = false;
        break;
      case Bytecodes::_tableswitch:
        {
          state.spop();
          Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp());
          int len = switch_->length();
          int dest_bci;
          for (int i = 0; i < len; i++) {
            dest_bci = s.cur_bci() + switch_->dest_offset_at(i);
            assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
            successors.push(_methodBlocks->block_containing(dest_bci));
          }
          dest_bci = s.cur_bci() + switch_->default_offset();
          assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
          successors.push(_methodBlocks->block_containing(dest_bci));
          assert(s.next_bci() == limit_bci, "branch must end block");
          fall_through = false;
          break;
        }
      case Bytecodes::_lookupswitch:
        {
          state.spop();
          Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
          int len = switch_->number_of_pairs();
          int dest_bci;
          for (int i = 0; i < len; i++) {
            dest_bci = s.cur_bci() + switch_->pair_at(i)->offset();
            assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
            successors.push(_methodBlocks->block_containing(dest_bci));
          }
          dest_bci = s.cur_bci() + switch_->default_offset();
          assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
          successors.push(_methodBlocks->block_containing(dest_bci));
          fall_through = false;
          break;
        }
      case Bytecodes::_ireturn:
      case Bytecodes::_freturn:
        state.spop();
        fall_through = false;
        break;
      case Bytecodes::_lreturn:
      case Bytecodes::_dreturn:
        state.lpop();
        fall_through = false;
        break;
      case Bytecodes::_areturn:
        set_returned(state.apop());
        fall_through = false;
        break;
      case Bytecodes::_getstatic:
      case Bytecodes::_getfield:
        { bool will_link;
          ciField* field = s.get_field(will_link);
          BasicType field_type = field->type()->basic_type();
          if (s.cur_bc() != Bytecodes::_getstatic) {
            set_method_escape(state.apop());
          }
          if (field_type == T_OBJECT || field_type == T_ARRAY) {
            state.apush(unknown_obj);
          } else if (type2size[field_type] == 1) {
            state.spush();
          } else {
            state.lpush();
          }
        }
        break;
      case Bytecodes::_putstatic:
      case Bytecodes::_putfield:
        { bool will_link;
          ciField* field = s.get_field(will_link);
          BasicType field_type = field->type()->basic_type();
          if (field_type == T_OBJECT || field_type == T_ARRAY) {
            set_global_escape(state.apop());
          } else if (type2size[field_type] == 1) {
            state.spop();
          } else {
            state.lpop();
          }
          if (s.cur_bc() != Bytecodes::_putstatic) {
            ArgumentMap p = state.apop();
            set_method_escape(p);
          }
        }
        break;
      case Bytecodes::_invokevirtual:
      case Bytecodes::_invokespecial:
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokeinterface:
        { bool will_link;
          ciMethod* target = s.get_method(will_link);
          ciKlass* holder = s.get_declared_method_holder();
          invoke(state, s.cur_bc(), target, holder);
          ciType* return_type = target->return_type();
          if (!return_type->is_primitive_type()) {
            state.apush(unknown_obj);
          } else if (return_type->is_one_word()) {
            state.spush();
          } else if (return_type->is_two_word()) {
            state.lpush();
          }
        }
        break;
      case Bytecodes::_xxxunusedxxx:
        ShouldNotReachHere();
        break;
      case Bytecodes::_new:
        state.apush(allocated_obj);
        break;
      case Bytecodes::_newarray:
      case Bytecodes::_anewarray:
        state.spop();
        state.apush(allocated_obj);
        break;
      case Bytecodes::_multianewarray:
        { int i = s.cur_bcp()[3];
          while (i-- > 0) state.spop();
          state.apush(allocated_obj);
        }
        break;
      case Bytecodes::_arraylength:
        set_method_escape(state.apop());
        state.spush();
        break;
      case Bytecodes::_athrow:
        set_global_escape(state.apop());
        fall_through = false;
        break;
      case Bytecodes::_checkcast:
        { ArgumentMap obj = state.apop();
          set_method_escape(obj);
          state.apush(obj);
        }
        break;
      case Bytecodes::_instanceof:
        set_method_escape(state.apop());
        state.spush();
        break;
      case Bytecodes::_monitorenter:
      case Bytecodes::_monitorexit:
        state.apop();
        break;
      case Bytecodes::_wide:
        ShouldNotReachHere();
        break;
      case Bytecodes::_ifnull:
      case Bytecodes::_ifnonnull:
      {
        set_method_escape(state.apop());
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_goto_w:
      {
        int dest_bci = s.get_far_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_jsr_w:
      {
        int dest_bci = s.get_far_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        state.apush(empty_map);
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_breakpoint:
        break;
      default:
        ShouldNotReachHere();
        break;
    }

  }
  if (fall_through) {
    int fall_through_bci = s.cur_bci();
    if (fall_through_bci < _method->code_size()) {
      assert(_methodBlocks->is_block_start(fall_through_bci), "must fall through to block start.");
      successors.push(_methodBlocks->block_containing(fall_through_bci));
    }
  }
}
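// The walk above is a classic abstract interpretation: a symbolic operand
// stack is pushed and popped per bytecode, and interesting facts (escapes,
// successors) are recorded along the way. A toy sketch of the pattern over a
// hypothetical four-opcode language, tracking only "may this slot hold an
// argument":
#include <vector>

enum class Op { PushConst, LoadArg, Add, StoreGlobal };

struct Insn { Op op; };

// Returns true if an argument can reach a StoreGlobal, i.e. may escape.
bool block_lets_argument_escape(const std::vector<Insn>& block) {
  std::vector<bool> stack;  // true = the slot may hold an argument
  bool escapes = false;
  for (const Insn& insn : block) {
    switch (insn.op) {
      case Op::PushConst:
        stack.push_back(false);
        break;
      case Op::LoadArg:
        stack.push_back(true);
        break;
      case Op::Add: {  // pops two operands, pushes a merged result
        bool a = stack.back(); stack.pop_back();
        bool b = stack.back(); stack.pop_back();
        stack.push_back(a || b);
        break;
      }
      case Op::StoreGlobal:  // the stored value becomes globally reachable
        escapes = escapes || stack.back();
        stack.pop_back();
        break;
    }
  }
  return escapes;
}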
Example #23
void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
  ResourceMark rm;

  // If this is the first frame, and java.lang.Object.wait(...) then print out the receiver.
  if (frame_count == 0) {
    if (method()->name() == vmSymbols::wait_name() &&
        method()->method_holder()->name() == vmSymbols::java_lang_Object()) {
      StackValueCollection* locs = locals();
      if (!locs->is_empty()) {
        StackValue* sv = locs->at(0);
        if (sv->type() == T_OBJECT) {
          Handle o = locs->at(0)->get_obj();
          print_locked_object_class_name(st, o, "waiting on");
        }
      }
    } else if (thread()->current_park_blocker() != NULL) {
      oop obj = thread()->current_park_blocker();
      Klass* k = obj->klass();
      st->print_cr("\t- %s <" INTPTR_FORMAT "> (a %s)", "parking to wait for ", (address)obj, k->external_name());
    }
  }


  // Print out all monitors that we have locked or are trying to lock
  GrowableArray<MonitorInfo*>* mons = monitors();
  if (!mons->is_empty()) {
    bool found_first_monitor = false;
    for (int index = (mons->length()-1); index >= 0; index--) {
      MonitorInfo* monitor = mons->at(index);
      if (monitor->eliminated() && is_compiled_frame()) { // Eliminated in compiled code
        if (monitor->owner_is_scalar_replaced()) {
          Klass* k = java_lang_Class::as_Klass(monitor->owner_klass());
          st->print("\t- eliminated <owner is scalar replaced> (a %s)", k->external_name());
        } else {
          oop obj = monitor->owner();
          if (obj != NULL) {
            print_locked_object_class_name(st, obj, "eliminated");
          }
        }
        continue;
      }
      if (monitor->owner() != NULL) {

        // First, assume we have the monitor locked. If we haven't found an
        // owned monitor before and this is the first frame, then we need to
        // see if we have completed the lock or we are blocked trying to
        // acquire it - we can only be blocked if the monitor is inflated

        const char *lock_state = "locked"; // assume we have the monitor locked
        if (!found_first_monitor && frame_count == 0) {
          markOop mark = monitor->owner()->mark();
          if (mark->has_monitor() &&
              mark->monitor() == thread()->current_pending_monitor()) {
            lock_state = "waiting to lock";
          }
        }

        found_first_monitor = true;
        print_locked_object_class_name(st, monitor->owner(), lock_state);
      }
    }
  }
}
Example #24
 instanceHandle get_threadObj(int index) { return _threads_array->at(index); }
Example #25
//------------------------------sched_call-------------------------------------
uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
  RegMask regs;

  // Schedule all the users of the call right now.  All the users are
  // projection Nodes, so they must be scheduled next to the call.
  // Collect all the defined registers.
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->is_MachProj(), "" );
    int n_cnt = ready_cnt.at(n->_idx)-1;
    ready_cnt.at_put(n->_idx, n_cnt);
    assert( n_cnt == 0, "" );
    // Schedule next to call
    block->map_node(n, node_cnt++);
    // Collect defined registers
    regs.OR(n->out_RegMask());
    // Check for scheduling the next control-definer
    if( n->bottom_type() == Type::CONTROL )
      // Warm up next pile of heuristic bits
      needed_for_next_call(block, n, next_call);

    // Children of projections are now all ready
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if(get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }

  }

  // Act as if the call defines the Frame Pointer.
  // Certainly the FP is alive and well after the call.
  regs.Insert(_matcher.c_frame_pointer());

  // Set all registers killed and not already defined by the call.
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  map_node_to_block(proj, block);
  block->insert_node(proj, node_cnt++);

  // Select the right register save policy.
  const char * save_policy;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      // Calling C code so use C calling convention
      save_policy = _matcher._c_reg_save_policy;
      break;

    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      // Calling Java code so use Java calling convention
      save_policy = _matcher._register_save_policy;
      break;

    default:
      ShouldNotReachHere();
  }

  // When using CallRuntime mark SOE registers as killed by the call
  // so values that could show up in the RegisterMap aren't live in a
  // callee saved register since the register wouldn't know where to
  // find them.  CallLeaf and CallLeafNoFP are ok because they can't
  // have debug info on them.  Strictly speaking this only needs to be
  // done for oops since idealreg2debugmask takes care of debug info
  // references but there is no way to handle oops differently than other
  // pointers as far as the kill mask goes.
  bool exclude_soe = op == Op_CallRuntime;

  // If the call is a MethodHandle invoke, we need to exclude the
  // register which is used to save the SP value over MH invokes from
  // the mask.  Otherwise this register could be used for
  // deoptimization information.
  if (op == Op_CallStaticJava) {
    MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
    if (mcallstaticjava->_method_handle_invoke)
      proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
  }

  add_call_kills(proj, regs, save_policy, exclude_soe);

  return node_cnt;
}
 static void add(CommandLineFlagConstraint* constraint) { _constraints->append(constraint); }
Example #27
 State state() {
     return (State)(_state->top());
 }
Example #28
 int            num_threads()              { return _threads->length(); }
Example #29
//------------------------------schedule_local---------------------------------
// Topological sort within a block.  Someday become a real scheduler.
bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections.  We leave
  // these alone.  PhiNodes and ParmNodes are made to follow the block start
  // Node.  Everything else gets topo-sorted.

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
      for (uint i = 0; i < block->number_of_nodes(); i++) {
        tty->print("# ");
        block->get_node(i)->fast_dump();
      }
      tty->print_cr("#");
    }
#endif

  // RootNode is already sorted
  if (block->number_of_nodes() == 1) {
    return true;
  }

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = block->end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
    Node *n = block->get_node(i);
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj()  && n->in(0) == block->head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      block->map_node(block->get_node(phi_cnt), i);
      block->map_node(n, phi_cnt++);  // swap Phi/Parm up front
    } else {                    // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && get_block_for_node(m) == block && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt.at_put(n->_idx, local); // Count em up

#ifdef ASSERT
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          // Check the precedence edges
          for (uint prec = n->req(); prec < n->len(); prec++) {
            Node* oop_store = n->in(prec);
            if (oop_store != NULL) {
              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
            }
          }
        }
      }
#endif

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      if( n->is_Mach() && n->req() > TypeFunc::Parms &&
          (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
           n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
        // MemBarAcquire could be created without Precedent edge.
        // del_req() replaces the specified edge with the last input edge
        // and then removes the last edge. If the specified edge > number of
        // edges the last edge will be moved outside of the input edges array
        // and the edge will be lost. This is why this code should be
        // executed only when Precedent (== TypeFunc::Parms) edge is present.
        Node *x = n->in(TypeFunc::Parms);
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
    ready_cnt.at_put(block->get_node(i2)->_idx, 0);

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
    Node *n = block->get_node(i3);       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if (get_block_for_node(m) == block) { // Local-block user
        int m_cnt = ready_cnt.at(m->_idx)-1;
        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
      }
    }
  }

  Node_List delay;
  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = block->get_node(i4);
    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
        // ties in scheduling by worklist order.
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // Force the CreateEx to the top of the list so it's processed
        // first and ends up at the start of the block.
        worklist.insert(0, m);
      } else {
        worklist.push(m);         // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(block, block->head(), next_call);

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      for (uint j=0; j< block->number_of_nodes(); j++) {
        Node     *n = block->get_node(j);
        int     idx = n->_idx;
        tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
        tty->print("latency:%3d  ", get_latency_for_node(n));
        tty->print("%4d: %s\n", idx, n->Name());
      }
    }
#endif

  uint max_idx = (uint)ready_cnt.length();
  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not ready

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#   ready list:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];      // Get Node on worklist
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
    block->map_node(n, phi_cnt++);    // Schedule him next

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", get_latency_for_node(n));
      n->dump();
      if (Verbose) {
        tty->print("#   ready list:");
        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
          Node *n = worklist[i];      // Get Node on worklist
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }

#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }

    if (n->is_Mach() && n->as_Mach()->has_call()) {
      RegMask regs;
      regs.Insert(_matcher.c_frame_pointer());
      regs.OR(n->out_RegMask());

      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
      map_node_to_block(proj, block);
      block->insert_node(proj, phi_cnt++);

      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
    }

    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if (get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      if (m->_idx >= max_idx) { // new node, skip it
        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
        continue;
      }
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }

  if( phi_cnt != block->end_idx() ) {
    // did not schedule all.  Retry, Bailout, or Die
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0; i < block->number_of_nodes(); i++) {
      tty->print("# ");
      block->get_node(i)->fast_dump();
    }
    tty->cr();
  }
#endif


  return true;
}
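// At its core, schedule_local is list scheduling driven by ready counts:
// every node starts with a count of unscheduled block-local inputs, zero-count
// nodes sit on a worklist, and scheduling a node decrements the counts of its
// users. A compact sketch under that assumption, with the graph encoded as a
// hypothetical adjacency list from node to users:
#include <vector>

std::vector<int> list_schedule(const std::vector<std::vector<int> >& users,
                               std::vector<int> ready_cnt) {
  std::vector<int> order;
  std::vector<int> worklist;
  for (int n = 0; n < (int)ready_cnt.size(); n++) {
    if (ready_cnt[n] == 0) worklist.push_back(n);  // no unscheduled inputs
  }
  while (!worklist.empty()) {
    int n = worklist.back();  // a real scheduler selects by latency and
    worklist.pop_back();      // heuristics (see select()), not LIFO order
    order.push_back(n);
    for (int m : users[n]) {
      if (--ready_cnt[m] == 0) worklist.push_back(m);  // user became ready
    }
  }
  return order;  // shorter than users.size() iff a dependence cycle remained
}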
Example #30
 int            num_threads()            { return _threads_array->length(); }