Example #1
//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep() const {
    JVMState* n = clone_shallow();
    for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
        p->_caller = p->_caller->clone_shallow();
    }
    assert(n->depth() == depth(), "sanity");
    assert(n->debug_depth() == debug_depth(), "sanity");
    return n;
}
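A minimal standalone sketch (toy types, not the real JVMState) of the shallow/deep distinction above: a shallow clone copies one frame and keeps sharing the caller chain, while clone_deep unshares every frame by re-cloning each caller in turn.

// Toy model for illustration only; 'Frame' is a hypothetical stand-in.
struct Frame {
  int bci;
  Frame* caller;
  Frame* clone_shallow() const { return new Frame(*this); }  // caller chain still shared
  Frame* clone_deep() const {
    Frame* n = clone_shallow();
    for (Frame* p = n; p->caller != nullptr; p = p->caller)
      p->caller = p->caller->clone_shallow();                // unshare one level per step
    return n;
  }
};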
Example #2
//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow() const {
    JVMState* n = has_method() ? new JVMState(_method, _caller) : new JVMState(0);
    n->set_bci(_bci);
    n->set_locoff(_locoff);
    n->set_stkoff(_stkoff);
    n->set_monoff(_monoff);
    n->set_endoff(_endoff);
    n->set_sp(_sp);
    n->set_map(_map);
    return n;
}
Example #3
//------------------------------verify_base_ptrs-------------------------------
// Verify that base pointers and derived pointers are still sane.
// Basically, if a derived pointer is live at a safepoint, then its
// base pointer must be live also.
void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
      Node *n = b->_nodes[j-1];
      if( n->is_Phi() ) break;
      MachNode *mach = n->is_Mach();
      MachSafePointNode *sfpt = (mach != NULL) ? mach->is_MachSafePoint() : NULL;
      // Found a safepoint?
      if( sfpt != NULL ) {
        JVMState* jvms = sfpt->jvms();
        if (jvms != NULL) {
          // Now scan for a live derived pointer
          if (jvms->oopoff() < sfpt->req()) {
            // Check each derived/base pair
            for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx += 2) {
              Node *check = sfpt->in(idx);
              uint j = 0;
              // search upwards through spills and spill phis for AddP
              while(true) {
                if( !check ) break;
                int idx = check->is_Copy();
                if( idx ) {
                  check = check->in(idx);
                } else if( check->is_Phi() && check->_idx >= _oldphi ) {
                  check = check->in(1);
                } else
                  break;
                j++;
                assert(j < 100000,"Derived pointer checking in infinite loop");
              } // End while
              MachNode *machcheck = check->is_Mach();
              assert(machcheck && machcheck->ideal_Opcode() == Op_AddP,"Bad derived pointer");
            }
          } // End of check for derived pointers
        } // End of check for debug info
      } // End of if found a safepoint
    } // End of forall instructions in block
  } // End of forall blocks
}
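For the base/derived invariant this verifier enforces, a minimal sketch (toy types, nothing HotSpot-specific): a derived pointer is an interior address computed from a base object, so a moving collector can only fix it up at a safepoint if the base is recorded alongside it.

#include <cstddef>

struct Obj { long header; int payload[8]; };   // toy heap object

int* derive(Obj* base) {
  return &base->payload[3];                    // derived = base + constant offset
}

// After the collector moves 'base', the recorded (derived, base) pair
// lets it rebase the interior pointer:
int* rebase(int* derived, Obj* old_base, Obj* new_base) {
  std::ptrdiff_t off = reinterpret_cast<char*>(derived) - reinterpret_cast<char*>(old_base);
  return reinterpret_cast<int*>(reinterpret_cast<char*>(new_base) + off);
}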
Example #4
//-----------------------------try_to_inline-----------------------------------
// return true if ok
// Relocated from "InliningClosure::try_to_inline"
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
                               int caller_bci, JVMState* jvms, ciCallProfile& profile,
                               WarmCallInfo* wci_result, bool& should_delay) {

  if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("size > DesiredMethodLimit");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  _forced_inline = false; // Reset
  if (!should_inline(callee_method, caller_method, caller_bci, profile,
                     wci_result)) {
    return false;
  }
  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
    return false;
  }

  if (InlineAccessors && callee_method->is_accessor()) {
    // accessor methods are not subject to any of the following limits.
    set_msg("accessor");
    return true;
  }

  // suppress a few checks for accessors and trivial methods
  if (callee_method->code_size() > MaxTrivialSize) {

    // don't inline into giant methods
    if (C->over_inlining_cutoff()) {
      if ((!callee_method->force_inline() && !caller_method->is_compiled_lambda_form())
          || !IncrementalInline) {
        set_msg("NodeCountInliningCutoff");
        return false;
      } else {
        should_delay = true;
      }
    }

    if ((!UseInterpreter || CompileTheWorld) &&
        is_init_with_ea(callee_method, caller_method, C)) {
      // Escape Analysis stress testing when running Xcomp or CTW:
      // inline constructors even if they are not reached.
    } else if (forced_inline()) {
      // Inlining was forced by CompilerOracle, ciReplay or annotation
    } else if (profile.count() == 0) {
      // don't inline unreached call sites
       set_msg("call site not reached");
       return false;
    }
  }

  if (!C->do_inlining() && InlineAccessors) {
    set_msg("not an accessor");
    return false;
  }

  // Limit inlining depth in case inlining is forced or
  // _max_inline_level was increased to compensate for lambda forms.
  if (inline_level() > MaxForceInlineLevel) {
    set_msg("MaxForceInlineLevel");
    return false;
  }
  if (inline_level() > _max_inline_level) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("inlining too deep");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  // detect direct and indirect recursive inlining
  {
    // count the current method and the callee
    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
    int inline_level = 0;
    if (!is_compiled_lambda_form) {
      if (method() == callee_method) {
        inline_level++;
      }
    }
    // count callers of current method and callee
    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
      if (j->method() == callee_method) {
        if (is_compiled_lambda_form) {
          // Since compiled lambda forms are heavily reused we allow recursive inlining.  If it is truly
          // a recursion (using the same "receiver"), we limit inlining; otherwise we can easily blow the
          // compiler stack.
          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
          if (caller_argument0 == callee_argument0) {
            inline_level++;
          }
        } else {
          inline_level++;
        }
      }
    }
    if (inline_level > MaxRecursiveInlineLevel) {
      set_msg("recursive inlining is too deep");
      return false;
    }
  }

  int size = callee_method->code_size_for_inlining();

  if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("size > DesiredMethodLimit");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  // ok, inline this method
  return true;
}
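The recursion check above boils down to a walk up the JVMState caller chain, counting frames that already execute the callee. A minimal sketch with a hypothetical frame type:

// Sketch (toy types): count how many frames up the caller chain are already
// running 'callee'; inlining is refused once this exceeds a fixed limit.
struct ToyJVMState {
  const void* method;        // stand-in for ciMethod*
  ToyJVMState* caller;
};

bool too_recursive(const void* callee, ToyJVMState* jvms, int max_level) {
  int level = 0;
  for (ToyJVMState* j = jvms; j != nullptr; j = j->caller)
    if (j->method == callee) level++;
  return level > max_level;
}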
Example #5
//--------------------gen_stub-------------------------------
void GraphKit::gen_stub(address C_function,
                        const char *name,
                        int is_fancy_jump,
                        bool pass_tls,
                        bool return_pc) {
  ResourceMark rm;

  const TypeTuple *jdomain = C->tf()->domain();
  const TypeTuple *jrange  = C->tf()->range();

  // The procedure start
  StartNode* start = new (C) StartNode(root(), jdomain);
  _gvn.set_type_bottom(start);

  // Make a map, with JVM state
  uint parm_cnt = jdomain->cnt();
  uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  JVMState* jvms = new (C) JVMState(0);
  jvms->set_bci(InvocationEntryBci);
  jvms->set_monoff(max_map);
  jvms->set_scloff(max_map);
  jvms->set_endoff(max_map);
  {
    SafePointNode *map = new (C) SafePointNode( max_map, jvms );
    jvms->set_map(map);
    set_jvms(jvms);
    assert(map == this->map(), "kit.map is set");
  }

  // Make up the parameters
  uint i;
  for( i = 0; i < parm_cnt; i++ )
    map()->init_req(i, _gvn.transform(new (C) ParmNode(start, i)));
  for( ; i<map()->req(); i++ )
    map()->init_req(i, top());      // For nicer debugging

  // GraphKit requires memory to be a MergeMemNode:
  set_all_memory(map()->memory());

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  const int NoAlias = Compile::AliasIdxBot;

  Node* adr_last_Java_pc = basic_plus_adr(top(),
                                            thread,
                                            in_bytes(JavaThread::frame_anchor_offset()) +
                                            in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
#if defined(SPARC)
  Node* adr_flags = basic_plus_adr(top(),
                                   thread,
                                   in_bytes(JavaThread::frame_anchor_offset()) +
                                   in_bytes(JavaFrameAnchor::flags_offset()));
#endif /* defined(SPARC) */


  // Drop in the last_Java_sp.  last_Java_fp is not touched.
  // Always do this after the other "last_Java_frame" fields are set since
  // as soon as last_Java_sp != NULL the has_last_Java_frame is true and
  // users will look at the other fields.
  //
  Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
  Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
  store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias);

  // Set _thread_in_native
  // The order of stores into TLS is critical!  Setting _thread_in_native MUST
  // be last, because a GC is allowed at any time after setting it and the GC
  // will require last_Java_pc and last_Java_sp.
  Node* adr_state = basic_plus_adr(top(), thread, in_bytes(JavaThread::thread_state_offset()));

  //-----------------------------
  // Compute signature for C call.  Varies from the Java signature!
  const Type **fields = TypeTuple::fields(2*parm_cnt+2);
  uint cnt = TypeFunc::Parms;
  // The C routine gets the base of thread-local storage passed in as an
  // extra argument.  Not all calls need it, but it's cheap to add here.
  for( ; cnt<parm_cnt; cnt++ )
    fields[cnt] = jdomain->field_at(cnt);
  fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
  // Also pass in the caller's PC, if asked for.
  if( return_pc )
    fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC

  const TypeTuple* domain = TypeTuple::make(cnt,fields);
  // The C routine we are about to call cannot return an oop; it can block on
  // exit and a GC will trash the oop while it sits in C-land.  Instead, we
  // return the oop through TLS for runtime calls.
  // Also, C routines returning integer subword values leave the high
  // order bits dirty; these must be cleaned up by explicit sign extension.
  const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
  // Make a private copy of jrange->fields();
  const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
  // Fixup oop returns
  int retval_ptr = retval->isa_oop_ptr();
  if( retval_ptr ) {
    assert( pass_tls, "Oop must be returned thru TLS" );
    // Fancy-jumps return address; others return void
    rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;

  } else if( retval->isa_int() ) { // Returning any integer subtype?
    // "Fatten" byte, char & short return types to 'int' to show that
    // the native C code can return values with junk high order bits.
    // We'll sign-extend it below later.
    rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext

  } else if( jrange->cnt() >= TypeFunc::Parms+1 ) { // Else copy other types
    rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
    if( jrange->cnt() == TypeFunc::Parms+2 )
      rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
  }
  const TypeTuple* range = TypeTuple::make(jrange->cnt(),rfields);

  // Final C signature
  const TypeFunc *c_sig = TypeFunc::make(domain,range);

  //-----------------------------
  // Make the call node
  CallRuntimeNode *call = new (C)
    CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
  //-----------------------------

  // Fix-up the debug info for the call
  call->set_jvms( new (C) JVMState(0) );
  call->jvms()->set_bci(0);
  call->jvms()->set_offsets(cnt);

  // Set fixed predefined input arguments
  cnt = 0;
  for( i=0; i<TypeFunc::Parms; i++ )
    call->init_req( cnt++, map()->in(i) );
  // A little too aggressive on the parm copy; return address is not an input
  call->set_req(TypeFunc::ReturnAdr, top());
  for( ; i<parm_cnt; i++ )    // Regular input arguments
    call->init_req( cnt++, map()->in(i) );

  call->init_req( cnt++, thread );
  if( return_pc )             // Return PC, if asked for
    call->init_req( cnt++, returnadr() );
  _gvn.transform_no_reclaim(call);


  //-----------------------------
  // Now set up the return results
  set_control( _gvn.transform( new (C) ProjNode(call,TypeFunc::Control)) );
  set_i_o(     _gvn.transform( new (C) ProjNode(call,TypeFunc::I_O    )) );
  set_all_memory_call(call);
  if (range->cnt() > TypeFunc::Parms) {
    Node* retnode = _gvn.transform( new (C) ProjNode(call,TypeFunc::Parms) );
    // C-land is allowed to return sub-word values.  Convert to integer type.
    assert( retval != Type::TOP, "" );
    if (retval == TypeInt::BOOL) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFF)) );
    } else if (retval == TypeInt::CHAR) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) );
    } else if (retval == TypeInt::BYTE) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) );
    } else if (retval == TypeInt::SHORT) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) );
    }
    map()->set_req( TypeFunc::Parms, retnode );
  }

  //-----------------------------

  // Clear last_Java_sp
  store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias);
  // Clear last_Java_pc and (optionally) _flags
  store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias);
#if defined(SPARC)
  store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias);
#endif /* defined(SPARC) */
#ifdef IA64
  Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
  if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
  store_to_memory(NULL, adr_last_Java_fp,    null(),    T_ADDRESS, NoAlias);
#endif

  // For is-fancy-jump, the C-return value is also the branch target
  Node* target = map()->in(TypeFunc::Parms);
  // Runtime call returning oop in TLS?  Fetch it out
  if( pass_tls ) {
    Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
    Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);
    map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
    // clear thread-local-storage(tls)
    store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias);
  }

  //-----------------------------
  // check exception
  Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
  Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);

  Node* exit_memory = reset_memory();

  Node* cmp = _gvn.transform( new (C) CmpPNode(pending, null()) );
  Node* bo  = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
  IfNode   *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);

  Node* if_null     = _gvn.transform( new (C) IfFalseNode(iff) );
  Node* if_not_null = _gvn.transform( new (C) IfTrueNode(iff)  );

  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
  Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
  Node *to_exc = new (C) TailCallNode(if_not_null,
                                      i_o(),
                                      exit_memory,
                                      frameptr(),
                                      returnadr(),
                                      exc_target, null());
  root()->add_req(_gvn.transform(to_exc));  // bind to root to keep live
  C->init_start(start);

  //-----------------------------
  // If this is a normal subroutine return, issue the return and be done.
  Node *ret;
  switch( is_fancy_jump ) {
  case 0:                       // Make a return instruction
    // Return to caller, free any space for return address
    ret = new (C) ReturnNode(TypeFunc::Parms, if_null,
                             i_o(),
                             exit_memory,
                             frameptr(),
                             returnadr());
    if (C->tf()->range()->cnt() > TypeFunc::Parms)
      ret->add_req( map()->in(TypeFunc::Parms) );
    break;
  case 1:    // This is a fancy tail-call jump.  Jump to computed address.
    // Jump to new callee; leave old return address alone.
    ret = new (C) TailCallNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               returnadr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  case 2:                       // Pop return address & jump
    // Throw away old return address; jump to new computed address
    //assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
    ret = new (C) TailJumpNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  default:
    ShouldNotReachHere();
  }
  root()->add_req(_gvn.transform(ret));
}
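The return-value cleanup near the end of gen_stub is plain subword normalization: masks zero-extend BOOL/CHAR, shift pairs sign-extend BYTE/SHORT. A standalone sketch of the same arithmetic (note that left-shifting a negative value and arithmetic right shift are only guaranteed from C++20 on, though mainstream compilers have always done the expected thing):

#include <cstdint>

int32_t clean_bool (int32_t dirty) { return dirty & 0xFF;   }      // zero-extend low byte
int32_t clean_char (int32_t dirty) { return dirty & 0xFFFF; }      // zero-extend low 16 bits
int32_t clean_byte (int32_t dirty) { return (dirty << 24) >> 24; } // sign-extend low byte
int32_t clean_short(int32_t dirty) { return (dirty << 16) >> 16; } // sign-extend low 16 bits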
Example #6
void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {

  if (edges) {

    // Output edge
    node_idx_t dest_id = n->_idx;
    for ( uint i = 0; i < n->len(); i++ ) {
      if ( n->in(i) ) {
        Node *source = n->in(i);
        begin_elem(EDGE_ELEMENT);
        print_attr(FROM_PROPERTY, source->_idx);
        print_attr(TO_PROPERTY, dest_id);
        print_attr(INDEX_PROPERTY, i);
        end_elem();
      }
    }

  } else {

    // Output node
    begin_head(NODE_ELEMENT);
    print_attr(NODE_ID_PROPERTY, n->_idx);
    end_head();

    head(PROPERTIES_ELEMENT);

    Node *node = n;
#ifndef PRODUCT
    Compile::current()->_in_dump_cnt++;
    print_prop(NODE_NAME_PROPERTY, (const char *)node->Name());
    const Type *t = node->bottom_type();
    print_prop("type", t->msg());
    print_prop("idx", node->_idx);
#ifdef ASSERT
    print_prop("debug_idx", node->_debug_idx);
#endif

    if (C->cfg() != NULL) {
      Block* block = C->cfg()->get_block_for_node(node);
      if (block == NULL) {
        print_prop("block", C->cfg()->get_block(0)->_pre_order);
      } else {
        print_prop("block", block->_pre_order);
      }
    }

    const jushort flags = node->flags();
    if (flags & Node::Flag_is_Copy) {
      print_prop("is_copy", "true");
    }
    if (flags & Node::Flag_rematerialize) {
      print_prop("rematerialize", "true");
    }
    if (flags & Node::Flag_needs_anti_dependence_check) {
      print_prop("needs_anti_dependence_check", "true");
    }
    if (flags & Node::Flag_is_macro) {
      print_prop("is_macro", "true");
    }
    if (flags & Node::Flag_is_Con) {
      print_prop("is_con", "true");
    }
    if (flags & Node::Flag_is_cisc_alternate) {
      print_prop("is_cisc_alternate", "true");
    }
    if (flags & Node::Flag_is_dead_loop_safe) {
      print_prop("is_dead_loop_safe", "true");
    }
    if (flags & Node::Flag_may_be_short_branch) {
      print_prop("may_be_short_branch", "true");
    }
    if (flags & Node::Flag_has_call) {
      print_prop("has_call", "true");
    }

    if (C->matcher() != NULL) {
      if (C->matcher()->is_shared(node)) {
        print_prop("is_shared", "true");
      } else {
        print_prop("is_shared", "false");
      }
      if (C->matcher()->is_dontcare(node)) {
        print_prop("is_dontcare", "true");
      } else {
        print_prop("is_dontcare", "false");
      }

#ifdef ASSERT
      Node* old = C->matcher()->find_old_node(node);
      if (old != NULL) {
        print_prop("old_node_idx", old->_idx);
      }
#endif
    }

    if (node->is_Proj()) {
      print_prop("con", (int)node->as_Proj()->_con);
    }

    if (node->is_Mach()) {
      print_prop("idealOpcode", (const char *)NodeClassNames[node->as_Mach()->ideal_Opcode()]);
    }

    buffer[0] = 0;
    stringStream s2(buffer, sizeof(buffer) - 1);

    node->dump_spec(&s2);
    if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
      const TypeInstPtr  *toop = t->isa_instptr();
      const TypeKlassPtr *tkls = t->isa_klassptr();
      ciKlass*           klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
      if( klass && klass->is_loaded() && klass->is_interface() ) {
        s2.print("  Interface:");
      } else if( toop ) {
        s2.print("  Oop:");
      } else if( tkls ) {
        s2.print("  Klass:");
      }
      t->dump_on(&s2);
    } else if( t == Type::MEMORY ) {
      s2.print("  Memory:");
      MemNode::dump_adr_type(node, node->adr_type(), &s2);
    }

    assert(s2.size() < sizeof(buffer), "size in range");
    print_prop("dump_spec", buffer);

    if (node->is_block_proj()) {
      print_prop("is_block_proj", "true");
    }

    if (node->is_block_start()) {
      print_prop("is_block_start", "true");
    }

    const char *short_name = "short_name";
    if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) {
      int index = node->as_Proj()->_con - TypeFunc::Parms;
      if (index >= 10) {
        print_prop(short_name, "PA");
      } else {
        sprintf(buffer, "P%d", index);
        print_prop(short_name, buffer);
      }
    } else if (strcmp(node->Name(), "IfTrue") == 0) {
      print_prop(short_name, "T");
    } else if (strcmp(node->Name(), "IfFalse") == 0) {
      print_prop(short_name, "F");
    } else if ((node->is_Con() && node->is_Type()) || node->is_Proj()) {

      if (t->base() == Type::Int && t->is_int()->is_con()) {
        const TypeInt *typeInt = t->is_int();
        assert(typeInt->is_con(), "must be constant");
        jint value = typeInt->get_con();

        // max. 2 chars allowed
        if (value >= -9 && value <= 99) {
          sprintf(buffer, "%d", value);
          print_prop(short_name, buffer);
        } else {
          print_prop(short_name, "I");
        }
      } else if (t == Type::TOP) {
        print_prop(short_name, "^");
      } else if (t->base() == Type::Long && t->is_long()->is_con()) {
        const TypeLong *typeLong = t->is_long();
        assert(typeLong->is_con(), "must be constant");
        jlong value = typeLong->get_con();

        // max. 2 chars allowed
        if (value >= -9 && value <= 99) {
          sprintf(buffer, JLONG_FORMAT, value);
          print_prop(short_name, buffer);
        } else {
          print_prop(short_name, "L");
        }
      } else if (t->base() == Type::KlassPtr) {
        const TypeKlassPtr *typeKlass = t->is_klassptr();
        print_prop(short_name, "CP");
      } else if (t->base() == Type::Control) {
        print_prop(short_name, "C");
      } else if (t->base() == Type::Memory) {
        print_prop(short_name, "M");
      } else if (t->base() == Type::Abio) {
        print_prop(short_name, "IO");
      } else if (t->base() == Type::Return_Address) {
        print_prop(short_name, "RA");
      } else if (t->base() == Type::AnyPtr) {
        print_prop(short_name, "P");
      } else if (t->base() == Type::RawPtr) {
        print_prop(short_name, "RP");
      } else if (t->base() == Type::AryPtr) {
        print_prop(short_name, "AP");
      }
    }

    JVMState* caller = NULL;
    if (node->is_SafePoint()) {
      caller = node->as_SafePoint()->jvms();
    } else {
      Node_Notes* notes = C->node_notes_at(node->_idx);
      if (notes != NULL) {
        caller = notes->jvms();
      }
    }

    if (caller != NULL) {
      stringStream bciStream;
      ciMethod* last = NULL;
      int last_bci;
      while(caller) {
        if (caller->has_method()) {
          last = caller->method();
          last_bci = caller->bci();
        }
        bciStream.print("%d ", caller->bci());
        caller = caller->caller();
      }
      print_prop("bci", bciStream.as_string());
      if (last != NULL && last->has_linenumber_table() && last_bci >= 0) {
        print_prop("line", last->line_number_from_bci(last_bci));
      }
    }

#ifdef ASSERT
    if (node->debug_orig() != NULL) {
      temp_set->Clear();
      stringStream dorigStream;
      Node* dorig = node->debug_orig();
      while (dorig && !temp_set->test_set(dorig->_idx)) {
        dorigStream.print("%d ", dorig->_idx);
        dorig = dorig->debug_orig();  // advance, or the loop never terminates
      }
      print_prop("debug_orig", dorigStream.as_string());
    }
#endif

    if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) {
      buffer[0] = 0;
      _chaitin->dump_register(node, buffer);
      print_prop("reg", buffer);
      uint lrg_id = 0;
      if (node->_idx < _chaitin->_lrg_map.size()) {
        lrg_id = _chaitin->_lrg_map.live_range_id(node);
      }
      print_prop("lrg", lrg_id);
    }

    Compile::current()->_in_dump_cnt--;
#endif

    tail(PROPERTIES_ELEMENT);
    tail(NODE_ELEMENT);
  }
}
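The debug_orig loop above (as fixed) is the usual visited-set walk over a possibly cyclic chain. The same pattern in isolation, using a standard container instead of VectorSet:

#include <unordered_set>

struct N { unsigned idx; N* debug_orig; };

// Visit the debug_orig chain at most once per node, so a cyclic chain
// terminates: insert().second is false once an index repeats.
template <typename F>
void walk_orig(N* n, F visit) {
  std::unordered_set<unsigned> seen;
  for (N* d = n; d != nullptr && seen.insert(d->idx).second; d = d->debug_orig)
    visit(d);
}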
Example #7
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  // This check is done here because the for_method_handle_inline() method
  // needs the jvms for the inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  C->print_inlining_insert(this);

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result);
}
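The "make sure the state is a MergeMem" step above is a normalize-on-demand idiom: the parser needs the aggregate memory form, so a flat memory edge is wrapped lazily. A toy sketch of that pattern (hypothetical types, not the C2 node classes):

struct Mem {
  virtual bool is_merged() const { return false; }
  virtual ~Mem() {}
};
struct MergedMem : Mem {
  Mem* base;
  explicit MergedMem(Mem* b) : base(b) {}
  bool is_merged() const { return true; }
};

// Wrap only when the state is not already in the aggregate form.
Mem* ensure_merged(Mem* m) {
  return m->is_merged() ? m : new MergedMem(m);
}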
Example #8
JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_Trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in the predicate.
    // We share a map with the caller, so its JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new (kit.C) RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}
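The debug-info merge at the end creates a Phi for a slot only when the per-path inputs actually differ; otherwise the common value is reused. The same "lazy phi" pattern in isolation (toy types, assumes at least one path):

#include <vector>

struct Val { int id; };

// Merge one state slot across paths: build a phi only on disagreement.
Val* merge_slot(const std::vector<Val*>& per_path,
                Val* (*make_phi)(const std::vector<Val*>&)) {
  for (Val* v : per_path)
    if (v != per_path[0])
      return make_phi(per_path);  // inputs differ: a phi is required
  return per_path[0];             // all paths agree: reuse the value
}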
Example #9
//------------------------------insert_copies----------------------------------
void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
  // We compress LRGs and fix the liveout data only here, since the other
  // place, in Split(), is guarded by an assert which we never hit.
  _phc.compress_uf_map_for_nodes();
  // Fix block's liveout data for compressed live ranges.
  for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) {
    uint compressed_lrg = _phc.Find(lrg);
    if( lrg != compressed_lrg ) {
      for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) {
        IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
        if( liveout->member(lrg) ) {
          liveout->remove(lrg);
          liveout->insert(compressed_lrg);
        }
      }
    }
  }

  // All new nodes added are actual copies to replace virtual copies.
  // Nodes with index less than '_unique' are original, non-virtual Nodes.
  _unique = C->unique();

  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    Block *b = _phc._cfg._blocks[i];
    uint cnt = b->num_preds();  // Number of inputs to the Phi

    for( uint l = 1; l<b->_nodes.size(); l++ ) {
      Node *n = b->_nodes[l];

      // Do not use removed-copies, use copied value instead
      uint ncnt = n->req();
      for( uint k = 1; k<ncnt; k++ ) {
        Node *copy = n->in(k);
        uint cidx = copy->is_Copy();
        if( cidx ) {
          Node *def = copy->in(cidx);
          if( _phc.Find(copy) == _phc.Find(def) )
            n->set_req(k,def);
        }
      }

      // Remove any explicit copies that get coalesced.
      uint cidx = n->is_Copy();
      if( cidx ) {
        Node *def = n->in(cidx);
        if( _phc.Find(n) == _phc.Find(def) ) {
          n->replace_by(def);
          n->set_req(cidx,NULL);
          b->_nodes.remove(l);
          l--;
          continue;
        }
      }

      if( n->is_Phi() ) {
        // Get the chosen name for the Phi
        uint phi_name = _phc.Find( n );
        // Ignore the pre-allocated specials
        if( !phi_name ) continue;
        // Check for mismatch inputs to Phi
        for( uint j = 1; j<cnt; j++ ) {
          Node *m = n->in(j);
          uint src_name = _phc.Find(m);
          if( src_name != phi_name ) {
            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // Rematerialize constants instead of copying them
            if( m->is_Mach() && m->as_Mach()->is_Con() &&
                m->as_Mach()->rematerialize() ) {
              copy = m->clone();
              // Insert the copy in the predecessor basic block
              pred->add_inst(copy);
              // Copy any flags as well
              _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg );
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode(m,*rm,*rm);
              // Find a good place to insert.  Kinda tricky, use a subroutine
              insert_copy_with_overlap(pred,copy,phi_name,src_name);
            }
            // Insert the copy in the use-def chain
            n->set_req( j, copy );
            _phc._cfg._bbs.map( copy->_idx, pred );
            // Extend ("register allocate") the names array for the copy.
            _phc._names.extend( copy->_idx, phi_name );
          } // End of if Phi names do not match
        } // End of for all inputs to Phi
      } else { // End of if Phi

        // Now check for 2-address instructions
        uint idx;
        if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) {
          // Get the chosen name for the Node
          uint name = _phc.Find( n );
          assert( name, "no 2-address specials" );
          // Check for name mis-match on the 2-address input
          Node *m = n->in(idx);
          if( _phc.Find(m) != name ) {
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // At this point it is unsafe to extend live ranges (6550579).
            // Rematerialize only constants as we do for Phi above.
            if( m->is_Mach() && m->as_Mach()->is_Con() &&
                m->as_Mach()->rematerialize() ) {
              copy = m->clone();
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
              if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) )
                l++;
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode( m, *rm, *rm );
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
            }
            // Insert the copy in the use-def chain
            n->set_req(idx, copy );
            // Extend ("register allocate") the names array for the copy.
            _phc._names.extend( copy->_idx, name );
            _phc._cfg._bbs.map( copy->_idx, b );
          }

        } // End of is two-adr

        // Insert a copy at a debug use for a lrg which has high frequency
        if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) {
          // Walk the debug inputs to the node and check for lrg freq
          JVMState* jvms = n->jvms();
          uint debug_start = jvms ? jvms->debug_start() : 999999;
          uint debug_end   = jvms ? jvms->debug_end()   : 999999;
          for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
            // Do not split monitors; they are only needed for debug table
            // entries and need no code.
            if( jvms->is_monitor_use(inpidx) ) continue;
            Node *inp = n->in(inpidx);
            uint nidx = _phc.n2lidx(inp);
            LRG &lrg = lrgs(nidx);

            // If this lrg has a high frequency use/def
            if( lrg._maxfreq >= _phc.high_frequency_lrg() ) {
              // If the live range is also live out of this block (like it
              // would be for a fast/slow idiom), the normal spill mechanism
              // does an excellent job.  If it is not live out of this block
              // (like it would be for debug info to uncommon trap) splitting
              // the live range now allows a better allocation in the high
              // frequency blocks.
              //   Build_IFG_virtual has converted the live sets to
              // live-IN info, not live-OUT info.
              uint k;
              for( k=0; k < b->_num_succs; k++ )
                if( _phc._live->live(b->_succs[k])->member( nidx ) )
                  break;      // Live in to some successor block?
              if( k < b->_num_succs )
                continue;     // Live out; do not pre-split
              // Split the lrg at this use
              const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
              Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
              // Insert the copy in the use-def chain
              n->set_req(inpidx, copy );
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
              // Extend ("register allocate") the names array for the copy.
              _phc.new_lrg( copy, _phc._maxlrg++ );
              _phc._cfg._bbs.map( copy->_idx, b );
              //tty->print_cr("Split a debug use in Aggressive Coalesce");
            }  // End of if high frequency use/def
          }  // End of for all debug inputs
        }  // End of if low frequency safepoint

      } // End of if Phi

    } // End of for all instructions
  } // End of for all blocks
}
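The two-address fix-up above is the classic register-allocation transform: when an instruction must overwrite its first input (x86-style dst = dst op src) but coalescing gave that input a different name than the result, a copy is inserted just before the instruction; constants are re-cloned instead, since rematerializing them extends no live range. What the inserted MachSpillCopy buys, written as plain scalars:

int two_addr_add(int a, int b) {
  int t = a;   // the copy: the result gets its own name, 'a' stays intact
  t += b;      // two-address op overwrites its first input
  return t;    // 'a' is still available to later uses
}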
Example #10
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms     = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs    = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  kit.replace_call(call, result);
}