// ------------------------------------------------------------------
// ciFieldLayout::ciFieldLayout
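// Construct the field layout for a loaded instance klass: header slots
// first, then the declared instance fields, recording each slot's
// BasicType and offset, plus access flags for the instance fields.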
ciFieldLayout::ciFieldLayout(ciInstanceKlass* my_klass) {
  assert(my_klass->is_loaded(), "must be loaded");
  ASSERT_IN_VM;

  klassOop klass = my_klass->get_klassOop();

  Arena* arena = CURRENT_ENV->arena();
  GrowableArray<BasicType>* fieldtypes =
    new (arena) GrowableArray<BasicType>(arena, 8, 0, T_VOID);
  GrowableArray<int>* aflags =
    new (arena) GrowableArray<int>(arena, 8, 0, 0);
  GrowableArray<int>* fieldoffsets =
    new (arena) GrowableArray<int>(arena, 8, 0, 0);
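  // The type and offset arrays run in parallel: entry i describes the
  // i-th field slot; pos below tracks the next slot index.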

  int pos = 0;

  fill_in_header_fields(fieldtypes, fieldoffsets, pos);
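  // pos now counts the header slots that precede the instance fields.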
  _header_count = pos;
  fill_in_instance_fields(fieldtypes, fieldoffsets, aflags, pos, klass);

#if 0
  // [RGV] instance size is in words but pos is the number
  // of fields.
  int fill_to = my_klass->instance_size();
  if (fieldtypes->length() < fill_to)
    fieldtypes->at_put_grow(fill_to-1, T_VOID, T_VOID);
  if (aflags->length() < fill_to)
    aflags->at_put_grow(fill_to-1, 0, 0);
#endif

  _fieldtypes = fieldtypes;
  _access_flags = aflags;
  _fieldoffsets = fieldoffsets;
}
//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities.  Basically, find NULL checks 
// with suitable memory ops nearby.  Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
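// For example, for "x = p->f" guarded by an explicit "p == NULL" test, the
// load of p->f can itself serve as the null check: a hardware fault on the
// load raises the exception and the explicit CmpP/If is removed.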
void Block::implicit_null_check(Block_Array &bbs, GrowableArray<uint> &latency, Node *proj, Node *val) {
  // Assume that if a null check is needed at offset 0 it is always needed.
  // Intel Solaris doesn't support implicit null checks yet and no
  // mechanism exists (yet) to set the switches at an os_cpu level.
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;

  // Make sure the ptr-is-null path appears to be uncommon!
  float f = end()->is_Mach()->is_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
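  // After the flip, f is the probability of taking the ptr-is-null path.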
  if( f > 0.0001 ) return;

  uint bidx = 0;                // Capture index of value into memop
  bool was_store;               // Memory op is a store op

  // Search the successor block for a load or store whose base value is also
  // the tested value.  There may be several.
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;        // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    MachNode *mach = val->out(i)->is_Mach();
    if( !mach ) continue;
    was_store = false;
    switch( mach->ideal_Opcode() ) {
    case Op_LoadB:
    case Op_LoadC:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
      was_store = true;         // Memory op is a store op
      // Stores will have their address in slot 2 (memory in slot 1).
      // If the value being null-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
      if( mach->in(2) != val ) continue;
      break;                    // Found a memory op?
    case Op_StrComp:		
      // Not a legit memory op for implicit null check regardless of 
      // embedded loads
      continue;
    default:                    // Also check for embedded loads
      if( !mach->check_for_anti_dependence() )
        continue;               // Not a memory op; skip it
      break;
    }
    // Check that the offset is not too large for an implicit exception
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == (Node*)-1) {
        // Cannot reason about it; probably not an implicit null exception
      } else {
        const TypePtr* tptr = base->bottom_type()->is_ptr();
        // Give up if offset is not a compile-time constant
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // correct if the base itself carries an offset
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if reference is beyond 4K page size
      }
    }

    // Check ctrl input to see if the null-check dominates the memory op
    Block *cb = bbs[mach->_idx];
    cb = cb->_idom;		// Always hoist at least 1 block
    if( !was_store ) {		// Stores can be hoisted only one block
      while( cb->_dom_depth > _dom_depth )
        cb = cb->_idom;		// Hoist loads as far as we want
    }
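    // If cb never reached this block, the null check does not dominate the
    // memory op within the allowed hoist distance; skip this candidate.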
    if( cb != this ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;              // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) vidx = j;
      // Block of memory-op input
      Block *inb = bbs[mach->in(j)->_idx];
      Block *b = this;          // Start from nul check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
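    // A non-zero j means some input of the memory op does not dominate the
    // null check, so the op cannot be hoisted up to this block.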
    if( j > 0 ) 
      continue;
    Block *mb = bbs[mach->_idx]; 
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      // mach is the (possibly faulting) memory op we are trying to hoist;
      // an anti-dependent load n found below blocks the hoist.
      while( b != this ) {
        uint k;
        for( k = 1; k < b->_nodes.size(); k++ ) {
          Node *n = b->_nodes[k];
          if( n->check_for_anti_dependence() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->_nodes.size() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check all paths)
        if( b->num_preds() != 2 ) break;
        b = bbs[b->pred(1)->_idx]; // Move up to predecessor block
      }
      if( b != this ) continue;
    }

    // Make sure this memory op is not already being used for a NullCheck
    MachNode *e = mb->end()->is_Mach();
    if( e && e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check

    // Found a candidate!  Pick one with least dom depth - the highest 
    // in the dom tree should be closest to the null check.
    if( !best ||
        bbs[mach->_idx]->_dom_depth < bbs[best->_idx]->_dom_depth ) {
      best = mach;
      bidx = vidx;
    }
  }
  // No candidate!
  if( !best ) return;

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = bbs[best->_idx];
  old_block->find_remove(best);
  add_inst(best);
  bbs.map(best->_idx,this);

  // Move the control dependence
  if (best->in(0) && best->in(0) == old_block->_nodes[0])
    best->set_req(0, _nodes[0]);

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->Opcode() == Op_MachProj ) {
      bbs[n->_idx]->find_remove(n);
      add_inst(n);
      bbs.map(n->_idx,this);
    }
  }

  // proj==Op_IfTrue --> ne test; proj==Op_IfFalse --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq.  If we see an IfTrue projection
  // then we are replacing a 'ne' test with an 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = _nodes[end_idx()+1];
    Node *tmp2 = _nodes[end_idx()+2];
    _nodes.map(end_idx()+1, tmp2);
    _nodes.map(end_idx()+2, tmp1);    
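    // Swap all uses of the two projections via a temporary node
    // (the three replace_by calls below perform the exchange).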
    Node *tmp = new (1) Node(1);
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
  }

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
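  // The new check takes the old If's control input, tests the hoisted memory
  // op 'best', and records which input slot (bidx) holds the checked value.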
  MachNode *nul_chk = new MachNullCheckNode(old_tst->in(0),best,bidx);
  _nodes.map(end_idx(),nul_chk);
  bbs.map(nul_chk->_idx,this);
  // Redirect users of old_tst to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++)
    old_tst->set_req(i3, NULL);
  latency.at_put_grow(nul_chk->_idx, nul_chk->latency_from_uses(bbs, latency));
  latency.at_put_grow(best   ->_idx, best   ->latency_from_uses(bbs, latency));

#ifndef PRODUCT
  if (TraceOptoPipelining) {
    tty->print("# implicit_null_check: latency %4d for ", latency.at_grow(best->_idx));
    best->fast_dump();
    tty->print("# implicit_null_check: latency %4d for ", latency.at_grow(nul_chk->_idx));
    nul_chk->fast_dump();
  }
#endif
}