Example #1
 Scheme<T>& operator+=(Scheme<T> const &a) {
   // Merge the other scheme's (index, value) pairs into this one.
   for (std::size_t i = 0; i < a.ind.size(); ++i) {
     push_pair(a.ind[i], a.val[i]);
   }
   push_constant(a.c);  // fold in the constant term
   return *this;
 }
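
The snippet assumes a Scheme<T> container exposing ind, val, c and the push_pair/push_constant helpers. A minimal sketch of what such a container might look like; only the names used above come from the snippet, everything else is an assumption:

// Hypothetical Scheme<T> consistent with the operator+= above; the
// members ind/val/c and the two helpers are the only parts taken
// from the snippet, the rest is illustrative.
#include <cstddef>
#include <vector>

template <typename T>
struct Scheme {
  std::vector<std::size_t> ind;  // term indices
  std::vector<T>           val;  // matching coefficients
  T                        c{};  // constant term

  // Append one (index, coefficient) term.
  void push_pair(std::size_t i, T v) {
    ind.push_back(i);
    val.push_back(v);
  }

  // Fold a constant into the scheme.
  void push_constant(T v) { c += v; }
};

With this layout, the operator+= above is a plain element-wise merge: append the other scheme's terms, then absorb its constant.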
Example #2
 // Push the node, which may be zero, one, or two words.
 void push_node(BasicType n_type, Node* n) {
     int n_size = type2size[n_type];
     if      (n_size == 1)  push(      n );  // T_INT, ...
     else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
      else                   { assert(n_size == 0, "must be T_VOID"); }
 }
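
push_node dispatches on the number of operand-stack slots a value occupies: on the JVM, long and double take two slots, void takes none, and the other value types one. An illustrative stand-in for the type2size table (HotSpot's real table covers all BasicType values; the trimmed enum and layout here are an assumption):

// Slots-per-type table in the spirit of HotSpot's type2size; trimmed
// for illustration, not HotSpot's actual definition.
enum BasicType { T_VOID, T_BOOLEAN, T_INT, T_FLOAT, T_LONG, T_DOUBLE, T_OBJECT };

static const int type2size[] = {
  /* T_VOID    */ 0,   // no value, nothing pushed
  /* T_BOOLEAN */ 1,
  /* T_INT     */ 1,
  /* T_FLOAT   */ 1,
  /* T_LONG    */ 2,   // two stack words -> push_pair
  /* T_DOUBLE  */ 2,   // two stack words -> push_pair
  /* T_OBJECT  */ 1,
};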
Example #3
bool Parse::push_constant(ciConstant constant, bool require_constant) {
  switch (constant.basic_type()) {
  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
  case T_INT:      push( intcon(constant.as_int())     ); break;
  case T_CHAR:     push( intcon(constant.as_char())    ); break;
  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
  case T_SHORT:    push( intcon(constant.as_short())   ); break;
  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) );  break;
  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) );  break;
  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
  case T_ARRAY:
  case T_OBJECT: {
    // cases:
    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    // An oop is not scavengable if it is in the perm gen.
    ciObject* oop_constant = constant.as_object();
    if (oop_constant->is_null_object()) {
      push( zerocon(T_OBJECT) );
      break;
    } else if (require_constant || oop_constant->should_be_constant()) {
      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
      break;
    } else {
      // we cannot inline the oop, but we can use it later to narrow a type
      return false;
    }
  }
  case T_ILLEGAL: {
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }
  default:
    ShouldNotReachHere();
    return false;
  }

  // success
  return true;
}
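
Note how the two-slot constants (T_LONG, T_DOUBLE) go through push_pair while everything else uses push: the parser's modeled expression stack must stay depth-for-depth in sync with the bytecode operand stack. A self-contained toy model of that convention (the names are assumptions, not HotSpot's API):

// Toy expression stack: two-slot values push a dummy second word so
// that modeled depths match the JVM operand stack.
#include <cassert>
#include <cstdint>
#include <vector>

struct ExprStack {
  static constexpr std::int64_t kTopHalf = -1;  // dummy high slot
  std::vector<std::int64_t> slots;

  void push(std::int64_t v)      { slots.push_back(v); }
  void push_pair(std::int64_t v) { push(v); push(kTopHalf); }

  std::int64_t pop() {
    std::int64_t v = slots.back();
    slots.pop_back();
    return v;
  }
  std::int64_t pop_pair() {
    assert(pop() == kTopHalf);  // discard the dummy half first
    return pop();
  }
};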
Example #4
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    if (field->is_static()) {
      // final static field
      if (push_constant(field->constant_value()))
        return;
    }
    else {
      // final non-static field of a trusted class (classes in
      // java.lang.invoke and sun.invoke packages and subpackages).
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);

        if (push_constant(constant, true))
          return;
      }
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if (bt == T_OBJECT) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}
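
The MemBarAcquire emitted for a volatile field read gives the load acquire semantics: no later memory operation may be reordered above it. A rough C++ analogue of the same ordering constraint (not HotSpot code):

// Rough analogue of a Java volatile read: the acquire load pins all
// subsequent memory operations below it, just as the MemBarAcquire
// above pins them below the volatile field load.
#include <atomic>

std::atomic<int> guard{0};
int payload = 0;

int read_after_guard() {
  int g = guard.load(std::memory_order_acquire);  // "volatile" read
  return g != 0 ? payload : -1;                   // cannot float above the load
}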
Example #5
    __normal_call void_type roll_back (
        list_type &_tnew,
        list_type &_told
        )
    {
        iptr_type _npos ;
        iptr_type _fpos ;

    //  1. detach the node->cell back-links held by the new cells
        for (auto _tpos  = _tnew.head();
                  _tpos != _tnew.tend();
                ++_tpos )
        {
            for (_npos = tria_pred::_dims+1;
                 _npos-- != +0 ; )
            {
                node(tria(*_tpos )
                   ->node( _npos))
                   ->next() = null_flag();
            }
        }

    //  2. re-point each node at its old cell and re-register the
    //     face-adjacency pairs for the restored cells
        for (auto _tpos  = _told.head();
                  _tpos != _told.tend();
                ++_tpos )
        {
            for (_npos = tria_pred::_dims+1;
                 _npos-- != +0 ; )
            {
                node(tria(*_tpos )
                   ->node( _npos))
                   ->next() = *_tpos ;
            }

            for (_fpos = tria_pred::_dims+1;
                 _fpos-- != +0 ; )
            {
                iptr_type  _tadj;
                iptr_type  _fadj;
                iptr_type  _flag;
                find_pair(*_tpos, _tadj ,
                    _fpos, _fadj, _flag )  ;

                push_pair(*_tpos, _tadj ,
                           _fpos, _fadj ,
                           _flag) ;
            }
        }

    //  3. free any node that no restored cell reclaimed
        for (auto _tpos  = _tnew.head();
                  _tpos != _tnew.tend();
                ++_tpos )
        {
            for (_npos = tria_pred::_dims+1;
                 _npos-- != +0 ; )
            {
                if (node(tria(*_tpos )
                       ->node( _npos))
                       ->mark()>= +0 )
                if (node(tria(*_tpos )
                       ->node( _npos))
                       ->next() == null_flag())
                {
                   _put_node(tria(*_tpos )
                           ->node( _npos)) ;
                }
            }
        }

    //  4. free the new cells themselves
        for (auto _tpos  = _tnew.head();
                  _tpos != _tnew.tend();
                ++_tpos )
        {
            _put_tria(*_tpos) ;
        }
    }
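
Here find_pair/push_pair re-register each face of a restored cell in the mesh's face-adjacency structure. A generic, self-contained sketch of that bookkeeping pattern, with a sorted-vertex key standing in for the library's internal hashing; every name below is an assumption, not the library's actual API:

// Generic face-pair bookkeeping: each face, keyed by its sorted
// vertex ids, maps to the (cell, local face) pair that owns it.
#include <algorithm>
#include <array>
#include <map>
#include <utility>

using face_key  = std::array<int, 2>;   // an edge face of a 2-simplex
using face_pair = std::pair<int, int>;  // (cell id, local face index)

static std::map<face_key, face_pair> face_table;

static face_key make_key(int v0, int v1) {
  face_key k{v0, v1};
  std::sort(k.begin(), k.end());  // orientation-independent key
  return k;
}

// Re-register one face of a restored cell, as the rollback does
// for every face of every cell in _told.
static void push_pair(int cell, int face, int v0, int v1) {
  face_table[make_key(v0, v1)] = face_pair(cell, face);
}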