// Create a new dictionary entry keyed by (klass, loader) under 'hash'.
// Both references are poisoned before storage, per this VM's read-barrier
// convention; the class name is cached in the entry for fast lookups.
DictionaryEntry* Dictionary::new_entry(unsigned int hash, klassOop klass,
                                       oop loader) {
  DictionaryEntry* entry =
      (DictionaryEntry*) Hashtable::new_entry(hash, POISON_KLASSREF(klassRef(klass)));
  entry->set_loader(ALWAYS_POISON_OBJECTREF(objectRef(loader)));
  entry->set_name(ALWAYS_POISON_OBJECTREF(objectRef(instanceKlass::cast(klass)->name())));
  entry->set_pd_set(NULL);      // no protection-domain set yet
  return entry;
}
// Ejemplo n.º 2 (snippet separator — scraper artifact)
/*
 * sysPrimitive - dispatch system-level primitives, numbered from 150.
 *   arguments[0] is the (string) argument for cases 0 and 1.
 * Returns the primitive's result object, or nilobj when the primitive
 * produces no value (e.g. garbage collect, or editline at EOF).
 */
object sysPrimitive(int number, object* arguments)
{   
  ObjectHandle returnedObject = nilobj;

  /* someday there will be more here */
  switch(number - 150) {
    case 0:     /* do a system() call */
      returnedObject = newInteger(system(
            objectRef(arguments[0]).charPtr()));
      break;

    case 1: /* editline, with history support */
      {
        char* command = linenoise_nb(objectRef(arguments[0]).charPtr());
        /* BUG FIX: linenoise_nb returns NULL on EOF/interrupt; the old
           code passed that NULL to newStString before checking it.  Only
           build the string (and record history) for a real line. */
        if(command) 
        {
          returnedObject = newStString(command);
          linenoiseHistoryAdd(command);
          free(command);
        }
      }
      break;

    case 2: /* get last error */ 
      {
        returnedObject = newStString(gLastError);
      }
      break;

    case 3: /* force garbage collect */
      {
        MemoryManager::Instance()->garbageCollect();
      }
      break;

    case 4: /* live object count */
      {
        returnedObject = newInteger(MemoryManager::Instance()->objectCount());
      }
      break;

    case 5: /* memory-manager statistics as a string */
      {
        returnedObject = newStString(MemoryManager::Instance()->statsString().c_str());
      }
      break;

    default:
      sysError("unknown primitive","sysPrimitive");
  }
  return(returnedObject);
}
// Ejemplo n.º 3 (snippet separator — scraper artifact)
// Emit XML describing every monitor this frame holds or is blocked on.
// 'youngest' marks the top-of-stack frame, where the thread may still be
// blocked acquiring (rather than owning) the first monitor found.
void vframe::print_to_xml_lock_info(JavaThread* thread, bool youngest, xmlBuffer* xb) {
  ResourceMark rm;
  frame fr = get_frame();       // Shorthand notation

  // First, assume we have the monitor locked.  If we haven't found an owned
  // monitor before and this is the first frame, then we need to see if the
  // thread is blocked.
  bool first = (youngest && thread->is_hint_blocked());

  // Print out all monitors that we have locked or are trying to lock
  if( fr.is_interpreted_frame() ) {
    int x = fr.interpreter_frame_monitor_count();
    // Not Correct; this always (re)prints the most recent X monitors
for(int i=0;i<x;i++){
      // Lock-stack entries are stored poisoned; strip before printing.
      first = print_to_xml_lock( first, ALWAYS_UNPOISON_OBJECTREF(thread->_lckstk_top[-i-1]), false, xb );
    }

}else if(fr.is_native_frame()){
CodeBlob*cb=CodeCache::find_blob(fr.pc());
    assert0( cb->is_native_method() );
    methodCodeOop mco = cb->owner().as_methodCodeOop();
    methodOop moop = mco->method().as_methodOop();
    // Special-case the youngest frame sitting in java.lang.Object.wait().
    bool is_object_wait = youngest && moop->name() == vmSymbols::wait_name() && 
      instanceKlass::cast(moop->method_holder())->name() == vmSymbols::java_lang_Object();
    if( moop->is_synchronized() && moop->is_static() ) {
      // Static synchronized native: the lock is the holder's class mirror.
      first = print_to_xml_lock( first, objectRef(Klass::cast(moop->method_holder())->java_mirror()), false, xb );
    } else if( is_object_wait ) {
      // For synchronized native methods, there should be a single lock.
      // For object.wait, there is a single oop argument being wait'd upon.
      const RegMap *lm = cb->oop_maps();
      VOopReg::VR lck = lm->get_sole_oop(cb->rel_pc(fr.pc()));
      objectRef *loc = fr.reg_to_addr_oop(lck);
      first = print_to_xml_lock( first, *loc, is_object_wait, xb );
    } else if( moop->is_synchronized() ) {
      // For synchronized native methods, there should be a single lock.
      const DebugScope *ds = scope();
      DebugScopeValue::Name lck = ds->get_lock(0);
      objectRef *loc = (objectRef*)fr.reg_to_addr(DebugScopeValue::to_vreg(lck));
      first = print_to_xml_lock( first, *loc, is_object_wait, xb );
    } else if (thread->current_park_blocker() != NULL) {
      // Parked via java.util.concurrent: report the park blocker object.
oop obj=thread->current_park_blocker();
      first = print_to_xml_lock( first, objectRef(obj), false, xb );
    }

  } else {                      // Hopefully a compiled frame
    // Compiled frame: walk the debug-info lock list.
    const DebugScope *ds = scope();
for(uint i=0;i<ds->numlocks();i++){
      DebugScopeValue::Name lck = ds->get_lock(i);
      first = print_to_xml_lock( first, *fr.reg_to_addr(DebugScopeValue::to_vreg(lck)), false, xb );
    }
  }
}
// Ejemplo n.º 4 (snippet separator — scraper artifact)
// Return the integer payload stored in this object's byte area.
// The nil object always reads as zero; memcpy avoids alignment issues.
int ObjectStruct::intValue()
{   
    if(this == &objectRef(nilobj))
        return 0;

    int result;
    memcpy((char *) &result, charPtr(), (int) sizeof(int));
    return result;
}
// Ejemplo n.º 5 (snippet separator — scraper artifact)
// Allocate a byte object holding a copy of the C string 'str'.
// A NULL input yields a one-byte object containing only a terminator.
object MemoryManager::allocStr(register const char* str)
{
    register object newSym;

    if(NULL == str)
    {
        newSym = allocByte(1);
        objectRef(newSym).charPtr()[0] = '\0';
    }
    else
    {
        newSym = allocByte(1 + strlen(str));
        strcpy(objectRef(newSym).charPtr(), str);
    }
    return(newSym);
}
// Ejemplo n.º 6 (snippet separator — scraper artifact)
// Allocate a byte ("bit") object able to hold 'size' bytes.  Storage is
// requested in two-byte words; the size field is then negated to mark
// the object as byte-addressed rather than object-addressed.
object MemoryManager::allocByte(size_t size)
{
    object result = allocObject((size + 1) / 2);
    /* negative size fields indicate bit objects */
    objectRef(result).size = -size;
    return result;
}
// Ejemplo n.º 7 (snippet separator — scraper artifact)
  // Serialize an array of object references: write the element count,
  // then queue one reference per slot.
  void Serializer::StateSave::objectRefArray (GenericArrayList *a)
  {
    //Store array size
    UintSize count = a->size();
    store( &count, sizeof( UintSize ));

    //Store array elements
    for (UintSize i = 0; i < count; ++i)
      objectRef( (Object**) a->at(i) );
  }
// Ejemplo n.º 8 (snippet separator — scraper artifact)
// --- set_buf ---------------------------------------------------------------
// Write one JVMTI deferred-local value into the interpreter-frame slot at
// 'buf', converting from the jvalue union to the slot's in-memory form.
// Two-slot types (long/double) also tag the adjacent slot at buf[-1] with
// double_slot_primitive_type_empty_slot_id, honoring host endianness.
static void set_buf(intptr_t* buf, jvmtiDeferredLocalVariable* val) {
  switch (val->type()) {
    case T_BOOLEAN:
      *buf = (intptr_t)val->value().z; break;
    case T_FLOAT:
    {
      // Reinterpret the float's bits as a jint, then widen into the slot.
      jfloat jf = val->value().f;
      jint   ji = *(jint*) &jf;
      *buf = (intptr_t)ji; break;
    }
    case T_DOUBLE:
    {
      // Tag the companion slot; which half carries the tag is endian-dependent.
      jint* halfs = (jint*) &buf[-1];
#ifdef VM_LITTLE_ENDIAN
      halfs[1] = (jint)frame::double_slot_primitive_type_empty_slot_id;
      halfs[0] = 0;
#else
      halfs[0] = (jint)frame::double_slot_primitive_type_empty_slot_id;
      halfs[1] = 0;
#endif
      *(jdouble*) buf = val->value().d;
      break;
    }
    case T_CHAR:
      *buf = (intptr_t)val->value().c; break;
    case T_BYTE:
      *buf = (intptr_t)val->value().b; break;
    case T_SHORT:
      *buf = (intptr_t)val->value().s; break;
    case T_INT:
      *buf = (intptr_t)val->value().i; break;
    case T_LONG:
    {
      // Same companion-slot tagging as T_DOUBLE.
      jint* halfs = (jint*) &buf[-1];
#ifdef VM_LITTLE_ENDIAN
      halfs[1] = (jint)frame::double_slot_primitive_type_empty_slot_id;
      halfs[0] = 0;
#else
      halfs[0] = (jint)frame::double_slot_primitive_type_empty_slot_id;
      halfs[1] = 0;
#endif
      *(intptr_t*) buf = (intptr_t)val->value().j;
      break;
    }
case T_OBJECT://fall-through
    case T_ARRAY:
    {
      // References pass through the load-value barrier before storing.
      objectRef ref = objectRef((uint64_t)val->value().l);
      *buf = lvb_ref(&ref).raw_value();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
// Ejemplo n.º 9 (snippet separator — scraper artifact)
  // Save a pointer to an object: queue the reference itself for fix-up,
  // then queue the pointed-to object (initially unprocessed) for saving.
  void Serializer::StateSave::objectPtr (Object **pp)
  {
    //Enqueue reference
    objectRef (pp);

    //Enqueue object
    SaveObjNode node;
    node.p = *pp;
    node.done = false;
    objStack.pushBack( node );
  }
// Ejemplo n.º 10 (snippet separator — scraper artifact)
  // Deserialize an array of object references: read the stored element
  // count, size the array to match, then queue one reference per slot.
  void Serializer::StateLoad::objectRefArray (GenericArrayList *a)
  {
    //Load array size
    UintSize count = 0;
    load( &count, sizeof( UintSize ));

    //Make room for the elements
    a->clear();
    a->resize( count );

    //Load array elements
    for (UintSize i = 0; i < count; ++i)
      objectRef( (Object**) a->at(i) );
  }
// Register an oop with the pool and return its dispensed ticket id, or -1
// for a valid SBA stack object (which ARTA cannot inspect).
int ArtaObjectPool::append_oop(const oopDesc *o) {
  if( UseSBA && !o->is_oop() && Threads::sba_find_owner((address)o) ) {
    return -1;                  // Valid stack object, but not inspectable by ARTA
  } else {
assert(o->is_oop(),"checking for bad oops");
  }

  // Ids cycle through 16 buffer generations before wrapping.
  _last_dispensed_id =  (_last_dispensed_id+1) % (16*_buffer_size);
assert(_last_dispensed_id>=0,"object id tickets are nonnegative");
  if (!_buffer_full && (uint)_last_dispensed_id == _buffer_size-1) {
    _buffer_full = true;        // first pass through the buffer completed
  }
  // Map the id onto a slot; old entries are overwritten once wrapped.
  unsigned int slot = _last_dispensed_id % _buffer_size;
  objectRef r = objectRef((oop)o);
  POISON_AND_STORE_REF(&_object_map[slot], r);

  return _last_dispensed_id;
}
// Ejemplo n.º 12 (snippet separator — scraper artifact)
void MemoryManager::visit(register object x)
{
    int i, s;
    object *p;

    if (x && (!(x&1))) 
    {
        if (--(objectFromID(x).referenceCount) == -1) 
        {
            /* then it's the first time we've visited it, so: */
            visit(objectFromID(x)._class);
            s = objectRef(x).size;
            if (s>0) 
            {
                p = objectFromID(x).memory;
                for (i=s; i; --i) 
                    visit(*p++);
            }
        }
    }
}
// Ejemplo n.º 13 (snippet separator — scraper artifact)
 // Load a pointer to an object by queuing its reference for fix-up.
 void Serializer::StateLoad::objectPtr (Object **pp)
 {
   objectRef( pp );
 }
 void set_component_mirror(oop m)      { ref_store_without_check(&_component_mirror, objectRef(m)); }
// --- build_repack_buffer ---------------------------------------------------
// Build an IFrame structure to help ASM code repack the 1 compiled frame into
// many interpreter (or C1) frames.  Takes in the current thread and a vframe;
// the vframe is pointing at the virtual Java frame needing to be repacked.
// It takes in the callee (which this frame is busy trying to call in its
// inlined code), and an array of IFrames.  It returns the updated IFrame
// buffer filled in for this frame.
void Deoptimization::build_repack_buffer( JavaThread *thread, frame fr, IFrame *buf, const DebugMap *dm, const DebugScope *ds, intptr_t *jexstk, objectRef *lckstk, bool is_deopt, bool is_c1, bool is_youngest) {
  assert( thread->_deopt_buffer->contains((char*)(buf+1)), "over-ran large deopt buffer?" );

int bci=ds->bci();
if(bci==InvocationEntryBci){
    // We deoptimized while hanging in prologue code for a synchronized
    // method.  We got the lock (after all, deopt happens after returning
    // from the blocking call).  We want to begin execution in the
    // interpreter at BCI 0, and after taking the lock.
    // Also it is possilble to enter the deopt code through the br_s on method
    // entry before the first byte code.
    bci = 0;
  }

  const methodOop moop = ds->method().as_methodOop();
  if( ds->caller() ) {          // Do I have a caller?  Am I mid-call?
    // Initialize the constant pool entry for caller-parameter size.  It
    // might be the case that we inlined and compiled a callee, and are busy
    // calling it in the compiled code, and get deoptimized with that callee
    // in-progress AND we've never executed it in the interpreter - which
    // would have filled in the constant pool cache before making the call.
    // Fill it in now.
    const methodOop caller = ds->caller()->method().as_methodOop();
    int index = Bytes::get_native_u2(caller->bcp_from(ds->caller()->bci())+1);
    ConstantPoolCacheEntry *cpe = caller->constants()->cache()->entry_at(index);
    // Since we are setting the constant pool entry here, and another thread
    // could be busy resolving here we have a race condition setting the
    // flags.  Use a CAS to only set the flags if they are currently 0.
    intx *flags_adr = (intx*)((intptr_t)cpe + in_bytes(ConstantPoolCacheEntry::flags_offset()));
    if( !*flags_adr ) {         // Flags currently 0?
      // Set the flags, because the interpreter-return-entry points need some
      // info from them.  Not all fields are set, because it's too complex to
      // do it here... and not needed.  The cpCacheEntry is left "unresolved"
      // such that the next real use of it from the interpreter will be forced
      // to do a proper resolve, which will fill in the missing fields.

      // Compute new flags needed by the interpreter-return-entry
      intx flags = 
        (moop->size_of_parameters() & 0xFF) | 
        (1 << ConstantPoolCacheEntry::hotSwapBit) |
        (moop->result_type() << ConstantPoolCacheEntry::tosBits);
      // CAS 'em in, but only if there is currently a 0 flags
      assert0( sizeof(jlong)==sizeof(intx) );
      Atomic::cmpxchg((jlong)flags, (jlong*)flags_adr, 0);
      // We don't care about the result, because the cache is monomorphic.
      // Either our CAS succeeded and jammed    the right parameter count, or
      // another thread succeeded and jammed in the right parameter count.
    } 
  }

  if (TraceDeoptimization) {
    BufferedLoggerMark m(NOTAG, Log::M_DEOPT, TraceDeoptimization, true);
    m.out("DEOPT REPACK c%d: ", is_c1 ? 1 : 2);
    moop->print_short_name(m.stream());
    m.out(" @ bci %d %s", bci, ds->caller() ? "called by...": "   (oldest frame)" );
  }

  // If there was a suitable C1 frame, use it.
  // Otherwise, use an interpreter frame.
  if( 1 ) {
    // Build an interpreter-style IFrame.  Naked oops abound.
    assert0( !objectRef(moop).is_stack() );
    buf->_mref = objectRef(moop);
    buf->_cpc = moop->constants()->cacheRef();

    // Compute monitor list length.  If we have coarsened a lock we will end
    // up unlocking it and the repack buffer will not need to see it.
    uint mons_len = ds->numlocks();
    // NOTE(review): mons_len is unsigned, so the >= 0 assert below is vacuous.
    if( ds->is_extra_lock() ) { mons_len--; assert0( mons_len >= 0 ); }
    assert0( mons_len < (256*sizeof(buf->_numlck)) );
    buf->_numlck = mons_len;
    
    // Set up the return pc for the next frame: the next frame is a younger
    // frame which will return to this older frame.  All middle frames return
    // back into the interpreter, just after a call with proper TOS state.
    // Youngest frames always start in vtos state because the uncommon-trap
    // blob sets them up that way.
    const address bcp = moop->bcp_from(bci);
    Bytecodes::Code c = Bytecodes::java_code(Bytecodes::cast(*bcp));
BasicType return_type=T_VOID;

    bool handle_popframe = is_youngest && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution();

    int bci_bump = 0;
    if( !is_youngest ) {        // Middle-frame?
      bool from_call = (c == Bytecodes::_invokevirtual ||
c==Bytecodes::_invokespecial||
c==Bytecodes::_invokestatic||
                        c == Bytecodes::_invokeinterface );
assert(from_call,"Middle frame is in the middle of a call");
      bci_bump = Bytecodes::length_at(bcp); // But need to know how much it will be bumped for the return address
      buf->_bci = bci;          // Save bci without bumping it; normal interpreter call returns bump the bci as needed
      buf[-1]._retadr = Interpreter::return_entry(vtos, bci_bump);

    } else if( thread->pending_exception() ) { 
      // Deopt-with-pending.  Throw up on return to interpreter, which is
      // handled by unpack_and_go.
buf->_bci=bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();

    } else if( !is_deopt ) {    // It is a C2-style uncommon-trap.
      // Do NOT increment the BCP!  We are re-executing the current bytecode.
buf->_bci=bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();
      
    } else {                    // It is a plain deopt
      // It is a deopt without exception.  See if we are C1 in mid-patch.
      // If so, we always need to re-execute the bytecode.
      bool is_C1_mid_patch = false;
      if( is_c1 ) {             // C1 codeblob?
address caller_pc=fr.pc();
if(NativeCall::is_call_before(caller_pc)){
          address target = nativeCall_at(caller_pc)->destination();
          is_C1_mid_patch = target == Runtime1::entry_for(Runtime1::load_klass_patching_id);
        }
      }
      if( is_C1_mid_patch ) {
        Untested("");
        // Do NOT increment the BCP!  We are re-executing the current bytecode.
      } else if( ds->bci() == InvocationEntryBci ) {
        // It is deopt while hanging on a method-entry lock.
        // Do not advance BCP, as we have not executed bci 0 yet.
        
      } else {                  // Else C2 or C1-not-mid-patch
        // It is a deopt.  Whether we re-execute the current bytecode or
        // assume it has completed depends on the bytecode.
        switch( c ) {
case Bytecodes::_lookupswitch:
case Bytecodes::_tableswitch:
case Bytecodes::_fast_binaryswitch:
        case Bytecodes::_fast_linearswitch:
          // recompute condtional expression folded into _if<cond>
        case Bytecodes::_lcmp      :
        case Bytecodes::_fcmpl     :
        case Bytecodes::_fcmpg     :
        case Bytecodes::_dcmpl     :
        case Bytecodes::_dcmpg     :
        case Bytecodes::_ifnull    :
        case Bytecodes::_ifnonnull :
        case Bytecodes::_goto      :
        case Bytecodes::_goto_w    :
        case Bytecodes::_ifeq      :
        case Bytecodes::_ifne      :
        case Bytecodes::_iflt      :
        case Bytecodes::_ifge      :
        case Bytecodes::_ifgt      :
        case Bytecodes::_ifle      :
        case Bytecodes::_if_icmpeq :
        case Bytecodes::_if_icmpne :
        case Bytecodes::_if_icmplt :
        case Bytecodes::_if_icmpge :
        case Bytecodes::_if_icmpgt :
        case Bytecodes::_if_icmple :
        case Bytecodes::_if_acmpeq :
        case Bytecodes::_if_acmpne :
          // special cases
case Bytecodes::_aastore:
          // We are re-executing the current bytecode.
          Untested("");
          break;
          // special cases
case Bytecodes::_putstatic:
case Bytecodes::_getstatic:
case Bytecodes::_getfield:
case Bytecodes::_putfield:
          // We are re-executing the current bytecode.
          break;
        case Bytecodes::_athrow    :
          break;                // Must be deopt-w-exception
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:{
methodHandle mh(thread,moop);
return_type=Bytecode_invoke_at(mh,bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
        }
case Bytecodes::_invokeinterface:{
methodHandle mh(thread,moop);
return_type=Bytecode_invoke_at(mh,bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 5; // Increment the BCP to post-call!!!  See below!
          break;
        }
        case Bytecodes::_ldc   : 
          Untested("");
return_type=constant_pool_type(moop,*(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 2; // Increment the BCP to post-call!!!  See below!
          break;
          
        case Bytecodes::_ldc_w : // fall through
        case Bytecodes::_ldc2_w: 
return_type=constant_pool_type(moop,Bytes::get_Java_u2(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
          
        default:
return_type=Bytecodes::result_type(c);
          if( !ds->should_reexecute()) bci_bump = Bytecodes::length_at(bcp); // Increment the BCP to post-call!!!  See below!
          break;
        }
        if (ds->should_reexecute()) return_type = T_VOID;
      }
      // Save (possibly advanced) bci
      buf->_bci = bci+bci_bump;
      buf[-1]._retadr = Interpreter::unpack_and_go(); // Interpreter::return_entry(vtos, bci_bump);
    }

    // ---
    // Now all the Java locals.
    // First set the start of locals for the interpreter frame we are building.
    buf->_loc = (intptr_t)jexstk;

    uint loc_len = moop->max_locals();
for(uint i=0;i<loc_len;i++){
      *jexstk++ = dm->get_value(ds->get_local(i),fr);
    }

    // Now that the locals have been unpacked if we have any deferred local writes
    // added by jvmti then we can free up that structure as the data is now in the
    // buffer
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    if( list ) {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      Unimplemented();
    }

    // ---
    // Now all the Java Expressions
    uint expr_len = ds->numstk();
for(uint i=0;i<expr_len;i++)
      *jexstk++ = dm->get_value(ds->get_expr(i),fr);

    // If returning from a deoptimized call, we will have return values in
    // registers that need to end up on the Java execution stack.  They are
    // not recorded in the debug info, since they did not exist at the time
    // the call began.
    if( is_youngest && is_deopt ) { 
      if( type2size[return_type] > 0 ) {
        if( type2size[return_type]==2 ) {
          *jexstk++ = (intptr_t)frame::double_slot_primitive_type_empty_slot_id << 32;
        }
        *jexstk++ = pd_fetch_return_values( thread, return_type );
        // Need to adjust the final jexstk_top for the youngest frame
        // returning values.  These returned values are not accounted for in
        // the standard debug info.
        thread->_jexstk_top = jexstk;
      }
    }

    // JVMTI PopFrame support
    // Add the number of words of popframe preserved args to expr_len
    int popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
    int popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
    if (handle_popframe) {
      Unimplemented();
      expr_len += popframe_preserved_args_size_in_words;
      // An interpreted frame was popped but it returns to a deoptimized
      // frame. The incoming arguments to the interpreted activation
      // were preserved in thread-local storage by the
      // remove_activation_preserving_args_entry in the interpreter; now
      // we put them back into the just-unpacked interpreter frame.
      // Note that this assumes that the locals arena grows toward lower
      // addresses.
    }

    // Set the JEX stk top
    buf->_stk = (intptr_t)jexstk;

    // --- 
    // Now move locked objects to the interpreters lock-stack.
    // No need to inflate anything, as we're moving standard oops.
    int numlcks = ds->numlocks();
    if( ds->is_extra_lock() ) { // coarsened a lock
      Untested("");
      // The last lock is "coarsened" - kept locked when it should have been
      // unlocked and relocked.  With no deopt, keeping it locked saves the 2
      // sets of back-to-back CAS's and fences.  However, here we need to
      // unlock it to match the proper Java state.
      ObjectSynchronizer::unlock(ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(numlcks-1),fr)).as_oop());
      numlcks--;
    }
for(int i=0;i<numlcks;i++){
      *lckstk++ = ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(i),fr));
    }

  } else {                    // Make a C1 frame
    
    Unimplemented();
    
  }
}
// Ejemplo n.º 16 (snippet separator — scraper artifact)
/*
    readMethods reads a set of method descriptions for one class.
    Input format: "Method <ClassName>" optionally followed by a protocol
    string, then method bodies separated by '|' lines and terminated by a
    ']' line.  Parsed methods are installed in the class's method table.
*/
static void readMethods(FILE* fd, bool printit)
{   
    ObjectHandle classObj, methTable, theMethod, selector;
# define LINEBUFFERSIZE 16384
    char *cp, *eoftest, lineBuffer[LINEBUFFERSIZE];
    ObjectHandle protocol;
    Parser pp;

    lineBuffer[0] = '\0';
    protocol = nilobj;
    if (ll.nextToken() != nameconst)
        sysError("missing name","following Method keyword");
    classObj = findClass(ll.strToken().c_str());
    pp.setInstanceVariables(classObj);
    // cp is only needed (and only set) when printing method names.
    if (printit)
        cp = objectRef(classObj->basicAt(nameInClass)).charPtr();

    /* now find or create a method table */
    methTable = classObj->basicAt(methodsInClass);
    if (methTable == nilobj) 
    {  /* must make */
        methTable = newDictionary(MethodTableSize);
        classObj->basicAtPut(methodsInClass, methTable);
    }

    /* an optional string constant names the protocol */
    if(ll.nextToken() == strconst) 
    {
        protocol = newStString(ll.strToken().c_str());
    }

    /* now go read the methods */
    do {
        if (lineBuffer[0] == '|')   /* get any left over text */
            strcpy(textBuffer,&lineBuffer[1]);
        else
            textBuffer[0] = '\0';
        /* accumulate lines until a '|' (next method) or ']' (end) marker */
        while((eoftest = fgets(lineBuffer, LINEBUFFERSIZE, fd)) != NULL) {
            if ((lineBuffer[0] == '|') || (lineBuffer[0] == ']'))
                break;
            strcat(textBuffer, lineBuffer);
        }
        if (eoftest == NULL) {
            sysError("unexpected end of file","while reading method");
            break;
        }

        /* now we have a method */
        theMethod = newMethod();
        pp.setLexer(Lexer(textBuffer));
        if (pp.parseMessageHandler(theMethod, savetext)) {
            selector = theMethod->basicAt(messageInMethod);
            theMethod->basicAtPut(methodClassInMethod, classObj);
            theMethod->basicAtPut(protocolInMethod, protocol);
            if (printit)
                dspMethod(cp, selector->charPtr());
            nameTableInsert(methTable, selector.hash(), selector, theMethod);
        }
        else {
            /* get rid of unwanted method */
            givepause();
        }

    } while (lineBuffer[0] != ']');

}
// Ejemplo n.º 17 (snippet separator — scraper artifact)
// Emit an XML description of this frame: its style (interpreter / native /
// C1 / C2) followed by each Java local's name, type, and value.
void vframe::print_xml_on(JavaThread *thread, xmlBuffer *xb) const {
  ResourceMark rm;
  frame fr = get_frame();

  // Classify the frame by the code blob that owns its pc.
  const char *frame_style = "UNKNOWN";
CodeBlob*cb=CodeCache::find_blob(fr.pc());
  if( fr.is_interpreted_frame() ) 
    frame_style = "INTERPRETER";
  else if( cb && cb->is_native_method() ) 
    frame_style = "NATIVE";
  else if( cb && cb->is_c1_method() )
    frame_style = "COMPILER1";
  else if( cb && cb->is_c2_method() )
    frame_style = "COMPILER2";

  // Frame header
  xb->name_value_item("frame_style",frame_style);

  // Java Locals
  if (!fr.is_java_frame()) 
    return; // no more printing to be done
  
  const methodOop moop = method();
  if (moop->is_native()) 
    return; // no more printing to be done
  
  const int max_locals = moop->max_locals();
  
if(fr.is_compiled_frame()){
    // Compiled frame: locals live in registers/stack slots described by
    // the debug scope; consult the oop map to tell oops from primitives.
    for( int i=0; i<max_locals; i++ ) {
      xmlElement je(xb, "java_element");
      { xmlElement n(xb, "name", xmlElement::delayed_LF);
        bool out_of_scope = true;
        const char* name = moop->localvariable_name(i, bci(), out_of_scope);
        // Out-of-scope names are printed wrapped in braces.
        if( name ) xb->print("%s%s%s", out_of_scope ? "{": "", name, out_of_scope ? "}": "");
        else       xb->print("JL%-2d", i);
      }
      DebugScopeValue::Name vreg = _ds->get_local(i);
      if( !DebugScopeValue::is_valid(vreg) ) {
        xb->name_value_item("type","invalid");
        xb->name_ptr_item("value",0);
      } else {
        intptr_t *data_ptr = fr.reg_to_addr(DebugScopeValue::to_vreg(vreg));
        const int rel_pc = cb->rel_pc(fr.pc());
        const bool isoop = cb->oop_maps()->is_oop( rel_pc, VReg::as_VOopReg(DebugScopeValue::to_vreg(vreg)) );
        if( isoop ) {
          xb->name_value_item("type", "oop");
          oop o = ((objectRef*)data_ptr)->as_oop();
          o->print_xml_on(xb, true);
        } else {
          xb->name_value_item("type", "normal");
          xb->name_ptr_item("value", (void*)*data_ptr);
        }
      }
    }
}else if(fr.is_interpreted_frame()){
    // Interpreted frame: locals are tagged slots in the frame itself.
    for( int i = 0; i < max_locals; i++ ) {
      frame::InterpreterStackSlotType tag = fr.tag_at_address(fr.interpreter_frame_local_addr(i));
      if (tag == frame::double_slot_primitive_type)
i++;//skip the tag slot
      xmlElement je(xb, "java_element");
      { xmlElement n(xb, "name", xmlElement::delayed_LF);
        bool out_of_scope = true;
        const char* name = moop->localvariable_name(i, bci(), out_of_scope);
        if (name != NULL) {
          xb->print("%s%s%s", out_of_scope ? "{": "", name, out_of_scope ? "}": "");
        } else {
          xb->print("JL%-2d", i);
        }
      }
      intptr_t local_value = fr.interpreter_frame_local_at(i);
      switch (tag) {
      case frame::single_slot_primitive_type: {
        xb->name_value_item("type", "single_slot_primitive_type");
        xb->name_ptr_item("value", (void*)local_value);
        break;
      }
      case frame::double_slot_primitive_type: {
        xb->name_value_item("type", "double_slot_primitive_type");
        xb->name_ptr_item("value", (void*)local_value);
        break;
      }
      case frame::single_slot_ref_type: {
        xb->name_value_item("type", "oop");
        oop o = objectRef((uint64_t)local_value).as_oop();
        o->print_xml_on(xb, true);
        break;
      }
      default: ShouldNotReachHere(); break;
      }
    }
  }
}