GrowableArray<MonitorValue*>* ScopeDesc::decode_monitor_values(int decode_offset) {
  if (decode_offset == DebugInformationRecorder::serialized_null) return NULL;
  DebugInfoReadStream* stream = stream_at(decode_offset);
  int length = stream->read_int();
  GrowableArray<MonitorValue*>* result = new GrowableArray<MonitorValue*> (length);
  for (int index = 0; index < length; index++) {
    result->push(new MonitorValue(stream));
  }
  return result;
}
void LoopFinder::gather_loop_blocks(LoopList* loops) {
  int lng = loops->length();
  BitMap blocks_in_loop(max_blocks());
  for (int i = 0; i < lng; i++) {
    // for each loop do the following
    blocks_in_loop.clear();
    Loop* loop = loops->at(i);
    BlockList* ends = loop->ends();
    if (!loop->is_end(loop->start())) {
      GrowableArray<BlockBegin*>* stack = new GrowableArray<BlockBegin*>();
      blocks_in_loop.at_put(loop->start()->block_id(), true);
      // insert all the ends into the list
      for (int i = 0; i < ends->length(); i++) {
        blocks_in_loop.at_put(ends->at(i)->block_id(), true);
        stack->push(ends->at(i));
      }
      while (!stack->is_empty()) {
        BlockBegin* bb = stack->pop();
        BlockLoopInfo* bli = get_block_info(bb);
        // push all predecessors that are not yet in loop
        int npreds = bli->nof_preds();
        for (int m = 0; m < npreds; m++) {
          BlockBegin* pred = bli->pred_no(m);
          if (!blocks_in_loop.at(pred->block_id())) {
            blocks_in_loop.at_put(pred->block_id(), true);
            loop->append_node(pred);
            stack->push(pred);
          }
        }
      }
      loop->append_node(loop->start());
    }
    // insert all the ends into the loop
    for (int i = 0; i < ends->length(); i++) {
      loop->append_node(ends->at(i));
    }
  }
}
void compiledVFrame::update_local(BasicType type, int index, jvalue value) {
#ifdef ASSERT
  assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization");
#endif /* ASSERT */
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = thread()->deferred_locals();
  if (deferred != NULL) {
    // See if this vframe has already had locals with deferred writes
    int f;
    for (f = 0; f < deferred->length(); f++) {
      if (deferred->at(f)->matches(this)) {
        // Matching vframe; now see if the local already had a deferred write
        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
        int l;
        for (l = 0; l < locals->length(); l++) {
          if (locals->at(l)->index() == index) {
            locals->at(l)->set_value(value);
            return;
          }
        }
        // No matching local already present. Push a new value onto the deferred collection
        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
        return;
      }
    }
    // No matching vframe; must push a new vframe
  } else {
    // No deferred updates pending for this thread.
    // Allocate in C heap
    deferred = new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
    thread()->set_deferred_locals(deferred);
  }
  deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr().id()));
  assert(deferred->top()->id() == fr().id(), "Huh? Must match");
  deferred->top()->set_local_at(index, type, value);
}
void vframe::update_local(JavaThread* thread, BasicType type, int index, jvalue value) {
  frame fr = this->get_frame();
  // AZUL - We use extra slots to accommodate tags for longs and doubles
  // in the compiler as well.
  if (type == T_LONG || type == T_DOUBLE) {
    index = index + 1;
  }
#ifdef ASSERT
  Unimplemented();
  //CodeBlob* b = CodeCache::find_blob(fr.pc());
  //assert(b->is_patched_for_deopt(), "frame must be scheduled for deoptimization");
#endif /* ASSERT */
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = thread->deferred_locals();
  if (deferred != NULL) {
    // See if this vframe has already had locals with deferred writes
    int f;
    for (f = 0; f < deferred->length(); f++) {
      if (deferred->at(f)->matches(this)) {
        // Matching vframe; now see if the local already had a deferred write
        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
        int l;
        for (l = 0; l < locals->length(); l++) {
          if (locals->at(l)->index() == index) {
            locals->at(l)->set_value(value);
            return;
          }
        }
        // No matching local already present. Push a new value onto the deferred collection
        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
        return;
      }
    }
    // No matching vframe; must push a new vframe
  } else {
    // No deferred updates pending for this thread.
    // Allocate in C heap
    deferred = new(ResourceObj::C_HEAP) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
    thread->set_deferred_locals(deferred);
  }
  // Because the frame is patched for deopt and we will push in
  // registers in uncommon_trap, we will use the sender's sp to compare
  deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr.pd_sender().sp()));
  assert(deferred->top()->id() == fr.pd_sender().sp(), "Huh? Must match");
  deferred->top()->set_local_at(index, type, value);
}
GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
  assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?");
  GrowableArray<ClassLoaderData*>* array = new GrowableArray<ClassLoaderData*>();
  // The CLDs in [_head, _saved_head] were all added during last call to remember_new_clds(true);
  ClassLoaderData* curr = _head;
  while (curr != _saved_head) {
    if (!curr->claimed()) {
      array->push(curr);
      if (TraceClassLoaderData) {
        tty->print("[ClassLoaderData] found new CLD: ");
        curr->print_value_on(tty);
        tty->cr();
      }
    }
    curr = curr->_next;
  }
  return array;
}
void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, GrowableArray<ciBlock *> &successors) {
  blk->set_processed();
  ciBytecodeStream s(method());
  int limit_bci = blk->limit_bci();
  bool fall_through = false;
  ArgumentMap allocated_obj;
  allocated_obj.add_allocated();
  ArgumentMap unknown_obj;
  unknown_obj.add_unknown();
  ArgumentMap empty_map;

  s.reset_to_bci(blk->start_bci());
  while (s.next() != ciBytecodeStream::EOBC() && s.cur_bci() < limit_bci) {
    fall_through = true;
    switch (s.cur_bc()) {
      case Bytecodes::_nop:
        break;
      case Bytecodes::_aconst_null:
        state.apush(empty_map);
        break;
      case Bytecodes::_iconst_m1: case Bytecodes::_iconst_0: case Bytecodes::_iconst_1:
      case Bytecodes::_iconst_2:  case Bytecodes::_iconst_3: case Bytecodes::_iconst_4:
      case Bytecodes::_iconst_5:  case Bytecodes::_fconst_0: case Bytecodes::_fconst_1:
      case Bytecodes::_fconst_2:  case Bytecodes::_bipush:   case Bytecodes::_sipush:
        state.spush();
        break;
      case Bytecodes::_lconst_0: case Bytecodes::_lconst_1:
      case Bytecodes::_dconst_0: case Bytecodes::_dconst_1:
        state.lpush();
        break;
      case Bytecodes::_ldc: case Bytecodes::_ldc_w: case Bytecodes::_ldc2_w:
        if (type2size[s.get_constant().basic_type()] == 1) {
          state.spush();
        } else {
          state.lpush();
        }
        break;
      case Bytecodes::_aload:
        state.apush(state._vars[s.get_index()]);
        break;
      case Bytecodes::_iload:   case Bytecodes::_fload:
      case Bytecodes::_iload_0: case Bytecodes::_iload_1: case Bytecodes::_iload_2: case Bytecodes::_iload_3:
      case Bytecodes::_fload_0: case Bytecodes::_fload_1: case Bytecodes::_fload_2: case Bytecodes::_fload_3:
        state.spush();
        break;
      case Bytecodes::_lload:   case Bytecodes::_dload:
      case Bytecodes::_lload_0: case Bytecodes::_lload_1: case Bytecodes::_lload_2: case Bytecodes::_lload_3:
      case Bytecodes::_dload_0: case Bytecodes::_dload_1: case Bytecodes::_dload_2: case Bytecodes::_dload_3:
        state.lpush();
        break;
      case Bytecodes::_aload_0:
        state.apush(state._vars[0]);
        break;
      case Bytecodes::_aload_1:
        state.apush(state._vars[1]);
        break;
      case Bytecodes::_aload_2:
        state.apush(state._vars[2]);
        break;
      case Bytecodes::_aload_3:
        state.apush(state._vars[3]);
        break;
      case Bytecodes::_iaload: case Bytecodes::_faload: case Bytecodes::_baload:
      case Bytecodes::_caload: case Bytecodes::_saload:
        state.spop();
        set_method_escape(state.apop());
        state.spush();
        break;
      case Bytecodes::_laload: case Bytecodes::_daload:
        state.spop();
        set_method_escape(state.apop());
        state.lpush();
        break;
      case Bytecodes::_aaload:
      {
        state.spop();
        ArgumentMap array = state.apop();
        set_method_escape(array);
        state.apush(unknown_obj);
        set_dirty(array);
      }
        break;
      case Bytecodes::_istore:   case Bytecodes::_fstore:
      case Bytecodes::_istore_0: case Bytecodes::_istore_1: case Bytecodes::_istore_2: case Bytecodes::_istore_3:
      case Bytecodes::_fstore_0: case Bytecodes::_fstore_1: case Bytecodes::_fstore_2: case Bytecodes::_fstore_3:
        state.spop();
        break;
      case Bytecodes::_lstore:   case Bytecodes::_dstore:
      case Bytecodes::_lstore_0: case Bytecodes::_lstore_1: case Bytecodes::_lstore_2: case Bytecodes::_lstore_3:
      case Bytecodes::_dstore_0: case Bytecodes::_dstore_1: case Bytecodes::_dstore_2: case Bytecodes::_dstore_3:
        state.lpop();
        break;
      case Bytecodes::_astore:
        state._vars[s.get_index()] = state.apop();
        break;
      case Bytecodes::_astore_0:
        state._vars[0] = state.apop();
        break;
      case Bytecodes::_astore_1:
        state._vars[1] = state.apop();
        break;
      case Bytecodes::_astore_2:
        state._vars[2] = state.apop();
        break;
      case Bytecodes::_astore_3:
        state._vars[3] = state.apop();
        break;
      case Bytecodes::_iastore: case Bytecodes::_fastore: case Bytecodes::_bastore:
      case Bytecodes::_castore: case Bytecodes::_sastore:
      {
        state.spop();
        state.spop();
        ArgumentMap arr = state.apop();
        set_method_escape(arr);
        break;
      }
      case Bytecodes::_lastore: case Bytecodes::_dastore:
      {
        state.lpop();
        state.spop();
        ArgumentMap arr = state.apop();
        set_method_escape(arr);
        break;
      }
      case Bytecodes::_aastore:
      {
        set_global_escape(state.apop());
        state.spop();
        ArgumentMap arr = state.apop();
        break;
      }
      case Bytecodes::_pop:
        state.raw_pop();
        break;
      case Bytecodes::_pop2:
        state.raw_pop();
        state.raw_pop();
        break;
      case Bytecodes::_dup:
      {
        ArgumentMap w1 = state.raw_pop();
        state.raw_push(w1);
        state.raw_push(w1);
      }
        break;
      case Bytecodes::_dup_x1:
      {
        ArgumentMap w1 = state.raw_pop();
        ArgumentMap w2 = state.raw_pop();
        state.raw_push(w1);
        state.raw_push(w2);
        state.raw_push(w1);
      }
        break;
      case Bytecodes::_dup_x2:
      {
        ArgumentMap w1 = state.raw_pop();
        ArgumentMap w2 = state.raw_pop();
        ArgumentMap w3 = state.raw_pop();
        state.raw_push(w1);
        state.raw_push(w3);
        state.raw_push(w2);
        state.raw_push(w1);
      }
        break;
      case Bytecodes::_dup2:
      {
        ArgumentMap w1 = state.raw_pop();
        ArgumentMap w2 = state.raw_pop();
        state.raw_push(w2);
        state.raw_push(w1);
        state.raw_push(w2);
        state.raw_push(w1);
      }
        break;
      case Bytecodes::_dup2_x1:
      {
        ArgumentMap w1 = state.raw_pop();
        ArgumentMap w2 = state.raw_pop();
        ArgumentMap w3 = state.raw_pop();
        state.raw_push(w2);
        state.raw_push(w1);
        state.raw_push(w3);
        state.raw_push(w2);
        state.raw_push(w1);
      }
        break;
      case Bytecodes::_dup2_x2:
      {
        ArgumentMap w1 = state.raw_pop();
        ArgumentMap w2 = state.raw_pop();
        ArgumentMap w3 = state.raw_pop();
        ArgumentMap w4 = state.raw_pop();
        state.raw_push(w2);
        state.raw_push(w1);
        state.raw_push(w4);
        state.raw_push(w3);
        state.raw_push(w2);
        state.raw_push(w1);
      }
        break;
      case Bytecodes::_swap:
      {
        ArgumentMap w1 = state.raw_pop();
        ArgumentMap w2 = state.raw_pop();
        state.raw_push(w1);
        state.raw_push(w2);
      }
        break;
      case Bytecodes::_iadd: case Bytecodes::_fadd: case Bytecodes::_isub: case Bytecodes::_fsub:
      case Bytecodes::_imul: case Bytecodes::_fmul: case Bytecodes::_idiv: case Bytecodes::_fdiv:
      case Bytecodes::_irem: case Bytecodes::_frem: case Bytecodes::_iand: case Bytecodes::_ior:
      case Bytecodes::_ixor:
        state.spop();
        state.spop();
        state.spush();
        break;
      case Bytecodes::_ladd: case Bytecodes::_dadd: case Bytecodes::_lsub: case Bytecodes::_dsub:
      case Bytecodes::_lmul: case Bytecodes::_dmul: case Bytecodes::_ldiv: case Bytecodes::_ddiv:
      case Bytecodes::_lrem: case Bytecodes::_drem: case Bytecodes::_land: case Bytecodes::_lor:
      case Bytecodes::_lxor:
        state.lpop();
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_ishl: case Bytecodes::_ishr: case Bytecodes::_iushr:
        state.spop();
        state.spop();
        state.spush();
        break;
      case Bytecodes::_lshl: case Bytecodes::_lshr: case Bytecodes::_lushr:
        state.spop();
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_ineg: case Bytecodes::_fneg:
        state.spop();
        state.spush();
        break;
      case Bytecodes::_lneg: case Bytecodes::_dneg:
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_iinc:
        break;
      case Bytecodes::_i2l: case Bytecodes::_i2d: case Bytecodes::_f2l: case Bytecodes::_f2d:
        state.spop();
        state.lpush();
        break;
      case Bytecodes::_i2f: case Bytecodes::_f2i:
        state.spop();
        state.spush();
        break;
      case Bytecodes::_l2i: case Bytecodes::_l2f: case Bytecodes::_d2i: case Bytecodes::_d2f:
        state.lpop();
        state.spush();
        break;
      case Bytecodes::_l2d: case Bytecodes::_d2l:
        state.lpop();
        state.lpush();
        break;
      case Bytecodes::_i2b: case Bytecodes::_i2c: case Bytecodes::_i2s:
        state.spop();
        state.spush();
        break;
      case Bytecodes::_lcmp: case Bytecodes::_dcmpl: case Bytecodes::_dcmpg:
        state.lpop();
        state.lpop();
        state.spush();
        break;
      case Bytecodes::_fcmpl: case Bytecodes::_fcmpg:
        state.spop();
        state.spop();
        state.spush();
        break;
      case Bytecodes::_ifeq: case Bytecodes::_ifne: case Bytecodes::_iflt:
      case Bytecodes::_ifge: case Bytecodes::_ifgt: case Bytecodes::_ifle:
      {
        state.spop();
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_icmpne: case Bytecodes::_if_icmplt:
      case Bytecodes::_if_icmpge: case Bytecodes::_if_icmpgt: case Bytecodes::_if_icmple:
      {
        state.spop();
        state.spop();
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_if_acmpeq: case Bytecodes::_if_acmpne:
      {
        set_method_escape(state.apop());
        set_method_escape(state.apop());
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_goto:
      {
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_jsr:
      {
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        state.apush(empty_map);
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_ret:
        // we don't track the destination of a "ret" instruction
        assert(s.next_bci() == limit_bci, "branch must end block");
        fall_through = false;
        break;
      case Bytecodes::_return:
        assert(s.next_bci() == limit_bci, "return must end block");
        fall_through = false;
        break;
      case Bytecodes::_tableswitch:
      {
        state.spop();
        Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp());
        int len = switch_->length();
        int dest_bci;
        for (int i = 0; i < len; i++) {
          dest_bci = s.cur_bci() + switch_->dest_offset_at(i);
          assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
          successors.push(_methodBlocks->block_containing(dest_bci));
        }
        dest_bci = s.cur_bci() + switch_->default_offset();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        assert(s.next_bci() == limit_bci, "branch must end block");
        fall_through = false;
        break;
      }
      case Bytecodes::_lookupswitch:
      {
        state.spop();
        Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
        int len = switch_->number_of_pairs();
        int dest_bci;
        for (int i = 0; i < len; i++) {
          dest_bci = s.cur_bci() + switch_->pair_at(i)->offset();
          assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
          successors.push(_methodBlocks->block_containing(dest_bci));
        }
        dest_bci = s.cur_bci() + switch_->default_offset();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_ireturn: case Bytecodes::_freturn:
        state.spop();
        fall_through = false;
        break;
      case Bytecodes::_lreturn: case Bytecodes::_dreturn:
        state.lpop();
        fall_through = false;
        break;
      case Bytecodes::_areturn:
        set_returned(state.apop());
        fall_through = false;
        break;
      case Bytecodes::_getstatic: case Bytecodes::_getfield:
      {
        bool will_link;
        ciField* field = s.get_field(will_link);
        BasicType field_type = field->type()->basic_type();
        if (s.cur_bc() != Bytecodes::_getstatic) {
          set_method_escape(state.apop());
        }
        if (field_type == T_OBJECT || field_type == T_ARRAY) {
          state.apush(unknown_obj);
        } else if (type2size[field_type] == 1) {
          state.spush();
        } else {
          state.lpush();
        }
      }
        break;
      case Bytecodes::_putstatic: case Bytecodes::_putfield:
      {
        bool will_link;
        ciField* field = s.get_field(will_link);
        BasicType field_type = field->type()->basic_type();
        if (field_type == T_OBJECT || field_type == T_ARRAY) {
          set_global_escape(state.apop());
        } else if (type2size[field_type] == 1) {
          state.spop();
        } else {
          state.lpop();
        }
        if (s.cur_bc() != Bytecodes::_putstatic) {
          ArgumentMap p = state.apop();
          set_method_escape(p);
        }
      }
        break;
      case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial:
      case Bytecodes::_invokestatic:  case Bytecodes::_invokeinterface:
      {
        bool will_link;
        ciMethod* target = s.get_method(will_link);
        ciKlass* holder = s.get_declared_method_holder();
        invoke(state, s.cur_bc(), target, holder);
        ciType* return_type = target->return_type();
        if (!return_type->is_primitive_type()) {
          state.apush(unknown_obj);
        } else if (return_type->is_one_word()) {
          state.spush();
        } else if (return_type->is_two_word()) {
          state.lpush();
        }
      }
        break;
      case Bytecodes::_xxxunusedxxx:
        ShouldNotReachHere();
        break;
      case Bytecodes::_new:
        state.apush(allocated_obj);
        break;
      case Bytecodes::_newarray: case Bytecodes::_anewarray:
        state.spop();
        state.apush(allocated_obj);
        break;
      case Bytecodes::_multianewarray:
      {
        int i = s.cur_bcp()[3];
        while (i-- > 0) state.spop();
        state.apush(allocated_obj);
      }
        break;
      case Bytecodes::_arraylength:
        set_method_escape(state.apop());
        state.spush();
        break;
      case Bytecodes::_athrow:
        set_global_escape(state.apop());
        fall_through = false;
        break;
      case Bytecodes::_checkcast:
      {
        ArgumentMap obj = state.apop();
        set_method_escape(obj);
        state.apush(obj);
      }
        break;
      case Bytecodes::_instanceof:
        set_method_escape(state.apop());
        state.spush();
        break;
      case Bytecodes::_monitorenter: case Bytecodes::_monitorexit:
        state.apop();
        break;
      case Bytecodes::_wide:
        ShouldNotReachHere();
        break;
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull:
      {
        set_method_escape(state.apop());
        int dest_bci = s.get_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        break;
      }
      case Bytecodes::_goto_w:
      {
        int dest_bci = s.get_far_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_jsr_w:
      {
        int dest_bci = s.get_far_dest();
        assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
        assert(s.next_bci() == limit_bci, "branch must end block");
        state.apush(empty_map);
        successors.push(_methodBlocks->block_containing(dest_bci));
        fall_through = false;
        break;
      }
      case Bytecodes::_breakpoint:
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  if (fall_through) {
    int fall_through_bci = s.cur_bci();
    if (fall_through_bci < _method->code_size()) {
      assert(_methodBlocks->is_block_start(fall_through_bci), "must fall through to block start.");
      successors.push(_methodBlocks->block_containing(fall_through_bci));
    }
  }
}
void Klass::initialize_supers(Klass* k, TRAPS) {
  if (FastSuperclassLimit == 0) {
    // None of the other machinery matters.
    set_super(k);
    return;
  }
  if (k == NULL) {
    set_super(NULL);
    _primary_supers[0] = this;
    assert(super_depth() == 0, "Object must already be initialized properly");
  } else if (k != super() || k == SystemDictionary::Object_klass()) {
    assert(super() == NULL || super() == SystemDictionary::Object_klass(),
           "initialize this only once to a non-trivial value");
    set_super(k);
    Klass* sup = k;
    int sup_depth = sup->super_depth();
    juint my_depth = MIN2(sup_depth + 1, (int)primary_super_limit());
    if (!can_be_primary_super_slow())
      my_depth = primary_super_limit();
    for (juint i = 0; i < my_depth; i++) {
      _primary_supers[i] = sup->_primary_supers[i];
    }
    Klass* *super_check_cell;
    if (my_depth < primary_super_limit()) {
      _primary_supers[my_depth] = this;
      super_check_cell = &_primary_supers[my_depth];
    } else {
      // Overflow of the primary_supers array forces me to be secondary.
      super_check_cell = &_secondary_super_cache;
    }
    set_super_check_offset((address)super_check_cell - (address) this);

#ifdef ASSERT
    {
      juint j = super_depth();
      assert(j == my_depth, "computed accessor gets right answer");
      Klass* t = this;
      while (!t->can_be_primary_super()) {
        t = t->super();
        j = t->super_depth();
      }
      for (juint j1 = j+1; j1 < primary_super_limit(); j1++) {
        assert(primary_super_of_depth(j1) == NULL, "super list padding");
      }
      while (t != NULL) {
        assert(primary_super_of_depth(j) == t, "super list initialization");
        t = t->super();
        --j;
      }
      assert(j == (juint)-1, "correct depth count");
    }
#endif
  }

  if (secondary_supers() == NULL) {
    KlassHandle this_kh (THREAD, this);

    // Now compute the list of secondary supertypes.
    // Secondaries can occasionally be on the super chain,
    // if the inline "_primary_supers" array overflows.
    int extras = 0;
    Klass* p;
    for (p = super(); !(p == NULL || p->can_be_primary_super()); p = p->super()) {
      ++extras;
    }

    ResourceMark rm(THREAD);  // need to reclaim GrowableArrays allocated below

    // Compute the "real" non-extra secondaries.
    GrowableArray<Klass*>* secondaries = compute_secondary_supers(extras);
    if (secondaries == NULL) {
      // secondary_supers set by compute_secondary_supers
      return;
    }

    GrowableArray<Klass*>* primaries = new GrowableArray<Klass*>(extras);

    for (p = this_kh->super(); !(p == NULL || p->can_be_primary_super()); p = p->super()) {
      int i;  // Scan for overflow primaries being duplicates of 2nd'arys

      // This happens frequently for very deeply nested arrays: the
      // primary superclass chain overflows into the secondary. The
      // secondary list contains the element_klass's secondaries with
      // an extra array dimension added. If the element_klass's
      // secondary list already contains some primary overflows, they
      // (with the extra level of array-ness) will collide with the
      // normal primary superclass overflows.
      for( i = 0; i < secondaries->length(); i++ ) {
        if( secondaries->at(i) == p )
          break;
      }
      if( i < secondaries->length() )
        continue;  // It's a dup, don't put it in
      primaries->push(p);
    }
    // Combine the two arrays into a metadata object to pack the array.
    // The primaries are added in the reverse order, then the secondaries.
    int new_length = primaries->length() + secondaries->length();
    Array<Klass*>* s2 = MetadataFactory::new_array<Klass*>(
                                       class_loader_data(), new_length, CHECK);
    int fill_p = primaries->length();
    for (int j = 0; j < fill_p; j++) {
      s2->at_put(j, primaries->pop());  // add primaries in reverse order.
    }
    for( int j = 0; j < secondaries->length(); j++ ) {
      s2->at_put(j+fill_p, secondaries->at(j));  // add secondaries on the end.
    }

#ifdef ASSERT
    // We must not copy any NULL placeholders left over from bootstrap.
    for (int j = 0; j < s2->length(); j++) {
      assert(s2->at(j) != NULL, "correct bootstrapping order");
    }
#endif

    this_kh->set_secondary_supers(s2);
  }
}
bool scan_key(char* line, LookupKey* key) {
  int len = strlen(line);
  if (len > 1 && line[len-1] == '\n') line[len-1] = '\0';
  bool is_super;
  bool is_block;
  char* sub = find_type(line, &is_super, &is_block);
  if (sub == NULL) return false;
  *sub = '\0';
  char* class_name = line;
  char* method_id = sub + 2;
  bool class_side = false;
  char* class_start = strstr(class_name, " class");
  if (class_start != NULL) {
    *class_start = '\0';
    class_side = true;
  }
  klassOop rec = klassOop(Universe::find_global(class_name, true));
  if (rec == NULL || !rec->is_klass()) return false;
  if (class_side) rec = rec->klass();
  GrowableArray<int>* bcis = new GrowableArray<int>(10);
  char* bcis_string = strstr(method_id, " ");
  if (bcis_string) {
    *bcis_string++ = '\0';
    while (*bcis_string != '\0') {
      int index;
      int bci;
      if (sscanf(bcis_string, "%d%n", &bci, &index) != 1) return false;
      bcis->push(bci);
      bcis_string += index;
      if (*bcis_string == ' ') bcis_string++;
    }
  }
  symbolOop selector = oopFactory::new_symbol(method_id);
  if (is_block) {
    methodOop met = rec->klass_part()->lookup(selector);
    if (met == NULL) return false;
    for (int index = 0; index < bcis->length(); index++) {
      int bci = bcis->at(index);
      met = met->block_method_at(bci);
      if (met == NULL) return false;
    }
    key->initialize(rec, met);
  } else {
    key->initialize(rec, selector);
  }
  return true;
}
// Returns whether the key was successfully scanned
bool scan_key(RScope* sender, char* line, klassOop* receiver_klass, methodOop* method) {
  bool is_super;
  bool is_block;
  char* sub = find_type(line, &is_super, &is_block);
  if (sub == NULL) return false;
  *sub = '\0';
  char* class_name = line;
  char* method_id = sub + 2;
  bool class_side = false;
  char* class_start = strstr(class_name, " class");
  if (class_start != NULL) {
    *class_start = '\0';
    class_side = true;
  }
  klassOop rec = klassOop(Universe::find_global(class_name, true));
  if (rec == NULL || !rec->is_klass()) return false;
  if (class_side) rec = rec->klass();
  *receiver_klass = rec;
  GrowableArray<int>* bcis = new GrowableArray<int>(10);
  char* bcis_string = strstr(method_id, " ");
  if (bcis_string) {
    *bcis_string++ = '\0';
    while (*bcis_string != '\0') {
      int index;
      int bci;
      if (sscanf(bcis_string, "%d%n", &bci, &index) != 1) return false;
      bcis->push(bci);
      bcis_string += index;
      if (*bcis_string == ' ') bcis_string++;
    }
  }
  symbolOop selector = oopFactory::new_symbol(method_id);
  if (is_super) {
    assert(sender, "sender must be present");
    klassOop method_holder = sender->receiverKlass()->klass_part()->lookup_method_holder_for(sender->method());
    if (method_holder) {
      methodOop met = method_holder->klass_part()->superKlass()->klass_part()->lookup(selector);
      if (met) {
        *method = met;
        return true;
      }
    }
    return false;
  }
  methodOop met = rec->klass_part()->lookup(selector);
  if (met == NULL) return false;
  for (int index = 0; index < bcis->length(); index++) {
    int bci = bcis->at(index);
    met = met->block_method_at(bci);
    if (met == NULL) return false;
  }
  *method = met;
  return true;
}
// Create default_methods list for the current class.
// With the VM only processing erased signatures, the VM only
// creates an overpass in a conflict case or a case with no candidates.
// This allows virtual methods to override the overpass, but ensures
// that a local method search will find the exception rather than an abstract
// or default method that is not a valid candidate.
static void create_defaults_and_exceptions(
    GrowableArray<EmptyVtableSlot*>* slots,
    InstanceKlass* klass, TRAPS) {

  GrowableArray<Method*> overpasses;
  GrowableArray<Method*> defaults;
  BytecodeConstantPool bpool(klass->constants());

  for (int i = 0; i < slots->length(); ++i) {
    EmptyVtableSlot* slot = slots->at(i);

    if (slot->is_bound()) {
      MethodFamily* method = slot->get_binding();
      BytecodeBuffer buffer;

#ifndef PRODUCT
      if (TraceDefaultMethods) {
        tty->print("for slot: ");
        slot->print_on(tty);
        tty->cr();
        if (method->has_target()) {
          method->print_selected(tty, 1);
        } else if (method->throws_exception()) {
          method->print_exception(tty, 1);
        }
      }
#endif // ndef PRODUCT

      if (method->has_target()) {
        Method* selected = method->get_selected_target();
        if (selected->method_holder()->is_interface()) {
          defaults.push(selected);
        }
      } else if (method->throws_exception()) {
        int max_stack = assemble_method_error(&bpool, &buffer,
            method->get_exception_name(), method->get_exception_message(), CHECK);
        AccessFlags flags = accessFlags_from(
            JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
            flags, max_stack, slot->size_of_parameters(),
            ConstMethod::OVERPASS, CHECK);
        // We push to the methods list:
        // overpass methods which are exception throwing methods
        if (m != NULL) {
          overpasses.push(m);
        }
      }
    }
  }

#ifndef PRODUCT
  if (TraceDefaultMethods) {
    tty->print_cr("Created %d overpass methods", overpasses.length());
    tty->print_cr("Created %d default methods", defaults.length());
  }
#endif // ndef PRODUCT

  if (overpasses.length() > 0) {
    switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
    merge_in_new_methods(klass, &overpasses, CHECK);
  }
  if (defaults.length() > 0) {
    create_default_methods(klass, &defaults, CHECK);
  }
}
// Register a class as 'in-use' by the thread. It's fine to register a class
// multiple times (though perhaps inefficient).
void register_class(InstanceKlass* ik) {
  ConstantPool* cp = ik->constants();
  _keep_alive.push(cp);
  _thread->metadata_handles()->push(cp);
}
void push(InstanceKlass* cls, void* data) {
  assert(cls != NULL, "Requires a valid instance class");
  Node* node = new Node(cls, data, has_super(cls));
  _path.push(node);
}
void add(StackValue *val) const { _values->push(val); }
void add(astNode* element) { elements->push(element); }
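// The thin wrappers above, and most snippets in this section, follow the same
// GrowableArray pattern: allocate a growable list, push elements into it, then
// either walk it by index (push/length/at) or drain it as a worklist
// (push/is_empty/pop). Below is a minimal, self-contained sketch of that
// pattern. It uses a simplified stand-in container, SimpleGrowableArray, which
// is hypothetical and exists only for illustration; it is not HotSpot's real
// ResourceObj-based GrowableArray, though the member names mirror the calls
// seen in the code above.
#include <cassert>
#include <cstdio>
#include <vector>

template <typename E>
class SimpleGrowableArray {
 private:
  std::vector<E> _data;
 public:
  explicit SimpleGrowableArray(int initial_capacity = 0) { _data.reserve(initial_capacity); }
  void push(const E& elem)  { _data.push_back(elem); }                       // append an element
  E    pop()                { E e = _data.back(); _data.pop_back(); return e; }  // remove and return the last element
  E    at(int i) const      { return _data[i]; }                             // indexed access
  E    top() const          { return _data.back(); }                         // last element without removing it
  int  length() const       { return (int)_data.size(); }
  bool is_empty() const     { return _data.empty(); }
};

int main() {
  // Accumulate-and-iterate pattern (compare ClassLoaderDataGraph::new_clds above):
  // push everything of interest, then walk the list by index.
  SimpleGrowableArray<int>* bcis = new SimpleGrowableArray<int>(10);
  for (int bci = 0; bci < 5; bci++) {
    bcis->push(bci * 3);
  }
  for (int i = 0; i < bcis->length(); i++) {
    printf("bci[%d] = %d\n", i, bcis->at(i));
  }

  // Worklist pattern (compare LoopFinder::gather_loop_blocks above):
  // push seed elements, then pop until the list is drained.
  SimpleGrowableArray<int>* stack = new SimpleGrowableArray<int>();
  stack->push(7);
  stack->push(11);
  while (!stack->is_empty()) {
    int value = stack->pop();
    printf("visited %d\n", value);
  }

  delete bcis;
  delete stack;
  return 0;
}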
CHAResult* CHA::analyze_call(KlassHandle calling_klass, KlassHandle static_receiver,
                             KlassHandle actual_receiver, symbolHandle name, symbolHandle signature) {
  assert(static_receiver->oop_is_instance(), "must be instance klass");
  methodHandle m;
  // Only do exact lookup if receiver klass has been linked. Otherwise,
  // the vtables have not been set up, and the LinkResolver will fail.
  if (instanceKlass::cast(static_receiver())->is_linked() &&
      instanceKlass::cast(actual_receiver())->is_linked()) {
    if (static_receiver->is_interface()) {
      // no point trying to resolve unless actual receiver is a klass
      if (!actual_receiver->is_interface()) {
        m = LinkResolver::resolve_interface_call_or_null(actual_receiver, static_receiver, name, signature, calling_klass);
      }
    } else {
      m = LinkResolver::resolve_virtual_call_or_null(actual_receiver, static_receiver, name, signature, calling_klass);
    }
    if (m.is_null()) {
      // didn't find method (e.g., could be abstract method)
      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m, false);
    }
    if( Klass::can_be_statically_bound(m()) || m()->is_private() || actual_receiver->subklass() == NULL ) {
      // always optimize final methods, private methods or methods with no
      // subclasses.
      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m);
    }
    if (!UseCHA) {
      // don't optimize this call
      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m, false);
    }
  }

  // If the method is abstract then each non-abstract subclass must implement
  // the method and inlining is not possible. If there is exactly 1 subclass
  // then there can be only 1 implementation and we are OK.
  if( !m.is_null() && m()->is_abstract() ) { // Method is abstract?
    Klass *sr = Klass::cast(static_receiver());
    if( sr == sr->up_cast_abstract() )
      return new CHAResult(actual_receiver, name, signature, NULL, NULL, m, false);
    // Fall into the next code; it will find the one implementation
    // and that implementation is correct.
  }

  _used = true;
  GrowableArray<methodHandle>* methods = new GrowableArray<methodHandle>(CHA::max_result());
  GrowableArray<KlassHandle>* receivers = new GrowableArray<KlassHandle>(CHA::max_result());

  // Since 'm' is visible from the actual receiver we can call it if the
  // runtime receiver class does not override 'm'.
  if( !m.is_null() && m()->method_holder() != actual_receiver() && !m->is_abstract() ) {
    receivers->push(actual_receiver);
    methods->push(m);
  }

  if (static_receiver->is_interface()) {
    instanceKlassHandle sr = static_receiver();
    process_interface(sr, receivers, methods, name, signature);
  } else {
    process_class(static_receiver, receivers, methods, name, signature);
  }

  methodHandle dummy;
  CHAResult* res = new CHAResult(actual_receiver, name, signature, receivers, methods, dummy);
  //res->print();
  return res;
}