void C1_MacroAssembler::method_exit(FrameMap* frame_map) {
  // offset from the expected fixed sp within the method
  int sp_offset = 0;
  // adjust SP over spills...
  sp_offset = in_bytes(frame_map->framesize_in_bytes()) - 8 - (frame_map->num_callee_saves()*8);
  if (sp_offset == 8) pop(RCX); // pop and blow arbitrary caller save, smaller encoding than add8i
  else                add8i (RSP, sp_offset );
  if( frame_map->num_callee_saves() > 0 ) {
    int callee_save_num = 0;
    int callee_saves = frame_map->callee_saves(); // bitmap
    for(int i=0;i<LinearScan::nof_cpu_regs;i++){
      if ((callee_saves & 1<<i) != 0) {
        int wanted_sp_offset = frame_map->address_for_callee_save(callee_save_num)._disp;
        assert0( sp_offset == wanted_sp_offset );
        pop((Register)i);
        sp_offset += 8;
        callee_save_num++;
        assert0( callee_save_num <= frame_map->num_callee_saves() );
      }
    }
#ifdef ASSERT
    for(int i=0;i<LinearScan::nof_xmm_regs;i++){
      int reg = LinearScan::nof_cpu_regs+i;
      assert ((callee_saves & 1<<reg) == 0, "Unexpected callee save XMM register");
    }
#endif
  }
  assert0 (sp_offset == (in_bytes(frame_map->framesize_in_bytes())-8) );
  ret ();
}

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}

ByteSize ciMethodData::offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(slot_offset_in_data);

  return in_ByteSize(offset);
}

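// Illustrative only: a minimal, self-contained sketch of the ByteSize / in_bytes() /
// in_ByteSize() pattern that recurs in the snippets above and below. This is NOT HotSpot's
// actual utilities/sizes.hpp; the *_sketch names are assumptions for this example. The point
// it demonstrates: byte offsets get their own type so they cannot be mixed with plain ints
// by accident, and in_bytes()/in_ByteSize() are the explicit conversion points.
class ByteSizeSketch {
 private:
  int _size;                                            // byte count carried by the wrapper
  explicit ByteSizeSketch(int size) : _size(size) {}
 public:
  friend ByteSizeSketch in_ByteSize_sketch(int size);          // int      -> ByteSize
  friend int            in_bytes_sketch(ByteSizeSketch x);     // ByteSize -> int
  friend ByteSizeSketch operator+(ByteSizeSketch a, ByteSizeSketch b);
};
inline ByteSizeSketch in_ByteSize_sketch(int size)      { return ByteSizeSketch(size); }
inline int            in_bytes_sketch(ByteSizeSketch x) { return x._size; }
inline ByteSizeSketch operator+(ByteSizeSketch a, ByteSizeSketch b) {
  return ByteSizeSketch(a._size + b._size);              // offsets compose without leaving the type
}
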
void ciMethodData::dump_replay_data_extra_data_helper(outputStream* out, int round, int& count) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (;dp < end; dp = MethodData::next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::speculative_trap_data_tag: {
      ciSpeculativeTrapData* data = new ciSpeculativeTrapData(dp);
      ciMethod* m = data->method();
      if (m != NULL) {
        if (round == 0) {
          count++;
        } else {
          out->print(" %d ", (int)(dp_to_di(((address)dp) + in_bytes(ciSpeculativeTrapData::method_offset())) / sizeof(intptr_t)));
          m->dump_name_as_ascii(out);
        }
      }
      break;
    }
    default:
      fatal(err_msg("bad tag = %d", dp->tag()));
    }
  }
}

// "Normal" instantiation is preceeded by a MetaspaceObj allocation // which zeros out memory - calloc equivalent. // The constructor is also used from init_self_patching_vtbl_list, // which doesn't zero out the memory before calling the constructor. // Need to set the _java_mirror field explicitly to not hit an assert that the field // should be NULL before setting it. Klass::Klass() : _prototype_header(markOopDesc::prototype()), _shared_class_path_index(-1), _java_mirror(NULL) { _primary_supers[0] = this; set_super_check_offset(in_bytes(primary_supers_offset())); }
ByteSize FrameMap::sp_offset_for_double_slot(const int index) const {
  ByteSize offset = sp_offset_for_slot(index);
  if (index >= argcount()) {
    assert(in_bytes(offset) + 4 < framesize() * 4, "spill outside of frame");
  }
  return offset;
}

Klass::Klass() {
  Klass* k = this;

  { // Preinitialize supertype information.
    // A later call to initialize_supers() may update these settings:
    set_super(NULL);
    for (juint i = 0; i < Klass::primary_super_limit(); i++) {
      _primary_supers[i] = NULL;
    }
    set_secondary_supers(NULL);
    _primary_supers[0] = k;
    set_super_check_offset(in_bytes(primary_supers_offset()));
  }

  set_java_mirror(NULL);
  set_modifier_flags(0);
  set_layout_helper(Klass::_lh_neutral_value);
  set_name(NULL);
  AccessFlags af;
  af.set_flags(0);
  set_access_flags(af);
  set_subklass(NULL);
  set_next_sibling(NULL);
  set_next_link(NULL);
  set_alloc_count(0);
  TRACE_SET_KLASS_TRACE_ID(this, 0);

  set_prototype_header(markOopDesc::prototype());
  set_biased_lock_revocation_count(0);
  set_last_biased_lock_bulk_revocation_time(0);

  // The klass doesn't have any references at this point.
  clear_modified_oops();
  clear_accumulated_modified_oops();
}

void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
  Label no_deopt;
  Label no_handler;

  __ verify_not_null_oop(Oexception);

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(I7, L0);
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  __ tst(O0);
  __ br(Assembler::zero, false, Assembler::pn, no_handler);
  __ delayed()->nop();

  // restore the registers that were saved at the beginning and jump to the exception handler.
  restore_live_registers(sasm);
  __ jmp(O0, 0);
  __ delayed()->restore();

  __ bind(no_handler);
  __ mov(L0, I7); // restore return address

  // restore exception oop
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save());
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));

  __ restore();

  AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
  __ jump_to(exc, G4);
  __ delayed()->nop();

  oop_maps->add_gc_map(call_offset, oop_map);
}

// ------------------------------------------------------------------
// ciCPCache::get_f1_offset
size_t ciCPCache::get_f1_offset(int index) {
  // Calculate the offset from the constantPoolCacheOop to the f1
  // field.
  ByteSize f1_offset =
    constantPoolCacheOopDesc::entry_offset(index) +
    ConstantPoolCacheEntry::f1_offset();

  return in_bytes(f1_offset);
}

void ciMethodData::dump_replay_data_type_helper(outputStream* out, int round, int& count, ProfileData* pdata, ByteSize offset, ciKlass* k) {
  if (k != NULL) {
    if (round == 0) {
      count++;
    } else {
      out->print(" %d %s", (int)(dp_to_di(pdata->dp() + in_bytes(offset)) / sizeof(intptr_t)),
                 k->name()->as_quoted_ascii());
    }
  }
}

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  const Register tmp = Rtemp; // Rtemp OK, should be free at call sites

  address npe_addr = __ pc();
  __ load_klass(tmp, R0);

  {
    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
    int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;

    assert ((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
    int offset_mask = AARCH64_ONLY(0xfff << LogBytesPerWord) NOT_AARCH64(0xfff);
    if (method_offset & ~offset_mask) {
      __ add(tmp, tmp, method_offset & ~offset_mask);
    }
    __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
  }

  address ame_addr = __ pc();
#ifdef AARCH64
  __ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
  __ br(tmp);
#else
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // FIXME ARM: need correct 'slop' - below is x86 code
  // shut the door on sizing bugs
  //int slop = 8;  // 32-bit offset is this much larger than a 13-bit one
  //assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

void C1_MacroAssembler::entry( CodeProfile *cp ) {
  if(C1Breakpoint)os_breakpoint();
  if (UseC2 && ProfileMethodEntry) {
    Label no_overflow;
    int invoff = CodeProfile::invoke_count_offset_in_bytes()   + in_bytes(InvocationCounter::counter_offset());
    int beoff  = CodeProfile::backedge_count_offset_in_bytes() + in_bytes(InvocationCounter::counter_offset());
    mov8i(R11,(intptr_t)cp);
    ldz4 (RAX, R11, invoff);  // increment the "# of calls" entry
    add4 (RAX, R11, beoff );
    inc4 (R11, invoff);       // increment "# of calls"
    cmp4i(RAX, C1PromotionThreshold);
    jbl  (no_overflow);
    call (Runtime1::entry_for(Runtime1::frequency_counter_overflow_wrapper_id));
    // No oop-map or debug info here; as with resolve_and_patch_call, the
    // caller, NOT this code, is responsible for GC'ing the arguments.
    bind(no_overflow);
  }
}

// void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
address generate_getPsrInfo() {
  StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
# define __ _masm->

  address start = __ pc();

  // rbx is callee-save on both unix and windows
  // rcx and rdx are first and second argument registers on windows
  __ pushq(rbx);
  __ movq(r8, rarg0);

  __ xorl(rax, rax);
  __ cpuid();
  __ leaq(r9, Address(r8, in_bytes(VM_Version::std_cpuid0_offset())));
  __ movl(Address(r9, 0),  rax);
  __ movl(Address(r9, 4),  rbx);
  __ movl(Address(r9, 8),  rcx);
  __ movl(Address(r9, 12), rdx);

  __ movl(rax, 1);
  __ cpuid();
  __ leaq(r9, Address(r8, in_bytes(VM_Version::std_cpuid1_offset())));
  __ movl(Address(r9, 0),  rax);
  __ movl(Address(r9, 4),  rbx);
  __ movl(Address(r9, 8),  rcx);
  __ movl(Address(r9, 12), rdx);

  __ movl(rax, 0x80000001);
  __ cpuid();
  __ leaq(r9, Address(r8, in_bytes(VM_Version::ext_cpuid1_offset())));
  __ movl(Address(r9, 0),  rax);
  __ movl(Address(r9, 4),  rbx);
  __ movl(Address(r9, 8),  rcx);
  __ movl(Address(r9, 12), rdx);

  __ popq(rbx);
  __ ret(0);

  return start;
}

ByteSize FrameMap::sp_offset_for_slot(const int index) const {
  if (index < argcount()) {
    int offset = _argument_locations->at(index);
    assert(offset != -1, "not a memory argument");
    assert(offset >= framesize() * 4, "argument inside of frame");
    return in_ByteSize(offset);
  }

  ByteSize offset = sp_offset_for_spill(index - argcount());
  assert(in_bytes(offset) < framesize() * 4, "spill outside of frame");
  return offset;
}

void Compilation::install_code(int frame_size) {
  // frame_size is in 32-bit words, so convert it to intptr_t words for register_method
  assert(frame_size == frame_map()->framesize(), "must match");
  assert(in_bytes(frame_map()->framesize_in_bytes()) % sizeof(intptr_t) == 0, "must be at least pointer aligned");
  _env->register_method(
    method(),
    osr_bci(),
    &_offsets,
    in_bytes(_frame_map->sp_offset_for_orig_pc()),
    code(),
    in_bytes(frame_map()->framesize_in_bytes()) / sizeof(intptr_t),
    debug_info_recorder()->_oopmaps,
    exception_handler_table(),
    implicit_exception_table(),
    compiler(),
    _env->comp_level(),
    needs_debug_information(),
    has_unsafe_access()
  );
}

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                            bool for_compiler_entry) {
  Label L_no_such_method;
  assert(method == R19_method, "interpreter calling convention");
  assert_different_registers(method, target, temp);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    __ verify_thread();
    __ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
    __ cmplwi(CCR0, temp, 0);
    __ beq(CCR0, run_compiled_code);
    // Null method test is replicated below in compiled case,
    // it might be able to address across the verify_thread()
    __ cmplwi(CCR0, R19_method, 0);
    __ beq(CCR0, L_no_such_method);
    __ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
    __ mtctr(target);
    __ bctr();
    __ BIND(run_compiled_code);
  }

  // Compiled case, either static or fall-through from runtime conditional
  __ cmplwi(CCR0, R19_method, 0);
  __ beq(CCR0, L_no_such_method);

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ld(target, in_bytes(entry_offset), R19_method);
  __ mtctr(target);
  __ bctr();

  __ bind(L_no_such_method);
  assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!");
  __ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
  __ mtctr(target);
  __ bctr();
}

// Initialize the methodDataOop corresponding to a given method.
void methodDataOopDesc::initialize(methodHandle method) {
  ResourceMark rm;

  // Set the method back-pointer.
  _method = method();
  set_creation_mileage(mileage_of(method()));

  // Initialize flags and trap history.
  _nof_decompiles = 0;
  _nof_overflow_recompiles = 0;
  _nof_overflow_traps = 0;
  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  Copy::zero_to_words((HeapWord*) &_trap_hist,
                      sizeof(_trap_hist) / sizeof(HeapWord));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  BytecodeStream stream(method);
  Bytecodes::Code c;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0)  empty_bc_count += 1;
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);
  int arg_size = method->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
  object_size += extra_size + DataLayout::compute_size_in_bytes(arg_size+1);

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  set_object_is_parsable(object_size);
}

bool FrameMap::location_for_sp_offset(ByteSize byte_offset_from_sp, Location::Type loc_type,
                                      Location* loc) const {
  int offset = in_bytes(byte_offset_from_sp);
  assert(offset >= 0, "incorrect offset");
  if (!Location::legal_offset_in_bytes(offset)) {
    return false;
  }
  Location tmp_loc = Location::new_stk_loc(loc_type, offset);
  *loc = tmp_loc;
  return true;
}

void MethodData::initialize() {
  No_Safepoint_Verifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm;

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(method());
  Bytecodes::Code c;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (is_empty_data(size_in_bytes, c)) empty_bc_count++;
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
  object_size += extra_size;

  Copy::zero_to_bytes((HeapWord*) extra_data_base(), extra_size);

#ifndef GRAALVM
  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);
  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);
#endif

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  set_size(object_size);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new (C) MulXNode( idx, str ) );
    ptr   = _gvn.transform( new (C) AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

bool FrameMap::finalize_frame(int nof_slots) {
  assert(nof_slots >= 0, "must be positive");
  assert(_num_spills == -1, "can only be set once");
  _num_spills = nof_slots;
  assert(_framesize == -1, "should only be calculated once");
  _framesize = round_to(in_bytes(sp_offset_for_monitor_base(0)) +
                        _num_monitors * sizeof(BasicObjectLock) +
                        sizeof(intptr_t) +                        // offset of deopt orig pc
                        frame_pad_in_bytes,
                        StackAlignmentInBytes) / 4;

  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      _argument_locations->at_put(java_index,
                                  in_bytes(framesize_in_bytes()) + _argument_locations->at(java_index));
    }
    java_index += type2size[opr->type()];
  }
  // make sure it's expressible on the platform
  return validate_frame();
}

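// Illustrative only: the _framesize computation above rounds the raw byte size of the frame
// (monitors, deopt orig-pc slot, padding) up to the stack alignment and then divides by 4 to
// express it in 32-bit slots. Below is a minimal, self-contained sketch of that arithmetic;
// round_to_sketch, framesize_in_slots_sketch, and the example constants are assumptions for
// this illustration, not HotSpot definitions.
#include <cassert>

static int round_to_sketch(int x, int alignment) {
  assert(alignment > 0 && (alignment & (alignment - 1)) == 0);  // power-of-two alignment only
  return (x + alignment - 1) & ~(alignment - 1);                // round up to the next multiple
}

static int framesize_in_slots_sketch(int raw_frame_bytes, int stack_alignment_bytes) {
  // e.g. raw_frame_bytes = 52, stack_alignment_bytes = 16  ->  rounded to 64 bytes -> 16 slots
  return round_to_sketch(raw_frame_bytes, stack_alignment_bytes) / 4;
}
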
void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new (C) ThreadLocalNode() );
  Node* merge = new (C) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
  Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}

void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
  Label no_deopt;

  __ verify_not_null_oop(Oexception);

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(I7, L0);
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

#ifdef ASSERT
  Label done;
  __ tst(O0);
  __ br(Assembler::notZero, false, Assembler::pn, done);
  __ delayed()->nop();
  __ stop("should have found address");
  __ bind(done);
#endif

  // restore the registers that were saved at the beginning and jump to the exception handler.
  restore_live_registers(sasm);
  __ jmp(O0, 0);
  __ delayed()->restore();

  oop_maps->add_gc_map(call_offset, oop_map);
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2, Register temp3,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
  assert(method_temp == R19_method, "required register for loading method");

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv, temp2);
  __ verify_oop(method_temp);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
  __ verify_oop(method_temp);
  // The following assumes that a Method* is normally compressed in the vmtarget field:
  __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);

  if (VerifyMethodHandles && !for_compiler_entry) {
    // Make sure recv is already on stack.
    __ ld(temp2, in_bytes(Method::const_offset()), method_temp);
    __ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
    Label L;
    __ ld(temp2, __ argument_offset(temp2, temp2, 0), CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
    __ cmpd(CCR1, temp2, recv);
    __ beq(CCR1, L);
    __ stop("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len, t1, t2);
  if (UseBiasedLocking && !len->is_valid()) {
    ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
  } else {
    load_const_optimized(t1, (intx)markOopDesc::prototype());
  }
  std(t1, oopDesc::mark_offset_in_bytes(), obj);
  store_klass(obj, klass);
  if (len->is_valid()) {
    stw(len, arrayOopDesc::length_offset_in_bytes(), obj);
  } else if (UseCompressedClassPointers) {
    // Otherwise length is in the class gap.
    store_klass_gap(obj);
  }
}

// Compute the size of the methodDataOop necessary to store
// profiling information about a given method.  Size is in bytes.
int methodDataOopDesc::compute_allocation_size_in_bytes(methodOop method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0)  empty_bc_count += 1;
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  return object_size;
}

void C1_MacroAssembler::initialize_object(
  Register obj,                        // result: pointer to object after successful allocation
  Register klass,                      // object klass
  Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,          // object size in bytes if known at compile time
  Register t1,                         // temp register
  Register t2                          // temp register
  ) {
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

#ifdef ASSERT
  {
    lwz(t1, in_bytes(Klass::layout_helper_offset()), klass);
    if (var_size_in_bytes != noreg) {
      cmpw(CCR0, t1, var_size_in_bytes);
    } else {
      cmpwi(CCR0, t1, con_size_in_bytes);
    }
    asm_assert_eq("bad size in initialize_object", 0x753);
  }
#endif

  // Initialize body.
  if (var_size_in_bytes != noreg) {
    // Use a loop.
    addi(t1, obj, hdr_size_in_bytes);                // Compute address of first element.
    addi(t2, var_size_in_bytes, -hdr_size_in_bytes); // Compute size of body.
    initialize_body(t1, t2);
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    // Use a loop.
    initialize_body(obj, t1, t2, con_size_in_bytes, hdr_size_in_bytes);
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    Unimplemented();
//    assert(obj == O0, "must be");
//    call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
//         relocInfo::runtime_call_type);
  }

  verify_oop(obj);
}

void C1_MacroAssembler::restore_callee_saves_pop_frame_and_jmp(FrameMap* frame_map, address entry) {
  int callee_save_num=0;
  int callee_saves = frame_map->callee_saves();
  for(int i=0;i<LinearScan::nof_cpu_regs;i++){
    if ((callee_saves & 1<<i) != 0) {
      int sp_offset = frame_map->address_for_callee_save(callee_save_num)._disp;
      ld8((Register)i, RSP, sp_offset);
      callee_save_num++;
    }
  }
#ifdef ASSERT
  for(int i=0;i<LinearScan::nof_xmm_regs;i++){
    int reg = LinearScan::nof_cpu_regs+i;
    assert ((callee_saves & 1<<reg) == 0, "Unexpected callee save XMM register");
  }
#endif
  add8i (RSP, in_bytes(frame_map->framesize_in_bytes()) - 8);
  jmp(entry);
}

// Compute the size of the methodDataOop necessary to store
// profiling information about a given method.  Size is in bytes.
int methodDataOopDesc::compute_allocation_size_in_bytes(methodHandle method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0)  empty_bc_count += 1;
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  return object_size;
}

void C1_MacroAssembler::build_frame(FrameMap* frame_map) {
  // offset from the expected fixed sp within the method
  int sp_offset = in_bytes(frame_map->framesize_in_bytes()) - 8; // call pushed the return IP
  if( frame_map->num_callee_saves() > 0 ) {
    int callee_save_num = frame_map->num_callee_saves()-1;
    int callee_saves = frame_map->callee_saves(); // bitmap
    for (int i=LinearScan::nof_cpu_regs-1; i>=0; i--) {
      if ((callee_saves & 1<<i) != 0) {
        int wanted_sp_offset = frame_map->address_for_callee_save(callee_save_num)._disp;
        assert0( sp_offset-8 == wanted_sp_offset );
        push((Register)i);
        sp_offset -= 8;
        callee_save_num--;
        assert0( callee_save_num >= -1 );
      }
    }
#ifdef ASSERT
    for(int i=0;i<LinearScan::nof_xmm_regs;i++){
      int reg = LinearScan::nof_cpu_regs+i;
      assert ((callee_saves & 1<<reg) == 0, "Unexpected callee save XMM register");
    }
#endif
  }
  if (sp_offset != 0) { // make sp equal expected sp for method
    if (sp_offset == 8) push (RCX);  // push reg as smaller encoding than sub8i
    else                sub8i (RSP, sp_offset );
  }
  if( should_verify_oop(MacroAssembler::OopVerify_IncomingArgument) ) {
    int args_len = frame_map->incoming_arguments()->length();
    for(int i=0; i < args_len; i++) {
      LIR_Opr arg = frame_map->incoming_arguments()->at(i);
      if (arg->is_valid() && arg->is_oop()) {
        VOopReg::VR oop = frame_map->oopregname(arg);
        verify_oop(oop, MacroAssembler::OopVerify_IncomingArgument);
      }
    }
  }
}

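// Illustrative only: build_frame() above walks the callee-save bitmap from the highest CPU
// register down and pushes each saved register, while method_exit() and
// restore_callee_saves_pop_frame_and_jmp() walk the same bitmap from bit 0 upward to restore
// them, so the restore order mirrors the save order. Below is a minimal, self-contained sketch
// of that symmetric bitmap walk; NOF_CPU_REGS, the *_sketch names, and the printf stand-ins for
// the push/pop/ld8 operations are assumptions for this example, not HotSpot code.
#include <cstdio>

static const int NOF_CPU_REGS = 16;

static void save_callee_saves_sketch(unsigned callee_saves) {
  for (int i = NOF_CPU_REGS - 1; i >= 0; i--) {   // highest register first, as in build_frame()
    if ((callee_saves & (1u << i)) != 0) {
      std::printf("push r%d\n", i);
    }
  }
}

static void restore_callee_saves_sketch(unsigned callee_saves) {
  for (int i = 0; i < NOF_CPU_REGS; i++) {        // lowest register first, as in method_exit()
    if ((callee_saves & (1u << i)) != 0) {
      std::printf("pop  r%d\n", i);
    }
  }
}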