// create a klass of array holding typeArrays
Klass* TypeArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  int dim = dimension();
  assert(dim <= n, "check order of chain");
  if (dim == n)
    return this;

  if (higher_dimension() == NULL) {
    if (or_null)  return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      // Atomic create higher dimension and link into list
      MutexLocker mu(MultiArray_lock, THREAD);

      if (higher_dimension() == NULL) {
        Klass* oak = ObjArrayKlass::allocate_objArray_klass(
            class_loader_data(), dim + 1, this, CHECK_NULL);
        ObjArrayKlass* h_ak = ObjArrayKlass::cast(oak);
        h_ak->set_lower_dimension(this);
        OrderAccess::storestore();
        set_higher_dimension(h_ak);
        assert(h_ak->oop_is_objArray(), "incorrect initialization of ObjArrayKlass");
      }
    }
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  ObjArrayKlass* h_ak = ObjArrayKlass::cast(higher_dimension());
  if (or_null) {
    return h_ak->array_klass_or_null(n);
  }
  return h_ak->array_klass(n, THREAD);
}
// retrieve or create JvmtiThreadState
// Can return NULL if JavaThread is exiting.
inline static JvmtiThreadState *state_for(JavaThread *thread) {
  JvmtiThreadState *state = thread->jvmti_thread_state();
  if (state == NULL) {
    MutexLocker mu(JvmtiThreadState_lock);
    // check again with the lock held
    state = state_for_while_locked(thread);
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }
  return state;
}
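// The snippet above is an instance of the check/lock/re-check lazy
// initialization idiom shared by several of these excerpts: a fast-path
// read without the lock, then a re-check under the lock before creating.
// Below is a minimal standalone sketch of the same idiom in portable C++
// with std::mutex standing in for HotSpot's MutexLocker; the State and
// LazyState names are illustrative, not HotSpot API.  Note that the plain
// fast-path read mirrors the HotSpot code but is formally a data race in
// ISO C++; strictly portable code would read through a std::atomic.
#include <mutex>

struct State { /* payload */ };

class LazyState {
  std::mutex lock_;
  State* state_ = nullptr;  // written only while holding lock_

public:
  State* get() {
    State* s = state_;                        // unlocked fast-path read
    if (s == nullptr) {
      std::lock_guard<std::mutex> guard(lock_);
      if (state_ == nullptr) {                // re-check under the lock
        state_ = new State();                 // at most one thread creates
      }
      s = state_;
    }
    return s;
  }
};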
jobject JNIHandles::make_weak_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _weak_global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }
  return res;
}
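// make_weak_global() is the VM-side allocation behind JNI's
// NewWeakGlobalRef.  As a hedged sketch, this is how client JNI code
// reaches the path above (NewWeakGlobalRef/NewLocalRef are standard JNI;
// the cached_ref variable is illustrative):
#include <jni.h>

static jweak cached_ref = NULL;

void cache_weakly(JNIEnv *env, jobject obj) {
  // Creates a weak global handle; the referent may still be collected.
  cached_ref = env->NewWeakGlobalRef(obj);
}

jobject resolve_cached(JNIEnv *env) {
  // A weak ref must be promoted to a strong local ref before use;
  // NewLocalRef returns NULL if the referent has been collected.
  return env->NewLocalRef(cached_ref);
}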
klassOop typeArrayKlass::array_klass_impl(typeArrayKlassHandle h_this,
                                          bool or_null, int n, TRAPS) {
  int dimension = h_this->dimension();
  assert(dimension <= n, "check order of chain");
  if (dimension == n)
    return h_this();

  objArrayKlassHandle h_ak(THREAD, h_this->higher_dimension());
  if (h_ak.is_null()) {
    if (or_null)  return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      // Atomic create higher dimension and link into list
      MutexLocker mu(MultiArray_lock, THREAD);

      h_ak = objArrayKlassHandle(THREAD, h_this->higher_dimension());
      if (h_ak.is_null()) {
        klassOop oak = objArrayKlassKlass::cast(
            Universe::objArrayKlassKlassObj())->allocate_objArray_klass(
            dimension + 1, h_this, CHECK_NULL);
        h_ak = objArrayKlassHandle(THREAD, oak);
        h_ak->set_lower_dimension(h_this());
        OrderAccess::storestore();
        h_this->set_higher_dimension(h_ak());
        assert(h_ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
      }
    }
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  if (or_null) {
    return h_ak->array_klass_or_null(n);
  }
  return h_ak->array_klass(n, CHECK_NULL);
}
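// In both versions of array_klass_impl above, the new klass is fully
// initialized (set_lower_dimension) before OrderAccess::storestore() and
// only then published via set_higher_dimension(), so a racing reader that
// observes the new higher_dimension() also observes its fields.  A minimal
// sketch of that publication pattern in standard C++, with std::atomic
// release/acquire in place of HotSpot's OrderAccess (the Node type and
// names are illustrative):
#include <atomic>

struct Node {
  Node* lower = nullptr;
};

std::atomic<Node*> higher{nullptr};

void publish(Node* self) {
  Node* n = new Node();
  n->lower = self;                              // initialize first...
  higher.store(n, std::memory_order_release);   // ...then publish (storestore)
}

Node* consume() {
  // The acquire load pairs with the release store, so a non-null result
  // is guaranteed to have a visible, initialized 'lower' field.
  return higher.load(std::memory_order_acquire);
}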
// ------------------------------------------------------------------
// ciMethod::ciMethod
//
// Loaded method.
ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
  assert(h_m() != NULL, "no null method");

  // These fields are always filled in in loaded methods.
  _flags = ciFlags(h_m()->access_flags());

  // Easy to compute, so fill them in now.
  _max_stack          = h_m()->max_stack();
  _max_locals         = h_m()->max_locals();
  _code_size          = h_m()->code_size();
  _intrinsic_id       = h_m()->intrinsic_id();
  _handler_count      = h_m()->exception_table_length();
  _uses_monitors      = h_m()->access_flags().has_monitor_bytecodes();
  _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
  _is_c1_compilable   = !h_m()->is_not_c1_compilable();
  _is_c2_compilable   = !h_m()->is_not_c2_compilable();
  // Lazy fields, filled in on demand.  Require allocation.
  _code               = NULL;
  _exception_handlers = NULL;
  _liveness           = NULL;
  _method_blocks      = NULL;
#if defined(COMPILER2) || defined(SHARK)
  _flow               = NULL;
  _bcea               = NULL;
#endif // COMPILER2 || SHARK

  ciEnv *env = CURRENT_ENV;
  if (env->jvmti_can_hotswap_or_post_breakpoint() && can_be_compiled()) {
    // 6328518 check hotswap conditions under the right lock.
    MutexLocker locker(Compile_lock);
    if (Dependencies::check_evol_method(h_m()) != NULL) {
      _is_c1_compilable = false;
      _is_c2_compilable = false;
    }
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  if (h_m()->method_holder()->is_linked()) {
    _can_be_statically_bound = h_m()->can_be_statically_bound();
  } else {
    // Have to use a conservative value in this case.
    _can_be_statically_bound = false;
  }

  // Adjust the definition of this condition to be more useful:
  // %%% take these conditions into account in vtable generation
  if (!_can_be_statically_bound && h_m()->is_private())
    _can_be_statically_bound = true;
  if (_can_be_statically_bound && h_m()->is_abstract())
    _can_be_statically_bound = false;

  // generating _signature may allow GC and therefore move m.
  // These fields are always filled in.
  _name = env->get_symbol(h_m()->name());
  _holder = env->get_instance_klass(h_m()->method_holder());
  ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
  constantPoolHandle cpool = h_m()->constants();
  _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
  _method_data = NULL;
  // Take a snapshot of these values, so they will be commensurate with the MDO.
  if (ProfileInterpreter || TieredCompilation) {
    int invcnt = h_m()->interpreter_invocation_count();
    // if the value overflowed report it as max int
    _interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt;
    _interpreter_throwout_count   = h_m()->interpreter_throwout_count();
  } else {
    _interpreter_invocation_count = 0;
    _interpreter_throwout_count = 0;
  }
  if (_interpreter_invocation_count == 0)
    _interpreter_invocation_count = 1;
  _instructions_size = -1;
#ifdef ASSERT
  if (ReplayCompiles) {
    ciReplay::initialize(this);
  }
#endif
}
int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  // Make sure method is native and not abstract
  assert(method->is_native() && !method->is_abstract(), "should be");

  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();

  // Allocate and initialize our frame
  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
  thread->push_zero_frame(frame);
  interpreterState istate = frame->interpreter_state();
  intptr_t *locals = istate->locals();

  // Update the invocation counter
  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
    InvocationCounter *counter = method->invocation_counter();
    counter->increment();
    if (counter->reached_InvocationLimit()) {
      CALL_VM_NOCHECK(
        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unwind_and_return;
    }
  }

  // Lock if necessary
  BasicObjectLock *monitor;
  monitor = NULL;
  if (method->is_synchronized()) {
    monitor = (BasicObjectLock*) istate->stack_base();
    oop lockee = monitor->obj();
    markOop disp = lockee->mark()->set_unlocked();

    monitor->lock()->set_displaced_header(disp);
    if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
      if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
        monitor->lock()->set_displaced_header(NULL);
      }
      else {
        CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
        if (HAS_PENDING_EXCEPTION)
          goto unwind_and_return;
      }
    }
  }

  // Get the signature handler
  InterpreterRuntime::SignatureHandler *handler; {
    address handlerAddr = method->signature_handler();
    if (handlerAddr == NULL) {
      CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;

      handlerAddr = method->signature_handler();
      assert(handlerAddr != NULL, "eh?");
    }
    if (handlerAddr == (address) InterpreterRuntime::slow_signature_handler) {
      CALL_VM_NOCHECK(handlerAddr =
        InterpreterRuntime::slow_signature_handler(thread, method, NULL, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;
    }
    handler = \
      InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
  }

  // Get the native function entry point
  address function;
  function = method->native_function();
  assert(function != NULL, "should be set if signature handler is");

  // Build the argument list
  stack->overflow_check(handler->argument_count() * 2, THREAD);
  if (HAS_PENDING_EXCEPTION)
    goto unlock_unwind_and_return;

  void **arguments;
  void *mirror; {
    arguments =
      (void **) stack->alloc(handler->argument_count() * sizeof(void **));
    void **dst = arguments;

    void *env = thread->jni_environment();
    *(dst++) = &env;

    if (method->is_static()) {
      istate->set_oop_temp(
        method->constants()->pool_holder()->java_mirror());
      mirror = istate->oop_temp_addr();
      *(dst++) = &mirror;
    }

    intptr_t *src = locals;
    for (int i = dst - arguments; i < handler->argument_count(); i++) {
      ffi_type *type = handler->argument_type(i);
      if (type == &ffi_type_pointer) {
        if (*src) {
          stack->push((intptr_t) src);
          *(dst++) = stack->sp();
        }
        else {
          *(dst++) = src;
        }
        src--;
      }
      else if (type->size == 4) {
        *(dst++) = src--;
      }
      else if (type->size == 8) {
        src--;
        *(dst++) = src--;
      }
      else {
        ShouldNotReachHere();
      }
    }
  }

  // Set up the Java frame anchor
  thread->set_last_Java_frame();

  // Change the thread state to _thread_in_native
  ThreadStateTransition::transition_from_java(thread, _thread_in_native);

  // Make the call
  intptr_t result[4 - LogBytesPerWord];
  ffi_call(handler->cif(), (void (*)()) function, result, arguments);

  // Change the thread state back to _thread_in_Java.
  // ThreadStateTransition::transition_from_native() cannot be used
  // here because it does not check for asynchronous exceptions.
  // We have to manage the transition ourself.
  thread->set_thread_state(_thread_in_native_trans);

  // Make sure new state is visible in the GC thread
  if (os::is_MP()) {
    if (UseMembar) {
      OrderAccess::fence();
    }
    else {
      InterfaceSupport::serialize_memory(thread);
    }
  }

  // Handle safepoint operations, pending suspend requests,
  // and pending asynchronous exceptions.
  if (SafepointSynchronize::do_call_back() ||
      thread->has_special_condition_for_native_trans()) {
    JavaThread::check_special_condition_for_native_trans(thread);
    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops());
  }

  // Finally we can change the thread state to _thread_in_Java.
  thread->set_thread_state(_thread_in_Java);
  fixup_after_potential_safepoint();

  // Clear the frame anchor
  thread->reset_last_Java_frame();

  // If the result was an oop then unbox it and store it in
  // oop_temp where the garbage collector can see it before
  // we release the handle it might be protected by.
  if (handler->result_type() == &ffi_type_pointer) {
    if (result[0])
      istate->set_oop_temp(*(oop *) result[0]);
    else
      istate->set_oop_temp(NULL);
  }

  // Reset handle block
  thread->active_handles()->clear();

 unlock_unwind_and_return:

  // Unlock if necessary
  if (monitor) {
    BasicLock *lock = monitor->lock();
    markOop header = lock->displaced_header();
    oop rcvr = monitor->obj();
    monitor->set_obj(NULL);

    if (header != NULL) {
      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
        monitor->set_obj(rcvr); {
          HandleMark hm(thread);
          CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
        }
      }
    }
  }

 unwind_and_return:

  // Unwind the current activation
  thread->pop_zero_frame();

  // Pop our parameters
  stack->set_sp(stack->sp() + method->size_of_parameters());

  // Push our result
  if (!HAS_PENDING_EXCEPTION) {
    BasicType type = result_type_of(method);
    stack->set_sp(stack->sp() - type2size[type]);

    switch (type) {
    case T_VOID:
      break;

    case T_BOOLEAN:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jboolean *) result != 0, 0);
      break;

    case T_CHAR:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jchar *) result, 0);
      break;

    case T_BYTE:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jbyte *) result, 0);
      break;

    case T_SHORT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jshort *) result, 0);
      break;

    case T_INT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerInt);
#endif
      SET_LOCALS_INT(*(jint *) result, 0);
      break;

    case T_LONG:
      SET_LOCALS_LONG(*(jlong *) result, 0);
      break;

    case T_FLOAT:
      SET_LOCALS_FLOAT(*(jfloat *) result, 0);
      break;

    case T_DOUBLE:
      SET_LOCALS_DOUBLE(*(jdouble *) result, 0);
      break;

    case T_OBJECT:
    case T_ARRAY:
      SET_LOCALS_OBJECT(istate->oop_temp(), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // No deoptimized frames on the stack
  return 0;
}
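// The transition back from native in the function above is a
// publish-then-poll protocol: publish _thread_in_native_trans, fence so the
// VM thread initiating a safepoint cannot miss the state, poll for pending
// special conditions, and only then enter _thread_in_Java.  A schematic of
// that ordering in standard C++ follows; the enum, flags, and
// block_at_safepoint() are hypothetical stand-ins for the HotSpot machinery,
// not its API.
#include <atomic>

enum ThreadState { kInJava, kInNative, kInNativeTrans };

std::atomic<ThreadState> state{kInNative};
std::atomic<bool> safepoint_pending{false};

void return_from_native() {
  // Publish the transitional state with a full fence (cf. the
  // UseMembar/serialize_memory choice above) so the VM thread sees it.
  state.store(kInNativeTrans, std::memory_order_seq_cst);

  // Poll: if a safepoint or other special condition is pending, handle it
  // here rather than running Java code through it.
  if (safepoint_pending.load(std::memory_order_seq_cst)) {
    // block_at_safepoint();  // hypothetical; stands in for the VM call
  }

  // Safe to run Java code again.
  state.store(kInJava, std::memory_order_release);
}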