AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Invoker for method handles?
  if (m->is_method_handle_invoke()) return method_handle;

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  // methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_invoke(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  // otherwise we will run into problems with JDK 1.2; see also
  // AbstractInterpreterGenerator::generate_method_entry() for details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;
    case vmIntrinsics::_Reference_get:
                                return java_lang_ref_reference_get;
  }

  // Accessor method?
  if (m->is_accessor()) {
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now, zero locals for all non-empty methods
  return zerolocals;
}
bool Compiler::is_intrinsic_supported(const methodHandle& method) {
  vmIntrinsics::ID id = method->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (method->is_synchronized()) {
    // C1 does not support intrinsification of synchronized methods.
    return false;
  }

  switch (id) {
  case vmIntrinsics::_compareAndSwapLong:
    if (!VM_Version::supports_cx8()) return false;
    break;
  case vmIntrinsics::_getAndAddInt:
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
  case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also to prevent commoning reads from this field across safepoint
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_currentThread:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_getObject:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putObject:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  case vmIntrinsics::_getObjectVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putObjectVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  case vmIntrinsics::_getByte_raw:
  case vmIntrinsics::_getShort_raw:
  case vmIntrinsics::_getChar_raw:
  case vmIntrinsics::_getInt_raw:
  case vmIntrinsics::_getLong_raw:
  case vmIntrinsics::_getFloat_raw:
  case vmIntrinsics::_getDouble_raw:
  case vmIntrinsics::_putByte_raw:
  case vmIntrinsics::_putShort_raw:
  case vmIntrinsics::_putChar_raw:
  case vmIntrinsics::_putInt_raw:
  case vmIntrinsics::_putLong_raw:
  case vmIntrinsics::_putFloat_raw:
  case vmIntrinsics::_putDouble_raw:
  case vmIntrinsics::_putOrderedObject:
  case vmIntrinsics::_putOrderedInt:
  case vmIntrinsics::_putOrderedLong:
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_checkIndex:
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
  case vmIntrinsics::_compareAndSwapInt:
  case vmIntrinsics::_compareAndSwapObject:
  case vmIntrinsics::_getCharStringU:
  case vmIntrinsics::_putCharStringU:
#ifdef TRACE_HAVE_INTRINSICS
  case vmIntrinsics::_classID:
  case vmIntrinsics::_threadID:
  case vmIntrinsics::_counterTime:
#endif
    break;
  default:
    return false; // Intrinsics not on the previous list are not available.
  }
  return true;
}
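// --- Illustration (not HotSpot source): a minimal, self-contained sketch of the
// guard pattern that is_intrinsic_supported() implements above: per-intrinsic
// capability checks gate individual candidates, and anything outside the
// allow-list is rejected outright. All identifiers below (IntrinsicId,
// platform_supports_cx8, is_supported, ...) are invented for this sketch.
#include <cstdio>

enum IntrinsicId { id_none, id_dsin, id_compareAndSwapLong, id_other };

static bool platform_supports_cx8 = false;     // pretend the CPU lacks an 8-byte CAS

static bool is_supported(IntrinsicId id) {
  switch (id) {
  case id_compareAndSwapLong:
    if (!platform_supports_cx8) return false;  // capability gate, analogous to supports_cx8()
    break;
  case id_dsin:
    break;                                     // unconditionally on the allow-list
  default:
    return false;                              // not listed: never intrinsified
  }
  return true;
}

int main() {
  std::printf("dsin supported: %d\n", is_supported(id_dsin));
  std::printf("compareAndSwapLong supported: %d\n", is_supported(id_compareAndSwapLong));
  return 0;
}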
AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Method handle primitive?
  if (m->is_method_handle_intrinsic()) {
    vmIntrinsics::ID id = m->intrinsic_id();
    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
    return kind;
  }

#ifndef CC_INTERP
  if (UseCRC32Intrinsics && m->is_native()) {
    // Use optimized stub code for CRC32 native methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateCRC32           : return java_util_zip_CRC32_update;
      case vmIntrinsics::_updateBytesCRC32      : return java_util_zip_CRC32_updateBytes;
      case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer;
    }
  }
#endif

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  // methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  // otherwise we will run into problems with JDK 1.2; see also
  // InterpreterGenerator::generate_method_entry() for details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;
    case vmIntrinsics::_dpow  : return java_lang_math_pow  ;
    case vmIntrinsics::_dexp  : return java_lang_math_exp  ;
    case vmIntrinsics::_Reference_get:
                                return java_lang_ref_reference_get;
  }

  // Accessor method?
  if (m->is_accessor()) {
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now, zero locals for all non-empty methods
  return zerolocals;
}
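// --- Illustration (not HotSpot source): a minimal sketch of how a MethodKind
// returned by method_kind() is typically consumed, namely as an index into a
// table of per-kind interpreter entry points. The types and names below
// (EntryPoint, kind_table, the toy entry functions) are hypothetical and only
// demonstrate the dispatch idea under that assumption.
#include <cstdio>

enum MethodKind { zerolocals, zerolocals_synchronized, native, native_synchronized,
                  empty, accessor, number_of_method_entries };

typedef void (*EntryPoint)();    // stand-in for the address of a generated stub

static void generic_entry()  { std::puts("generic zerolocals entry"); }
static void native_entry()   { std::puts("native wrapper entry"); }
static void accessor_entry() { std::puts("fast accessor entry"); }

// Hypothetical table: every kind maps to an entry stub; kinds without a
// specialized stub share the generic one, much like most kinds fall back
// to zerolocals in method_kind() above.
static EntryPoint kind_table[number_of_method_entries] = {
  generic_entry, generic_entry,   // zerolocals, zerolocals_synchronized
  native_entry,  native_entry,    // native, native_synchronized
  generic_entry, accessor_entry   // empty, accessor
};

int main() {
  MethodKind k = accessor;        // imagine method_kind(m) classified m as an accessor
  kind_table[k]();                // dispatch through the per-kind entry point
  return 0;
}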