void Thread::timer_tick() {
  SETUP_ERROR_CHECKER_ARG;
  Thread *thread = Thread::current();

#ifndef PRODUCT
  if (TraceCompiledMethodCache) {
    TTY_TRACE_CR(( "\n*** Timer tick beg ***" ));
  }
#endif

  _global_tick_count++;

#if ENABLE_PROFILER
  // We must check is_ready() to distinguish real time ticks
  // from ticks generated by shared_invoke_compiler.
  if (Profiler::is_ready()) {
    JavaFrame f(thread);
    Method method(f.method());
    Profiler::profile_method(&method, f.is_compiled_frame());
  }
#endif

#if ENABLE_JVMPI_PROFILE && ENABLE_JVMPI_PROFILE_VERIFY
  // Note: we dump here to ensure the dump happens on the same thread
  // as the compiler.
  if (JVMPIProfile::need_dump && UseJvmpiProfiler) {
    JVMPIProfile::need_dump = false;
    JVMPIProfile::VMjvmpiPostDumpJVMPIEventsInfo();
  }
#endif

#if ENABLE_COMPILER
  ObjectHeap::accumulate_current_task_memory_usage();
  Compiler::on_timer_tick(_real_time_has_ticked JVM_MUST_SUCCEED);
  _real_time_has_ticked = false;
#endif

  if (Universe::is_stopping()) {
    if (!CURRENT_HAS_PENDING_EXCEPTION) {
      Throw::uncatchable(JVM_SINGLE_ARG_THROW);
    }
  }

  // Do the preemption. yield() transfers control to the next ready
  // thread in the scheduler.
  ObjectHeap::accumulate_current_task_memory_usage();
  Scheduler::yield();
  Thread::clear_timer_tick();

#if ENABLE_CPU_VARIANT
  if (EnableCPUVariant) {
    enable_cpu_variant();
  }
#endif

#ifndef PRODUCT
  if (TraceCompiledMethodCache) {
    TTY_TRACE_CR(( "\n*** Timer tick end ***" ));
  }
#endif

  (void)thread;
}
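// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not from these sources): where ticks
// come from. A platform timer asynchronously sets a flag; the interpreter
// polls it at safe points and enters a slow path like Thread::timer_tick()
// above. All names below are invented for illustration.

#include <csignal>

static volatile sig_atomic_t g_timer_ticked = 0;

// A handler of this shape could be installed with setitimer(ITIMER_REAL, ...)
// on POSIX; in the handler we only set a flag (async-signal-safe).
extern "C" void on_real_time_tick(int /*signum*/) {
  g_timer_ticked = 1;
}

// Polled by the interpreter loop at bytecode boundaries (a safe point).
static inline bool timer_tick_pending() {
  return g_timer_ticked != 0;
}

static void consume_timer_tick() {
  g_timer_ticked = 0;  // analogous to Thread::clear_timer_tick()
  // ... preemption / profiling work would go here ...
}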
void Thread::set_current(Thread* value) {
  GUARANTEE(_last_handle == NULL, "No handles when switching threads");
  GUARANTEE(last_kni_handle_info == NULL,
            "No KNI handles when switching threads");
  GUARANTEE(!_jvm_in_quick_native_method,
            "cannot switch thread in quick native methods");
  if (VerifyOnly) {
    GUARANTEE(current()->is_null() || value->is_null() ||
              current()->equals(value),
              "cannot switch threads during Romization or VerifyOnly");
  }

  if (current()->not_null()) {
    decache_current_pending_exception();
  }
  _current_thread = value->obj();
  cache_current_pending_exception();

  if (TraceThreadsExcessive) {
    TTY_TRACE_CR(("set_current_thread: 0x%x (id=%d)", value->obj(),
                  (value->is_null() ? -1 : value->id())));
  }

#if ENABLE_ISOLATES
  {
    int tid = value->task_id();
    Task::Raw task = Universe::task_from_id(tid);
    Universe::set_current_task(tid);
    // Has to be here, not in Universe::set_current_task().
    ObjectHeap::on_task_switch(tid);
    if (task().is_terminating()) {
#ifdef AZZERT
      GUARANTEE(!task.is_null() && task().status() >= Task::TASK_STOPPING,
                "task of terminating thread must be stopping");
#endif
      // The task that this thread belongs to is dead.
      if (TraceThreadsExcessive) {
        TTY_TRACE_CR(("set_current: task dead: thread 0x%x",
                      current()->obj()));
      }
      current()->set_terminating();
      set_current_pending_exception(Task::get_termination_object());
    }
  }
#endif

  // Update the current stack limit.
  update_current_stack_limit(value);

#if ENABLE_JAVA_DEBUGGER
  if (_debugger_active) {
    JavaDebugger::set_stepping(value->is_stepping());
  }
#endif
}
ReturnOop JarFileParser::get_parser_from_cache(const JvmPathChar* jar_file_name1,
                                               TypeArray *jar_file_name2) {
  // Count the trailing zero as well.
  const int name_bytes = jar_file_name1
      ? (fn_strlen(jar_file_name1) + 1) * sizeof(JvmPathChar)
      : jar_file_name2->length();
  const int max = MAX_CACHED_PARSERS < MaxCachedJarParsers
      ? MAX_CACHED_PARSERS : MaxCachedJarParsers;

#if ENABLE_ISOLATES
  const char current_task_id = TaskContext::current_task_id();
#endif

  for (int i = 0; i < max; i++) {
    const int ref = _cached_parsers[i];
    if (ref >= 0) {
#if ENABLE_ISOLATES
      const char task_id = ObjectHeap::get_global_reference_owner(ref);
      if (task_id != current_task_id) {
        // Don't share a cached JarFileParser across task IDs. See
        // JarFileParser.hpp for why.
        continue;
      }
#endif
      JarFileParser::Raw parser = ObjectHeap::get_global_ref_object(ref);
      if (parser.not_null()) {
        TypeArray::Raw stored_name = parser().pathname();
        JvmPathChar *data = (JvmPathChar*)stored_name().byte_base_address();
        bool match;
        if (jar_file_name1) {
          match = stored_name().length() == name_bytes &&
                  jvm_memcmp(data, jar_file_name1, name_bytes) == 0;
        } else {
          match = jar_file_name2->equals(&stored_name) ||
                  (stored_name().length() == name_bytes &&
                   jvm_memcmp(data, jar_file_name2->byte_base_address(),
                              name_bytes) == 0);
        }
        if (match) {
          if (TraceJarCache) {
            TTY_TRACE(("JAR: parser cache hit: "));
            for (int n = 0; n < name_bytes; n++) {
              TTY_TRACE(("%c", (char)data[n]));
            }
            TTY_TRACE_CR((""));
          }
          // Found a match.
          parser().set_timestamp(++_timestamp);
          return parser;
        }
      }
    }
  }
  return NULL;
}
bool ThreadObj::is_alive() {
  Thread::Raw t = thread();
  if (TraceThreadEvents) {
    TTY_TRACE_CR(("ThreadObj 0x%x has Thread 0x%x, status %s", obj(), t.obj(),
                  is_terminated() ? "TERMINATED" : "ALIVE"));
  }
  return !t.is_null() && !is_stillborn() && !is_terminated();
}
void Thread::finish() {
  SETUP_ERROR_CHECKER_ARG;
  Thread *thread = Thread::current();
  ThreadObj receiver = thread->thread_obj();
  GUARANTEE(receiver.is_alive(), "Sanity check");

  if (TraceThreadsExcessive) {
    TTY_TRACE_CR(("thread dying 0x%x", thread->obj()));
  }

  // Set thread state to terminated.
  receiver.set_terminated();

  // Signal waiters waiting on this thread.
  Scheduler::notify(&receiver, true, false JVM_NO_CHECK_AT_BOTTOM);
}
inline void JarFileParser::save_parser_in_cache(JVM_SINGLE_ARG_TRAPS) {
  const int max = MAX_CACHED_PARSERS < MaxCachedJarParsers
      ? MAX_CACHED_PARSERS : MaxCachedJarParsers;
  int found = 0;
  int oldest = max_jint;

  // (1) Find a slot in _cached_parsers[].
  for (int i = 0; i < max; i++) {
    int ref = _cached_parsers[i];
    if (ref < 0) {
      // This slot is not used.
      found = i;
      break;
    } else {
      JarFileParser::Raw parser = ObjectHeap::get_global_ref_object(ref);
      if (parser.is_null()) {
        // The cached parser has been GC'ed.
        found = i;
        break;
      }
      if (parser().timestamp() < oldest) {
        // Not GC'ed yet; remember it if it's the oldest of the surviving
        // parsers, so it can be evicted if no free slot is found.
        oldest = parser().timestamp();
        found = i;
      }
    }
  }

  dispose(found);
  const int newref = ObjectHeap::register_global_ref_object(
      this, ObjectHeap::WEAK JVM_MUST_SUCCEED);
  if (newref >= 0) {
    _cached_parsers[found] = newref;
  }

#if ENABLE_TTY_TRACE
  if (TraceJarCache) {
    TTY_TRACE(("JAR: parser cached: %d ", found));
    TypeArray::Raw stored_name = pathname();
    JvmPathChar *data = (JvmPathChar*)stored_name().byte_base_address();
    for (int n = 0; n < stored_name().length(); n++) {
      TTY_TRACE(("%c", (char)data[n]));
    }
    TTY_TRACE_CR((""));
  }
#endif
}
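// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not part of these sources): the two
// functions above combine a monotonic timestamp with weak global references
// to get LRU eviction almost for free. This standalone model shows the same
// lookup/touch/evict logic on plain strings, without the ObjectHeap
// global-reference machinery; all names are invented.

#include <cstring>

struct CacheEntry {
  const char* name;      // key; NULL marks a free (or collected) slot
  int         timestamp; // larger value == more recently used
};

static const int  kMaxSlots = 4;
static CacheEntry g_cache[kMaxSlots];
static int        g_timestamp = 0;

// Mirrors get_parser_from_cache(): on a hit, bump the timestamp so the
// entry becomes the most recently used.
static CacheEntry* cache_lookup(const char* name) {
  for (int i = 0; i < kMaxSlots; i++) {
    if (g_cache[i].name != NULL && strcmp(g_cache[i].name, name) == 0) {
      g_cache[i].timestamp = ++g_timestamp;  // "touch" on hit
      return &g_cache[i];
    }
  }
  return NULL;
}

// Mirrors save_parser_in_cache(): prefer a free slot, otherwise evict the
// entry with the smallest (oldest) timestamp.
static void cache_insert(const char* name) {
  int found = 0, oldest = 0x7fffffff;
  for (int i = 0; i < kMaxSlots; i++) {
    if (g_cache[i].name == NULL) { found = i; break; }
    if (g_cache[i].timestamp < oldest) {
      oldest = g_cache[i].timestamp;
      found = i;
    }
  }
  g_cache[found].name = name;
  g_cache[found].timestamp = ++g_timestamp;
}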
void TypeSymbol::ParseStream::gc_prologue() {
  for (ParseStream* p = _list; p; p = p->_next) {
    if (p->_heap_input != NULL) {
      if (VerboseGC) {
        TTY_TRACE_CR(("GC: Updating ParseStream relative pointers"));
      }
      // Convert the absolute pointer into an offset relative to the
      // (movable) heap object's base, to be restored after GC.
      int base = (int)(p->_heap_input->base_address());
      int cur  = (int)(p->_current_ptr);
      p->_current_ptr = (char*)(cur - base);
      // Mangle these so we can catch errors sooner.
      AZZERT_ONLY(p->_start_ptr = (char*)0xffffffff);
      AZZERT_ONLY(p->_end_ptr   = (char*)0x0);
    }
  }
}
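// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical): gc_prologue() above rewrites
// _current_ptr from an absolute address into an offset relative to the heap
// object's base, because a moving GC may relocate the object. A matching
// epilogue (assumed here; not shown in the excerpt) would re-add the
// object's *new* base address. Names below are invented.

#include <cstdint>

struct MovableBuffer {
  char* base;  // may change when the collector moves the buffer
};

struct StreamSketch {
  MovableBuffer* buf;
  char*          current;  // interior pointer into buf->base[...]
};

// Before objects move: turn the interior pointer into a relative offset.
static void sketch_gc_prologue(StreamSketch* s) {
  s->current = (char*)(uintptr_t)(s->current - s->buf->base);
}

// After objects have moved: rebase the stored offset onto the new address.
static void sketch_gc_epilogue(StreamSketch* s) {
  s->current = s->buf->base + (uintptr_t)s->current;
}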
bool CompiledMethod::expand_compiled_code_space(int delta, int relocation_size) {
  if (ObjectHeap::expand_current_compiled_method(delta)) {
    if (Verbose) {
      TTY_TRACE_CR(("Expanding compiled method from %d to %d bytes", size(),
                    size() + delta));
    }
    void* src = field_base(end_offset() - relocation_size);
    void* dst = DERIVED(void*, src, delta);
    GUARANTEE(src < dst, "should be copying up");
    jvm_memmove(dst, src, relocation_size); // possibly overlapping regions
    // It's probably OK only to clear dst[-1], but let's just make sure.
    jvm_memset(src, 0, delta);
    ((CompiledMethodDesc*) obj())->set_size(size() + delta);
    if (VerifyGC > 2) {
      ObjectHeap::verify();
    }
    return true;
  } else {
    return false;
  }
}
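// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical): the layout trick used above. A
// compiled method keeps its relocation data as a trailer at the very end of
// the object, so growing the code region by `delta` bytes means sliding that
// trailer up with a single (possibly overlapping) memmove and zero-filling
// the gap it leaves:
//
//   before:  [ code ............ ][ reloc ]
//   after:   [ code ............ <delta zeroed bytes> ][ reloc ]
//
// The caller is assumed to have already grown the underlying allocation to
// old_size + delta, as ObjectHeap::expand_current_compiled_method() does.

#include <cstring>

static void grow_with_trailer(char* obj, int old_size, int delta,
                              int trailer_size) {
  char* src = obj + old_size - trailer_size;  // old trailer position
  char* dst = src + delta;                    // new trailer position
  memmove(dst, src, trailer_size);            // regions may overlap
  memset(src, 0, delta);                      // clear the vacated gap
}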
void thread_task_cleanup() {
  SETUP_ERROR_CHECKER_ARG;
  // We come in here on the primordial stack.
  UsingFastOops fast_oops;
  Thread *thread = Thread::current();

  // Clean up any task mirrors awaiting initialization. They may be on the
  // clinit list because an exception was thrown during static
  // initialization.
  int tid = thread->task_id();
  GUARANTEE(tid >= 0 && tid < MAX_TASKS, "Not a valid task id");
  Task::Fast this_task = Task::get_task(tid);
  GUARANTEE(!this_task.is_null(), "Not a valid task");
  TaskMirror::Raw tm = this_task().clinit_list();
  while (!tm.is_null()) {
    TaskMirror::Raw next = tm().next_in_clinit_list();
    if (tm().init_thread() == thread->obj()) {
      JavaClass::Raw jc = tm().containing_class();
      TaskMirror::clinit_list_remove(&jc);
      Universe::mirror_list()->obj_at_put(jc().class_id(),
                                          Universe::task_class_init_marker());
    }
    tm = next.obj();
  }

  bool all_threads_terminated = this_task().remove_thread();
  if (all_threads_terminated) {
    if (TraceThreadsExcessive) {
      TTY_TRACE_CR(("Cleanup task 0x%x", tid));
    }
    // If this is the last thread of the last task to exit, its exit code
    // will be returned.
    JVM::set_exit_code(this_task().exit_code());
    Thread::clear_current_pending_exception();
    // Invokes task termination to produce events and close links.
    this_task().terminate_current_isolate(thread JVM_NO_CHECK_AT_BOTTOM);
  }
}
void Thread::lightweight_thread_exit() {
  SETUP_ERROR_CHECKER_ARG;
  Thread *thread = Thread::current();

  if (TraceThreadsExcessive) {
    TTY_TRACE_CR(("thread exit 0x%x", thread->obj()));
  }
  if (!TestCompiler) {
    Thread::clear_current_pending_exception();
  }

#if ENABLE_ISOLATES
  int tid = thread->task_id();
#endif

  // Remove the thread from the scheduler.
  Scheduler::terminate(thread JVM_NO_CHECK);

  {
    // Warning: ExecutionStack::~ExecutionStack() must be called before we
    // switch threads below.
    ExecutionStack::Raw stack = thread->execution_stack();
    stack().clear_thread();
    thread->clear_execution_stack();
  }
  Scheduler::yield();

  // At this point we either have another thread to run, OR this is the
  // very last thread in the system and the VM will exit.
#if ENABLE_ISOLATES
  if (!Scheduler::get_next_runnable_thread()->is_null()) {
    // Another thread will run; clean up the task that may have just
    // terminated.
    int thread_count = 0;
    {
      Task::Raw task = Task::get_task(tid);
      thread_count = task().thread_count();
    }
    if (thread_count == 0) {
#if ENABLE_PROFILER
      if (UseProfiler) {
        Profiler::dump_and_clear_profile_data(tid);
      }
#endif
#if ENABLE_WTK_PROFILER
      if (UseExactProfiler) {
        WTKProfiler::dump_and_clear_profile_data(tid);
      }
#endif
      // After the profiler data is dumped, _current_thread is no longer
      // used and should be cleared in order to dispose the reference to
      // the task being terminated.
      _current_thread = NULL;
      Task::cleanup_terminated_task(tid JVM_NO_CHECK);
    }
  } else {
    // This GUARANTEE is too strict: in SlaveMode the GUI may call back to
    // the VM too soon. Just ignore and continue.
    //
    // GUARANTEE(Scheduler::active_count() == 0,
    //           "Active threads but nothing to run?");
  }
#endif

  while (_debugger_active && !Scheduler::is_slave_mode() &&
         Scheduler::get_next_runnable_thread()->is_null() &&
         Scheduler::active_count() > 0) {
    // All threads are suspended by the debugger; wait until one is resumed.
    Scheduler::yield();
  }

  if (!Scheduler::get_next_runnable_thread()->is_null()) {
    // We will return to a different thread than the one that sent us here.
    Thread* next = Scheduler::get_next_runnable_thread();
    Thread::set_current(next);
    // OK to allocate now; thread is not visible to GC.
    Scheduler::allocate_blocked_threads_buffer(JVM_SINGLE_ARG_NO_CHECK);
    if (TraceThreadsExcessive) {
      TTY_TRACE_CR(("thread exit: next 0x%x", Thread::current()->obj()));
    }
  }
}
OsFile_MappedImageHandle OsFile_MapImage(const PathChar* name,
                                         address preferrable_destination,
                                         int length, int rw_offset,
                                         int rw_length) {
  GUARANTEE(length == rw_offset + rw_length, "assumption");
  GUARANTEE((int(preferrable_destination) % ASSUMED_PAGE_SIZE) == 0,
            "page aligned");

#if USE_UNICODE_FOR_FILENAMES
  char ascii_name[256];
  int len = fn_strlen(name);
  if (len > 255) {
    len = 255;
  }
  for (int i = 0; i < len; i++) {
    ascii_name[i] = (char)name[i];
  }
  ascii_name[len] = 0;
#else
  const char *ascii_name = name;
#endif

  Linux_MappedImage *img =
      (Linux_MappedImage*)jvm_malloc(sizeof(Linux_MappedImage));
  if (img == NULL) {
    return NULL;
  }

  if (Verbose) {
    TTY_TRACE_CR(("Map image desired = 0x%x", int(preferrable_destination)));
  }

  int open_flags = O_RDONLY;
  int fd = jvm_open(ascii_name, open_flags);

  // Align the RO region's length down so that the RW region starts at a
  // page boundary.
  int ro_length = length - rw_length;
  ro_length = (int)align_size_down(ro_length, ASSUMED_PAGE_SIZE);
  rw_offset = ro_length;
  rw_length = length - ro_length;

  if (ForceImageRelocation) {
    // For testing purposes, force the relocation code paths.
    preferrable_destination += ASSUMED_PAGE_SIZE * 17;
  }

  address ro_addr = (address)-1;
  address rw_addr = (address)-1;
  address ro_preferred = preferrable_destination;
  address rw_preferred = preferrable_destination + rw_offset;

  if (fd == -1) {
    goto error;
  }

  if (ro_length > 0 && LoadXIPImage && !ForceImageRelocation) {
    ro_addr = (address)jvm_mmap(ro_preferred, ro_length, PROT_READ,
                                MAP_PRIVATE, fd, 0);
    rw_addr = (address)jvm_mmap(rw_preferred, rw_length,
                                PROT_READ | PROT_WRITE, MAP_PRIVATE, fd,
                                rw_offset);
  } else {
    // The whole image needs to be mapped R/W; fall through to the
    // relocation path below.
  }

  if (ro_addr == ro_preferred && rw_addr == rw_preferred) {
    if (Verbose) {
      TTY_TRACE_CR(("Map image actual = 0x%x [RO] size=%d", int(ro_addr),
                    ro_length));
      TTY_TRACE_CR(("Map image actual = 0x%x [RW] size=%d", int(rw_addr),
                    rw_length));
    }
    img->mapped_address = ro_addr;
    img->ro_length = ro_length;
    img->rw_mapped_address = rw_addr;
    img->rw_length = rw_length;
  } else {
    // Can't get to our preferred location. Relocation of the image content
    // is needed, so we must remap the whole thing using a R/W mapping.
    if (ro_addr != (address)-1) {
      jvm_munmap(ro_addr, ro_length);
    }
    if (rw_addr != (address)-1) {
      jvm_munmap(rw_addr, rw_length);
    }
    ro_addr = (address)jvm_mmap(ro_preferred, length, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE, fd, 0);
    if (ro_addr == (address)-1) {
      goto error;
    }
    if (Verbose) {
      TTY_TRACE_CR(("Map image actual = 0x%x [RW] size=%d", int(ro_addr),
                    length));
    }
    img->mapped_address = ro_addr;
    img->ro_length = length;
    img->rw_mapped_address = (address)-1;
    img->rw_length = 0;
  }

  // We don't need fd anymore; the mapping is preserved after close.
  jvm_close(fd);
  return img;

error:
  if (img) {
    jvm_free(img);
  }
  if (fd >= 0) {
    jvm_close(fd);
  }
  return NULL;
}
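// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical): the page-alignment arithmetic that
// OsFile_MapImage() relies on. mmap() file offsets and requested addresses
// must be page aligned, which is why ro_length is aligned down before the
// image is split into an RO and an RW mapping. For a power-of-two page
// size, rounding down is a single mask.

#include <cstddef>

static inline size_t align_down(size_t value, size_t alignment) {
  // alignment must be a power of two, e.g. 4096
  return value & ~(alignment - 1);
}

// Example: with 4 KB pages, a 10000-byte RO region is aligned down to
// align_down(10000, 4096) == 8192, so the RW mapping starts exactly at
// file offset 8192 and the last 1808 RO bytes are folded into the RW part.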
int CheckException(int n_except, LPEXCEPTION_POINTERS exptr) {
#if ENABLE_PAGE_PROTECTION
  if (n_except == STATUS_ACCESS_VIOLATION) {
    const juint offset =
        (address)exptr->ExceptionRecord->ExceptionInformation[1] -
        _protected_page;
    if (TracePageAccess) {
      TTY_TRACE_CR(("ACCESS_VIOLATION signaled: offset = %d", offset));
    }
    switch (offset) {
    case COMPILER_TIMER_TICK_SLOT:
      exptr->ContextRecord->Eip = (DWORD)compiler_timer_tick;
      return EXCEPTION_CONTINUE_EXECUTION;
    case INTERPRETER_TIMER_TICK_SLOT:
      exptr->ContextRecord->Eip = (DWORD)interpreter_timer_tick;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }
#endif

#ifndef PRODUCT
  // If we hit an error inside a pp() call, last_raw_handle, etc., need to
  // be restored.
  DebugHandleMarker::restore();

  const char* name = "unknown";
  switch (n_except) {
    EXCEPTION_CASE(STATUS_ACCESS_VIOLATION)       break;
    EXCEPTION_CASE(STATUS_BREAKPOINT)             break;
    EXCEPTION_CASE(STATUS_DATATYPE_MISALIGNMENT)  break;
    EXCEPTION_CASE(STATUS_ILLEGAL_INSTRUCTION)    break;
    EXCEPTION_CASE(STATUS_PRIVILEGED_INSTRUCTION) break;
    EXCEPTION_CASE(STATUS_INTEGER_DIVIDE_BY_ZERO) break;
    EXCEPTION_CASE(STATUS_INTEGER_OVERFLOW)       break;
    EXCEPTION_CASE(STATUS_SINGLE_STEP)            break;
  }
  tty->print_cr("** Unhandled exception 0x%x (%s) at 0x%x **",
                n_except, name, exptr->ExceptionRecord->ExceptionAddress);
  if (n_except == STATUS_ACCESS_VIOLATION) {
    tty->print_cr("access violation address = 0x%x",
                  exptr->ExceptionRecord->ExceptionInformation[1]);
  }
  PRINT_REGISTER(SegCs);
  PRINT_REGISTER(SegEs);
  PRINT_REGISTER(SegDs);
  PRINT_REGISTER(SegGs);
  PRINT_REGISTER(SegFs);
  PRINT_REGISTER(SegSs);
  PRINT_REGISTER(Eax);
  PRINT_REGISTER(Ebx);
  PRINT_REGISTER(Ecx);
  PRINT_REGISTER(Edx);
  PRINT_REGISTER(Edi);
  PRINT_REGISTER(Esi);
  PRINT_REGISTER(Ebp);
  PRINT_REGISTER(Esp);
  PRINT_REGISTER(Eip);
  PRINT_REGISTER(EFlags);

  if (!printing_stack && PrintStackTraceOnCrash) {
    printing_stack = 1;
    __try {
      pss();
    } __except (EXCEPTION_EXECUTE_HANDLER) {
      ;
    }
  }
#endif

  // Not handled here; let the next handler try.
  return EXCEPTION_CONTINUE_SEARCH;
}
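// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, Win32): how the page-protection trick
// handled above can be armed. A page that running code touches on its
// normal path is made inaccessible; the next touch raises
// STATUS_ACCESS_VIOLATION, and the filter above redirects Eip to a tick
// routine instead of re-executing the faulting access. The variable and
// function names here are invented; only VirtualProtect() is real Win32 API.

#include <windows.h>

static BYTE* g_protected_page;  // stands in for _protected_page above

static void arm_timer_tick() {
  DWORD old;
  // Revoke all access: the next read/write of this page traps into the
  // exception filter.
  VirtualProtect(g_protected_page, 4096, PAGE_NOACCESS, &old);
}

static void disarm_timer_tick() {
  DWORD old;
  // Restore normal access once the tick has been delivered.
  VirtualProtect(g_protected_page, 4096, PAGE_READWRITE, &old);
}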