bool thread_t::start(async_method_t method_)
{
    LOGTRACE((THREAD_MODULE, "thread_t::start begin"));
    if (m_alive)
    {
        LOGWARN((THREAD_MODULE, "thread_t::start thread-[%] has already started, return.", get_thread_name().c_str()));
        return true;
    }

    ::pthread_attr_t attr;
    ::pthread_attr_init(&attr);
    if (!m_joinable)
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

    m_thread_func = method_;

    /* Pass the configured attributes; otherwise the detach state set above is never applied. */
    if (0 != ::pthread_create(&m_thread, &attr, thread_t::thread_func, this))
    {
        ::pthread_attr_destroy(&attr);
        LOGWARN((THREAD_MODULE, "thread_t::start thread-[%] create failed, return.", get_thread_name().c_str()));
        return false;
    }
    ::pthread_attr_destroy(&attr);

    LOGTRACE((THREAD_MODULE, "thread_t::start end"));
    return true;
}
/* Callback for JVMTI_EVENT_VM_INIT */
static void JNICALL
cbVMInit(jvmtiEnv *jvmti, JNIEnv *env, jthread thread)
{
    enter_critical_section(jvmti);
    {
        char tname[MAX_THREAD_NAME_LENGTH];
        static jvmtiEvent events[] = { JVMTI_EVENT_THREAD_START, JVMTI_EVENT_THREAD_END };
        int i;

        /* The VM has started. */
        get_thread_name(jvmti, thread, tname, sizeof(tname));
        stdout_message("VMInit %s\n", tname);

        /* The VM is now initialized, at this time we make our requests
         * for additional events.
         */
        for (i = 0; i < (int)(sizeof(events) / sizeof(jvmtiEvent)); i++) {
            jvmtiError error;

            /* Setup event notification modes */
            error = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
                                                       events[i], (jthread)NULL);
            check_jvmti_error(jvmti, error, "Cannot set event notification");
        }
    }
    exit_critical_section(jvmti);
}
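The JVMTI callbacks in this listing (and createPrincipal further down) rely on a four-argument get_thread_name helper that is defined elsewhere in the agent. A minimal sketch of such a helper, assuming the agent's check_jvmti_error and deallocate utilities visible in the other snippets and the usual <jvmti.h>/<string.h> includes, might look like this:

/* Hypothetical sketch: copy a thread's name into a caller-supplied buffer.
 * Assumes check_jvmti_error() and deallocate() exist as in the snippets above. */
static void get_thread_name(jvmtiEnv *jvmti, jthread thread, char *tname, int maxlen)
{
    jvmtiThreadInfo info;
    jvmtiError error;

    /* Keep the stack variables clean and guarantee NUL termination. */
    (void)memset(&info, 0, sizeof(info));
    (void)memset(tname, 0, maxlen);

    error = (*jvmti)->GetThreadInfo(jvmti, thread, &info);
    check_jvmti_error(jvmti, error, "Cannot get thread info");

    /* The name may be NULL for an unnamed thread. */
    if (info.name != NULL) {
        (void)strncpy(tname, info.name, maxlen - 1);
        /* Strings returned by JVMTI must be deallocated by the agent. */
        deallocate(jvmti, (void *)info.name);
    }
}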
/* Print a debug message tagged with the current thread's name. */
void print_debug(char *mensaje)
{
    if (DEBUG_ACTIVE) {
        fprintf(stderr, "%s.Thread : %s\n", mensaje, get_thread_name());
    }
}
/* Callback for JVMTI_EVENT_THREAD_END */
static void JNICALL
cbThreadEnd(jvmtiEnv *jvmti, JNIEnv *env, jthread thread)
{
    enter_critical_section(jvmti);
    {
        /* It's possible we get here right after the VmDeath event, so be careful. */
        if (!gdata->vm_is_dead) {
            char tname[MAX_THREAD_NAME_LENGTH];

            get_thread_name(jvmti, thread, tname, sizeof(tname));
            stdout_message("ThreadEnd %s\n", tname);
        }
    }
    exit_critical_section(jvmti);
}
static void graph_tracer_enter(const struct caller *this_fn, int depth)
{
    struct trace_graph_item trace = {
        .type = TRACE_GRAPH_ENTRY,
        .depth = depth,
    };

    pstrcpy(trace.fname, sizeof(trace.fname), this_fn->name);
    get_thread_name(trace.tname);

    entry_time[depth] = clock_get_time();

    trace_buffer_push(sched_getcpu(), &trace);
}
static void graph_tracer_exit(const struct caller *this_fn, int depth)
{
    struct trace_graph_item trace = {
        .depth = depth,
        .type = TRACE_GRAPH_RETURN,
        .entry_time = entry_time[depth],
        .return_time = clock_get_time(),
    };

    pstrcpy(trace.fname, sizeof(trace.fname), this_fn->name);
    get_thread_name(trace.tname);

    trace_buffer_push(sched_getcpu(), &trace);
}
notrace unsigned long trace_return_call(void)
{
    struct trace_graph_item trace;
    unsigned long ret;

    memset(&trace, 0, sizeof(trace));
    get_thread_name(trace.tname);
    trace.return_time = clock_get_time();

    pop_return_trace(&trace, &ret);
    trace.type = TRACE_GRAPH_RETURN;
    trace_graph_return(&trace);

    return ret;
}
/* Hook the return address and push it onto the trace_ret_stack.
 *
 * ip:       the address of the call instruction in the code.
 * ret_addr: the address of the return address in the stack frame.
 */
static notrace void graph_tracer(unsigned long ip, unsigned long *ret_addr)
{
    unsigned long old_addr = *ret_addr;
    uint64_t entry_time;
    struct trace_graph_item trace;
    struct caller *cr;

    memset(&trace, 0, sizeof(trace));

    cr = trace_lookup_ip(ip, false);
    assert(cr->namelen + 1 < TRACE_FNAME_LEN);
    memcpy(trace.fname, cr->name, cr->namelen);
    trace.fname[cr->namelen] = '\0';
    get_thread_name(trace.tname);

    /* Redirect the return address so trace_return_caller runs when the function exits. */
    *ret_addr = (unsigned long)trace_return_caller;

    entry_time = clock_get_time();
    push_return_trace(old_addr, entry_time, ip, &trace.depth);

    trace.type = TRACE_GRAPH_ENTRY;
    trace_graph_entry(&trace);
}
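The tracer functions above call a different, single-argument get_thread_name(trace.tname) that fills a small buffer with the current thread's name; its definition is not part of this listing. On Linux with glibc, a minimal sketch could wrap pthread_getname_np (the TRACE_TNAME_LEN constant and the empty-name fallback are assumptions, not taken from the source):

/* Hypothetical sketch of the tracer's helper: copy the current thread's name
 * into buf. Assumes _GNU_SOURCE and <pthread.h>; TRACE_TNAME_LEN is assumed
 * to be at least 16 bytes, the kernel's comm-name limit. */
#define TRACE_TNAME_LEN 16

static void get_thread_name(char *buf)
{
    if (pthread_getname_np(pthread_self(), buf, TRACE_TNAME_LEN) != 0)
        buf[0] = '\0';  /* fall back to an empty name on error */
}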
/* Create principals: one catch-all principal plus one per thread named "The guy". */
static jint createPrincipal(jvmtiEnv *jvmti, JNIEnv *jniEnv,
                            ResourcePrincipal **principals,
                            ClassInfo *infos, int count_classes)
{
    jint count_principals, thread_count;
    int j;
    int i;
    int k;
    jthread *threads;
    jvmtiError err;

    err = (*jvmti)->GetAllThreads(jvmti, &thread_count, &threads);
    check_jvmti_error(jvmti, err, "get all threads");

    /* Count the principals: one default plus one per matching thread. */
    count_principals = 1;
    for (i = 0; i < thread_count; ++i) {
        char tname[255];

        get_thread_name(jvmti, threads[i], tname, sizeof(tname));
        if (!strcmp(tname, "The guy"))
            count_principals++;
    }

    (*principals) = (ResourcePrincipal*)calloc(count_principals, sizeof(ResourcePrincipal));
    (*principals)[0].details = (ClassDetails*)calloc(count_classes, sizeof(ClassDetails));
    if ((*principals)[0].details == NULL)
        fatal_error("ERROR: Ran out of malloc space\n");
    for (i = 0; i < count_classes; i++)
        (*principals)[0].details[i].info = &infos[i];
    (*principals)[0].tag = 1;
    (*principals)[0].strategy_to_explore = &followReferences_to_discard;

    j = 1;
    for (i = 0; i < thread_count; ++i) {
        jvmtiThreadInfo t_info;
        char tname[255];

        get_thread_name(jvmti, threads[i], tname, sizeof(tname));
        if (strcmp(tname, "The guy"))
            continue;

        /* Setup an area to hold details about these classes */
        (*principals)[j].details = (ClassDetails*)calloc(count_classes, sizeof(ClassDetails));
        if ((*principals)[j].details == NULL)
            fatal_error("ERROR: Ran out of malloc space\n");
        for (k = 0; k < count_classes; k++)
            (*principals)[j].details[k].info = &infos[k];
        (*principals)[j].strategy_to_explore = &explore_FollowReferences_Thread;
        (*principals)[j].tag = (jlong)(j + 1);

        /* Tag this jthread */
        err = (*jvmti)->SetTag(jvmti, threads[i], tagForObject(&(*principals)[j]));
        check_jvmti_error(jvmti, err, "set thread tag");

        /* Tag the thread's context class loader as well */
        memset(&t_info, 0, sizeof(t_info));
        err = (*jvmti)->GetThreadInfo(jvmti, threads[i], &t_info);
        check_jvmti_error(jvmti, err, "get thread info");
        err = (*jvmti)->SetTag(jvmti, t_info.context_class_loader,
                               tagForObject(&(*principals)[j]));
        check_jvmti_error(jvmti, err, "set classloader tag");

        j++;
    }

    /* Free the thread list allocated by GetAllThreads */
    deallocate(jvmti, threads);

    return count_principals;
}