// Entry point of the cooperative (setjmp/longjmp-based) thread scheduler:
// records a common start state, sizes the per-thread state table to
// num_threads copies of it, and runs `f` as thread 0. "Threads" resumed
// later via longjmp re-enter through the else branch and run `f` under
// their own thread id.
//
// NOTE(review): setjmp-based control flow — locals live across longjmp and
// statement order here is load-bearing; do not reorder.
void launch(int num_threads, Function f) {
    // save the start state
    state start_state;
    if(!setjmp(start_state)) {
        // First pass (direct call): initialize the scheduler tables.
        std::clog << "launch(): initializing start state" << std::endl;
        // init each thread's state to the start state
        thread_state.clear();
        thread_state.resize(num_threads, start_state);
        set_current_thread_id(0);
    } else {
        // new thread
        // NOTE(review): this message looks truncated — it ends with
        // "... start state into thread " and streams nothing after it;
        // presumably it was meant to name only the resumed thread id.
        std::clog << "launch(): jumped to thread " << current_thread_id() << " start state into thread " << std::endl;
    }
    // execute the thread
    f();
    std::clog << "launch(): done with thread " << current_thread_id() << std::endl;
    // Rendezvous: every thread funnels through barrier() after f() returns.
    barrier();
}
// Reports whether the caller is the same thread that was observed on the
// previous call. On a thread switch the cached id is updated and false is
// returned; otherwise true.
bool is_running_same_thread() {
    const auto tid = current_thread_id();
    if (_current_thread_id == tid) {
        return true;
    }
    // Different thread than last time: remember it for the next check.
    _current_thread_id = tid;
    return false;
}
// Re-arm this message for reuse: store the trace flag, stamp it with the
// calling thread's id, and rewind the payload cursors to empty.
void Message::Reset( lemon_trace_flag flag ) {
    _offset = 0;
    _length = 0;
    _threadId = current_thread_id();
    _flag = flag;
}
void test_thread(void* p) { test_thread_param_t* param = (test_thread_param_t*)p; int i; // printf("thread %d starting. expected thread_id is %d.\n", current_thread_id(), param->thread_id); assert(current_thread_id() == param->thread_id); for(i=0; i<param->num_yields; ++i) { assert(*param->global_counter == (current_thread_id() + param->nthreads*i)); ++(*param->global_counter); // printf("thread %d yielding.\n", current_thread_id()); thread_yield(0,0); } // printf("thread %d terminating. global counter is %d. switches_wait is %d.\n", current_thread_id(), *param->global_counter, thread_stats(THREAD_NONGLOBAL_STATS|current_thread_id())); thread_term(); }
// Re-arm this message with an explicit UUID: copy the uuid, store the trace
// flag, stamp the calling thread's id, and rewind the payload cursors.
void Message::Reset( const LemonUuid * uuid , lemon_trace_flag flag ) {
    _uuid = *uuid;
    _offset = 0;
    _length = 0;
    _threadId = current_thread_id();
    _flag = flag;
}
// Formats one log record to the sink: a bracketed prefix with level name,
// thread id and UTC timestamp (nanosecond precision), followed by the
// user message and a newline. Records below the configured level are
// dropped. The stream is held locked (flockfile) for the whole record so
// concurrent writers cannot interleave, and warnings/errors are wrapped
// in console color codes when a colorizer is attached.
void Logger::vlog(LoggingLevel level, const char* format, va_list args) noexcept
{
    // Nothing to do without a sink, or when filtered out by level.
    if (!m_fp || level < this->get_level())
        return;

    struct tm now;
    int now_ns = 0;
    OSService::get_current_time_in_tm(&now, &now_ns);

    flockfile(m_fp);
    DEFER(funlockfile(m_fp));

    const bool is_alert = (level == kLogWarning || level == kLogError);
    if (m_console_color && is_alert) {
        m_console_color->use(level == kLogWarning ? Colour::Warning : Colour::Error);
    }

    fprintf(m_fp,
            "[%s] [%p] [%d-%02d-%02d %02d:%02d:%02d.%09d UTC] ",
            stringify(level),
            current_thread_id(),
            now.tm_year + 1900,
            now.tm_mon + 1,
            now.tm_mday,
            now.tm_hour,
            now.tm_min,
            now.tm_sec,
            now_ns);
    vfprintf(m_fp, format, args);

    // Restore default color before the trailing newline.
    if (m_console_color && is_alert) {
        m_console_color->use(Colour::Default);
    }
    putc('\n', m_fp);
    fflush(m_fp);
}
// Opens one output record: takes the output lock, emits a thread header
// whenever output has switched to a different thread since the last record,
// then writes the "file:line" tag, delimiter, and current indentation.
out_stream::out_stream(const std::string &filename_line) : _current_line_length(0)
{
    lock_output();

    const bool switched_threads = !is_running_same_thread();
    if (switched_threads) {
        const std::string tid = thread_id_field(current_thread_id());
        const std::string &tname = current_thread_name();
        const std::string &hdr = thread_header(tid, tname);
        *this << "\n" << hdr << "\n";
    }

    *this << filename_line << DELIMITER << indentation();
}
void __update_object_event(uint64_t address, uint32_t new_type) { if (current_thread_id() == working_thread_id || !logging_is_enable) { return; } memory_logging_event curr_event; curr_event.address = address; curr_event.argument = new_type; curr_event.event_type = EventType_Update; curr_event.type_flags = memory_logging_type_generic; curr_event.stack_size = 0; append_event_to_buffer(event_buffer, &curr_event); }
// Sums the recorded allocation sizes attributed to the calling thread.
// Returns 0 when logging is disabled or the allocation writer is absent.
//
// NOTE(review): this definition appears truncated in this view — the code
// after enumerate_allocation_event (presumably releasing
// should_working_thread_lock and returning `total`) is not visible here.
uint32_t get_current_thread_memory_usage() {
    if (!logging_is_enable || !allocation_event_writer) {
        return 0;
    }
    __block uint32_t total = 0;
    __block thread_id curr_thread = current_thread_id();
    // Handshake with the writer thread: request a pause (1), then spin until
    // it acknowledges (2) so the event table is stable while we enumerate.
    should_working_thread_lock = 1;
    while (should_working_thread_lock != 2);
    enumerate_allocation_event(allocation_event_writer, ^(const allocation_event &event) {
        if (event.t_id == curr_thread) {
            total += event.size;
        }
    });
// Cooperative barrier for the setjmp/longjmp scheduler: saves the calling
// thread's context, advances to the next thread id, and longjmps into that
// thread's saved state. Control returns to the else branch when another
// thread later longjmps back into this one's saved state.
//
// NOTE(review): setjmp/longjmp control flow — do not reorder statements.
void barrier() {
    std::clog << "barrier(): entering barrier from thread " << current_thread_id() << std::endl;
    // save this thread's state
    if(!setjmp(thread_state[current_thread_id()])) {
        // switch to the next ready thread
        std::clog << "barrier(): jumping from thread " << current_thread_id() << " to thread " << next_current_thread_id() << std::endl;
        set_next_current_thread_id();
        // After the id switch, current_thread_id() names the thread we resume.
        std::longjmp(thread_state[current_thread_id()], 1);
    } else {
        // Resumed here via someone else's longjmp into our saved state.
        std::clog << "barrier(): jumped into thread " << current_thread_id() << std::endl;
    }
    std::clog << "barrier(): thread " << current_thread_id() << " exiting barrier()" << std::endl;
}
void barrier() { // switch to next thread int old_thread_id = set_next_current_thread_id(); swapcontext(&thread_state[old_thread_id], &thread_state[current_thread_id()]); }
// Writer-thread main loop: drains memory_logging_events from the shared
// ring buffer, compacts adjacent events on the same address that cancel
// out (alloc+free, alloc+type-update), and persists survivors through the
// allocation/stack-frame writers. Runs until logging_is_enable goes false
// or corrupted data is detected.
void *__memory_logging_event_writing_thread(void *param) {
    pthread_setname_np("Memory Logging");
    working_thread_id = current_thread_id();

    // for preventing deadlock'ing on stack logging on a single thread
    log_internal_without_this_thread(working_thread_id);

    struct timeval delay;
    delay.tv_sec = 0;
    delay.tv_usec = 10 * 1000; // 10 ms

    while (logging_is_enable) {
        // Poll (sleeping 15 ms per round) until at least one event is buffered.
        while (has_event_in_buffer(event_buffer) == false) {
            usleep(15000);
            //select(0, NULL, NULL, NULL, &delay);
        }

        if (!logging_is_enable) {
            break;
        }

        // pick an event from buffer
        int64_t next_index = 0;
        memory_logging_event *curr_event = get_event_from_buffer(event_buffer, &next_index);
        bool is_skip = (curr_event->event_type == EventType_Invalid);

        if (is_next_index_valid(event_buffer, next_index) == false) {
            // Impossible...
            continue;
        }

        // compaction: peek at the following event and fold/cancel pairs that
        // refer to the same address.
        uint32_t object_type = 0;
        if (curr_event->event_type == EventType_Alloc && has_event_in_buffer(event_buffer, next_index)) {
            memory_logging_event *next_event = get_event_from_buffer(event_buffer, NULL, next_index);
            if (curr_event->address == next_event->address) {
                if (curr_event->type_flags & memory_logging_type_alloc) {
                    if (next_event->type_flags & memory_logging_type_dealloc) {
                        // *waves hand* current allocation never occurred
                        is_skip = true;
                        next_event->event_type = EventType_Invalid;
                    } else if (next_event->event_type == EventType_Update) {
                        // Fold the pending type update into this allocation record.
                        object_type = next_event->argument;
                        next_event->event_type = EventType_Invalid;
                    }
                } else if (next_event->type_flags & memory_logging_type_vm_deallocate) {
                    // *waves hand* current allocation(VM) never occurred
                    is_skip = true;
                    next_event->event_type = EventType_Invalid;
                }
            }
        }

        if (!is_skip) {
            // Can't lock like this without brain, or affect performance
            //__malloc_lock_lock(&working_thread_lock);

            // Pause handshake: a reader set the flag to 1 asking us to stop;
            // acknowledge with 2 and spin until it releases us.
            if (should_working_thread_lock == 1) {
                should_working_thread_lock = 2;
                while (should_working_thread_lock == 2);
            }

            if (curr_event->event_type == EventType_Alloc) {
                uint32_t stack_identifier = 0;
                if (curr_event->stack_size > 0) {
                    stack_identifier = add_stack_frames_in_table(stack_frames_writer, curr_event->stacks + curr_event->num_hot_to_skip, curr_event->stack_size - curr_event->num_hot_to_skip); // unique stack in memory
                } else {
                    // Alloc events must carry a backtrace; treat the absence as
                    // data corruption and shut logging down.
                    __malloc_printf("Data corrupted!");
                    //__malloc_lock_unlock(&working_thread_lock);
                    // Restore abort()?
                    //abort();
                    report_error(MS_ERRC_DATA_CORRUPTED);
                    disable_memory_logging();
                    break;
                }

                // Try to get vm memory type from type_flags
                if (object_type == 0) {
                    VM_GET_FLAGS_ALIAS(curr_event->type_flags, object_type);
                }

                add_allocation_event(allocation_event_writer, curr_event->address, curr_event->type_flags, object_type, curr_event->argument, stack_identifier, curr_event->t_id);
            } else if (curr_event->event_type == EventType_Free) {
                del_allocation_event(allocation_event_writer, curr_event->address, curr_event->type_flags);
            } else {
                update_allocation_event_object_type(allocation_event_writer, curr_event->address, curr_event->argument);
            }

            //__malloc_lock_unlock(&working_thread_lock);
        }

        update_read_index(event_buffer, next_index);
    }

    return NULL;
}
// malloc/VM logging hook. Normalizes the raw zone-callback arguments into a
// memory_logging_event: splits realloc into a free + alloc pair (via
// recursion), filters no-op reallocs, free(NULL), malloc-zone-backed VM
// events and VM operations on foreign tasks, captures a backtrace for
// allocation events, and appends the result to the shared event buffer.
void __memory_event_callback(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t arg2, uintptr_t arg3, uintptr_t return_val, uint32_t num_hot_to_skip) {
    uintptr_t size = 0;
    uintptr_t ptr_arg = 0;
    bool is_alloc = false;

    if (!logging_is_enable) {
        return;
    }

    uint32_t alias = 0;
    VM_GET_FLAGS_ALIAS(type_flags, alias);
    // skip all VM allocation events from malloc_zone
    if (alias >= VM_MEMORY_MALLOC && alias <= VM_MEMORY_MALLOC_NANO) {
        return;
    }

    // check incoming data
    // Both alloc and dealloc set at once means this callback is a realloc.
    if (type_flags & memory_logging_type_alloc && type_flags & memory_logging_type_dealloc) {
        size = arg3;
        ptr_arg = arg2; // the original pointer
        if (ptr_arg == return_val) {
            return; // realloc had no effect, skipping
        }
        if (ptr_arg == 0) { // realloc(NULL, size) same as malloc(size)
            type_flags ^= memory_logging_type_dealloc;
        } else {
            // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
            __memory_event_callback(memory_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
            __memory_event_callback(memory_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
            return;
        }
    }
    if (type_flags & memory_logging_type_dealloc || type_flags & memory_logging_type_vm_deallocate) {
        size = arg3;
        ptr_arg = arg2;
        if (ptr_arg == 0) {
            return; // free(nil)
        }
    }
    if (type_flags & memory_logging_type_alloc || type_flags & memory_logging_type_vm_allocate) {
        if (return_val == 0 || return_val == (uintptr_t)MAP_FAILED) {
            //return; // alloc that failed, but still record this allocation event
            return_val = 0;
        }
        size = arg2;
        is_alloc = true;
    }

    if (type_flags & memory_logging_type_vm_allocate || type_flags & memory_logging_type_vm_deallocate) {
        mach_port_t targetTask = (mach_port_t)zone_ptr;
        // For now, ignore "injections" of VM into other tasks.
        if (targetTask != mach_task_self()) {
            return;
        }
    }

    type_flags &= memory_logging_valid_type_flags;

    thread_id curr_thread = current_thread_id();
    if (curr_thread == working_thread_id || curr_thread == g_matrix_block_monitor_dumping_thread_id/* || is_thread_ignoring_logging(curr_thread)*/) {
        // Prevent a thread from deadlocking against itself if vm_allocate() or malloc()
        // is called below here, from working thread or dumping thread
        return;
    }

    memory_logging_event curr_event;

    // gather stack, only alloc type
    if (is_alloc) {
        curr_event.stack_size = backtrace((void **)curr_event.stacks, STACK_LOGGING_MAX_STACK_SIZE);
        num_hot_to_skip += 1; // skip itself and caller
        if (curr_event.stack_size <= num_hot_to_skip) {
            // Oops! Didn't get a valid backtrace from thread_stack_pcs().
            return;
        }
        if (is_stack_frames_should_skip(curr_event.stacks + num_hot_to_skip, curr_event.stack_size - num_hot_to_skip, size, type_flags)) {
            curr_event.stack_size = 0; // skip this event?
            return;
        } else {
            curr_event.num_hot_to_skip = num_hot_to_skip;
        }
        curr_event.address = return_val;
        curr_event.argument = (uint32_t)size;
        curr_event.event_type = EventType_Alloc;
        curr_event.type_flags = type_flags;
        curr_event.t_id = curr_thread;
    } else {
        curr_event.address = ptr_arg;
        curr_event.argument = (uint32_t)size;
        curr_event.event_type = EventType_Free;
        curr_event.type_flags = type_flags;
        curr_event.stack_size = 0;
    }
    append_event_to_buffer(event_buffer, &curr_event);
}