// Suspend all threads of the task owning target_pid.
// Exits the whole process on task_for_pid()/task_threads() failure
// (original behavior, kept). Returns 0 on success.
int suspend_all_threads(pid_t target_pid)
{
#if DEBUG
    printf("[DEBUG] Suspending all threads...\n");
#endif
    task_t targetTask;
    mach_port_t me = mach_task_self();
    thread_act_port_array_t thread_list;
    mach_msg_type_number_t thread_count,i;

    if(task_for_pid(me, target_pid, &targetTask))
    {
        fprintf(stderr, "[ERROR] task for pid failed while trying to suspend threads!\n");
        fprintf(stderr, "Verify if python has the right procmod permissions!\n");
        exit(1);
    }
    if (task_threads(targetTask, &thread_list, &thread_count))
    {
        fprintf(stderr, "[ERROR] task_threads failed at %s\n", __FUNCTION__);
        exit(1);
    }
    // Suspend in reverse order, as before. Suspension is a kernel-side
    // count on the thread, so it persists after we drop our port rights.
    if (thread_count > 0)
    {
        i = thread_count;
        while (i--)
        {
            suspend_thread(thread_list[i]);
        }
    }
    // FIX: task_threads() hands back one send right per thread plus an
    // out-of-line array mapped into our address space; the original
    // leaked both. Release them here.
    for (i = 0; i < thread_count; i++)
    {
        mach_port_deallocate(me, thread_list[i]);
    }
    vm_deallocate(me, (vm_address_t)thread_list, thread_count * sizeof(thread_act_t));
    return(0);
}
// fG - 10/03/2011
// Resume ("continue") the target process by resuming every thread in
// its task. Returns 0 on success, -1 on failure.
EXPORT BOOL StartProcess(DWORD dwProcessId)
{
    task_t targetTask;
    mach_port_t me = mach_task_self();
    thread_act_port_array_t thread_list;
    mach_msg_type_number_t thread_count,i;
    kern_return_t kr;

    // FIX: the original called task_for_pid() with a global target_pid,
    // leaving the dwProcessId parameter unused, and ignored the result.
    if (task_for_pid(me, (pid_t)dwProcessId, &targetTask))
    {
        fprintf(stderr, "[ERROR] task_for_pid failed at %s\n", __FUNCTION__);
        return(-1);
    }
    kr = task_threads(targetTask, &thread_list, &thread_count);
    if (kr != KERN_SUCCESS)
    {
        fprintf(stderr, "[ERROR] task_threads failed at %s\n", __FUNCTION__);
        return(-1);
    }
    if (thread_count > 0)
    {
        i = thread_count;
        printf("[INFO] Available threads:\n");
        while (i--)
        {
            printf("[%d] %d\n", i, thread_list[i]);
            resume_thread(thread_list[i]);
        }
    }
    // FIX: release the per-thread send rights and the out-of-line
    // thread array task_threads() allocated in our address space.
    for (i = 0; i < thread_count; i++)
    {
        mach_port_deallocate(me, thread_list[i]);
    }
    vm_deallocate(me, (vm_address_t)thread_list, thread_count * sizeof(thread_act_t));
    return(0);
}
// Snapshot this task's thread list into context->allThreads (clamped to
// the array capacity) and set context->threadCount.
// All rights/memory returned by task_threads() are released before
// returning. Returns false if task_threads() fails.
static inline bool getThreadList(KSMachineContext* context)
{
    const task_t thisTask = mach_task_self();
    KSLOG_DEBUG("Getting thread list");
    kern_return_t kr;
    thread_act_array_t threads;
    mach_msg_type_number_t actualThreadCount;

    if((kr = task_threads(thisTask, &threads, &actualThreadCount)) != KERN_SUCCESS)
    {
        KSLOG_ERROR("task_threads: %s", mach_error_string(kr));
        return false;
    }
    // FIX: log the count we just fetched; the original logged
    // context->threadCount before it had been assigned (stale value).
    KSLOG_TRACE("Got %d threads", (int)actualThreadCount);
    int threadCount = (int)actualThreadCount;
    int maxThreadCount = sizeof(context->allThreads) / sizeof(context->allThreads[0]);
    if(threadCount > maxThreadCount)
    {
        KSLOG_ERROR("Thread count %d is higher than maximum of %d", threadCount, maxThreadCount);
        threadCount = maxThreadCount;
    }
    for(int i = 0; i < threadCount; i++)
    {
        context->allThreads[i] = threads[i];
    }
    context->threadCount = threadCount;

    // FIX: release the rights using the list task_threads() returned.
    // The original iterated context->allThreads up to actualThreadCount,
    // reading uninitialized slots whenever the list was clamped above.
    for(mach_msg_type_number_t i = 0; i < actualThreadCount; i++)
    {
        mach_port_deallocate(thisTask, threads[i]);
    }
    vm_deallocate(thisTask, (vm_address_t)threads, sizeof(thread_t) * actualThreadCount);
    return true;
}
/*
 * MachTask.threads() -> list of MachPort thread objects.
 * Wraps task_threads(); each returned MachPort takes ownership of its
 * thread send right. The out-of-line thread array is always released.
 * Raises MachError on kernel failure.
 */
static PyObject *MachTask_threads(PyObject *self)
{
    PyObject *threads;
    kern_return_t err;
    thread_t *thread_list;
    mach_msg_type_number_t thread_count, i;

    err = task_threads(((MachPort *)self)->port, &thread_list, &thread_count);
    if (err != KERN_SUCCESS) {
        PyErr_SetString(MachError, mach_error_string(err));
        return NULL;
    }

    threads = PyList_New(thread_count);
    if (!threads) {
        /* FIX: cast through vm_address_t; the original cast the pointer
         * to (unsigned int), truncating it on 64-bit builds. */
        vm_deallocate(mach_task_self(), (vm_address_t)thread_list,
                      thread_count*sizeof(mach_port_t));
        return NULL;
    }
    for (i=0; i < thread_count; i++) {
        PyList_SET_ITEM(threads, i, MachPort_New(&MachThreadType, thread_list[i]));
    }
    vm_deallocate(mach_task_self(), (vm_address_t)thread_list,
                  thread_count*sizeof(mach_port_t));
    return threads;
}
/* Implement "info mach-threads": print every thread port in the task
   named by ARGS, then release each send right and the thread array.  */
static void
info_mach_threads_command (char *args, int from_tty)
{
  thread_array_t thread_list;
  unsigned int n_threads;
  kern_return_t kr;
  task_t target;
  unsigned int idx;

  target = get_task_from_args (args);
  if (target == TASK_NULL)
    return;

  kr = task_threads (target, &thread_list, &n_threads);
  MACH_CHECK_ERROR (kr);

  printf_unfiltered (_("Threads in task %#x:\n"), target);
  for (idx = 0; idx < n_threads; idx++)
    {
      printf_unfiltered (_(" %#x\n"), thread_list[idx]);
      /* Drop the send right task_threads gave us for this thread.  */
      mach_port_deallocate (task_self (), thread_list[idx]);
    }

  /* Free the out-of-line buffer holding the thread array.  */
  vm_deallocate (task_self (), (vm_address_t) thread_list,
                 (n_threads * sizeof (thread_t)));
}
/*
 * Suspend the thread identified by thread_id, hand its CPU context to
 * func for modification, write the modified context back, and resume.
 * Returns TRUE only if the whole suspend/get/modify/set/resume sequence
 * succeeded. All thread send rights and the thread array returned by
 * task_threads() are released before returning.
 */
gboolean
gum_process_modify_thread (GumThreadId thread_id,
                           GumModifyThreadFunc func,
                           gpointer user_data)
{
  gboolean success = FALSE;
  mach_port_t task;
  thread_act_array_t threads;
  mach_msg_type_number_t count;
  kern_return_t kr;

  task = mach_task_self ();
  kr = task_threads (task, &threads, &count);
  if (kr == KERN_SUCCESS)
  {
    guint i;

    for (i = 0; i != count; i++)
    {
      thread_t thread = threads[i];

      if (thread == thread_id)
      {
        gum_thread_state_t state;
        mach_msg_type_number_t state_count = GUM_THREAD_STATE_COUNT;
        thread_state_flavor_t state_flavor = GUM_THREAD_STATE_FLAVOR;
        GumCpuContext cpu_context;

        /* The thread must be suspended before its register state can be
         * read or written coherently. */
        kr = thread_suspend (thread);
        if (kr != KERN_SUCCESS)
          break;

        kr = thread_get_state (thread, state_flavor,
            (thread_state_t) &state, &state_count);
        if (kr != KERN_SUCCESS)
        {
          /* Undo the suspend before bailing out. */
          thread_resume (thread);
          break;
        }

        /* Round-trip: darwin thread state -> GumCpuContext -> callback
         * -> back to darwin thread state. */
        gum_cpu_context_from_darwin (&state, &cpu_context);
        func (thread_id, &cpu_context, user_data);
        gum_cpu_context_to_darwin (&cpu_context, &state);

        kr = thread_set_state (thread, state_flavor,
            (thread_state_t) &state, state_count);

        /* Success requires both the state write and the resume to work. */
        success =
            (thread_resume (thread) == KERN_SUCCESS && kr == KERN_SUCCESS);
      }
    }

    /* task_threads() returns one send right per thread plus an
     * out-of-line array; release both. */
    for (i = 0; i != count; i++)
      mach_port_deallocate (task, threads[i]);
    vm_deallocate (task, (vm_address_t) threads, count * sizeof (thread_t));
  }

  return success;
}
/*
 * One-time initialization: cache the pthread handle corresponding to
 * this task's first Mach thread (threads[0]) in g_topThread.
 * Subsequent calls are no-ops once initialized is set.
 * NOTE(review): the check-then-set on 'initialized' is not atomic, so
 * two racing first calls could both run the body — presumably this is
 * invoked early from a single thread; confirm with callers.
 */
void ksmach_init(void)
{
    static volatile sig_atomic_t initialized = 0;
    if(!initialized)
    {
        kern_return_t kr;
        const task_t thisTask = mach_task_self();
        thread_act_array_t threads;
        mach_msg_type_number_t numThreads;

        if((kr = task_threads(thisTask, &threads, &numThreads)) != KERN_SUCCESS)
        {
            KSLOG_ERROR("task_threads: %s", mach_error_string(kr));
            return;
        }
        /* threads[0] is taken to be the "top" (main) thread. */
        g_topThread = pthread_from_mach_thread_np(threads[0]);

        /* Release the send rights and the out-of-line array that
         * task_threads() allocated. */
        for(mach_msg_type_number_t i = 0; i < numThreads; i++)
        {
            mach_port_deallocate(thisTask, threads[i]);
        }
        vm_deallocate(thisTask, (vm_address_t)threads, sizeof(thread_t) * numThreads);

        initialized = true;
    }
}
// Fetch the thread list of PID's task via its task port.
// FIX (was the FIXME): the task_threads() result is now checked; on
// failure the out-parameters are set to NULL/0 so callers never read
// garbage from uninitialized output storage.
void get_task_threads(int pid, thread_act_port_array_t *thread_list,
                      mach_msg_type_number_t *thread_count)
{
    task_t port = getport(pid);
    if (task_threads(port, thread_list, thread_count) != KERN_SUCCESS) {
        *thread_list = NULL;
        *thread_count = 0;
    }
}
/*
 * Return number of threads used by process as a Python integer.
 */
static PyObject*
get_process_num_threads(PyObject* self, PyObject* args)
{
    long pid;
    int err, ret;
    unsigned int info_count = TASK_BASIC_INFO_COUNT;
    mach_port_t task;
    struct task_basic_info tasks_info;
    thread_act_port_array_t thread_list;
    mach_msg_type_number_t thread_count, i;

    // the argument passed should be a process id
    if (! PyArg_ParseTuple(args, "l", &pid)) {
        return NULL;
    }

    /* task_for_pid() requires special privileges
     * "This function can be called only if the process is owned by the
     * procmod group or if the caller is root."
     * - http://developer.apple.com/documentation/MacOSX/Conceptual/universal_binary/universal_binary_tips/chapter_5_section_19.html
     */
    err = task_for_pid(mach_task_self(), pid, &task);
    if (err != KERN_SUCCESS) {
        if (! pid_exists(pid) ) {
            return NoSuchProcess();
        }
        // pid exists, so return AccessDenied error since task_for_pid() failed
        return AccessDenied();
    }

    info_count = TASK_BASIC_INFO_COUNT;
    err = task_info(task, TASK_BASIC_INFO, (task_info_t)&tasks_info, &info_count);
    if (err != KERN_SUCCESS) {
        // errcode 4 is "invalid argument" (access denied)
        if (err == 4) {
            return AccessDenied();
        }
        // otherwise throw a runtime error with appropriate error code
        return PyErr_Format(PyExc_RuntimeError,
                            "task_info(TASK_BASIC_INFO) failed");
    }

    err = task_threads(task, &thread_list, &thread_count);
    if (err != KERN_SUCCESS) {
        return PyErr_Format(PyExc_RuntimeError, "task_thread() failed");
    }

    // FIX: the thread array is mapped into OUR address space and each
    // entry is a send right we own; the original called vm_deallocate()
    // on the remote task (wrong), sized it with sizeof(int), and never
    // released the per-thread rights.
    for (i = 0; i < thread_count; i++) {
        mach_port_deallocate(mach_task_self(), thread_list[i]);
    }
    ret = vm_deallocate(mach_task_self(), (vm_address_t)thread_list,
                        thread_count * sizeof(thread_t));
    if (ret != KERN_SUCCESS) {
        printf("vm_deallocate() failed\n");
    }
    return Py_BuildValue("l", (long)thread_count);
}
// We should consider moving this into each MacThread. static void get_threads_profile_data(DNBProfileDataScanType scanType, task_t task, nub_process_t pid, std::vector<uint64_t> &threads_id, std::vector<std::string> &threads_name, std::vector<uint64_t> &threads_used_usec) { kern_return_t kr; thread_act_array_t threads; mach_msg_type_number_t tcnt; kr = task_threads(task, &threads, &tcnt); if (kr != KERN_SUCCESS) return; for (int i = 0; i < tcnt; i++) { thread_identifier_info_data_t identifier_info; mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT; kr = ::thread_info(threads[i], THREAD_IDENTIFIER_INFO, (thread_info_t)&identifier_info, &count); if (kr != KERN_SUCCESS) continue; thread_basic_info_data_t basic_info; count = THREAD_BASIC_INFO_COUNT; kr = ::thread_info(threads[i], THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count); if (kr != KERN_SUCCESS) continue; if ((basic_info.flags & TH_FLAGS_IDLE) == 0) { nub_thread_t tid = MachThread::GetGloballyUniqueThreadIDForMachPortID (threads[i]); threads_id.push_back(tid); if ((scanType & eProfileThreadName) && (identifier_info.thread_handle != 0)) { struct proc_threadinfo proc_threadinfo; int len = ::proc_pidinfo(pid, PROC_PIDTHREADINFO, identifier_info.thread_handle, &proc_threadinfo, PROC_PIDTHREADINFO_SIZE); if (len && proc_threadinfo.pth_name[0]) { threads_name.push_back(proc_threadinfo.pth_name); } else { threads_name.push_back(""); } } else { threads_name.push_back(""); } struct timeval tv; struct timeval thread_tv; TIME_VALUE_TO_TIMEVAL(&basic_info.user_time, &thread_tv); TIME_VALUE_TO_TIMEVAL(&basic_info.system_time, &tv); timeradd(&thread_tv, &tv, &thread_tv); uint64_t used_usec = thread_tv.tv_sec * 1000000ULL + thread_tv.tv_usec; threads_used_usec.push_back(used_usec); } kr = mach_port_deallocate(mach_task_self(), threads[i]); } kr = mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)threads, tcnt * sizeof(*threads)); }
/*
 * Suspend every thread in the process except data->filterThread,
 * repeating until no new threads appear between iterations.  Also
 * installs the SUSPEND_SIG handler used to capture thread context.
 * Returns 0 on success, -1 on any setup/suspension failure (errors are
 * recorded via RECORD_ERROR).  On success data->cleanupRequired is set
 * so the caller performs the matching cleanup/resume.
 */
static int suspendAllPreemptive(PlatformWalkData *data)
{
	mach_msg_type_number_t threadCount = 0;
	data->threadCount = 0;
	mach_port_t task = mach_task_self();
	struct sigaction upcallAction;
	int rc = 0;

	/* Install a signal handler to get thread context info from the handler. */
	upcallAction.sa_sigaction = upcallHandler;
	upcallAction.sa_flags = SA_SIGINFO | SA_RESTART;

	if (-1 == sigaction(SUSPEND_SIG, &upcallAction, &data->oldHandler)) {
		RECORD_ERROR(data->state, SIGNAL_SETUP_ERROR, -1);
		rc = -1;
	} else if (data->oldHandler.sa_sigaction == upcallHandler) {
		/* Handler's already installed so already iterating threads. We mustn't uninstall the handler
		 * while cleaning as the thread that installed the initial handler will do so.
		 */
		memset(&data->oldHandler, 0, sizeof(struct sigaction));
		RECORD_ERROR(data->state, CONCURRENT_COLLECTION, -1);
		rc = -1;
	}

	if (0 == rc) {
		/* After this point it's safe to go through the full cleanup. */
		data->cleanupRequired = 1;

		/* Suspend all threads until there are no new threads. */
		do {
			int i = 0;

			/* Get a list of the threads within the process.
			 * Record the previous count first: the loop below only
			 * suspends threads with index >= data->threadCount, i.e.
			 * those that appeared since the last iteration.
			 * NOTE(review): each task_threads() call overwrites
			 * data->threadList without releasing the previous list —
			 * presumably the later cleanup only frees the last one;
			 * confirm. */
			data->threadCount = threadCount;
			if (KERN_SUCCESS != task_threads(task, &data->threadList, &threadCount)) {
				RECORD_ERROR(data->state, SIGNAL_SETUP_ERROR, -1);
				rc = -1;
				break;
			}

			/* Suspend each thread except this one. */
			for (i = data->threadCount; i < threadCount; i += 1) {
				if (data->filterThread != data->threadList[i]) {
					if (KERN_SUCCESS != thread_suspend(data->threadList[i])) {
						/* Remember how far we got so cleanup resumes
						 * only the threads actually suspended. */
						data->threadCount = i;
						rc = -1;
						break;
					}
				}
			}
		/* Loop again whenever the snapshot grew (new threads were
		 * created while we were suspending). */
		} while ((threadCount > data->threadCount) && (0 == rc));
	}

	return rc;
}
/*
 * Write the register state in 'buf' into the first thread of
 * dbg->pid's task.  'type' selects debug registers (R_REG_TYPE_DRX) vs
 * general-purpose registers on x86; other architectures use a single
 * flavor.  Returns sizeof(R_DEBUG_REG_T) on success, R_FALSE on error.
 * Note: 'regs', 'tid' and 'gp_count' are consumed inside the
 * THREAD_SET_STATE() macro, not referenced directly below.
 * NOTE(review): the thread list from task_threads() (rights + array)
 * is never deallocated — leaked on every call.
 */
int xnu_reg_write (RDebug *dbg, int type, const ut8 *buf, int size) {
	int ret;
	thread_array_t inferior_threads = NULL;
	unsigned int inferior_thread_count = 0;
	R_DEBUG_REG_T *regs = (R_DEBUG_REG_T*)buf;
	unsigned int gp_count = R_DEBUG_STATE_SZ;

	ret = task_threads (pid_to_task (dbg->pid),
		&inferior_threads, &inferior_thread_count);
	if (ret != KERN_SUCCESS) {
		eprintf ("debug_getregs\n");
		return R_FALSE;
	}

	/* TODO: thread cannot be selected */
	if (inferior_thread_count > 0) {
		/* 44 vs 16 = register-slot counts for 64- vs 32-bit state. */
		gp_count = ((dbg->bits == R_SYS_BITS_64)) ? 44 : 16;
		// XXX: kinda spaguetti coz multi-arch
		int tid = inferior_threads[0];
#if __i386__ || __x86_64__
		switch (type) {
		case R_REG_TYPE_DRX:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_SET_STATE(x86_DEBUG_STATE64);
			} else {
				ret = THREAD_SET_STATE(x86_DEBUG_STATE32);
			}
			break;
		default:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_SET_STATE(x86_THREAD_STATE);
			} else {
				ret = THREAD_SET_STATE(i386_THREAD_STATE);
			}
			break;
		}
#else
		ret = THREAD_SET_STATE(R_DEBUG_STATE_T);
#endif
		if (ret != KERN_SUCCESS) {
			eprintf ("debug_setregs: Failed to set thread %d %d.error (%x). (%s)\n",
				(int)dbg->pid, pid_to_task (dbg->pid),
				(int)ret, MACH_ERROR_STRING (ret));
			perror ("thread_set_state");
			return R_FALSE;
		}
	} else {
		eprintf ("There are no threads!\n");
	}
	return sizeof (R_DEBUG_REG_T);
}
// Returns the number of threads running in the process, or 0 to indicate that
// we cannot detect it.
size_t GetThreadCount() {
  const task_t task = mach_task_self();
  mach_msg_type_number_t thread_count;
  thread_act_array_t thread_list;
  const kern_return_t status = task_threads(task, &thread_list, &thread_count);
  if (status != KERN_SUCCESS) {
    return 0;
  }
  // task_threads allocates resources in thread_list and we need to free them
  // to avoid leaks.
  // FIX: it also grants one send right per thread; the original only
  // freed the array, leaking a port right per thread per call.
  for (mach_msg_type_number_t i = 0; i < thread_count; ++i) {
    mach_port_deallocate(task, thread_list[i]);
  }
  vm_deallocate(task,
                reinterpret_cast<vm_address_t>(thread_list),
                sizeof(thread_t) * thread_count);
  return static_cast<size_t>(thread_count);
}
/*
 * Enumerate every thread of 'task', invoking 'func' with a populated
 * GumThreadDetails (id, run state, CPU context) for each.  Enumeration
 * stops early if 'func' returns FALSE.  Threads whose info/state cannot
 * be read are silently skipped.  All send rights and the thread array
 * from task_threads() are released before returning.
 */
void
gum_darwin_enumerate_threads (mach_port_t task,
                              GumFoundThreadFunc func,
                              gpointer user_data)
{
  thread_act_array_t threads;
  mach_msg_type_number_t count;
  kern_return_t kr;

  kr = task_threads (task, &threads, &count);
  if (kr == KERN_SUCCESS)
  {
    guint i;

    for (i = 0; i != count; i++)
    {
      thread_t thread = threads[i];
      GumThreadDetails details;
      thread_basic_info_data_t info;
      mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
      gum_thread_state_t state;
      mach_msg_type_number_t state_count = GUM_THREAD_STATE_COUNT;
      thread_state_flavor_t state_flavor = GUM_THREAD_STATE_FLAVOR;

      kr = thread_info (thread, THREAD_BASIC_INFO, (thread_info_t) &info,
          &info_count);
      if (kr != KERN_SUCCESS)
        continue;

      kr = thread_get_state (thread, state_flavor, (thread_state_t) &state,
          &state_count);
      if (kr != KERN_SUCCESS)
        continue;

      details.id = (GumThreadId) thread;
      details.state = gum_thread_state_from_darwin (info.run_state);
      gum_cpu_context_from_darwin (&state, &details.cpu_context);

      if (!func (&details, user_data))
        break;
    }

    /* Release the per-thread send rights and the out-of-line array,
     * including threads skipped or not visited after an early break. */
    for (i = 0; i != count; i++)
      mach_port_deallocate (task, threads[i]);
    vm_deallocate (task, (vm_address_t) threads, count * sizeof (thread_t));
  }
}
/* Return 1 if THREAD is currently one of TASK's threads, 0 otherwise.
   A task_threads() failure with an "invalid port/name" class error is
   treated as "not valid" rather than fatal; other errors go through
   MACH_CHECK_ERROR.
   NOTE(review): the send rights task_threads() returns for each thread
   are never mach_port_deallocate()'d here — only the array is freed.  */
int
macosx_thread_valid (task_t task, thread_t thread)
{
  thread_array_t thread_list;
  unsigned int thread_count = 0;
  kern_return_t kret;
  unsigned int found = 0;
  unsigned int i;

  CHECK_FATAL (task != TASK_NULL);

  kret = task_threads (task, &thread_list, &thread_count);
#ifdef DEBUG_MACOSX_MUTILS
  mutils_debug ("macosx_thread_valid - task_threads (%d, %p, %d) returned 0x%lx\n",
                task, &thread_list, thread_count, kret);
#endif
  /* These errors just mean the task (or our right to it) is gone.  */
  if ((kret == KERN_INVALID_ARGUMENT)
      || (kret == MACH_SEND_INVALID_RIGHT)
      || (kret == MACH_RCV_INVALID_NAME))
    {
      return 0;
    }
  MACH_CHECK_ERROR (kret);

  /* Linear scan for the thread's port name.  */
  for (i = 0; i < thread_count; i++)
    {
      if (thread_list[i] == thread)
        {
          found = 1;
        }
    }

  /* Free the out-of-line thread array.  */
  kret = vm_deallocate (mach_task_self (), (vm_address_t) thread_list,
                        (vm_size_t) (thread_count * sizeof (thread_t)));
  MACH_CHECK_ERROR (kret);

#ifdef DEBUG_MACOSX_MUTILS
  if (!found)
    {
      mutils_debug ("thread 0x%lx no longer valid for task 0x%lx\n",
                    (unsigned long) thread, (unsigned long) task);
    }
#endif
  return found;
}
/*
 * Append one r_debug_pid entry per thread of 'pid' to 'list' and return
 * it.  Thread name is unknown ("???"), state 's' (sleeping).
 * NOTE(review): on __x86_64__ and the #else branch the first OSX_PC
 * #define is immediately #undef'd and replaced, so only the x64[]/x32[]
 * definitions are live — the __rip/__eip defines are dead code.
 * NOTE(review): thread_get_state is commented out below, so OSX_PC
 * reads the uninitialized 'state' variable; PC values are garbage.
 * The thread list (rights + array) is also never deallocated.
 */
RList *xnu_thread_list (RDebug *dbg, int pid, RList *list) {
#if __arm__
#define OSX_PC state.__pc
#elif __arm64__
#define OSX_PC state.__pc
#elif __POWERPC__
#define OSX_PC state.srr0
#elif __x86_64__
#define OSX_PC state.__rip
#undef OSX_PC
#define OSX_PC state.x64[REG_PC]
#else
#define OSX_PC state.__eip
#undef OSX_PC
#define OSX_PC state.x32[REG_PC]
#endif
	int i, tid; //, err;
	//unsigned int gp_count;
	static thread_array_t inferior_threads = NULL;
	static unsigned int inferior_thread_count = 0;
	R_DEBUG_REG_T state;

	if (task_threads (pid_to_task (pid), &inferior_threads,
			&inferior_thread_count) != KERN_SUCCESS) {
		eprintf ("Failed to get list of task's threads.\n");
		return list;
	}
	for (i = 0; i < inferior_thread_count; i++) {
		tid = inferior_threads[i];
		/* XXX overflow here
		gp_count = R_DEBUG_STATE_SZ; //sizeof (R_DEBUG_REG_T);
		if ((err = thread_get_state (tid, R_DEBUG_STATE_T, (thread_state_t) &state, &gp_count)) != KERN_SUCCESS) {
			// eprintf ("debug_list_threads: %s\n", MACH_ERROR_STRING(err));
			OSX_PC = 0;
		}
		*/
		r_list_append (list, r_debug_pid_new ("???", tid, 's', OSX_PC));
	}
	return list;
}
// s/inferior_task/port/ static int debug_attach(int pid) { task_t task = pid_to_task (pid); if (task == -1) return -1; eprintf ("pid: %d\ntask: %d\n", pid, task); #if 0 // TODO : move this code into debug if (task_threads (task, &inferior_threads, &inferior_thread_count) != KERN_SUCCESS) { eprintf ("Failed to get list of task's threads.\n"); return -1; } eprintf ("Thread count: %d\n", inferior_thread_count); #endif #if SUSPEND if (task_suspend (this->port) != KERN_SUCCESS) { eprintf ("cannot suspend task\n"); return -1; // R_FALSE } #endif /* is this required for arm ? */ #if EXCEPTION_PORT int exception_port; if (mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE, &exception_port) != KERN_SUCCESS) { eprintf ("Failed to create exception port.\n"); return -1; } if (mach_port_insert_right(mach_task_self(), exception_port, exception_port, MACH_MSG_TYPE_MAKE_SEND) != KERN_SUCCESS) { eprintf ("Failed to acquire insertion rights on the port.\n"); return -1; } if (task_set_exception_ports(inferior_task, EXC_MASK_ALL, exception_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE) != KERN_SUCCESS) { eprintf ("Failed to set the inferior's exception ports.\n"); return -1; } #endif return task; }
/*
 * Fetch the thread activation list for 'task' into *thr_act_list /
 * *count, converting each returned port back to a thread_act_t
 * (undoing the MIG conversion task_threads performs).
 * Returns the task_threads() kern_return_t.
 */
__private_extern__ int
chudxnu_task_threads(task_t task,
                     thread_act_array_t *thr_act_list,
                     mach_msg_type_number_t *count)
{
    kern_return_t kr;

    /* FIX: query the task we were handed; the original always called
     * task_threads(current_task(), ...), silently ignoring 'task'. */
    kr = task_threads(task, thr_act_list, count);
    if(kr==KERN_SUCCESS) {
        thread_act_t thr_act;
        int i;
        for(i=0; i<(*count); i++) {
            /* undo the mig conversion task_threads does */
            thr_act = convert_port_to_act(((ipc_port_t *)(*thr_act_list))[i]);
            /* FIX: store into the caller's array element; the original
             * wrote thr_act_list[i], indexing the out-pointer itself. */
            (*thr_act_list)[i] = thr_act;
        }
    }
    return kr;
}
/*
 * Resume every thread in this task that was previously suspended,
 * except the calling thread and any thread in g_reservedThreads.
 * thread_resume() failures are logged and skipped (non-fatal).
 * All rights/memory from task_threads() are released before returning.
 * No-op when the platform lacks the threads API.
 */
void ksmc_resumeEnvironment()
{
#if KSCRASH_HAS_THREADS_API
    KSLOG_DEBUG("Resuming environment.");
    kern_return_t kr;
    const task_t thisTask = mach_task_self();
    const thread_t thisThread = (thread_t)ksthread_self();
    thread_act_array_t threads;
    mach_msg_type_number_t numThreads;

    if((kr = task_threads(thisTask, &threads, &numThreads)) != KERN_SUCCESS)
    {
        KSLOG_ERROR("task_threads: %s", mach_error_string(kr));
        return;
    }

    for(mach_msg_type_number_t i = 0; i < numThreads; i++)
    {
        thread_t thread = threads[i];
        /* Never resume ourselves or the reserved (crash-handling)
         * threads. */
        if(thread != thisThread && !isThreadInList(thread, g_reservedThreads, g_reservedThreadsCount))
        {
            if((kr = thread_resume(thread)) != KERN_SUCCESS)
            {
                // Record the error and keep going.
                KSLOG_ERROR("thread_resume (%08x): %s", thread, mach_error_string(kr));
            }
        }
    }

    /* Release the send rights and the out-of-line thread array. */
    for(mach_msg_type_number_t i = 0; i < numThreads; i++)
    {
        mach_port_deallocate(thisTask, threads[i]);
    }
    vm_deallocate(thisTask, (vm_address_t)threads, sizeof(thread_t) * numThreads);

    KSLOG_DEBUG("Resume complete.");
#endif
}
/* Resume every thread in this task except the calling thread and any
 * thread listed in exceptThreads.  Individual thread_resume() failures
 * are logged but do not abort the walk.  Returns false only when the
 * thread list itself cannot be fetched. */
bool ksmach_resumeAllThreadsExcept(thread_t* exceptThreads, int exceptThreadsCount)
{
    const task_t task = mach_task_self();
    const thread_t self = ksmach_thread_self();
    thread_act_array_t threadList;
    mach_msg_type_number_t threadTotal;
    kern_return_t result;

    result = task_threads(task, &threadList, &threadTotal);
    if(result != KERN_SUCCESS)
    {
        KSLOG_ERROR("task_threads: %s", mach_error_string(result));
        return false;
    }

    for(mach_msg_type_number_t idx = 0; idx < threadTotal; idx++)
    {
        thread_t current = threadList[idx];
        bool excluded = (current == self) ||
                        isThreadInList(current, exceptThreads, exceptThreadsCount);
        if(excluded)
        {
            continue;
        }
        result = thread_resume(current);
        if(result != KERN_SUCCESS)
        {
            KSLOG_ERROR("thread_resume (%08x): %s", current, mach_error_string(result));
            // Don't treat this as a fatal error.
        }
    }

    /* Drop the send rights and the out-of-line array task_threads()
     * handed us. */
    for(mach_msg_type_number_t idx = 0; idx < threadTotal; idx++)
    {
        mach_port_deallocate(task, threadList[idx]);
    }
    vm_deallocate(task, (vm_address_t)threadList, sizeof(thread_t) * threadTotal);

    return true;
}
/* Implement "info mach-threads" for a raw task port parsed from ARGS
   ("0x..." hex).  Prints each thread port in the task.  */
static void
info_mach_threads_command (char *args, int from_tty)
{
  thread_array_t thread_array;
  unsigned int thread_count;
  kern_return_t result;
  task_t task;
  int i;

  CHECK_ARGS ("Task", args);
  sscanf (args, "0x%x", &task);

  result = task_threads (task, &thread_array, &thread_count);
  MACH_CHECK_ERROR (result);

  printf_unfiltered ("Threads in task %#x:\n", task);
  for (i = 0; i < thread_count; ++i)
    {
      printf_unfiltered (" %#x\n", thread_array[i]);
      /* FIX: release the send right task_threads() created for each
         thread; the original freed only the array, leaking one port
         right per thread per invocation.  */
      mach_port_deallocate (task_self (), thread_array[i]);
    }

  vm_deallocate (task_self (), (vm_address_t) thread_array,
                 (thread_count * sizeof (thread_t)));
}
/* On Mac OS X, the only way to get enough information is to become root. Pretty frustrating!*/
/*
 * Fill rpd with dynamic process info for 'pid': resident/virtual size
 * (with the shared text/data region subtracted when appropriate) and
 * user/system CPU time summed over all non-idle threads.
 * If task_for_pid() fails (insufficient privileges), all fields are
 * zeroed and 0 is returned.  Kernel call failures after that point
 * abort via assert().
 */
int run_get_dynamic_proc_info(pid_t pid, RunProcDyn *rpd)
{
    task_t task;
    kern_return_t error;
    mach_msg_type_number_t count;
    thread_array_t thread_table;
    thread_basic_info_t thi;
    thread_basic_info_data_t thi_data;
    unsigned table_size;
    struct task_basic_info ti;

    error = task_for_pid(mach_task_self(), pid, &task);
    if (error != KERN_SUCCESS) {
        /* fprintf(stderr, "++ Probably you have to set suid or become root.\n"); */
        rpd->rss = rpd->vsize = 0;
        rpd->utime = rpd->stime = 0;
        return 0;
    }
    count = TASK_BASIC_INFO_COUNT;
    error = task_info(task, TASK_BASIC_INFO, (task_info_t)&ti, &count);
    assert(error == KERN_SUCCESS);
    { /* adapted from ps/tasks.c */
        /* Subtract the globally shared text/data segment from vsize when
         * the task maps it, so vsize reflects task-private usage. */
        vm_region_basic_info_data_64_t b_info;
        vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT;
        vm_size_t size;
        mach_port_t object_name;
        count = VM_REGION_BASIC_INFO_COUNT_64;
        error = vm_region_64(task, &address, &size, VM_REGION_BASIC_INFO,
                             (vm_region_info_t)&b_info, &count, &object_name);
        if (error == KERN_SUCCESS) {
            if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
                ti.virtual_size > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
                ti.virtual_size -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
            }
        }
        rpd->rss = ti.resident_size;
        rpd->vsize = ti.virtual_size;
    }
    { /* calculate CPU times, adapted from top/libtop.c */
        unsigned i;
        /* Start from terminated-thread totals in the task info, then add
         * each live, non-idle thread's times. */
        rpd->utime = ti.user_time.seconds + ti.user_time.microseconds * 1e-6;
        rpd->stime = ti.system_time.seconds + ti.system_time.microseconds * 1e-6;
        error = task_threads(task, &thread_table, &table_size);
        assert(error == KERN_SUCCESS);
        thi = &thi_data;
        for (i = 0; i != table_size; ++i) {
            count = THREAD_BASIC_INFO_COUNT;
            error = thread_info(thread_table[i], THREAD_BASIC_INFO,
                                (thread_info_t)thi, &count);
            assert(error == KERN_SUCCESS);
            if ((thi->flags & TH_FLAGS_IDLE) == 0) {
                rpd->utime += thi->user_time.seconds + thi->user_time.microseconds * 1e-6;
                rpd->stime += thi->system_time.seconds + thi->system_time.microseconds * 1e-6;
            }
            /* NOTE(review): rights are also granted when the target is
             * our own task; skipping deallocation in that case leaks
             * them — confirm intent. */
            if (task != mach_task_self()) {
                error = mach_port_deallocate(mach_task_self(), thread_table[i]);
                assert(error == KERN_SUCCESS);
            }
        }
        /* NOTE(review): the size should presumably use sizeof(thread_t)
         * (array element), not sizeof(thread_array_t) (a pointer) —
         * harmless only because vm_deallocate rounds to page size. */
        error = vm_deallocate(mach_task_self(), (vm_offset_t)thread_table,
                              table_size * sizeof(thread_array_t));
        assert(error == KERN_SUCCESS);
    }
    mach_port_deallocate(mach_task_self(), task);
    return 0;
}
//------------------------------------------------------------------------------
// Name: set_state
// Desc: Writes the given register state (general + debug registers)
//       into the first thread of the attached target task.  The task is
//       suspended for the duration of the write; on thread_set_state
//       failure the task is resumed before returning.
// NOTE(review): if task_threads() fails, execution continues and reads
//       the uninitialized thread_list/thread_count below — should
//       presumably return after the resume; confirm.
// NOTE(review): state_count is the THREAD_STATE count but is reused for
//       the DEBUG flavor write; thread_list and the task suspension are
//       also never released/undone on the success path.
//------------------------------------------------------------------------------
void DebuggerCore::set_state(const State &state) {

	// TODO: assert that we are paused

	auto state_impl = static_cast<PlatformState *>(state.impl_);

	if(attached()) {

		/* Get the mach task for the target process */
		mach_port_t task;
		kern_return_t err = task_for_pid(mach_task_self(), pid(), &task);
		if(err != KERN_SUCCESS) {
			qDebug("task_for_pid() failed with %x [%d]", err, pid());
			return;
		}

		/* Suspend the target process */
		err = task_suspend(task);
		if(err != KERN_SUCCESS) {
			qDebug("task_suspend() failed");
		}

		/* Get all threads in the specified task */
		thread_act_port_array_t thread_list;
		mach_msg_type_number_t thread_count;
		err = task_threads(task, &thread_list, &thread_count);
		if(err != KERN_SUCCESS) {
			qDebug("task_threads() failed");
			err = task_resume(task);
			if(err != KERN_SUCCESS) {
				qDebug("task_resume() failed");
			}
		}

		Q_ASSERT(thread_count > 0);

#ifdef EDB_X86
		mach_msg_type_number_t state_count = x86_THREAD_STATE32_COUNT;
		const thread_state_flavor_t flavor = x86_THREAD_STATE32;
		const thread_state_flavor_t debug_flavor = x86_DEBUG_STATE32;
		//const thread_state_flavor_t fpu_flavor = x86_FLOAT_STATE32;
		//const thread_state_flavor_t exception_flavor = x86_EXCEPTION_STATE32;
#elif defined(EDB_X86_64)
		mach_msg_type_number_t state_count = x86_THREAD_STATE64_COUNT;
		const thread_state_flavor_t flavor = x86_THREAD_STATE64;
		const thread_state_flavor_t debug_flavor = x86_DEBUG_STATE64;
		//const thread_state_flavor_t fpu_flavor = x86_FLOAT_STATE64;
		//const thread_state_flavor_t exception_flavor = x86_EXCEPTION_STATE64;
#endif

		// TODO Set for specific thread, not first one
		err = thread_set_state(
			thread_list[0],
			flavor,
			(thread_state_t)&state_impl->thread_state_,
			state_count);

		if(err != KERN_SUCCESS) {
			qDebug("thread_set_state() failed with %.08x", err);
			err = task_resume(task);
			if(err != KERN_SUCCESS) {
				qDebug("task_resume() failed");
			}
			return;
		}

		err = thread_set_state(
			thread_list[0],
			debug_flavor,
			(thread_state_t)&state_impl->debug_state_,
			state_count);

		if(err != KERN_SUCCESS) {
			qDebug("thread_set_state() failed with %.08x", err);
			err = task_resume(task);
			if(err != KERN_SUCCESS) {
				qDebug("task_resume() failed");
			}
			return;
		}
	}
}
/*
 * Return process threads
 * For each thread of 'pid': a (bogus incremental id, user time,
 * system time) tuple, collected into a Python list.
 * NOTE(review): several resource issues to confirm/fix upstream:
 *  - vm_deallocate() targets the remote 'task', but task_threads()
 *    maps the array into OUR task — should presumably be
 *    mach_task_self(); size also uses sizeof(int) not sizeof(thread_t).
 *  - every early return after task_threads() leaks the thread list and
 *    the per-thread send rights; retList also leaks on error returns.
 *  - only .microseconds is used for the times; the .seconds part of
 *    user/system time is dropped.
 */
static PyObject*
get_process_threads(PyObject* self, PyObject* args)
{
    long pid;
    int err, j, ret;
    kern_return_t kr;
    unsigned int info_count = TASK_BASIC_INFO_COUNT;
    mach_port_t task;
    struct task_basic_info tasks_info;
    thread_act_port_array_t thread_list;
    thread_info_data_t thinfo;
    thread_basic_info_t basic_info_th;
    mach_msg_type_number_t thread_count, thread_info_count;

    PyObject* retList = PyList_New(0);
    PyObject* pyTuple = NULL;

    // the argument passed should be a process id
    if (! PyArg_ParseTuple(args, "l", &pid)) {
        return NULL;
    }

    // task_for_pid() requires special privileges
    err = task_for_pid(mach_task_self(), pid, &task);
    if (err != KERN_SUCCESS) {
        if (! pid_exists(pid) ) {
            return NoSuchProcess();
        }
        return AccessDenied();
    }

    info_count = TASK_BASIC_INFO_COUNT;
    err = task_info(task, TASK_BASIC_INFO, (task_info_t)&tasks_info, &info_count);
    if (err != KERN_SUCCESS) {
        // errcode 4 is "invalid argument" (access denied)
        if (err == 4) {
            return AccessDenied();
        }
        // otherwise throw a runtime error with appropriate error code
        return PyErr_Format(PyExc_RuntimeError, "task_info(TASK_BASIC_INFO) failed");
    }

    err = task_threads(task, &thread_list, &thread_count);
    if (err != KERN_SUCCESS) {
        return PyErr_Format(PyExc_RuntimeError, "task_threads() failed");
    }

    for (j = 0; j < thread_count; j++) {
        thread_info_count = THREAD_INFO_MAX;
        kr = thread_info(thread_list[j], THREAD_BASIC_INFO,
                         (thread_info_t)thinfo, &thread_info_count);
        if (kr != KERN_SUCCESS) {
            return PyErr_Format(PyExc_RuntimeError, "thread_info() failed");
        }
        basic_info_th = (thread_basic_info_t)thinfo;
        // XXX - thread_info structure does not provide any process id;
        // the best we can do is assigning an incremental bogus value
        pyTuple = Py_BuildValue("Iff", j + 1,
            (float)basic_info_th->user_time.microseconds / 1000000.0,
            (float)basic_info_th->system_time.microseconds / 1000000.0 );
        PyList_Append(retList, pyTuple);
        Py_XDECREF(pyTuple);
    }

    ret = vm_deallocate(task, (vm_address_t)thread_list,
                        thread_count * sizeof(int));
    if (ret != KERN_SUCCESS) {
        printf("vm_deallocate() failed\n");
    }
    return retList;
}
/*
 * Sum per-thread user time, system time (whole seconds only) and
 * cpu_usage across all threads of MP's task into the RP() fields.
 * Returns KERN_SUCCESS, or the failing kern_return_t.
 */
int
load_thread_info(struct macos_proc * mp)
{
	kern_return_t   rc = 0;
	int             i = 0;
	int             t_utime = 0;
	int             t_stime = 0;
	int             t_cpu = 0;
	task_t          the_task = mp->the_task;

	thread_array_t  thread_list = NULL;

	/*
	 * We need to load all of the threads for the given task so we can get the
	 * performance data from them.
	 */

	mp->thread_count = 0;
	rc = task_threads(the_task, &thread_list, &(mp->thread_count));

	if (rc != KERN_SUCCESS) {
		return (rc);
	}

	/*
	 * now, for each of the threads, we need to sum the stats so we can
	 * present the whole thing to the caller.
	 */

	for (i = 0; i < mp->thread_count; i++) {
		struct thread_basic_info t_info;
		unsigned int    icount = THREAD_BASIC_INFO_COUNT;

		rc = thread_info(thread_list[i], THREAD_BASIC_INFO,
		                 (thread_info_t) & t_info, &icount);
		if (rc != KERN_SUCCESS) {
			puke("error: unable to load thread info for task (%s); rc = %d",
			     strerror(errno), rc);
			/* FIX: fall through to the cleanup below instead of
			 * returning and leaking the thread list. */
			break;
		}
		t_utime += t_info.user_time.seconds;
		t_stime += t_info.system_time.seconds;
		t_cpu += t_info.cpu_usage;
	}

	/* FIX: release the per-thread send rights, previously leaked. */
	for (i = 0; i < mp->thread_count; i++) {
		mach_port_deallocate(mach_task_self(), thread_list[i]);
	}

	/* FIX: the array holds thread_t entries; the original sized the
	 * deallocation with sizeof(thread_array_t) — the size of a
	 * pointer, not an element. */
	vm_deallocate(mach_task_self(), (vm_address_t) thread_list,
	              sizeof(thread_t) * (mp->thread_count));

	if (rc != KERN_SUCCESS) {
		return (rc);
	}

	/*
	 * Now, we load the values in the structure above.
	 */

	RP(mp, user_time).seconds = t_utime;
	RP(mp, system_time).seconds = t_stime;
	RP(mp, cpu_usage) = t_cpu;

	return (KERN_SUCCESS);
}
/*
 * Thread-tag inspection tool: print the tag of every thread in the
 * target pid's task; with -i, interactively set new tags.
 * Must run as root (task_for_pid).
 */
int main(int argc, char *argv[])
{
	kern_return_t ret;
	mach_port_name_t port;
	int pid;
	int c;
	thread_act_t *thread_array;
	mach_msg_type_number_t num_threads;
	int i;
	boolean_t interactive = FALSE;
	int tag;

	if (geteuid() != 0) {
		printf("Must be run as root\n");
		exit(1);
	}

	/* Do switch parsing: */
	while ((c = getopt (argc, argv, "hiv:")) != -1) {
		switch (c) {
		case 'i':
			interactive = TRUE;
			break;
		case 'v':
			verbosity = atoi(optarg);
			break;
		case 'h':
		case '?':
		default:
			usage();
		}
	}
	argc -= optind;
	argv += optind;

	/* FIX: pid was read uninitialized when no argument was supplied;
	 * require one (usage() is assumed not to return — confirm). */
	if (argc > 0)
		pid = atoi(*argv);
	else
		usage();

	ret = task_for_pid(mach_task_self(), pid, &port);
	if (ret != KERN_SUCCESS)
		err(1, "task_for_pid(,%d,) returned %d", pid, ret);

	mutter("task %p\n", port);
	ret = task_threads(port, &thread_array, &num_threads);
	if (ret != KERN_SUCCESS)
		/* FIX: the original passed (pid, ret) to a single-%d format. */
		err(1, "task_threads() returned %d", ret);

	for (i = 0; i < num_threads; i++) {
		printf(" %d: thread 0x%08x tag %d\n",
		       i, thread_array[i], thread_tag_get(thread_array[i]));
	}

	while (interactive) {
		printf("Enter new tag or <return> to skip or ^D to quit\n");
		for (i = 0; i < num_threads; i++) {
			tag = thread_tag_get(thread_array[i]);
			printf(" %d: thread 0x%08x tag %d: ", i, thread_array[i], tag);
			fflush(stdout);
			(void) fgets(input, 20, stdin);
			if (feof(stdin)) {
				printf("\n");
				interactive = FALSE;
				break;
			}
			if (strlen(input) > 1) {
				tag = atoi(input);
				thread_tag_set(thread_array[i], tag);
			}
		}
	}
	return 0;
}
static gint64 get_process_stat_item (int pid, int pos, int sum, MonoProcessError *error) { #if defined(__APPLE__) double process_user_time = 0, process_system_time = 0;//, process_percent = 0; task_t task; struct task_basic_info t_info; mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT, th_count; thread_array_t th_array; size_t i; if (task_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS) RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND); if (task_info(task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count) != KERN_SUCCESS) { mach_port_deallocate (mach_task_self (), task); RET_ERROR (MONO_PROCESS_ERROR_OTHER); } if (task_threads(task, &th_array, &th_count) != KERN_SUCCESS) { mach_port_deallocate (mach_task_self (), task); RET_ERROR (MONO_PROCESS_ERROR_OTHER); } for (i = 0; i < th_count; i++) { double thread_user_time, thread_system_time;//, thread_percent; struct thread_basic_info th_info; mach_msg_type_number_t th_info_count = THREAD_BASIC_INFO_COUNT; if (thread_info(th_array[i], THREAD_BASIC_INFO, (thread_info_t)&th_info, &th_info_count) == KERN_SUCCESS) { thread_user_time = th_info.user_time.seconds + th_info.user_time.microseconds / 1e6; thread_system_time = th_info.system_time.seconds + th_info.system_time.microseconds / 1e6; //thread_percent = (double)th_info.cpu_usage / TH_USAGE_SCALE; process_user_time += thread_user_time; process_system_time += thread_system_time; //process_percent += th_percent; } } for (i = 0; i < th_count; i++) mach_port_deallocate(task, th_array[i]); mach_port_deallocate (mach_task_self (), task); process_user_time += t_info.user_time.seconds + t_info.user_time.microseconds / 1e6; process_system_time += t_info.system_time.seconds + t_info.system_time.microseconds / 1e6; if (pos == 10 && sum == TRUE) return (gint64)((process_user_time + process_system_time) * 10000000); else if (pos == 10) return (gint64)(process_user_time * 10000000); else if (pos == 11) return (gint64)(process_system_time * 10000000); return 0; #else char 
buf [512]; char *s, *end; FILE *f; int len, i; gint64 value; g_snprintf (buf, sizeof (buf), "/proc/%d/stat", pid); f = fopen (buf, "r"); if (!f) RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND); len = fread (buf, 1, sizeof (buf), f); fclose (f); if (len <= 0) RET_ERROR (MONO_PROCESS_ERROR_OTHER); s = strchr (buf, ')'); if (!s) RET_ERROR (MONO_PROCESS_ERROR_OTHER); s++; while (g_ascii_isspace (*s)) s++; if (!*s) RET_ERROR (MONO_PROCESS_ERROR_OTHER); /* skip the status char */ while (*s && !g_ascii_isspace (*s)) s++; if (!*s) RET_ERROR (MONO_PROCESS_ERROR_OTHER); for (i = 0; i < pos; ++i) { while (g_ascii_isspace (*s)) s++; if (!*s) RET_ERROR (MONO_PROCESS_ERROR_OTHER); while (*s && !g_ascii_isspace (*s)) s++; if (!*s) RET_ERROR (MONO_PROCESS_ERROR_OTHER); } /* we are finally at the needed item */ value = strtoul (s, &end, 0); /* add also the following value */ if (sum) { while (g_ascii_isspace (*s)) s++; if (!*s) RET_ERROR (MONO_PROCESS_ERROR_OTHER); value += strtoul (s, &end, 0); } if (error) *error = MONO_PROCESS_ERROR_NONE; return value; #endif }
/*
 * Read a register set of the current debuggee thread into `buf`.
 *
 * `type` selects GPR/flags/segment vs. debug registers; the concrete Mach
 * thread-state flavor is chosen per architecture and bitness.  Returns
 * sizeof (R_DEBUG_REG_T) on success or R_FALSE on failure.
 *
 * NOTE(review): THREAD_GET_STATE is a project macro — it presumably reads
 * state from inferior_threads[tid] into `regs` using `gp_count`; those
 * variable names are therefore kept exactly as in the original.
 */
int xnu_reg_read (RDebug *dbg, int type, ut8 *buf, int size) {
	int ret;
	int result = sizeof (R_DEBUG_REG_T);
	unsigned int j;
	int pid = dbg->pid;
	thread_array_t inferior_threads = NULL;
	unsigned int inferior_thread_count = 0;
	R_DEBUG_REG_T *regs = (R_DEBUG_REG_T*)buf;
	unsigned int gp_count = R_DEBUG_STATE_SZ;
	int tid = dbg->tid;

	ret = task_threads (pid_to_task (pid), &inferior_threads,
		&inferior_thread_count);
	if (ret != KERN_SUCCESS) {
		return R_FALSE;
	}
	/* BUG FIX: cast to int avoids the signed/unsigned comparison, where a
	 * negative tid would be promoted to a huge unsigned value. */
	if (tid < 0 || tid >= (int)inferior_thread_count) {
		dbg->tid = tid = dbg->pid;
	}
	if (tid == dbg->pid) {
		tid = 0;
	}
	if (inferior_thread_count > 0) {
		/* TODO: allow to choose the thread */
		gp_count = R_DEBUG_STATE_SZ;
		// XXX: kinda spaguetti coz multi-arch
#if __i386__ || __x86_64__
		switch (type) {
		case R_REG_TYPE_SEG:
		case R_REG_TYPE_FLG:
		case R_REG_TYPE_GPR:
			ret = THREAD_GET_STATE ((dbg->bits == R_SYS_BITS_64)?
				x86_THREAD_STATE: i386_THREAD_STATE);
			break;
		case R_REG_TYPE_DRX:
			ret = THREAD_GET_STATE ((dbg->bits == R_SYS_BITS_64)?
				x86_DEBUG_STATE64: x86_DEBUG_STATE32);
			break;
		}
#elif __arm__ || __arm64__ || __aarch64__
		switch (type) {
		case R_REG_TYPE_FLG:
		case R_REG_TYPE_GPR:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_GET_STATE (ARM_THREAD_STATE64);
			} else {
				ret = THREAD_GET_STATE (ARM_THREAD_STATE);
			}
			break;
		case R_REG_TYPE_DRX:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_GET_STATE (ARM_DEBUG_STATE64);
			} else {
				ret = THREAD_GET_STATE (ARM_DEBUG_STATE32);
			}
			break;
		}
#else
		eprintf ("Unknown architecture\n");
#endif
		if (ret != KERN_SUCCESS) {
			eprintf (
				"debug_getregs: Failed to get thread %d %d.error (%x). (%s)\n",
				(int)pid, pid_to_task (pid), (int)ret,
				MACH_ERROR_STRING (ret)
			);
			perror ("thread_get_state");
			result = R_FALSE;
		}
	} else eprintf ("There are no threads!\n");
	/* BUG FIX: release the thread port rights and the array allocated by
	 * task_threads(); the original leaked both on every call. */
	for (j = 0; j < inferior_thread_count; j++) {
		mach_port_deallocate (mach_task_self (), inferior_threads[j]);
	}
	vm_deallocate (mach_task_self (), (vm_address_t)inferior_threads,
		inferior_thread_count * sizeof (thread_t));
	return result;
}
int thread_stats(int pid, struct thread_basic_info *info, int *thread_count) { int i; kern_return_t status; kern_return_t status_dealloc; task_t p_task; thread_array_t thread_list, list; struct thread_basic_info threadInfo; unsigned int info_count = THREAD_BASIC_INFO_COUNT; /* Get the task pointer for the process. */ status = task_by_unix_pid( task_self(), pid, &p_task); if (status!=KERN_SUCCESS) { #ifdef DEBUG printf("pid = %i\n", pid); mach_error("Error calling task_by_unix_pid()", status); #endif return status; } /* Get the list of threads for the task. */ status = task_threads(p_task, &thread_list, thread_count); if (status!=KERN_SUCCESS) { #ifdef DEBUG mach_error("Error calling task_threads()", status); #endif return status; } /* Get the pctcpu value for each thread and sum the values */ info->user_time.seconds = 0; info->user_time.microseconds = 0; info->system_time.seconds = 0; info->system_time.microseconds = 0; info->cpu_usage = 0; info->sleep_time = 0; for(i=0; i<*thread_count; i++) { status = thread_info(thread_list[i], THREAD_BASIC_INFO, (thread_info_t)&threadInfo, &info_count); if (status!=KERN_SUCCESS) { #ifdef DEBUG mach_error("Error calling thread_info()", status); #endif break; } else { if(i==0) { info->base_priority = threadInfo.base_priority; info->cur_priority = threadInfo.cur_priority; info->run_state = threadInfo.run_state; info->flags = threadInfo.flags; info->suspend_count = threadInfo.suspend_count; info->sleep_time += threadInfo.sleep_time; } info->user_time.seconds += threadInfo.user_time.seconds; info->user_time.microseconds += threadInfo.user_time.microseconds; info->system_time.seconds += threadInfo.system_time.seconds; info->system_time.microseconds += threadInfo.system_time.microseconds; info->cpu_usage += threadInfo.cpu_usage; } } /* Deallocate the list of threads. 
*/ status_dealloc = vm_deallocate(task_self(), (vm_address_t)thread_list, sizeof(thread_list)*(*thread_count)); if (status_dealloc != KERN_SUCCESS) { #ifdef DEBUG mach_error("Trouble freeing thread_list", status_dealloc); #endif status = status_dealloc; } return status; }
int main() { int pid; int gcount; long address; unsigned char * bytes; mach_port_t task; thread_act_port_array_t threadList; mach_msg_type_number_t threadCount; printf("PID to query, or 0 for this process: "); scanf("%d", &pid); if(pid == 0){ pid = getpid(); } printf("[i] OK, using PID: %d\n", pid); int retval = task_for_pid(mach_task_self(), pid, &task); if(retval!=KERN_SUCCESS){ fprintf(stderr, "[!] Failed to get task. Do you have perms?\n"); fprintf(stderr, "Error: %s\n", mach_error_string(retval)); return 1; } printf("[i] Querying thread list\n"); retval = task_threads(task, &threadList, &threadCount); if(retval!=KERN_SUCCESS){ fprintf(stderr, "[!] Failed to read thread list\n"); fprintf(stderr, "Error: %s\n", mach_error_string(retval)); return 1; } printf("[+] Thread Count: %d\n", threadCount); printf("Address to start reading from: "); scanf("%ld", &address); printf("Number of bytes to read:: "); scanf("%d", &gcount); printf("[i] Staring... \n"); bytes = malloc(gcount); //Allocate memory for reading time_t temptime = time(NULL); long tempaddr = address; while(address < pow(2, 63)){ retval = mach_vm_write(task, (mach_vm_address_t)address, (vm_offset_t)*bytes, gcount); if(retval == KERN_SUCCESS){ printf("Succesfull Read from @0x%016lx: %s\n", address, bytes); } if(time(NULL) - temptime > TICK){ //probably a load of overhead in calling time() long bytes_read = address - tempaddr; int seconds_elapsed = time(NULL) - temptime; float read_rate = bytes_read/seconds_elapsed/(1024*1024); printf("Tick... currently at 0x%016lx (%f MB/sec)\n", address, read_rate); temptime = time(NULL); tempaddr = address; } address += gcount; //move to next chunk } }