/*
 * chudxnu_thread_get_state:
 *
 * CHUD wrapper around machine_thread_get_state().  For the current
 * activation, GPR flavors are served directly from the saved register
 * area (user or supervisor, per `user_only`); FP/VMX flavors are only
 * available for user state.  Any other activation is handed straight
 * to the machine layer.
 */
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	struct savearea *sv;

	/* Threads other than the caller's go straight to the machine layer. */
	if (thr_act != current_act())
		return machine_thread_get_state(thr_act, flavor, tstate, count);

	switch (flavor) {
	case PPC_THREAD_STATE:
	case PPC_THREAD_STATE64:
		/* Pick the user or supervisor save area, then copy it out. */
		sv = user_only ? chudxnu_private_get_user_regs()
		               : chudxnu_private_get_regs();
		return chudxnu_copy_savearea_to_threadstate(flavor, tstate,
		                                            count, sv);

	case PPC_FLOAT_STATE:
		if (user_only) {
#warning chudxnu_thread_get_state() does not yet support supervisor FP
			return machine_thread_get_state(current_act(), flavor,
			                                tstate, count);
		}
		break;

	case PPC_VECTOR_STATE:
		if (user_only) {
#warning chudxnu_thread_get_state() does not yet support supervisor VMX
			return machine_thread_get_state(current_act(), flavor,
			                                tstate, count);
		}
		break;

	default:
		break;
	}

	/* Unsupported flavor (or supervisor FP/VMX): report nothing back. */
	*count = 0;
	return KERN_INVALID_ARGUMENT;
}
/*
 * thread_get_state:
 *
 * Copy out the machine state of `thread` for the requested flavor.
 * For a remote thread the target is held (so it cannot terminate) and
 * stopped before its state is read; the thread mutex is dropped across
 * thread_stop() because stopping may block.  Returns KERN_ABORTED if
 * the stop was interrupted, KERN_TERMINATED if the thread is no longer
 * active (and not merely under inspection).
 */
kern_return_t
thread_get_state(
	register thread_t	thread,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Pin the target so it cannot exit while unlocked. */
			thread_hold(thread);

			/* thread_stop() may block; cannot hold the mutex across it. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				/* Target is stopped: safe to read its machine state. */
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* Stop was aborted (e.g. by a signal); give up. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			/* Reading our own state needs no stop/hold dance. */
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
	}
	else if (thread->inspection) {
		/* Dead but still inspectable (e.g. corpse): state is frozen. */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 *
 * A remote activation's state may only be read when it is genuinely
 * quiescent: suspended, bound to a shuttle that is not running, and
 * still the top activation.  The caller's own activation is always
 * readable.
 */
kern_return_t
thread_getstatus(
	register thread_act_t	act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	kern_return_t	result;
	thread_t	thread;
	boolean_t	readable;

	thread = act_lock_thread(act);

	/* De Morgan'd form of the original rejection test: either it is
	 * us, or it is suspended, has a shuttle, is not running, and is
	 * the top activation.  (thread != THREAD_NULL is checked before
	 * thread->state is touched.) */
	readable = (act == current_act()) ||
	    (act->suspend_count != 0 &&
	     thread != THREAD_NULL &&
	     !(thread->state & TH_RUN) &&
	     thread->top_act == act);

	if (readable)
		result = machine_thread_get_state(act, flavor, tstate, count);
	else
		result = KERN_FAILURE;

	act_unlock_thread(act);

	return (result);
}
/*
 * thread state should always be accessible by locking the thread
 * and copying it.  The activation messes things up so for right
 * now if it's not the top of the chain, use a special handler to
 * get the information when the shuttle returns to the activation.
 *
 * NOTE(review): this variant rejects act == current_act() outright —
 * presumably self-inspection goes through a different path here;
 * confirm against the callers before relying on that.
 *
 * Protocol: hold the activation, then loop stopping whatever shuttle
 * is currently on top of it.  The act lock must be dropped across
 * thread_stop() (which can block), so after re-locking we must verify
 * the same shuttle is still on top; if it changed, unstop the old one
 * and retry with the new one.
 */
kern_return_t
thread_get_state(
	register thread_act_t	act,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Keep the activation from terminating while we work on it. */
	thread_hold(act);

	for (;;) {
		thread_t			thread1;

		/* No shuttle, or act is no longer on top: nothing to stop. */
		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		/* Lock dropped: stopping may block. */
		if (!thread_stop(thread)) {
			/* Stop aborted; re-lock and bail with KERN_ABORTED. */
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		/* Re-lock and check the shuttle didn't change underneath us. */
		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		/* Different shuttle now on top: release the stale one, retry. */
		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_get_state(act, flavor, state, state_count);

	/* Undo the stop only if the stopped shuttle is still attached. */
	if (	thread != THREAD_NULL		&&
			thread->top_act == act		)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
/*
 * chudxnu_thread_get_state (i386):
 *
 * Fetch thread state for CHUD.  With `user_only` set, only user-mode
 * state is returned (and kernel threads are rejected).  Otherwise the
 * caller takes whichever context — user or kernel — the thread is
 * actually in, which matters when we are running on an interrupt stack.
 *
 * Unlike PPC, the i386 machine_thread_get_kern_state() returns only
 * kernel interrupt state (or nothing), not "the most recent save area
 * whatever its mode".
 */
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* Kernel threads have no user state to report. */
		if (thread->task == kernel_task)
			return KERN_FAILURE;

		/* The machine layer selects the 32/64-bit representation. */
		return machine_thread_get_state(thread, flavor, tstate, count);
	}

	/*
	 * Kernel state is only possibly readable for the current thread
	 * while an interrupt save frame exists on this CPU.
	 */
	if (thread != current_thread() ||
	    !current_cpu_datap()->cpu_int_state)
		return machine_thread_get_state(thread, flavor, tstate, count);

	/*
	 * An interrupt frame exists.  Decide whether the interrupt came
	 * from user land (frame is the user save area, first-level
	 * interrupt) or from kernel mode.
	 */
	if (USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
	    current_cpu_datap()->cpu_interrupt_level == 1) {
		/* Interrupted user-mode execution: user state is current. */
		return machine_thread_get_state(thread, flavor, tstate, count);
	}

	/* Interrupted kernel-mode execution. */
	return machine_thread_get_kern_state(thread, flavor, tstate, count);
}
/** * act_thread_csave * * Save the current thread context, used for the internal uthread structure. * (We should also save VFP state...) */ void *act_thread_csave(void) { kern_return_t kret; mach_msg_type_number_t val; thread_t thr_act = current_thread(); struct arm_thread_state_t *ts; ts = (struct arm_thread_state_t *)kalloc(sizeof(struct arm_thread_state)); if (ts == (struct arm_thread_state_t *)NULL) return((void *)0); val = ARM_THREAD_STATE_COUNT; kret = machine_thread_get_state(thr_act, ARM_THREAD_STATE, (thread_state_t) ts, &val); if (kret != KERN_SUCCESS) { kfree(ts, sizeof(struct arm_thread_state)); return((void *)0); } return ts; }