Code Example #1
File: pcb.c  Project: rohsaini/mkunity
/*
 * machine_kernel_stack_init: Build the initial saved state on a thread's
 * kernel stack so the first switch into the thread resumes at `continuation'.
 */
void
machine_kernel_stack_init(
	struct thread_shuttle *thread,
	void		(*continuation)(void))
{
    vm_offset_t	stack;
    struct ppc_kernel_state *kss;

    assert(thread->kernel_stack);
    stack = thread->kernel_stack;

#if	MACH_ASSERT
    if (watchacts & WA_PCB)
	printf("machine_kernel_stack_init(thr=%x,stk=%x,cont=%x)\n", thread,stack,continuation);
#endif	/* MACH_ASSERT */

    kss = STACK_IKS(stack);

    /*
     * Build a kernel state area + arg frame on the stack for the initial
     * switch into the thread. We also store a zero into the kernel
     * stack pointer so the trap code knows there is already a frame
     * on the kernel stack.
     */

    kss->lr = (unsigned int) continuation;
    kss->r1 = (vm_offset_t) ((int)kss - KF_SIZE);

    *((int*)kss->r1) = 0;	/* Zero the frame backpointer */

    thread->top_act->mact.pcb->ksp = 0;

}
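Every example on this page reaches the saved kernel register area through the STACK_IKS() macro. For orientation only, the following is a sketch of the x86 definition as it appears in xnu's osfmk/i386/thread.h (an assumption here, not code from pcb.c above; the PPC variant used by this file yields a struct ppc_kernel_state instead):

/* The saved kernel state occupies the very top of the kernel stack;
 * STACK_IKS() simply points one struct below the stack's upper bound.
 * kernel_stack_size is the per-thread kernel stack size. */
#define STACK_IKS(stack) \
	((struct x86_kernel_state *)((stack) + kernel_stack_size) - 1)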
Code Example #2
File: db_trace.c  Project: 0xffea/gnumach
void
db_i386_reg_value(
	struct	db_variable	*vp,
	db_expr_t		*valuep,
	int			flag,
	db_var_aux_param_t	ap)
{
	long			*dp = 0;
	db_expr_t		null_reg = 0;
	register thread_t	thread = ap->thread;
	extern unsigned		int_stack_high;

	if (db_option(ap->modif, 'u')) {
	    if (thread == THREAD_NULL) {
		if ((thread = current_thread()) == THREAD_NULL)
		    db_error("no user registers\n");
	    }
	    if (thread == current_thread()) {
		if (ddb_regs.cs & 0x3)
		    dp = vp->valuep;
		else if (ddb_regs.ebp < int_stack_high)
		    db_error("cannot get/set user registers in nested interrupt\n");
	    }
	} else {
	    if (thread == THREAD_NULL || thread == current_thread()) {
		dp = vp->valuep;
	    } else if ((thread->state & TH_SWAPPED) == 0 &&
			thread->kernel_stack) {
		dp = db_lookup_i386_kreg(vp->name,
				(long *)(STACK_IKS(thread->kernel_stack)));
		if (dp == 0)
		    dp = &null_reg;
	    } else if ((thread->state & TH_SWAPPED) &&
			thread->swap_func != thread_exception_return) {
/*.....this breaks t/t $taskN.0...*/
		/* only EIP is valid */
		if (vp->valuep == (long *) &ddb_regs.eip) {
		    dp = (long *)(&thread->swap_func);
		} else {
		    dp = &null_reg;
		}
	    }
	}
	if (dp == 0) {
	    if (thread->pcb == 0)
		db_error("no pcb\n");
	    dp = (long *)((long)(&thread->pcb->iss) +
		    ((long)vp->valuep - (long)&ddb_regs));
	}
	if (flag == DB_VAR_SET)
	    *dp = *valuep;
	else
	    *valuep = *dp;
}
Code Example #3
File: kdp_vm.c  Project: Prajna/xnu
static void
kern_collectth_state(thread_t thread, tir_t *t)
{
	vm_offset_t	header;
	int  hoffset, i ;
	mythread_state_flavor_t *flavors;
	struct thread_command	*tc;
	/*
	 *	Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;
	
	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t)sizeof(struct thread_command) + t->tstate_size;
	hoffset += (uint32_t)sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < kdp_mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header+hoffset) =
		    flavors[i];
		hoffset += (uint32_t)sizeof(mythread_state_flavor_t);
		/* Locate and obtain the non-volatile register context
		 * for this kernel thread. This should ideally be
		 * encapsulated in machine_thread_get_kern_state()
		 * but that routine appears to have been co-opted
		 * by CHUD to obtain pre-interrupt state.
		 */
		if (flavors[i].flavor == x86_THREAD_STATE64) {
			x86_thread_state64_t *tstate = (x86_thread_state64_t *) (header + hoffset);
			vm_offset_t kstack;
			x86_saved_state64_t *cpstate = current_cpu_datap()->cpu_fatal_trap_state;
			bzero(tstate, x86_THREAD_STATE64_COUNT * sizeof(int));
			if ((current_thread() == thread) && (cpstate != NULL)) {
				tstate->rax = cpstate->rax;
				tstate->rbx = cpstate->rbx;
				tstate->rcx = cpstate->rcx;
				tstate->rdx = cpstate->rdx;
				tstate->rdi = cpstate->rdi;
				tstate->rsi = cpstate->rsi;
				tstate->rbp = cpstate->rbp;
				tstate->r8 = cpstate->r8;
				tstate->r9 = cpstate->r9;
				tstate->r10 = cpstate->r10;
				tstate->r11 = cpstate->r11;
				tstate->r12 = cpstate->r12;
				tstate->r13 = cpstate->r13;
				tstate->r14 = cpstate->r14;
				tstate->r15 = cpstate->r15;
				tstate->rip = cpstate->isf.rip;
				tstate->rsp = cpstate->isf.rsp;
				tstate->rflags = cpstate->isf.rflags;
				tstate->cs = cpstate->isf.cs;
				tstate->fs = cpstate->fs;
				tstate->gs = cpstate->gs;
			} else if ((kstack = thread->kernel_stack) != 0){
				struct x86_kernel_state *iks = STACK_IKS(kstack);
				tstate->rbx = iks->k_rbx;
				tstate->rsp = iks->k_rsp;
				tstate->rbp = iks->k_rbp;
				tstate->r12 = iks->k_r12;
				tstate->r13 = iks->k_r13;
				tstate->r14 = iks->k_r14;
				tstate->r15 = iks->k_r15;
				tstate->rip = iks->k_rip;
			}
		}
		else if (machine_thread_get_kern_state(thread,
			flavors[i].flavor, (thread_state_t) (header+hoffset),
			&flavors[i].count) != KERN_SUCCESS)
			printf ("Failure in machine_thread_get_kern_state()\n");
		hoffset += (uint32_t)(flavors[i].count*sizeof(int));
	}

	t->hoffset = hoffset;
}
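kern_collectth_state() emits one LC_THREAD load command per thread into the core file. For orientation, the byte layout it builds follows the standard Mach-O convention from <mach-o/loader.h>; the annotated sketch below is illustrative and not code from kdp_vm.c:

/*
 * At header + hoffset:
 *   struct thread_command {
 *       uint32_t cmd;           // LC_THREAD
 *       uint32_t cmdsize;       // sizeof(thread_command) + t->tstate_size
 *   };
 *   then, for each flavor i:
 *       uint32_t flavor;        // e.g. x86_THREAD_STATE64
 *       uint32_t count;         // state size in 32-bit words
 *       uint32_t state[count];  // the register state itself
 */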
Code Example #4
File: kdp_machdep.c  Project: Prajna/xnu
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint64_t *tracebuf = (uint64_t *)tracepos;
	uint32_t fence = 0;
	addr64_t stackptr = 0;
	int	 framecount = 0;
	addr64_t init_rip = 0;
	addr64_t prevsp = 0;
	unsigned framesize = 2 * sizeof(addr64_t);

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		init_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		init_rip = STACK_IKS(thread->kernel_stack)->k_rip;
		kdp_pmap = 0;
	}

	*tracebuf++ = init_rip;

	for (framecount = 0; framecount < nframes; framecount++) {

		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;

		if (!stackptr || (stackptr == fence)){
			break;
		}

		if (stackptr & 0x0000003) {
			break;
		}

		if (stackptr <= prevsp) {
			break;
		}

		if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
			break;
		}
		tracebuf++;

		prevsp = stackptr;
		if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
			*tracebuf++ = 0;
			break;
		}
	}

	kdp_pmap = NULL;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
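The byte count returned above describes a flat buffer of addr64_t entries: the initial RIP, followed by one (frame pointer, return address) pair per frame walked, possibly ending in a lone frame pointer where the walk stopped. A hypothetical consumer (the function name and formatting below are assumptions, not part of kdp_machdep.c) could decode it like this:

static void
dump_trace64(char *tracepos, int nbytes)
{
	addr64_t *cursor = (addr64_t *)tracepos;
	addr64_t *end    = (addr64_t *)(tracepos + nbytes);

	if (cursor < end)
		printf("initial rip 0x%llx\n", (unsigned long long)*cursor++);

	/* remaining entries come in (frame pointer, return address) pairs */
	while (cursor + 1 < end) {
		printf("  fp 0x%llx  ret 0x%llx\n",
		       (unsigned long long)cursor[0],
		       (unsigned long long)cursor[1]);
		cursor += 2;
	}
}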
Code Example #5
File: chud_thread_i386.c  Project: SbIm/xnu-env
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs, 
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced
	
	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = (uint64_t)currPC;

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

        if(!currFP || !currPC) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
			*start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

        /*
         * retrieve the contents of the frame pointer and advance to the
         * next stack frame if it's valid
         */
        prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
        if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
        if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
            frames[ct++] = (uint64_t)currPC;
            prevPC = currPC;
        }
        if(prevFP <= currFP) {
            break;
        } else {
            currFP = prevFP;
        }	
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}



__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
    task_t task = thread->task;
    uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
    mach_msg_type_number_t bufferIndex = 0;
    mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {
		
		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		 if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/* 
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread(). 
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;
			
			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		} 
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/* 
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context 
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U); 
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0; 

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/* 
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}
	
	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;
		
	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);
		
		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0ULL, tmp = 0ULL;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex, 
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(regs32) {
		uint32_t esp = 0UL;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex, 
			bufferMaxIndex, TRUE);
		
		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;
		
		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex, 
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(u_regs32) {
		uint32_t esp = 0UL;
		
		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex, 
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

    *count = bufferIndex;
    return kr;
}
Code Example #6
File: kdp_machdep.c  Project: wzw19890321/xnu-1
int
machine_trace_thread64(thread_t thread,
                       char * tracepos,
                       char * tracebound,
                       int nframes,
                       boolean_t user_p,
                       boolean_t trace_fp,
                       uint32_t * thread_trace_flags)
{
	uint64_t * tracebuf = (uint64_t *)tracepos;
	unsigned framesize  = (trace_fp ? 2 : 1) * sizeof(addr64_t);

	uint32_t fence             = 0;
	addr64_t stackptr          = 0;
	int framecount             = 0;
	addr64_t prev_rip          = 0;
	addr64_t prevsp            = 0;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map         = VM_MAP_NULL;

	if (thread->machine.iss == NULL) {
		// no register state to backtrace; the thread is probably terminating
		return 0;
	}

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		prev_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		bt_vm_map = thread->task->map;
	}
	else {
		stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
		prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		bt_vm_map = kernel_map;
	}

	for (framecount = 0; framecount < nframes; framecount++) {

		*tracebuf++ = prev_rip;
		if (trace_fp) {
			*tracebuf++ = stackptr;
		}

		if (!stackptr || (stackptr == fence)) {
			break;
		}
		if (stackptr & 0x0000007) {
			break;
		}
		if (stackptr <= prevsp) {
			break;
		}

		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_rip = *(uint64_t *)kern_virt_addr;
		if (!user_p) {
			prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		}

		prevsp = stackptr;

		kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			stackptr = *(uint64_t *)kern_virt_addr;
		} else {
			stackptr = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}

	machine_trace_thread_clear_validation_cache();

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
Code Example #7
File: kdp_machdep.c  Project: Prajna/xnu
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint32_t *tracebuf = (uint32_t *)tracepos;
	uint32_t fence = 0;
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;
	int framecount = 0;
	uint32_t init_eip = 0;
	uint32_t prevsp = 0;
	uint32_t framesize = 2 * sizeof(vm_offset_t);
	
	if (user_p) {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		init_eip = iss32->eip;
		stackptr = iss32->ebp;

		/* This bound isn't useful, but it doesn't hinder us */
		stacklimit = 0xffffffff;
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/* Examine the i386_saved_state at the base of the kernel stack */
		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
		init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
	}

	*tracebuf++ = init_eip;

	for (framecount = 0; framecount < nframes; framecount++) {

		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;
/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}

		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}

		if (stackptr > stacklimit) {
			break;
		}
		
		if (stackptr <= prevsp) {
			break;
		}

		if (kdp_machine_vm_read((mach_vm_address_t)(stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
			break;
		}
		tracebuf++;
		
		prevsp = stackptr;
		if (kdp_machine_vm_read((mach_vm_address_t)stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
			*tracebuf++ = 0;
			break;
		}
	}

	kdp_pmap = 0;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}