Example #1
__private_extern__ kern_return_t
chudxnu_task_write(
				   task_t		task,
				   uint64_t	useraddr,
				   void		*kernaddr,
				   vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;
	
	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);
	
	if(current_task() == task) {
		
		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}		
	
	ml_set_interrupts_enabled(old_level);

	return ret;
}
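A minimal caller sketch (hypothetical, not from the source above): copy a kernel-side record into a target task, letting the KERN_FAILURE return cover the interrupt-context case.

/*
 * Hypothetical caller of chudxnu_task_write(): push a kernel-side
 * record into a target task's address space.  The task_t and the
 * user-space destination address are assumed to come from elsewhere
 * (e.g. a sampling client); neither is part of the excerpt above.
 */
static kern_return_t
push_record(task_t target, uint64_t user_dst)
{
	uint32_t record = 0xfeedface;	/* kernel-side payload */

	/* returns KERN_FAILURE outright if called at interrupt context */
	return chudxnu_task_write(target, user_dst, &record, sizeof(record));
}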
Example #2
IOReturn
IOI2CDevice::unlockI2CBus(
	UInt32	clientKey)
{
	IOReturn	status;

	if (ml_at_interrupt_context())
	{
		ERRLOG("IOI2CDevice@%lx::unlockI2CBus from primary interrupt context not permitted\n", fI2CAddress);
		return kIOReturnNotPermitted;
	}

	if (isI2COffline())
	{
		ERRLOG("IOI2CDevice@%lx::unlockI2CBus device is offline\n", fI2CAddress);
		return kIOReturnOffline;
	}

	status = fProvider->callPlatformFunction(symUnlockI2CBus, false, (void *)0, (void *)clientKey, (void *)0, (void *)0);
	if (kIOReturnSuccess != status)
	{
		ERRLOG("IOI2CDevice@%lx::unlockI2CBus controller unlock failed key:%lx status:0x%x\n", fI2CAddress, clientKey, status);
		return status;
	}

//	DLOG("IOI2CDevice@%lx::unlockI2CBus - device UNLOCKED key:%lx\n", fI2CAddress, clientKey);

	I2CUNLOCK;

	return status;
}
Example #3
/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
	/*
	 * XXX Drat, get_interrupt_level is MACH_KERNEL_PRIVATE
	 * in osfmk/kern/cpu_data.h
	 */
	/* return get_interrupt_level(); */
	return (ml_at_interrupt_context() ? 1 : 0);
}
Example #4
/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum.  If the thread is the current thread, this method will
 * force a thread_block().  The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
    processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE; 
	
	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent binding to a
	 * shut-down processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {
		
		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() && 
			!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
    return KERN_FAILURE;
}
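A hedged sketch of the bind-run pattern the header comment describes; chudxnu_unbind_thread() is assumed here as the counterpart and is not shown in the excerpt above.

/*
 * Sketch only: bind the current thread to 'cpu', run some per-CPU work
 * after the forced reschedule, then unbind.  chudxnu_unbind_thread()
 * is an assumed counterpart, not part of the excerpt above.
 */
static kern_return_t
run_on_cpu(int cpu, void (*work)(void))
{
	if(chudxnu_bind_thread(current_thread(), cpu, 0) != KERN_SUCCESS)
		return KERN_FAILURE;

	/* per the header comment above, we are now executing on 'cpu' */
	work();

	chudxnu_unbind_thread(current_thread(), 0);	/* assumed API */
	return KERN_SUCCESS;
}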
Example #5
bool IOPlatformExpert::atInterruptLevel(void)
{
  return ml_at_interrupt_context();
}
Example #6
static spl_t
panic_prologue(const char *str)
{
	spl_t	s;

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			kdbg_dump_trace_to_file("/var/tmp/panic.trace");
		}
	}

	s = splhigh();
	disable_preemption();

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

	if( logPanicDataToScreen )
		disable_debug_output = FALSE;
		
	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			nestedpanic += 1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	return(s);
}
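The restart loop above is a first-panicker-wins latch: the first CPU to set panicstr proceeds, other CPUs spin on panicwait and retry, and the claiming CPU re-entering means a double panic. A stripped-down, stand-alone illustration of the same idea (not XNU's implementation):

#include <stdatomic.h>

/* Illustration only, not XNU code: the first CPU to claim the latch
 * proceeds; other CPUs spin while the claimer prints, then retry (the
 * system is on its way down at this point); the claimer re-entering
 * indicates a nested (double) panic. */
static atomic_int panic_claimed;	/* 0 = unclaimed */
static atomic_int panic_owner = -1;	/* CPU id of the claimer */
static atomic_int panic_wait;		/* nonzero while claimer prints */

static int
panic_latch(int my_cpu)			/* 1 = we own the panic, 0 = nested */
{
	for (;;) {
		int unclaimed = 0;
		if (atomic_compare_exchange_strong(&panic_claimed, &unclaimed, 1)) {
			atomic_store(&panic_owner, my_cpu);
			atomic_store(&panic_wait, 1);
			return 1;
		}
		if (atomic_load(&panic_owner) == my_cpu)
			return 0;	/* double panic */
		while (atomic_load(&panic_wait))
			;		/* let the first panicker print */
	}
}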
Example #7
__private_extern__ boolean_t
chudxnu_at_interrupt_context(void)
{
    return ml_at_interrupt_context();
}
Example #8
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs, 
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced
	
	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = (uint64_t)currPC;

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in this frame
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

        if(!currFP || !currPC) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
			*start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

        /*
         * retrieve the contents of the frame pointer and advance to the
         * next stack frame if it's valid
         */
        prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
        if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
        if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
            frames[ct++] = (uint64_t)currPC;
            prevPC = currPC;
        }
        if(prevFP <= currFP) {
            break;
        } else {
            currFP = prevFP;
        }	
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
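The loop above is a conventional frame-pointer walk: each frame stores the previous frame pointer at FP and the return address one slot above it, and the walk stops when the chain leaves the stack bounds or stops moving upward. A user-space analogue for illustration (assumes frame pointers are present, e.g. -fno-omit-frame-pointer, and does plain reads instead of the nofault chudxnu_kern_read() above):

#include <stdint.h>
#include <stddef.h>

static size_t
walk_frames(uintptr_t fp, uintptr_t stack_min, uintptr_t stack_max,
	    uintptr_t *out, size_t max)
{
	size_t n = 0;

	while (n < max && fp >= stack_min &&
	       fp + 2 * sizeof(uintptr_t) <= stack_max) {
		uintptr_t ret  = *(uintptr_t *)(fp + sizeof(uintptr_t)); /* return address */
		uintptr_t prev = *(uintptr_t *)fp;			 /* saved FP */

		if (ret == 0)
			break;
		out[n++] = ret;

		if (prev <= fp)		/* frames must move up the stack */
			break;
		fp = prev;
	}
	return n;
}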



__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
    task_t task = thread->task;
    uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
    mach_msg_type_number_t bufferIndex = 0;
    mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {
		
		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/* 
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread(). 
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;
			
			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		} 
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/* 
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context 
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U); 
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0; 

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/* 
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}
	
	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;
		
	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);
		
		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0U, tmp = 0U;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex, 
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(regs32) {
		uint32_t esp = 0U;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex, 
			bufferMaxIndex, TRUE);
		
		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;
		
		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex, 
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(u_regs32) {
		uint32_t esp = 0U;
		
		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex, 
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) && 
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

    *count = bufferIndex;
    return kr;
}
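A hedged caller sketch against the signature above; the 128-entry buffer size is arbitrary:

	uint64_t frames[128];
	mach_msg_type_number_t nframes = 128;	/* in: capacity, out: count */

	if(KERN_SUCCESS == chudxnu_thread_get_callstack64(current_thread(),
			frames, &nframes, FALSE)) {
		/* frames[0..nframes-1] holds PCs, topmost first, with the
		 * stack pointer appended when it could be read */
	}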
Example #9
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack(uint32_t *callStack,
                                                   mach_msg_type_number_t *count,
                                                   boolean_t user_only)
{
    kern_return_t kr;
    vm_address_t nextFramePointer = 0;
    vm_address_t currPC, currLR, currR0;
    vm_address_t framePointer;
    vm_address_t prevPC = 0;
    vm_address_t kernStackMin = min_valid_stack_address();
    vm_address_t kernStackMax = max_valid_stack_address();
    unsigned int *buffer = callStack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    struct savearea *sv;

    if(user_only) {
        sv = chudxnu_private_get_user_regs();
    } else {
        sv = chudxnu_private_get_regs();
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = SUPERVISOR_MODE(sv->save_srr1);

    if(!supervisor && ml_at_interrupt_context()) { // can't do copyin() if on interrupt stack
        *count = 0;
        return KERN_FAILURE;
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC)  */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0;  // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        vm_address_t pc = 0;
        // Above the stack pointer, the following values are saved:
        // saved LR
        // saved CR
        // saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        volatile vm_address_t fp_link = (vm_address_t)(((unsigned *)framePointer)+FP_LINK_OFFSET);

        // Note that we read the pc even for the first stack frame (which, in
        // theory, is always empty because the callee fills it in just before
        // it lowers the stack).  However, if we catch the program between
        // filling in the return address and lowering the stack, we still want
        // a valid backtrace; FixupStack correctly disregards this value if
        // necessary.

        if(supervisor) {
            kr = chudxnu_private_task_read_bytes(kernel_task, fp_link, sizeof(unsigned int), &pc);
        } else {
            kr = chudxnu_private_task_read_bytes(current_task(), fp_link, sizeof(unsigned int), &pc);
        }
        if(kr!=KERN_SUCCESS) {
            //        IOLog("task_read_callstack: unable to read framePointer: %08x\n",framePointer);
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid

        if(supervisor) {
            kr = chudxnu_private_task_read_bytes(kernel_task, framePointer, sizeof(unsigned int), &nextFramePointer);
        } else {
            kr = chudxnu_private_task_read_bytes(current_task(), framePointer, sizeof(unsigned int), &nextFramePointer);
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }

        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }
    
        if(nextFramePointer < framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack.  This means that we won't worry
    // about these values messing up stack compression.  These end up being used
    // by FixupStack.
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
Example #10
IOReturn
IOI2CDevice::lockI2CBus(
	UInt32	*clientKeyRef)
{
	IOReturn	status = kIOReturnSuccess;
	UInt32		clientLockKey;

//	DLOG("IOI2CDevice@%lx::lockI2CBus\n", fI2CAddress);

	if (clientKeyRef == NULL)
	{
		ERRLOG("IOI2CDevice@%lx::lockI2CBus bad args\n", fI2CAddress);
		return kIOReturnBadArgument;
	}

	if (ml_at_interrupt_context())
	{
		ERRLOG("IOI2CDevice@%lx::lockI2CBus from primary interrupt context not permitted\n", fI2CAddress);
		*clientKeyRef = kIOI2C_CLIENT_KEY_INVALID;
		return kIOReturnNotPermitted;
	}

	if (isI2COffline())
	{
		ERRLOG("IOI2CDevice@%lx::lockI2CBus device is offline\n", fI2CAddress);
		*clientKeyRef = kIOI2C_CLIENT_KEY_INVALID;
		return kIOReturnOffline;
	}

	I2CLOCK;

//	DLOG("IOI2CDevice@%lx::lockI2CBus - device LOCKED\n", fI2CAddress);

	// Cancel any pending clients if power has been dropped.
	if (isI2COffline())
	{
		ERRLOG("IOI2CDevice@%lx::lockI2CBus lock canceled: device is offline\n", fI2CAddress);

		I2CUNLOCK;

		*clientKeyRef = kIOI2C_CLIENT_KEY_INVALID;
		return kIOReturnOffline;
	}

	status = fProvider->callPlatformFunction(symLockI2CBus, false, (void *)0, (void *)&clientLockKey, (void *)0, (void *)0);
	if (kIOReturnSuccess != status)
	{
		ERRLOG("IOI2CDevice@%lx::lockI2CBus - lock canceled: controller lockI2CBus failed:0x%lx\n", fI2CAddress, (UInt32)status);

		I2CUNLOCK;

		*clientKeyRef = kIOI2C_CLIENT_KEY_INVALID;
		return status;
	}

	// Lock Succeeded. Return key.
	*clientKeyRef = clientLockKey;
//	DLOG("IOI2CDevice@%lx::lockI2C key: %lx\n", fI2CAddress, clientLockKey);

	return status;
}
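A hedged sketch of how a client pairs lockI2CBus() with unlockI2CBus() from Example #2; MyI2CClient and doTransaction() are hypothetical stand-ins, not part of the excerpts above.

// Hypothetical IOI2CDevice subclass method pairing lock and unlock.
// doTransaction() stands in for the client's actual bus work.
IOReturn
MyI2CClient::doLockedTransaction(void)
{
	UInt32		key;
	IOReturn	status = lockI2CBus(&key);

	if (kIOReturnSuccess != status)
		return status;		// offline, bad args, or interrupt context

	status = doTransaction(key);	// hypothetical helper

	unlockI2CBus(key);		// always release with the same key
	return status;
}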