Example #1
File: debug.c Project: Prajna/xnu
__private_extern__ void panic_display_zprint()
{
	if(panic_include_zprint == TRUE) {

		unsigned int	i;
		struct zone	zone_copy;

		if(first_zone!=NULL) {
			if(ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					if(zone_copy.cur_size > (1024*1024)) {
						kdb_printf("%.20s:%lu\n",zone_copy.zone_name,(uintptr_t)zone_copy.cur_size);
					}	
					
					if(zone_copy.next_zone == NULL) {
						break;
					}

					if(ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("Kernel Stacks:%lu\n",(uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
		kdb_printf("PageTables:%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("Kalloc.Large:%lu\n",(uintptr_t)kalloc_large_total);
	}
}
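
The pattern above generalizes: copy each list node into a local buffer with ml_nofault_copy and follow the next pointer only from that copy, so a corrupted zone list cannot trigger a nested panic. Because ml_nofault_copy returns the number of bytes actually copied, comparing the result against the requested size detects unreadable memory. A minimal sketch of the idiom; struct node and walk_safely are illustrative names, not xnu symbols:

/* Sketch: defensively walk a kernel linked list during panic reporting. */
struct node {
	struct node	*next;
	/* ...payload fields... */
};

static void
walk_safely(struct node *head, unsigned int max_nodes)
{
	struct node	copy;
	vm_offset_t	cur = (vm_offset_t)head;
	unsigned int	i;

	for (i = 0; cur != 0 && i < max_nodes; i++) {
		/* A short copy means the memory is unreadable: stop, don't fault. */
		if (ml_nofault_copy(cur, (vm_offset_t)&copy, sizeof(copy)) != sizeof(copy))
			break;
		/* ...inspect the local copy, never the original node... */
		cur = (vm_offset_t)copy.next;
	}
}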
Example #2
File: debug.c Project: Prajna/xnu
static void
panic_display_process_name(void) {
	char proc_name[32] = "Unknown";
	task_t ctask = 0;
	void *cbsd_info = 0;

	/* Chase thread->task->bsd_info via fault-tolerant copies so that a bad
	 * pointer cannot trigger a nested panic while we are already panicking. */
	if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t))
		if(ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(&ctask->bsd_info)) == sizeof(&ctask->bsd_info))
			if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0))
				proc_name[sizeof(proc_name) - 1] = '\0';
	kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
}
Example #3
static void
panic_display_process_name(void) {
	/* because of scoping issues len(p_comm) from proc_t is hard coded here */
	char proc_name[17] = "Unknown";
	task_t ctask = 0;
	void *cbsd_info = 0;

	if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t))
		if(ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info))
			if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0))
				proc_name[sizeof(proc_name) - 1] = '\0';
	kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
}
Example #4
File: fbt.c Project: 0xffea/xnu
/*ARGSUSED*/
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = fbt->fbtp_ctl;

#if defined (__ppc__) || defined (__ppc64__)
	dtrace_casptr(&tempDTraceIntHook, NULL, fbt_perfIntCallback);
	if (tempDTraceIntHook != (perfCallback)fbt_perfIntCallback) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt_enable is failing for probe %s "
			    "in module %s: tempDTraceIntHook already occupied.",
			    fbt->fbtp_name, ctl->mod_modname);
		}
		return;
	}
#endif
	
	dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
	if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt_resume is failing for probe %s "
			    "in module %s: tempDTraceTrapHook already occupied.",
			    fbt->fbtp_name, ctl->mod_modname);
		}
		return;
	}
	
	for (; fbt != NULL; fbt = fbt->fbtp_next)
		(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint, 
								sizeof(fbt->fbtp_patchval));
		
	dtrace_membar_consumer();
}
Example #5
/*
 * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator.
 * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c
 */
__private_extern__ void panic_display_ztrace(void)
{
	if(panic_include_ztrace == TRUE) {
		unsigned int i = 0;
 		boolean_t keepsyms = FALSE;

		PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
		struct ztrace top_ztrace_copy;
		
		/* Make sure not to trip another panic if there's something wrong with memory */
		if(ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
			kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
			/* Print the backtrace addresses */
			for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH) ; i++) {
				kdb_printf("%p ", top_ztrace_copy.zt_stack[i]);
				if (keepsyms) {
					panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]);
				}
				kdb_printf("\n");
			}
			/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
			kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
		}
		else {
			kdb_printf("\nCan't access top_ztrace...\n");
		}
		kdb_printf("\n");
	}
}
Example #6
/*ARGSUSED*/
static void
sdt_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	sdt_probe_t *sdp = parg;
	struct modctl *ctl = sdp->sdp_ctl;

	ctl->mod_nenabled--;

	if (!ctl->mod_loaded || ctl->mod_loadcnt != sdp->sdp_loadcnt)
		goto err;

	while (sdp != NULL) {
		(void)ml_nofault_copy( (vm_offset_t)&sdp->sdp_savedval, (vm_offset_t)sdp->sdp_patchpoint, 
		                       (vm_size_t)sizeof(sdp->sdp_savedval));
		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush on platforms that need it
		 */
		flush_dcache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_savedval), 0);
		invalidate_icache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_savedval), 0);
		sdp = sdp->sdp_next;
	}

err:
	;
}
Example #7
/*ARGSUSED*/
static int
machtrace_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg) /* __APPLE__ */
    
	int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg);
	int enabled = (machtrace_sysent[sysnum].stsy_entry != DTRACE_IDNONE ||
			machtrace_sysent[sysnum].stsy_return != DTRACE_IDNONE);

	if (SYSTRACE_ISENTRY((uintptr_t)parg)) {
		machtrace_sysent[sysnum].stsy_entry = id;
	} else {
		machtrace_sysent[sysnum].stsy_return = id;
	}

	if (enabled) {
	    ASSERT(mach_trap_table[sysnum].mach_trap_function == (void *)dtrace_machtrace_syscall);
	    return(0);
	}

	lck_mtx_lock(&dtrace_systrace_lock);

	if (mach_trap_table[sysnum].mach_trap_function == machtrace_sysent[sysnum].stsy_underlying) {
		vm_offset_t dss = (vm_offset_t)&dtrace_machtrace_syscall;
		ml_nofault_copy((vm_offset_t)&dss, (vm_offset_t)&mach_trap_table[sysnum].mach_trap_function, sizeof(vm_offset_t));
	}

	lck_mtx_unlock(&dtrace_systrace_lock);

	return(0);
}
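
This example, and Examples #8, #10, and #18 below, share an idiom: the trap- or syscall-table entry is swapped by copying a pointer-sized value with ml_nofault_copy rather than by direct assignment, so a protected or unmapped table page cannot fault the kernel mid-patch. A minimal sketch of that swap under illustrative names (trap_fn_t and swap_table_entry are not xnu symbols):

/* Sketch: replace one function pointer in a table via a fault-tolerant,
 * pointer-sized copy. */
typedef kern_return_t (*trap_fn_t)(void *args);

static boolean_t
swap_table_entry(trap_fn_t *slot, trap_fn_t replacement)
{
	/* Copy exactly sizeof(trap_fn_t) bytes of the new pointer value. */
	return ml_nofault_copy((vm_offset_t)&replacement, (vm_offset_t)slot,
	                       sizeof(trap_fn_t)) == sizeof(trap_fn_t);
}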
Example #8
/*ARGSUSED*/
static void
systrace_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id) /* __APPLE__ */
    
	int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg);
	int disable = (systrace_sysent[sysnum].stsy_entry == DTRACE_IDNONE ||
	    systrace_sysent[sysnum].stsy_return == DTRACE_IDNONE);

	if (disable) {
		lck_mtx_lock(&dtrace_systrace_lock);
		if (sysent[sysnum].sy_callc == dtrace_systrace_syscall)
			ml_nofault_copy((vm_offset_t)&systrace_sysent[sysnum].stsy_underlying, (vm_offset_t)&sysent[sysnum].sy_callc, sizeof(systrace_sysent[sysnum].stsy_underlying));
		lck_mtx_unlock(&dtrace_systrace_lock);

#ifdef _SYSCALL32_IMPL
		(void) casptr(&sysent32[sysnum].sy_callc,
		    (void *)dtrace_systrace_syscall32,
		    (void *)systrace_sysent32[sysnum].stsy_underlying);
#endif
	}

	if (SYSTRACE_ISENTRY((uintptr_t)parg)) {
		systrace_sysent[sysnum].stsy_entry = DTRACE_IDNONE;
#ifdef _SYSCALL32_IMPL
		systrace_sysent32[sysnum].stsy_entry = DTRACE_IDNONE;
#endif
	} else {
		systrace_sysent[sysnum].stsy_return = DTRACE_IDNONE;
#ifdef _SYSCALL32_IMPL
		systrace_sysent32[sysnum].stsy_return = DTRACE_IDNONE;
#endif
	}
}
Example #9
/*ARGSUSED*/
static void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
	    ctl = fbt->fbtp_ctl;

	    ASSERT(ctl->mod_nenabled > 0);
	    if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
		continue;

	    (void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint, 
								sizeof(fbt->fbtp_savedval));
		
		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush for the platforms that need it
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_savedval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_savedval), 0);
		
		fbt->fbtp_currentval = fbt->fbtp_savedval;
	}
	
	dtrace_membar_consumer();
}
Example #10
/*ARGSUSED*/
static void
machtrace_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id) /* __APPLE__ */
      
	int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg);
	int disable = (machtrace_sysent[sysnum].stsy_entry == DTRACE_IDNONE ||
			machtrace_sysent[sysnum].stsy_return == DTRACE_IDNONE);

	if (disable) {

		lck_mtx_lock(&dtrace_systrace_lock);

		if (mach_trap_table[sysnum].mach_trap_function == (mach_call_t)dtrace_machtrace_syscall) {
			ml_nofault_copy((vm_offset_t)&machtrace_sysent[sysnum].stsy_underlying, (vm_offset_t)&mach_trap_table[sysnum].mach_trap_function, sizeof(vm_offset_t));
		}
		lck_mtx_unlock(&dtrace_systrace_lock);
	}

	if (SYSTRACE_ISENTRY((uintptr_t)parg)) {
		machtrace_sysent[sysnum].stsy_entry = DTRACE_IDNONE;
	} else {
		machtrace_sysent[sysnum].stsy_return = DTRACE_IDNONE;
	}
}
Example #11
File: debug.c Project: Prajna/xnu
static void panic_display_kernel_uuid(void) {
	char tmp_kernel_uuid[sizeof(kernel_uuid)];

	if (ml_nofault_copy((vm_offset_t) &kernel_uuid, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid)) != sizeof(kernel_uuid))
		return;

	if (tmp_kernel_uuid[0] != '\0')
		kdb_printf("Kernel UUID: %s\n", tmp_kernel_uuid);
}
Example #12
__private_extern__ kern_return_t
chudxnu_kern_write(
				   vm_offset_t	dstaddr,
				   void		*srcaddr,
				   vm_size_t	size)
{
	return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
			KERN_SUCCESS: KERN_FAILURE);
}
Example #13
__private_extern__ void panic_display_zprint()
{
	if(panic_include_zprint == TRUE) {

		unsigned int	i;
		struct zone	zone_copy;

		kdb_printf("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size");
		if(first_zone!=NULL) {
			if(ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					if(zone_copy.cur_size > (1024*1024)) {
						kdb_printf("%-20s %10lu %10lu\n",zone_copy.zone_name, (uintptr_t)zone_copy.cur_size,(uintptr_t)(zone_copy.countfree * zone_copy.elem_size));
					}	
					
					if(zone_copy.next_zone == NULL) {
						break;
					}

					if(ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("%-20s %10lu\n", "Kernel Stacks", (uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
		kdb_printf("%-20s %10lu\n", "PageTables",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("%-20s %10lu\n", "Kalloc.Large", (uintptr_t)kalloc_large_total);
		if (panic_kext_memory_info) {
			mach_memory_info_t *mem_info = (mach_memory_info_t *)panic_kext_memory_info;
			kdb_printf("\n%-5s %10s\n", "Kmod", "Size");
			for (i = 0; i < VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT; i++) {
				if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) && (mem_info[i].size > (1024 * 1024))) {
					kdb_printf("%-5lld %10lld\n", mem_info[i].site, mem_info[i].size);
				}
			}
		}
	}
}
Example #14
File: debug.c Project: Prajna/xnu
static void panic_display_model_name(void) {
	char tmp_model_name[sizeof(model_name)];

	if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name))
		return;

	tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';

	if (tmp_model_name[0] != 0)
		kdb_printf("System model name: %s\n", tmp_model_name);
}
Example #15
File: fbt.c Project: 0xffea/xnu
/*ARGSUSED*/
static void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint, 
								sizeof(fbt->fbtp_savedval));
		
	dtrace_membar_consumer();
}
Example #16
/*ARGSUSED*/
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
	    ctl = fbt->fbtp_ctl;

	    ASSERT(ctl->mod_nenabled > 0);
	    if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
		continue;
	
	    dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
	    if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt_resume is failing for probe %s "
			    "in module %s: tempDTraceTrapHook already occupied.",
			    fbt->fbtp_name, ctl->mod_modname);
		}
		return;
	    }
	
	    (void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint, 
								sizeof(fbt->fbtp_patchval));

#if CONFIG_EMBEDDED
		/*
		 * Make the patched instruction visible via a data + instruction cache flush.
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0);
#endif
		
  	    fbt->fbtp_currentval = fbt->fbtp_patchval;
	}
	
	dtrace_membar_consumer();
}
Example #17
static
void lockstat_hot_patch(boolean_t active, int ls_probe)
{
#pragma unused(active)
	int i;

	/*
	 * Loop through entire table, in case there are
	 * multiple patch points per probe. 
	 */
	for (i = 0; assembly_probes[i].lsap_patch_point; i++) {
		if (ls_probe == assembly_probes[i].lsap_probe)
#if defined(__x86_64__)
		{			
			uint8_t instr;
			instr = (active ? NOP : RET );
			(void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i].lsap_patch_point), 
								sizeof(instr));
		}
#endif
	} /* for */
}
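
Here the x86-64 branch patches a single byte at each probe site: a NOP lets execution fall through into the probe code, while a RET skips it entirely. A hedged sketch of that one-byte patch idiom; the opcode constants are spelled out for illustration (the real NOP/RET macros come from the lockstat source):

/* Sketch: enable/disable a probe by patching one instruction byte.
 * 0x90 and 0xC3 are the x86 one-byte NOP and RET encodings. */
#define PATCH_NOP	0x90
#define PATCH_RET	0xC3

static void
patch_probe_byte(vm_offset_t patch_point, boolean_t active)
{
	uint8_t instr = active ? PATCH_NOP : PATCH_RET;
	/* If the copy fails, the old byte stays in place, which is harmless. */
	(void)ml_nofault_copy((vm_offset_t)&instr, patch_point, sizeof(instr));
}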
Example #18
/*ARGSUSED*/
static int
systrace_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg) /* __APPLE__ */
    
	int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg);
	int enabled = (systrace_sysent[sysnum].stsy_entry != DTRACE_IDNONE ||
	    systrace_sysent[sysnum].stsy_return != DTRACE_IDNONE);

	if (SYSTRACE_ISENTRY((uintptr_t)parg)) {
		systrace_sysent[sysnum].stsy_entry = id;
#ifdef _SYSCALL32_IMPL
		systrace_sysent32[sysnum].stsy_entry = id;
#endif
	} else {
		systrace_sysent[sysnum].stsy_return = id;
#ifdef _SYSCALL32_IMPL
		systrace_sysent32[sysnum].stsy_return = id;
#endif
	}

	if (enabled) {
		ASSERT(sysent[sysnum].sy_callc == dtrace_systrace_syscall);
		return(0);
	}
#ifdef _SYSCALL32_IMPL
	(void) casptr(&sysent32[sysnum].sy_callc,
	    (void *)systrace_sysent32[sysnum].stsy_underlying,
	    (void *)dtrace_systrace_syscall32);
#endif

	lck_mtx_lock(&dtrace_systrace_lock);
	if (sysent[sysnum].sy_callc == systrace_sysent[sysnum].stsy_underlying) {
		vm_offset_t dss = (vm_offset_t)&dtrace_systrace_syscall;
		ml_nofault_copy((vm_offset_t)&dss, (vm_offset_t)&sysent[sysnum].sy_callc, sizeof(vm_offset_t));
	}
	lck_mtx_unlock(&dtrace_systrace_lock);
	return (0);
}
Example #19
/*ARGSUSED*/
static int
sdt_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	sdt_probe_t *sdp = parg;
	struct modctl *ctl = sdp->sdp_ctl;

	ctl->mod_nenabled++;

	/*
	 * If this module has disappeared since we discovered its probes,
	 * refuse to enable it.
	 */
	if (!ctl->mod_loaded) {
		if (sdt_verbose) {
			cmn_err(CE_NOTE, "sdt is failing for probe %s "
			    "(module %s unloaded)",
			    sdp->sdp_name, ctl->mod_modname);
		}
		goto err;
	}

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->mod_loadcnt != sdp->sdp_loadcnt) {
		if (sdt_verbose) {
			cmn_err(CE_NOTE, "sdt is failing for probe %s "
			    "(module %s reloaded)",
			    sdp->sdp_name, ctl->mod_modname);
		}
		goto err;
	}

	dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
	if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
		if (sdt_verbose) {
			cmn_err(CE_NOTE, "sdt_enable is failing for probe %s "
			    "in module %s: tempDTraceTrapHook already occupied.",
			    sdp->sdp_name, ctl->mod_modname);
		}
		return (0);
	}

	while (sdp != NULL) {
		(void)ml_nofault_copy( (vm_offset_t)&sdp->sdp_patchval, (vm_offset_t)sdp->sdp_patchpoint, 
		                       (vm_size_t)sizeof(sdp->sdp_patchval));

		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush on platforms that need it
		 */
		flush_dcache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_patchval), 0);
		invalidate_icache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_patchval), 0);

		sdp = sdp->sdp_next;
	}

err:
	return (0);
}
Example #20
/*ARGSUSED*/
int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

    for (; fbt != NULL; fbt = fbt->fbtp_next) {

	ctl = fbt->fbtp_ctl;
	
	if (!ctl->mod_loaded) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt is failing for probe %s "
			    "(module %s unloaded)",
			    fbt->fbtp_name, ctl->mod_modname);
		}

		continue;
	}

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->mod_loadcnt != fbt->fbtp_loadcnt) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt is failing for probe %s "
			    "(module %s reloaded)",
			    fbt->fbtp_name, ctl->mod_modname);
		}

		continue;
	}	

	dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
	if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
		if (fbt_verbose) {
			cmn_err(CE_NOTE, "fbt_enable is failing for probe %s "
			    "in module %s: tempDTraceTrapHook already occupied.",
			    fbt->fbtp_name, ctl->mod_modname);
		}
		continue;
	}

	if (fbt->fbtp_currentval != fbt->fbtp_patchval) {
		(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint, 
								sizeof(fbt->fbtp_patchval));
		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush for the platforms that need it
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0);
		fbt->fbtp_currentval = fbt->fbtp_patchval;

		ctl->mod_nenabled++;
	}

    }
    
    dtrace_membar_consumer();
    
    return (0);
}
Example #21
File: debug.c Project: UIKit0/xnu
__private_extern__ void panic_display_system_configuration(void) {

	//panic_display_process_name();
#ifdef __arm__
	{
#else
	if (OSCompareAndSwap(0, 1, &config_displayed)) {
#endif
		char buf[256];
		if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
			kdb_printf("Boot args: %s\n", buf);
		kdb_printf("\nMac OS version:\n%s\n",
		    (osversion[0] != 0) ? osversion : "Not yet set");
		kdb_printf("\nKernel version:\n%s\n",version);
#ifdef __arm__
		kdb_printf("\niBoot version: %s\n", firmware_version);
		kdb_printf("Secure boot?: %s\n\n", debug_enabled ? "NO" : "YES");
#endif
		panic_display_kernel_uuid();
		panic_display_kernel_aslr();
		panic_display_pal_info();
		panic_display_model_name();
		panic_display_uptime();
		panic_display_zprint();
#if CONFIG_ZLEAKS
		panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
		kext_dump_panic_lists(&kdb_log);
	}
}

extern zone_t		first_zone;
extern unsigned int	num_zones, stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int	inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif

extern boolean_t	panic_include_zprint;

__private_extern__ void panic_display_zprint()
{
	if(panic_include_zprint == TRUE) {

		unsigned int	i;
		struct zone	zone_copy;

		if(first_zone!=NULL) {
			if(ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					if(zone_copy.cur_size > (1024*1024)) {
						kdb_printf("%.20s:%lu\n",zone_copy.zone_name,(uintptr_t)zone_copy.cur_size);
					}	
					
					if(zone_copy.next_zone == NULL) {
						break;
					}

					if(ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("Kernel Stacks:%lu\n",(uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
		kdb_printf("PageTables:%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("Kalloc.Large:%lu\n",(uintptr_t)kalloc_large_total);
	}
}

#if CONFIG_ZLEAKS
extern boolean_t	panic_include_ztrace;
extern struct ztrace* top_ztrace;
/*
 * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator.
 * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c
 */
__private_extern__ void panic_display_ztrace(void)
{
	if(panic_include_ztrace == TRUE) {
		unsigned int i = 0;
		struct ztrace top_ztrace_copy;
		
		/* Make sure not to trip another panic if there's something wrong with memory */
		if(ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
			kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
			/* Print the backtrace addresses */
			for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH) ; i++) {
				kdb_printf("%p\n", top_ztrace_copy.zt_stack[i]);
			}
			/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
			kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
		}
		else {
			kdb_printf("\nCan't access top_ztrace...\n");
		}
		kdb_printf("\n");
	}
}
#endif /* CONFIG_ZLEAKS */

#if !MACH_KDP
static struct ether_addr kdp_current_mac_address = {{0, 0, 0, 0, 0, 0}};

/* XXX ugly forward declares to stop warnings */
void *kdp_get_interface(void);
void kdp_set_ip_and_mac_addresses(struct in_addr *, struct ether_addr *);
void kdp_set_gateway_mac(void *);
void kdp_set_interface(void *);
void kdp_register_send_receive(void *, void *);
void kdp_unregister_send_receive(void *, void *);
void kdp_snapshot_preflight(int, void *, uint32_t, uint32_t);
int kdp_stack_snapshot_geterror(void);
int kdp_stack_snapshot_bytes_traced(void);

void *
kdp_get_interface( void)
{
        return (void *)0;
}

unsigned int
kdp_get_ip_address(void)
{ return 0; }

struct ether_addr
kdp_get_mac_addr(void)
{       
        return kdp_current_mac_address;
}

void
kdp_set_ip_and_mac_addresses(   
        __unused struct in_addr          *ipaddr,
        __unused struct ether_addr       *macaddr)
{}

void
kdp_set_gateway_mac(__unused void *gatewaymac)
{}

void
kdp_set_interface(__unused void *ifp)
{}

void
kdp_register_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_unregister_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_snapshot_preflight(__unused int pid, __unused void * tracebuf,
		__unused uint32_t tracebuf_size, __unused uint32_t options)
{}

int
kdp_stack_snapshot_geterror(void)
{       
        return -1;
}

int
kdp_stack_snapshot_bytes_traced(void)
{       
        return 0;
}
Example #22
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
			KERN_SUCCESS: KERN_FAILURE);
}
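
A minimal usage sketch for the two chudxnu wrappers (Examples #12 and #22), assuming kernel context; some_kernel_counter is an illustrative name, not a real xnu symbol:

/* Hypothetical caller: snapshot a kernel variable and write it back,
 * treating any short copy as failure instead of faulting. */
extern uint32_t some_kernel_counter;	/* illustrative */

static void
bump_counter_safely(void)
{
	uint32_t snapshot = 0;

	if (chudxnu_kern_read(&snapshot, (vm_offset_t)&some_kernel_counter,
	                      sizeof(snapshot)) != KERN_SUCCESS)
		return;
	snapshot++;
	(void)chudxnu_kern_write((vm_offset_t)&some_kernel_counter,
	                         &snapshot, sizeof(snapshot));
}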