Example #1
File: prints.c Project: 7799/linux
/* %K: format a struct cpu_key for reiserfs debug output */
static void sprintf_cpu_key(char *buf, struct cpu_key *key)
{
	if (key)
		sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
			key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
			cpu_type(key));
	else
		sprintf(buf, "[NULL]");
}
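This helper backs reiserfs's custom %K conversion: it renders a CPU-endian key into a caller-supplied buffer, with a NULL guard. Below is a minimal standalone sketch of the same null-guarded formatting pattern, using snprintf for bounds safety; demo_key and its fields are simplified stand-ins, not the kernel's types.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct cpu_key. */
struct demo_key {
    unsigned dir_id;
    unsigned objectid;
};

/* Format a key as "[dir objectid]" or "[NULL]", never overrunning buf. */
static void format_key(char *buf, size_t len, const struct demo_key *key)
{
    if (key)
        snprintf(buf, len, "[%u %u]", key->dir_id, key->objectid);
    else
        snprintf(buf, len, "[NULL]");
}

int main(void)
{
    char buf[64];
    struct demo_key k = { 1, 2 };

    format_key(buf, sizeof(buf), &k);
    puts(buf);                          /* [1 2] */
    format_key(buf, sizeof(buf), NULL);
    puts(buf);                          /* [NULL] */
    return 0;
}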
Example #2
int checkboard(void)
{
    puts("Board: ");
    if (board_type == BOARD_IS_MARSBOARD)
        puts("MarSBoard\n");
    else if (board_type == BOARD_IS_RIOTBOARD)
        puts("RIoTboard\n");
    else
        printf("unknown - cputype : %02x\n", cpu_type(get_cpu_rev()));

    return 0;
}
Example #3
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;

#else
#error unknown architecture
#endif /* __i386__ || __x86_64__ */

}
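The values assigned above are published under the hw.* sysctl MIB; from userspace on a Mach/XNU system they can be read back with sysctlbyname(3). A hedged sketch, assuming a macOS-style libc where hw.cputype and hw.cpusubtype carry the values taken from cpu_type() and cpu_subtype():

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
    int cputype = 0, cpusubtype = 0;
    size_t len = sizeof(cputype);

    /* hw.cputype / hw.cpusubtype are filled in by sysctl_mib_init(). */
    if (sysctlbyname("hw.cputype", &cputype, &len, NULL, 0) == 0)
        printf("hw.cputype = %d\n", cputype);
    len = sizeof(cpusubtype);
    if (sysctlbyname("hw.cpusubtype", &cpusubtype, &len, NULL, 0) == 0)
        printf("hw.cpusubtype = %d\n", cpusubtype);
    return 0;
}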
Example #4
void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
)
{
    int			i;

    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
        if (cpu_data_ptr[i] == NULL)
            continue;

        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpu_type();
    hostinfo->cpu_subtype = cpu_subtype();
}
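The loop sets one mask bit per populated CPU slot. Note that 1 << i is a signed int shift, which is undefined behavior once i reaches 31; a sketch of the same pattern with an unsigned constant, assuming a 32-bit mask as the cpus_mask field suggests:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Build a bitmask with one bit set per non-NULL slot. */
static uint32_t cpu_mask(void *slots[], int nslots)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < nslots && i < 32; i++) {
        if (slots[i] != NULL)
            mask |= UINT32_C(1) << i;   /* unsigned shift: defined for i == 31 */
    }
    return mask;
}

int main(void)
{
    int a, b;
    void *slots[4] = { &a, NULL, &b, NULL };

    printf("mask = 0x%" PRIx32 "\n", cpu_mask(slots, 4));   /* 0x5 */
    return 0;
}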
Example #5
int board_early_init_f(void)
{
    u32 cputype = cpu_type(get_cpu_rev());

    switch (cputype) {
    case MXC_CPU_MX6SOLO:
        board_type = BOARD_IS_RIOTBOARD;
        break;
    case MXC_CPU_MX6D:
        board_type = BOARD_IS_MARSBOARD;
        break;
    }

    setup_iomux_uart();

    if (board_type == BOARD_IS_RIOTBOARD)
        imx_iomux_v3_setup_multiple_pads(
            tft_pads_riot, ARRAY_SIZE(tft_pads_riot));
    else if (board_type == BOARD_IS_MARSBOARD)
        imx_iomux_v3_setup_multiple_pads(
            tft_pads_mars, ARRAY_SIZE(tft_pads_mars));
#if defined(CONFIG_VIDEO_IPUV3)
    /* power ON LCD */
    gpio_direction_output(IMX_GPIO_NR(1, 29) , 1);
    /* touch interrupt is an input */
    gpio_direction_input(IMX_GPIO_NR(6, 14));
    /* power ON backlight */
    gpio_direction_output(IMX_GPIO_NR(6, 15) , 1);
    /* set backlight level to off */
    if (board_type == BOARD_IS_RIOTBOARD)
        gpio_direction_output(IMX_GPIO_NR(1, 18) , 0);
    else if (board_type == BOARD_IS_MARSBOARD)
        gpio_direction_output(IMX_GPIO_NR(2, 10) , 0);
    setup_display();
#endif

    return 0;
}
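The switch above has no default arm, so on any other i.MX6 variant board_type keeps its previous value and the later board_type tests silently skip the pad setup. A hedged sketch of the same dispatch with an explicit fallback; the enum values are illustrative stand-ins, and BOARD_IS_UNKNOWN is a hypothetical sentinel not present in the original:

#include <stdio.h>

/* Illustrative stand-ins; the real values live in U-Boot's i.MX headers. */
enum { MXC_CPU_MX6SOLO = 1, MXC_CPU_MX6D, MXC_CPU_OTHER };
enum { BOARD_IS_UNKNOWN, BOARD_IS_RIOTBOARD, BOARD_IS_MARSBOARD };

/* Same dispatch as board_early_init_f(), with an explicit fallback so an
 * unrecognized CPU cannot leave the board type unset. */
static int identify_board(unsigned cputype)
{
    switch (cputype) {
    case MXC_CPU_MX6SOLO:
        return BOARD_IS_RIOTBOARD;
    case MXC_CPU_MX6D:
        return BOARD_IS_MARSBOARD;
    default:
        return BOARD_IS_UNKNOWN;
    }
}

int main(void)
{
    printf("%d %d %d\n",
           identify_board(MXC_CPU_MX6SOLO),   /* 1: RIoTboard */
           identify_board(MXC_CPU_MX6D),      /* 2: MarSBoard */
           identify_board(MXC_CPU_OTHER));    /* 0: unknown   */
    return 0;
}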
Example #6
File: kdp_vm.c Project: Prajna/xnu
int
kern_dump(void)
{
	vm_map_t	map;
	unsigned int	thread_count, segment_count;
	unsigned int	command_size = 0, header_size = 0, tstate_size = 0;
	uint64_t	hoffset = 0, foffset = 0, nfoffset = 0;
	unsigned int	max_header_size = 0;
	vm_offset_t	header, txstart;
	vm_map_offset_t vmoffset;
	struct mach_header_64		*mh64;
	struct segment_command_64	*sc64;
	mach_vm_size_t	size = 0;
	vm_prot_t	prot = 0;
	vm_prot_t	maxprot = 0;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t	nflavors;
	vm_size_t	i;
	uint32_t	nesting_depth = 0;
	kern_return_t	kret = 0;
	struct vm_region_submap_info_64	vbr;
	mach_msg_type_number_t	vbrcount  = 0;
	tir_t tir1;

	int error = 0;
	int panic_error = 0;

	map = kernel_map;

	thread_count = 1;
	segment_count = get_vmmap_entries(map); 
  
	printf("Kernel map has %d entries\n", segment_count);

	nflavors = kdp_mynum_flavors;
	bcopy((char *)thread_flavor_array,(char *) flavors,sizeof(thread_flavor_array));

	for (i = 0; i < nflavors; i++)
		tstate_size += (uint32_t)(sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int)));

	command_size = (uint32_t)((segment_count) *
	    sizeof(struct segment_command_64) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count);

	header_size = command_size + (uint32_t)sizeof(struct mach_header_64);
	header = (vm_offset_t) command_buffer;
	
	/*
	 *	Set up Mach-O header for the currently executing 64-bit kernel.
	 */
	printf ("Generated Mach-O header size was %u\n", header_size);

	mh64 = (struct mach_header_64 *) header;
	mh64->magic = MH_MAGIC_64;
	mh64->cputype = cpu_type();
	mh64->cpusubtype = cpu_subtype();
	mh64->filetype = MH_CORE;
	mh64->ncmds = segment_count + thread_count;
	mh64->sizeofcmds = command_size;
	mh64->flags = 0;
	mh64->reserved = 0;

	hoffset = sizeof(struct mach_header_64);	/* offset into header */
	foffset = (uint32_t)round_page(header_size);	/* offset into file */
	/* Padding */
	if ((foffset - header_size) < (4*sizeof(struct segment_command_64))) {
		foffset += (uint32_t)((4*sizeof(struct segment_command_64)) - (foffset-header_size)); 
	}

	max_header_size = (unsigned int)foffset;

	vmoffset = vm_map_min(map);

	/* Transmit the Mach-O MH_CORE header, and seek forward past the 
	 * area reserved for the segment and thread commands 
	 * to begin data transmission 
	 */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset) , &nfoffset)) < 0) { 
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	} 

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header_64), (caddr_t) mh64)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
	printf ("Transmitting kernel state, please wait: ");

	while ((segment_count > 0) || (kret == KERN_SUCCESS)){

		while (1) {

			/*
			 *	Get region information for next region.
			 */

			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if((kret = mach_vm_region_recurse(map, 
				    &vmoffset, &size, &nesting_depth, 
				    (vm_region_recurse_info_t)&vbr,
				    &vbrcount)) != KERN_SUCCESS) {
				break;
			}

			if(vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}

		if(kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;
		maxprot = vbr.max_protection;

		/*
		 *	Fill in segment command structure.
		 */
    
		if (hoffset > max_header_size)
			break;
		sc64 = (struct segment_command_64 *) (header);
		sc64->cmd = LC_SEGMENT_64;
		sc64->cmdsize = sizeof(struct segment_command_64);
		sc64->segname[0] = 0;
		sc64->vmaddr = vmoffset;
		sc64->vmsize = size;
		sc64->fileoff = foffset;
		sc64->filesize = size;
		sc64->maxprot = maxprot;
		sc64->initprot = prot;
		sc64->nsects = 0;

		if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { 
			printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		} 
    
		if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command_64) , (caddr_t) sc64)) < 0) {
			printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}

		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
		 * seek past that region on the server - this creates a
		 * hole in the file.
		 */

		if ((vbr.user_tag != VM_MEMORY_IOKIT)) {

			if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset) , &foffset)) < 0) {
				printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}

			txstart = vmoffset;

			if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size, (caddr_t) txstart)) < 0)	{
				printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}
		}

		hoffset += (unsigned int)sizeof(struct segment_command_64);
		foffset += (unsigned int)size;
		vmoffset += size;
		segment_count--;
	}
	tir1.header = header;
	tir1.hoffset = 0;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;

	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file
	 * offsets that point past the edge of the corefile, in the event that
	 * the last N VM regions were all I/O mapped or otherwise
	 * non-transferable memory,  not followed by a normal VM region;
	 * i.e. there will be no hole that reaches to the end of the core file.
	 */
	kern_collectth_state (current_thread(), &tir1);

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) { 
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
  
	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset , (caddr_t) header)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
    
	/* last packet */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0)
	{
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
out:
	return (error);
}
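The error checks in kern_dump depend on parenthesizing the assignment before the comparison: in (panic_error = f(...)) < 0 the return value is stored first, whereas without the inner parentheses < binds tighter than = and the variable captures the boolean result of the comparison instead. A minimal illustration of the two forms:

#include <stdio.h>

static int fails(void) { return -5; }

int main(void)
{
    int err;

    /* Wrong: '<' binds tighter than '=', so err becomes 1 (the comparison). */
    if ((err = fails() < 0))
        printf("buggy check:   err = %d\n", err);   /* err = 1, -5 is lost */

    /* Right: store the return value first, then compare it. */
    if ((err = fails()) < 0)
        printf("correct check: err = %d\n", err);   /* err = -5 */
    return 0;
}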
Example #7
unsigned long long darwin_virtual_size(void)
{
    kern_return_t error;
    task_t task;
    struct task_basic_info_64 taskinfo;
    cpu_type_t cputype;
    mach_msg_type_number_t count;
    mach_vm_size_t size;
    mach_vm_address_t address;
    mach_port_t object_name;
    vm_region_top_info_data_t info;
    mach_vm_size_t	vsize;
    mach_vm_size_t	empty;
    int has_shared_regions;

    empty = 0;

    count = TASK_BASIC_INFO_64_COUNT;
    task = mach_task_self();
    error = task_info(task, TASK_BASIC_INFO_64, (task_info_t)&taskinfo, &count);

    if (error != KERN_SUCCESS) {
        return 0;
    }

    vsize = taskinfo.virtual_size;

    cputype = cpu_type();

    // Go through all the vm regions and check to see if we should count them in the vsize or not
    for (address = 0, has_shared_regions = 0; ; address += size) {
        count = VM_REGION_TOP_INFO_COUNT;
        if (mach_vm_region(task, &address, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name) != KERN_SUCCESS) {
            // There are no more vm regions to look at.
            break;
        }

        if (in_shared_region(cputype, address)) {
            // Check whether this process has the globally shared text and data
            // regions mapped in. If so, set has_shared_regions to 1 so we only
            // check once.
            if (has_shared_regions == 0 && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t basic_info;

                count = VM_REGION_BASIC_INFO_COUNT_64;
                if (mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO, (vm_region_info_t)&basic_info, &count, &object_name) != KERN_SUCCESS) {
                    break;
                }

                if (basic_info.reserved) {
                    has_shared_regions = 1;
                }
            }

            // Skip the vm region if it is not mapped private.
            if (info.share_mode != SM_PRIVATE) {
                continue;
            }
        }

        if (info.share_mode == SM_EMPTY) {
            empty += size;
        }
    }

    // Subtract out the globally shared text and data region.
    if (has_shared_regions == 1) {
        vsize -= shared_region_size(cputype);
    }

    // Subtract out the empty pages (pagezero, stack guard, etc)
    vsize -= empty;

    return vsize;
}
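darwin_virtual_size() walks every VM region of the current task with mach_vm_region() and corrects the task's raw virtual_size for shared and empty regions, returning 0 if task_info() fails. A hedged usage sketch, assuming the function above (with its in_shared_region() and shared_region_size() helpers) is linked in:

#include <stdio.h>

unsigned long long darwin_virtual_size(void);   /* defined above */

int main(void)
{
    unsigned long long vsize = darwin_virtual_size();

    if (vsize == 0)
        fprintf(stderr, "task_info failed\n");
    else
        printf("adjusted virtual size: %llu bytes (%.1f MiB)\n",
               vsize, vsize / (1024.0 * 1024.0));
    return 0;
}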
Example #8
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permament VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	struct vnode 		*vp,       
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	struct uuid_command	*uulp = 0;
	integer_t		dlarchbits = 0;
	void *			control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void *			kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	proc_t			p = current_proc();		/* XXXX */
	int			error;
	int resid=0;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
	    	mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype, 
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);
		
	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
		
	switch (header->filetype) {
	
	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			return (LOAD_FAILURE);
		}
		break;
		
	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			return (LOAD_FAILURE);
		}
		break;
		
	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr )
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 *	Note: this variant forces aslr_offset to zero, so no slide
	 *	is ever applied.
	 */
	aslr_offset = 0;
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 3; pass++) {

		/*
		 * Check that the entry point is contained in an executable segment.
		 */
		if ((pass == 3) && (result->validentry == 0)) {
			thread_state_initialize(thread);
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
			case LC_SEGMENT_64:
				if (pass != 2)
					break;
				ret = load_segment(lcp,
				    		   header->filetype,
						   control,
						   file_offset,
						   macho_size,
						   vp,
						   map,
						   slide,
						   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 1)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_MAIN:
				if (pass != 1)
					break;
				if (depth != 1)
					break;
				ret = load_main(
						 (struct entry_point_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					uulp = (struct uuid_command *)lcp;
					memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					(depth == 1) ? result : NULL);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
#ifndef __arm__
			case LC_ENCRYPTION_INFO:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* Don't let the app run if it's 
					 * encrypted but we failed to set up the
					 * decrypter */
					 psignal(p, SIGKILL);
				}
				break;
#endif
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) { 
	    if (! got_code_signatures) {
		    struct cs_blob *blob;
		    /* no embedded signatures: look for detached ones */
		    blob = ubc_cs_blob_get(vp, -1, file_offset);
		    if (blob != NULL) {
			    /* get flags to be applied to the process */
			    result->csflags |= blob->csb_flags;
		    }
	    }

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

	    if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
		    /* load the dylinker, and always slide it by the ASLR
		     * offset regardless of PIE */
		    ret = load_dylinker(dlp, dlarchbits, map, thread, depth, aslr_offset, result);
	    }

	    if((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
	    }
	}

	if (kl_addr )
		kfree(kl_addr, kl_size);

	return(ret);
}
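The core of the command loop is the prevalidation of each load_command: the running offset must not wrap, cmdsize must cover at least the generic command header, and the command must stay inside sizeofcmds. A standalone sketch of that walk over an in-memory command area; cmd_hdr is a simplified stand-in for struct load_command, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct load_command. */
struct cmd_hdr {
    uint32_t cmd;
    uint32_t cmdsize;
};

/* Walk ncmds commands packed into buf[0..sizeofcmds); return 0 when the
 * layout validates, -1 on wraparound, undersized commands, or commands that
 * straddle the end of the area -- the same checks parse_machfile() makes. */
static int walk_commands(const uint8_t *buf, size_t sizeofcmds, uint32_t ncmds)
{
    size_t offset = 0;

    while (ncmds--) {
        const struct cmd_hdr *lcp;
        size_t oldoffset = offset;

        /* Bounds-check the fixed header before reading it (the kernel gets
         * this for free from its page-rounded kalloc() buffer). */
        if (offset + sizeof(*lcp) > sizeofcmds)
            return -1;
        lcp = (const struct cmd_hdr *)(buf + offset);
        offset += lcp->cmdsize;

        if (oldoffset > offset ||               /* offset wrapped */
            lcp->cmdsize < sizeof(*lcp) ||      /* absurdly small command */
            offset > sizeofcmds)                /* runs past reserved area */
            return -1;
        /* ... a real loader would dispatch on lcp->cmd here ... */
    }
    return 0;
}

int main(void)
{
    _Alignas(uint32_t) uint8_t buf[16] = { 0 };
    struct cmd_hdr ok = { 1, 8 }, bad = { 1, 4 };

    memcpy(buf, &ok, sizeof(ok));
    memcpy(buf + 8, &ok, sizeof(ok));
    printf("valid pair:     %d\n", walk_commands(buf, sizeof(buf), 2));  /*  0 */

    memcpy(buf, &bad, sizeof(bad));
    printf("undersized cmd: %d\n", walk_commands(buf, sizeof(buf), 2)); /* -1 */
    return 0;
}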
Example #9
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined(__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined(__arm__)
	kprintf("sysctl_mib_init: NEED ARM DEFINES\n");
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
#define is_capability_set(k) (((_get_cpu_capabilities() & (k)) == (k)) ? 1 : 0)
	mmx_flag		= is_capability_set(kHasMMX);
	sse_flag		= is_capability_set(kHasSSE);
	sse2_flag		= is_capability_set(kHasSSE2);
	sse3_flag		= is_capability_set(kHasSSE3);
	supplementalsse3_flag	= is_capability_set(kHasSupplementalSSE3);
	sse4_1_flag		= is_capability_set(kHasSSE4_1);
	sse4_2_flag		= is_capability_set(kHasSSE4_2);
	x86_64_flag		= is_capability_set(k64Bit);
	aes_flag		= is_capability_set(kHasAES);
	avx1_0_flag		= is_capability_set(kHasAVX1_0);
	rdrand_flag		= is_capability_set(kHasRDRAND);
	f16c_flag		= is_capability_set(kHasF16C);
	enfstrg_flag		= is_capability_set(kHasENFSTRG);

	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;
#elif defined(__arm__)
    kprintf("sysctl_mib_init: shortcircuiting to finish, reimplement\n");
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64__ && !__arm__ */

}
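The is_capability_set() macro tests a possibly multi-bit capability mask with (caps & k) == k, which reports true only when every bit of k is present; a bare caps & k would also fire on a partial match. A tiny illustration:

#include <stdio.h>

#define is_capability_set(caps, k) (((caps) & (k)) == (k))

enum { kHasA = 1 << 0, kHasB = 1 << 1 };

int main(void)
{
    unsigned caps = kHasA;              /* only one of the two bits set */
    unsigned both = kHasA | kHasB;

    printf("partial, &:  %d\n", (caps & both) != 0);            /* 1: misleading */
    printf("partial, ==: %d\n", is_capability_set(caps, both)); /* 0: correct   */
    return 0;
}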
Example #10
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.
 */
static
load_return_t
parse_machfile(
    struct vnode 		*vp,
    vm_map_t		map,
    thread_t		thread,
    struct mach_header	*header,
    off_t			file_offset,
    off_t			macho_size,
    int			depth,
    load_result_t		*result
)
{
    uint32_t		ncmds;
    struct load_command	*lcp;
    struct dylinker_command	*dlp = 0;
    integer_t		dlarchbits = 0;
    void *			pager;
    load_return_t		ret = LOAD_SUCCESS;
    caddr_t			addr;
    void *			kl_addr;
    vm_size_t		size,kl_size;
    size_t			offset;
    size_t			oldoffset;	/* for overflow check */
    int			pass;
    proc_t			p = current_proc();		/* XXXX */
    int			error;
    int resid=0;
    task_t task;
    size_t			mach_header_sz = sizeof(struct mach_header);
    boolean_t		abi64;
    boolean_t		got_code_signatures = FALSE;

    if (header->magic == MH_MAGIC_64 ||
            header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     *	Break infinite recursion
     */
    if (depth > 6) {
        return(LOAD_FAILURE);
    }

    task = (task_t)get_threadtask(thread);

    depth++;

    /*
     *	Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
            !grade_binary(header->cputype,
                          header->cpusubtype & ~CPU_SUBTYPE_MASK))
        return(LOAD_BADARCH);

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_DYLINKER:
        if (depth != 2) {
            return (LOAD_FAILURE);
        }
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     *	Get the pager for the file.
     */
    pager = (void *) ubc_getpager(vp);

    /*
     *	Map portion that must be accessible directly into
     *	kernel's map.
     */
    if ((mach_header_sz + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     *	Round size of Mach-O commands up to page boundary.
     */
    size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = (caddr_t)kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
                    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
    if (error) {
        if (kl_addr )
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);
    }
    /* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

    /*
     *	Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 2; pass++) {
        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, we just
         * run off the end of the reserved section by incrementing
         * the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             *	Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;
            offset += lcp->cmdsize;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents.  Invalid
             * values are ones which result in an overflow, or
             * which can not possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (oldoffset > offset ||
                    lcp->cmdsize < sizeof(struct load_command) ||
                    offset > header->sizeofcmds + mach_header_sz) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT_64:
                if (pass != 1)
                    break;
                ret = load_segment_64(
                          (struct segment_command_64 *)lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                          (struct segment_command *) lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp,
                                  thread,
                                  result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                          (struct thread_command *) lcp,
                          thread,
                          result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 2)
                    break;
                /* pager -> uip ->
                   load signatures & store in uip
                   set VM object "signed_pages"
                */
                ret = load_code_signature(
                          (struct linkedit_data_command *) lcp,
                          vp,
                          file_offset,
                          macho_size,
                          header->cputype,
                          (depth == 1) ? result : NULL);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                           "for file \"%s\"\n",
                           p->p_pid, ret, vp->v_name);
                    ret = LOAD_SUCCESS; /* ignore error */
                } else {
                    got_code_signatures = TRUE;
                }
                break;
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }
    if (ret == LOAD_SUCCESS) {
        if (! got_code_signatures) {
            struct cs_blob *blob;
            /* no embedded signatures: look for detached ones */
            blob = ubc_cs_blob_get(vp, -1, file_offset);
            if (blob != NULL) {
                /* get flags to be applied to the process */
                result->csflags |= blob->csb_flags;
            }
        }

        if (dlp != 0)
            ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);

        if(depth == 1) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            } else if ( abi64 ) {
#ifdef __ppc__
                /* Map in 64-bit commpage */
                /* LP64todo - make this clean */
                /*
                 * PPC51: ppc64 is limited to 51-bit addresses.
                 * Memory above that limit is handled specially
                 * at the pmap level.
                 */
                pmap_map_sharedpage(current_task(), get_map_pmap(map));
#endif /* __ppc__ */
            }
        }
    }

    if (kl_addr )
        kfree(kl_addr, kl_size);

    if (ret == LOAD_SUCCESS)
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);

    return(ret);
}