Example 1
kern_return_t
host_ipc_hash_info(
	host_t					host,
	hash_info_bucket_array_t		*infop,
	mach_msg_type_number_t 		*countp)
{
	vm_map_copy_t copy;
	vm_offset_t addr;
	vm_size_t size;
	hash_info_bucket_t *info;
	natural_t count;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	count = ipc_hash_size();
	size = round_page(count * sizeof(hash_info_bucket_t));
	kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
	if (kr != KERN_SUCCESS)
		return KERN_RESOURCE_SHORTAGE;

	info = (hash_info_bucket_t *) addr;
	count = ipc_hash_info(info, count);

	if (size > count * sizeof(hash_info_bucket_t))
		bzero((char *)&info[count], size - count * sizeof(hash_info_bucket_t));

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, 
			   (vm_map_size_t)size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (hash_info_bucket_t *) copy;
	*countp = count;
	return KERN_SUCCESS;
}
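
For context, here is a minimal user-space client of the routine above. This is a sketch, not part of the original listing: it assumes a kernel built with MACH_IPC_DEBUG (otherwise the call fails) and the mach_debug headers. The reply arrives as out-of-line memory, so it must be handed back with vm_deallocate().

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach_debug/mach_debug.h>
#include <stdio.h>

int main(void)
{
	hash_info_bucket_array_t info;
	mach_msg_type_number_t count;
	kern_return_t kr;

	kr = host_ipc_hash_info(mach_host_self(), &info, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_ipc_hash_info: %s\n", mach_error_string(kr));
		return 1;
	}

	for (mach_msg_type_number_t i = 0; i < count; i++)
		printf("bucket %u: %u entries\n", i, info[i].hib_count);

	/* The array arrived as out-of-line memory; give it back. */
	vm_deallocate(mach_task_self(), (vm_address_t)info,
		      count * sizeof(info[0]));
	return 0;
}
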
Example 2
kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t			host,
	__DEBUG_ONLY hash_info_bucket_array_t	*infop,
	__DEBUG_ONLY mach_msg_type_number_t 	*countp)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	vm_offset_t addr = 0;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = vm_map_round_page(actual * sizeof *info,
					 VM_MAP_PAGE_MASK(ipc_kernel_map));
		kr = vm_allocate(ipc_kernel_map, &addr, size,
				 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = (unsigned int) (size/sizeof (*info));
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used, vmused;

		used = (actual * sizeof(*info));
		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (vmused != size)
			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
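
The loop above is a recurring MIG server convention: try the caller's in-line buffer first, and grow a side allocation until a consistent snapshot fits. Here is a distilled, self-contained sketch of that pattern; fill_records() and collect_records() are hypothetical names standing in for vm_page_info()-style enumerators, not XNU APIs.

#include <stdlib.h>

/* Hypothetical enumerator: writes up to 'room' records and returns how
 * many exist in total, which may exceed 'room'. Stands in for
 * vm_page_info()/ipc_marequest_info() in the kernel code above. */
extern unsigned int fill_records(int *buf, unsigned int room);

/* Start with the caller's in-line buffer; fall back to a heap buffer
 * that grows until the snapshot fits, mirroring the kernel loop. */
int *collect_records(int *inline_buf, unsigned int *countp)
{
	int *buf = inline_buf;
	unsigned int room = *countp, actual;

	for (;;) {
		actual = fill_records(buf, room);
		if (actual <= room)
			break;

		if (buf != inline_buf)
			free(buf);	/* drop the too-small attempt */

		buf = malloc(actual * sizeof(*buf));
		if (buf == NULL)
			return NULL;
		room = actual;	/* the population may race and grow again */
	}

	*countp = actual;
	return buf;		/* == inline_buf if the data fit in-line */
}
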
Example 3
/*
 * Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm32_mapped_pages_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY page_address_array_t	*pages,
	__DEBUG_ONLY mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	pmap_t		pmap;
	vm_size_t	size, size_used;
	unsigned int	actual, space;
	page_address_array_t list;
	vm_offset_t	addr = 0;

	if (map == VM_MAP_NULL)
	    return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = vm_map_round_page(size,
				 VM_MAP_PAGE_MASK(ipc_kernel_map));

	for (;;) {
	    (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
	    (void) vm_map_unwire(
		    ipc_kernel_map,
		    vm_map_trunc_page(addr,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
		    vm_map_round_page(addr + size,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
		    FALSE);

	    list = (page_address_array_t) addr;
	    space = (unsigned int) (size / sizeof(vm_offset_t));

	    actual = pmap_list_resident_pages(pmap,
					list,
					space);
	    if (actual <= space)
		break;

	    /*
	     * Free memory if not enough
	     */
	    (void) kmem_free(ipc_kernel_map, addr, size);

	    /*
	     * Try again, doubling the size
	     */
	    size = vm_map_round_page(actual * sizeof(vm_offset_t),
				     VM_MAP_PAGE_MASK(ipc_kernel_map));
	}
	if (actual == 0) {
	    *pages = 0;
	    *pages_count = 0;
	    (void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
	    vm_size_t vmsize_used;
	    *pages_count = actual;
	    size_used = (actual * sizeof(vm_offset_t));
	    vmsize_used = vm_map_round_page(size_used,
					    VM_MAP_PAGE_MASK(ipc_kernel_map));
	    (void) vm_map_wire(
		    ipc_kernel_map,
		    vm_map_trunc_page(addr,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
		    vm_map_round_page(addr + size,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)), 
		    VM_PROT_READ|VM_PROT_WRITE,
		    FALSE);
	    (void) vm_map_copyin(ipc_kernel_map,
				(vm_map_address_t)addr,
				(vm_map_size_t)size_used,
				TRUE,
				(vm_map_copy_t *)pages);
	    if (vmsize_used != size) {
		(void) kmem_free(ipc_kernel_map,
				addr + vmsize_used,
				size - vmsize_used);
	    }
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}
Example 4
kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm32_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = VME_SUBMAP(entry);
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(natural_t)(uintptr_t) cobject;
				vio->vio_size =
					(natural_t) cobject->vo_size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
					(natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
					(natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
					(natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					(vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress +
					cobject->activity_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
					 VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ|VM_PROT_WRITE,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
					  VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used)
			kmem_free(ipc_kernel_map,
				  addr + vmsize_used, size - vmsize_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
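
The shadow-chain walk above (lock nobject, then unlock cobject) is classic hand-over-hand locking: the next link is pinned before the current one is released, so the chain cannot be torn down under the walker. A generic illustration with pthreads and a hypothetical node type, not XNU code:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical list node; stands in for vm_object_t and its 'shadow'
 * link in the walk above. */
struct node {
	pthread_mutex_t	lock;
	struct node	*next;
	int		value;
};

/* Hand-over-hand ("lock coupling") traversal: acquire the next node's
 * lock before releasing the current one, so the link being followed
 * can never be freed underneath us. */
long sum_chain(struct node *head)
{
	long total = 0;
	struct node *cur, *next;

	if (head == NULL)
		return 0;

	pthread_mutex_lock(&head->lock);
	for (cur = head;; cur = next) {
		/* cur is locked */
		total += cur->value;

		next = cur->next;
		if (next == NULL) {
			pthread_mutex_unlock(&cur->lock);
			break;
		}
		pthread_mutex_lock(&next->lock);
		pthread_mutex_unlock(&cur->lock);
	}
	return total;
}
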
Example 5
kern_return_t
host_ipc_marequest_info(
	host_t 				host,
	unsigned int 			*maxp,
	hash_info_bucket_array_t 	*infop,
	unsigned int 			*countp)
{
	vm_offset_t addr;
	vm_size_t size = 0; /* '=0' to shut up lint */
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = ipc_marequest_info(maxp, info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size/sizeof *info;
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, addr, used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
}
Example 6
kern_return_t
mach_port_space_info(
	ipc_space_t			space,
	ipc_info_space_t		*infop,
	ipc_info_name_array_t		*tablep,
	mach_msg_type_number_t 		*tableCntp,
	ipc_info_tree_name_array_t	*treep,
	mach_msg_type_number_t 		*treeCntp)
{
	ipc_info_name_t *table_info;
	unsigned int table_potential, table_actual;
	vm_offset_t table_addr;
	vm_size_t table_size = 0;	/* Suppress gcc warning */
	ipc_info_tree_name_t *tree_info;
	unsigned int tree_potential, tree_actual;
	vm_offset_t tree_addr;
	vm_size_t tree_size = 0;	/* Suppress gcc warning */
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	kern_return_t kr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	/* start with in-line memory */

	table_info = *tablep;
	table_potential = *tableCntp;
	tree_info = *treep;
	tree_potential = *treeCntp;

	for (;;) {
		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			if (table_info != *tablep)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);
			if (tree_info != *treep)
				kmem_free(ipc_kernel_map,
					  tree_addr, tree_size);
			return KERN_INVALID_TASK;
		}

		table_actual = space->is_table_size;
		tree_actual = space->is_tree_total;

		if ((table_actual <= table_potential) &&
		    (tree_actual <= tree_potential))
			break;

		is_read_unlock(space);

		if (table_actual > table_potential) {
			if (table_info != *tablep)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);

			table_size = round_page(table_actual *
						sizeof *table_info);
			kr = kmem_alloc(ipc_kernel_map,
					&table_addr, table_size);
			if (kr != KERN_SUCCESS) {
				if (tree_info != *treep)
					kmem_free(ipc_kernel_map,
						  tree_addr, tree_size);

				return KERN_RESOURCE_SHORTAGE;
			}

			table_info = (ipc_info_name_t *) table_addr;
			table_potential = table_size/sizeof *table_info;
		}

		if (tree_actual > tree_potential) {
			if (tree_info != *treep)
				kmem_free(ipc_kernel_map,
					  tree_addr, tree_size);

			tree_size = round_page(tree_actual *
					       sizeof *tree_info);
			kr = kmem_alloc(ipc_kernel_map,
					&tree_addr, tree_size);
			if (kr != KERN_SUCCESS) {
				if (table_info != *tablep)
					kmem_free(ipc_kernel_map,
						  table_addr, table_size);

				return KERN_RESOURCE_SHORTAGE;
			}

			tree_info = (ipc_info_tree_name_t *) tree_addr;
			tree_potential = tree_size/sizeof *tree_info;
		}
	}
	/* space is read-locked and active; we have enough wired memory */

	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iis_table_size = space->is_table_size;
	infop->iis_table_next = space->is_table_next->its_size;
	infop->iis_tree_size = space->is_tree_total;
	infop->iis_tree_small = space->is_tree_small;
	infop->iis_tree_hash = space->is_tree_hash;

	table = space->is_table;
	tsize = space->is_table_size;

	for (index = 0; index < tsize; index++) {
		ipc_info_name_t *iin = &table_info[index];
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits = entry->ie_bits;

		iin->iin_name = MACH_PORT_MAKEB(index, bits);
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_compat = FALSE;
		iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;
	}

	for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
	     tentry != ITE_NULL;
	     tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
		ipc_info_tree_name_t *iitn = &tree_info[index++];
		ipc_info_name_t *iin = &iitn->iitn_name;
		ipc_entry_t entry = &tentry->ite_entry;
		ipc_entry_bits_t bits = entry->ie_bits;

		assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

		iin->iin_name = tentry->ite_name;
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_compat = FALSE;
		iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;

		if (tentry->ite_lchild == ITE_NULL)
			iitn->iitn_lchild = MACH_PORT_NULL;
		else
			iitn->iitn_lchild = tentry->ite_lchild->ite_name;

		if (tentry->ite_rchild == ITE_NULL)
			iitn->iitn_rchild = MACH_PORT_NULL;
		else
			iitn->iitn_rchild = tentry->ite_rchild->ite_name;

	}
	ipc_splay_traverse_finish(&space->is_tree);
	is_read_unlock(space);

	if (table_info == *tablep) {
		/* data fit in-line; nothing to deallocate */

		*tableCntp = table_actual;
	} else if (table_actual == 0) {
		kmem_free(ipc_kernel_map, table_addr, table_size);

		*tableCntp = 0;
	} else {
		vm_size_t size_used, rsize_used;
		vm_map_copy_t copy;

		/* kmem_alloc doesn't zero memory */

		size_used = table_actual * sizeof *table_info;
		rsize_used = round_page(size_used);

		if (rsize_used != table_size)
			kmem_free(ipc_kernel_map,
				  table_addr + rsize_used,
				  table_size - rsize_used);

		if (size_used != rsize_used)
			memset((void *) (table_addr + size_used), 0,
			      rsize_used - size_used);

		kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used,
				   TRUE, &copy);

		assert(kr == KERN_SUCCESS);

		*tablep = (ipc_info_name_t *) copy;
		*tableCntp = table_actual;
	}

	if (tree_info == *treep) {
		/* data fit in-line; nothing to deallocate */

		*treeCntp = tree_actual;
	} else if (tree_actual == 0) {
		kmem_free(ipc_kernel_map, tree_addr, tree_size);

		*treeCntp = 0;
	} else {
		vm_size_t size_used, rsize_used;
		vm_map_copy_t copy;

		/* kmem_alloc doesn't zero memory */

		size_used = tree_actual * sizeof *tree_info;
		rsize_used = round_page(size_used);

		if (rsize_used != tree_size)
			kmem_free(ipc_kernel_map,
				  tree_addr + rsize_used,
				  tree_size - rsize_used);

		if (size_used != rsize_used)
			memset((void *) (tree_addr + size_used), 0,
			      rsize_used - size_used);

		kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used,
				   TRUE, &copy);

		assert(kr == KERN_SUCCESS);

		*treep = (ipc_info_tree_name_t *) copy;
		*treeCntp = tree_actual;
	}

	return KERN_SUCCESS;
}
Example 7
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct fileproc	*fp;
	struct vnode	*vp;
	void *	pager;
	vm_offset_t	map_addr=0;
	vm_size_t	map_size;
	int		err=0;
	vm_map_t	my_map;
	proc_t		p = current_proc();
	struct vnode_attr vattr;

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return(err);
	
	if (fp->f_fglob->fg_type != DTYPE_VNODE){
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if(err != 0) 
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}
	
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned (%d : %s)\n", p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	/*
	 *	Map in the file.
	 */
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}


	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0, 
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}


	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr))	||
					trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
					(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)ubc_map(vp, (PROT_READ | PROT_EXEC));
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}
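
map_fd() is long obsolete; the roughly equivalent user-visible operation today is mmap(2). A minimal sketch for comparison (assumes the file is at least one byte long; like map_fd, mmap requires a page-aligned file offset):

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Map the first page of a file read-only, the modern replacement for
 * the obsolete map_fd() path above. */
int main(int argc, char *argv[])
{
	if (argc != 2)
		return 1;

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }

	void *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	printf("first byte: 0x%02x\n", *(unsigned char *)p);
	munmap(p, 4096);
	close(fd);
	return 0;
}
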
Example 8
kern_return_t
mach_port_space_info(
	ipc_space_t			space,
	ipc_info_space_t		*infop,
	ipc_info_name_array_t		*tablep,
	mach_msg_type_number_t 		*tableCntp,
	__unused ipc_info_tree_name_array_t	*treep,
	__unused mach_msg_type_number_t         *treeCntp)
{
	ipc_info_name_t *table_info;
	vm_offset_t table_addr;
	vm_size_t table_size, table_size_needed;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	kern_return_t kr;
	vm_map_copy_t copy;


	if (space == IS_NULL)
		return KERN_INVALID_TASK;

#if !(DEVELOPMENT | DEBUG)
	const boolean_t dbg_ok = (mac_task_check_expose_task(kernel_task) == 0);
#else
	const boolean_t dbg_ok = TRUE;
#endif

	/* start with in-line memory */

	table_size = 0;

	for (;;) {
		is_read_lock(space);
		if (!is_active(space)) {
			is_read_unlock(space);
			if (table_size != 0)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);
			return KERN_INVALID_TASK;
		}

		table_size_needed =
			vm_map_round_page((space->is_table_size
					   * sizeof(ipc_info_name_t)),
					  VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (table_size_needed == table_size)
			break;

		is_read_unlock(space);

		if (table_size != table_size_needed) {
			if (table_size != 0)
				kmem_free(ipc_kernel_map, table_addr, table_size);
			kr = kmem_alloc(ipc_kernel_map,	&table_addr, table_size_needed, VM_KERN_MEMORY_IPC);
			if (kr != KERN_SUCCESS) {
				return KERN_RESOURCE_SHORTAGE;
			}
			table_size = table_size_needed;
		}

	}
	/* space is read-locked and active; we have enough wired memory */

	/* get the overall space info */
	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iis_table_size = space->is_table_size;
	infop->iis_table_next = space->is_table_next->its_size;

	/* walk the table for this space */
	table = space->is_table;
	tsize = space->is_table_size;
	table_info = (ipc_info_name_array_t)table_addr;
	for (index = 0; index < tsize; index++) {
		ipc_info_name_t *iin = &table_info[index];
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits;

		bits = entry->ie_bits;
		iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
		iin->iin_collision = 0;
		iin->iin_type = IE_BITS_TYPE(bits);
		if ((entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) != MACH_PORT_TYPE_NONE &&
		    entry->ie_request != IE_REQ_NONE) {
			__IGNORE_WCASTALIGN(ipc_port_t port = (ipc_port_t) entry->ie_object);

			assert(IP_VALID(port));
			ip_lock(port);
			iin->iin_type |= ipc_port_request_type(port, iin->iin_name, entry->ie_request);
			ip_unlock(port);
		}

		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (dbg_ok) ? (natural_t)VM_KERNEL_ADDRPERM((uintptr_t)entry->ie_object) : 0;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;
	}

	is_read_unlock(space);

	/* prepare the table out-of-line data for return */
	if (table_size > 0) {
		vm_size_t used_table_size;

		used_table_size = infop->iis_table_size * sizeof(ipc_info_name_t);
		if (table_size > used_table_size)
			bzero((char *)&table_info[infop->iis_table_size],
			      table_size - used_table_size);

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(table_addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(table_addr + table_size,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, 
				   (vm_map_size_t)used_table_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
		*tablep = (ipc_info_name_t *)copy;
		*tableCntp = infop->iis_table_size;
	} else {
		*tablep = (ipc_info_name_t *)0;
		*tableCntp = 0;
	}

	/* splay tree is obsolete, no work to do... */
	*treep = (ipc_info_tree_name_t *)0;
	*treeCntp = 0;
	return KERN_SUCCESS;
}
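
A minimal user-space caller of this routine might look like the sketch below. It assumes the mach_debug headers are available; note the code above always returns an empty tree, and recent kernels may restrict this interface. Both reply arrays are out-of-line and must be deallocated.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach_debug/ipc_info.h>
#include <stdio.h>

int main(void)
{
	ipc_info_space_t space_info;
	ipc_info_name_array_t table;
	ipc_info_tree_name_array_t tree;
	mach_msg_type_number_t tableCnt, treeCnt;
	kern_return_t kr;

	kr = mach_port_space_info(mach_task_self(), &space_info,
				  &table, &tableCnt, &tree, &treeCnt);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_space_info: %s\n",
			mach_error_string(kr));
		return 1;
	}

	printf("table size %u, next %u\n",
	       space_info.iis_table_size, space_info.iis_table_next);
	for (mach_msg_type_number_t i = 0; i < tableCnt; i++)
		if (table[i].iin_type != MACH_PORT_TYPE_NONE)
			printf("0x%x: type 0x%x urefs %u\n",
			       table[i].iin_name, table[i].iin_type,
			       table[i].iin_urefs);

	/* Both arrays are out-of-line; treeCnt is 0 on modern kernels. */
	vm_deallocate(mach_task_self(), (vm_address_t)table,
		      tableCnt * sizeof(table[0]));
	if (treeCnt > 0)
		vm_deallocate(mach_task_self(), (vm_address_t)tree,
			      treeCnt * sizeof(tree[0]));
	return 0;
}
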
Example 9
File: host.c Project: DJHartley/xnu
/*
 *	host_processor_info
 *
 *	Return info about the processors on this host.  It will return
 *	the number of processors, and the specific type of info requested
 *	in an OOL array.
 */
kern_return_t
host_processor_info(
	host_t					host,
	processor_flavor_t		flavor,
	natural_t				*out_pcount,
	processor_info_array_t	*out_array,
	mach_msg_type_number_t	*out_array_count)
{
	kern_return_t			result;
	processor_t				processor;
	host_t					thost;
	processor_info_t		info;
	unsigned int			icount, tcount;
	unsigned int			pcount, i;
	vm_offset_t				addr;
	vm_size_t				size, needed;
	vm_map_copy_t			copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = round_page(needed);
	result = kmem_alloc(ipc_kernel_map, &addr, size);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t) addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	if (size != needed) 
		bzero((char *) addr + needed, size - needed);

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
			       vm_map_round_page(addr + size), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
			       (vm_map_size_t)size, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t) copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}
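
A typical user-space use of this routine is reading per-CPU tick counters with the PROCESSOR_CPU_LOAD_INFO flavor, as in the sketch below. The OOL info array must be returned with vm_deallocate().

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

/* Print per-CPU tick counters via host_processor_info(). */
int main(void)
{
	natural_t cpu_count;
	processor_info_array_t info;
	mach_msg_type_number_t info_count;
	kern_return_t kr;

	kr = host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
				 &cpu_count, &info, &info_count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_processor_info: %s\n",
			mach_error_string(kr));
		return 1;
	}

	processor_cpu_load_info_t load = (processor_cpu_load_info_t)info;
	for (natural_t i = 0; i < cpu_count; i++)
		printf("cpu%u: user %u system %u idle %u\n", i,
		       load[i].cpu_ticks[CPU_STATE_USER],
		       load[i].cpu_ticks[CPU_STATE_SYSTEM],
		       load[i].cpu_ticks[CPU_STATE_IDLE]);

	/* The info array came back as out-of-line memory. */
	vm_deallocate(mach_task_self(), (vm_address_t)info,
		      info_count * sizeof(integer_t));
	return 0;
}
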
Example 10
kern_return_t
host_lockgroup_info(
	host_t					host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t			lockgroup_info_addr;
	vm_size_t			lockgroup_info_size;
	vm_size_t			lockgroup_info_vmsize;
	lck_grp_t			*lck_grp;
	unsigned int		i;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	lck_mtx_lock(&lck_grp_lock);

	lockgroup_info_size = lck_grp_cnt * sizeof(*lockgroup_info);
	lockgroup_info_vmsize = round_page(lockgroup_info_size);
	kr = kmem_alloc_pageable(ipc_kernel_map,
						 &lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name,lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	lck_mtx_unlock(&lck_grp_lock);

	if (lockgroup_info_size != lockgroup_info_vmsize)
		bzero((char *)lockgroup_info, lockgroup_info_vmsize - lockgroup_info_size);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
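
A user-space caller might consume this as sketched below (assuming the mach_debug headers; lock statistics are only populated on kernels built with lock-stat support):

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach_debug/lockgroup_info.h>
#include <stdio.h>

int main(void)
{
	lockgroup_info_array_t info;
	mach_msg_type_number_t count;
	kern_return_t kr;

	kr = host_lockgroup_info(mach_host_self(), &info, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_lockgroup_info: %s\n",
			mach_error_string(kr));
		return 1;
	}

	for (mach_msg_type_number_t i = 0; i < count; i++)
		printf("%-32s mtx %llu rw %llu spin %llu\n",
		       info[i].lockgroup_name,
		       (unsigned long long)info[i].lock_mtx_cnt,
		       (unsigned long long)info[i].lock_rw_cnt,
		       (unsigned long long)info[i].lock_spin_cnt);

	vm_deallocate(mach_task_self(), (vm_address_t)info,
		      count * sizeof(info[0]));
	return 0;
}
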
Example 11
kern_return_t
mach_port_space_info(
	ipc_space_t			space,
	ipc_info_space_t		*infop,
	ipc_info_name_array_t		*tablep,
	mach_msg_type_number_t 		*tableCntp,
	ipc_info_tree_name_array_t	*treep,
	mach_msg_type_number_t 		*treeCntp)
{
	ipc_info_name_t *table_info;
	vm_offset_t table_addr;
	vm_size_t table_size, table_size_needed;
	ipc_info_tree_name_t *tree_info;
	vm_offset_t tree_addr;
	vm_size_t tree_size, tree_size_needed;
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	kern_return_t kr;
	vm_map_copy_t copy;


	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	/* start with in-line memory */

	table_size = 0;
	tree_size = 0;

	for (;;) {
		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			if (table_size != 0)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);
			if (tree_size != 0)
				kmem_free(ipc_kernel_map,
					  tree_addr, tree_size);
			return KERN_INVALID_TASK;
		}

		table_size_needed = round_page(space->is_table_size
					       * sizeof(ipc_info_name_t));
		tree_size_needed = round_page(space->is_tree_total
					      * sizeof(ipc_info_tree_name_t));

		if ((table_size_needed == table_size) &&
		    (tree_size_needed == tree_size))
			break;

		is_read_unlock(space);

		if (table_size != table_size_needed) {
			if (table_size != 0)
				kmem_free(ipc_kernel_map, table_addr, table_size);
			kr = kmem_alloc(ipc_kernel_map,	&table_addr, table_size_needed);
			if (kr != KERN_SUCCESS) {
				if (tree_size != 0)
					kmem_free(ipc_kernel_map, tree_addr, tree_size);
				return KERN_RESOURCE_SHORTAGE;
			}
			table_size = table_size_needed;
		}
		if (tree_size != tree_size_needed) {
			if (tree_size != 0)
				kmem_free(ipc_kernel_map, tree_addr, tree_size);
			kr = kmem_alloc(ipc_kernel_map, &tree_addr, tree_size_needed);
			if (kr != KERN_SUCCESS) {
				if (table_size != 0)
					kmem_free(ipc_kernel_map, table_addr, table_size);
				return KERN_RESOURCE_SHORTAGE;
			}
			tree_size = tree_size_needed;
		}
	}
	/* space is read-locked and active; we have enough wired memory */

	/* get the overall space info */
	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iis_table_size = space->is_table_size;
	infop->iis_table_next = space->is_table_next->its_size;
	infop->iis_tree_size = space->is_tree_total;
	infop->iis_tree_small = space->is_tree_small;
	infop->iis_tree_hash = space->is_tree_hash;

	/* walk the table for this space */
	table = space->is_table;
	tsize = space->is_table_size;
	table_info = (ipc_info_name_array_t)table_addr;
	for (index = 0; index < tsize; index++) {
		ipc_info_name_t *iin = &table_info[index];
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits;

		bits = entry->ie_bits;
		iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		if (entry->ie_request)
			iin->iin_type |= MACH_PORT_TYPE_DNREQUEST;
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;
	}

	/* walk the splay tree for this space */
	tree_info = (ipc_info_tree_name_array_t)tree_addr;
	for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
	     tentry != ITE_NULL;
	     tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
		ipc_info_tree_name_t *iitn = &tree_info[index++];
		ipc_info_name_t *iin = &iitn->iitn_name;
		ipc_entry_t entry = &tentry->ite_entry;
		ipc_entry_bits_t bits = entry->ie_bits;

		assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

		iin->iin_name = tentry->ite_name;
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		if (entry->ie_request)
			iin->iin_type |= MACH_PORT_TYPE_DNREQUEST;
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;

		if (tentry->ite_lchild == ITE_NULL)
			iitn->iitn_lchild = MACH_PORT_NULL;
		else
			iitn->iitn_lchild = tentry->ite_lchild->ite_name;

		if (tentry->ite_rchild == ITE_NULL)
			iitn->iitn_rchild = MACH_PORT_NULL;
		else
			iitn->iitn_rchild = tentry->ite_rchild->ite_name;

	}
	ipc_splay_traverse_finish(&space->is_tree);
	is_read_unlock(space);

	/* prepare the table out-of-line data for return */
	if (table_size > 0) {
		if (table_size > infop->iis_table_size * sizeof(ipc_info_name_t))
			bzero((char *)&table_info[infop->iis_table_size],
			      table_size - infop->iis_table_size * sizeof(ipc_info_name_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(table_addr),
				   vm_map_round_page(table_addr + table_size), FALSE);
		assert(kr == KERN_SUCCESS);
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, 
				   (vm_map_size_t)table_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
		*tablep = (ipc_info_name_t *)copy;
		*tableCntp = infop->iis_table_size;
	} else {
		*tablep = (ipc_info_name_t *)0;
		*tableCntp = 0;
	}

	/* prepare the tree out-of-line data for return */
	if (tree_size > 0) {
		if (tree_size > infop->iis_tree_size * sizeof(ipc_info_tree_name_t))
			bzero((char *)&tree_info[infop->iis_tree_size],
			      tree_size - infop->iis_tree_size * sizeof(ipc_info_tree_name_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(tree_addr),
				   vm_map_round_page(tree_addr + tree_size), FALSE);
		assert(kr == KERN_SUCCESS);
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)tree_addr, 
				   (vm_map_size_t)tree_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
		*treep = (ipc_info_tree_name_t *)copy;
		*treeCntp = infop->iis_tree_size;
	} else {
		*treep = (ipc_info_tree_name_t *)0;
		*treeCntp = 0;
	}
	return KERN_SUCCESS;
}
Example 12
kern_return_t
mach_port_names(
	ipc_space_t		space,
	mach_port_name_t	**namesp,
	mach_msg_type_number_t	*namesCnt,
	mach_port_type_t	**typesp,
	mach_msg_type_number_t	*typesCnt)
{
	ipc_entry_bits_t *capability;
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	ipc_entry_num_t actual;	/* this many names */
	ipc_port_timestamp_t timestamp;	/* logical time of this operation */
	mach_port_name_t *names;
	mach_port_type_t *types;
	kern_return_t kr;

	vm_size_t size;		/* size of allocated memory */
	vm_offset_t addr1;	/* allocated memory, for names */
	vm_offset_t addr2;	/* allocated memory, for types */
	vm_map_copy_t memory1;	/* copied-in memory, for names */
	vm_map_copy_t memory2;	/* copied-in memory, for types */

	/* safe simplifying assumption */
	assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t));

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	size = 0;

	for (;;) {
		ipc_entry_num_t bound;
		vm_size_t size_needed;

		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			if (size != 0) {
				kmem_free(ipc_kernel_map, addr1, size);
				kmem_free(ipc_kernel_map, addr2, size);
			}
			return KERN_INVALID_TASK;
		}

		/* upper bound on number of names in the space */

		bound = space->is_table_size + space->is_tree_total;
		size_needed = round_page_32(bound * sizeof(mach_port_name_t));

		if (size_needed <= size)
			break;

		is_read_unlock(space);

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
		}
		size = size_needed;

		kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		/* can't fault while we hold locks */

		kr = vm_map_wire(ipc_kernel_map, addr1, addr1 + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		kr = vm_map_wire(ipc_kernel_map, addr2, addr2 + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
			return KERN_RESOURCE_SHORTAGE;
		}

	}
	/* space is read-locked and active */

	names = (mach_port_name_t *) addr1;
	types = (mach_port_type_t *) addr2;
	actual = 0;

	timestamp = ipc_port_timestamp();

	table = space->is_table;
	tsize = space->is_table_size;

	for (index = 0; index < tsize; index++) {
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits = entry->ie_bits;

		if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
			mach_port_name_t name;

			name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
			mach_port_names_helper(timestamp, entry, name, names,
					       types, &actual, space);
		}
	}

	for (tentry = ipc_splay_traverse_start(&space->is_tree);
	    tentry != ITE_NULL;
	    tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
		ipc_entry_t entry = &tentry->ite_entry;
		mach_port_name_t name = tentry->ite_name;

		assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
		mach_port_names_helper(timestamp, entry, name, names,
				       types, &actual, space);
	}
	ipc_splay_traverse_finish(&space->is_tree);
	is_read_unlock(space);

	if (actual == 0) {
		memory1 = VM_MAP_COPY_NULL;
		memory2 = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
		}
	} else {
		vm_size_t size_used;
		vm_size_t vm_size_used;

		size_used = actual * sizeof(mach_port_name_t);
		vm_size_used = round_page_32(size_used);

		/*
		 *	Make used memory pageable and get it into
		 *	copied-in form.  Free any unused memory.
		 */

		kr = vm_map_unwire(ipc_kernel_map,
				     addr1, addr1 + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_unwire(ipc_kernel_map,
				     addr2, addr2 + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr1, size_used,
				   TRUE, &memory1);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr2, size_used,
				   TRUE, &memory2);
		assert(kr == KERN_SUCCESS);

		if (vm_size_used != size) {
			kmem_free(ipc_kernel_map,
				  addr1 + vm_size_used, size - vm_size_used);
			kmem_free(ipc_kernel_map,
				  addr2 + vm_size_used, size - vm_size_used);
		}
	}

	*namesp = (mach_port_name_t *) memory1;
	*namesCnt = actual;
	*typesp = (mach_port_type_t *) memory2;
	*typesCnt = actual;
	return KERN_SUCCESS;
}
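
This routine backs one of the most familiar Mach introspection calls; a direct user-space client looks like the sketch below. Both reply arrays are out-of-line and must be deallocated.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

/* List every port name in our own IPC space. */
int main(void)
{
	mach_port_name_array_t names;
	mach_port_type_array_t types;
	mach_msg_type_number_t ncount, tcount;
	kern_return_t kr;

	kr = mach_port_names(mach_task_self(), &names, &ncount,
			     &types, &tcount);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_names: %s\n", mach_error_string(kr));
		return 1;
	}

	for (mach_msg_type_number_t i = 0; i < ncount; i++)
		printf("0x%x: %s%s%s\n", names[i],
		       (types[i] & MACH_PORT_TYPE_RECEIVE) ? "recv " : "",
		       (types[i] & MACH_PORT_TYPE_SEND) ? "send " : "",
		       (types[i] & MACH_PORT_TYPE_PORT_SET) ? "pset" : "");

	/* Both arrays are returned as out-of-line memory. */
	vm_deallocate(mach_task_self(), (vm_address_t)names,
		      ncount * sizeof(names[0]));
	vm_deallocate(mach_task_self(), (vm_address_t)types,
		      tcount * sizeof(types[0]));
	return 0;
}
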
Example 13
kern_return_t
mach_port_get_set_status(
	ipc_space_t			space,
	mach_port_name_t		name,
	mach_port_name_t		**members,
	mach_msg_type_number_t		*membersCnt)
{
	ipc_entry_num_t actual;		/* this many members */
	ipc_entry_num_t maxnames;	/* space for this many members */
	kern_return_t kr;

	vm_size_t size;		/* size of allocated memory */
	vm_offset_t addr;	/* allocated memory */
	vm_map_copy_t memory;	/* copied-in memory */

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	if (!MACH_PORT_VALID(name))
		return KERN_INVALID_RIGHT;

	size = PAGE_SIZE;	/* initial guess */

	for (;;) {
		ipc_tree_entry_t tentry;
		ipc_entry_t entry, table;
		ipc_entry_num_t tsize;
		mach_port_index_t index;
		mach_port_name_t *names;
		ipc_pset_t pset;

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* can't fault while we hold locks */

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = ipc_right_lookup_read(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return kr;
		}
		/* space is read-locked and active */

		if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) {
			is_read_unlock(space);
			kmem_free(ipc_kernel_map, addr, size);
			return KERN_INVALID_RIGHT;
		}

		pset = (ipc_pset_t) entry->ie_object;
		assert(pset != IPS_NULL);
		/* the port set must be active */

		names = (mach_port_name_t *) addr;
		maxnames = size / sizeof(mach_port_name_t);
		actual = 0;

		table = space->is_table;
		tsize = space->is_table_size;

		for (index = 0; index < tsize; index++) {
			ipc_entry_t ientry = &table[index];

			if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t port =
					(ipc_port_t) ientry->ie_object;

				mach_port_gst_helper(pset, port,
						     maxnames, names, &actual);
			}
		}

		for (tentry = ipc_splay_traverse_start(&space->is_tree);
		    tentry != ITE_NULL;
		    tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
			ipc_entry_bits_t bits = tentry->ite_bits;

			assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

			if (bits & MACH_PORT_TYPE_RECEIVE) {
			    ipc_port_t port = (ipc_port_t) tentry->ite_object;

			    mach_port_gst_helper(pset, port, maxnames,
						 names, &actual);
			}
		}
		ipc_splay_traverse_finish(&space->is_tree);
		is_read_unlock(space);

		if (actual <= maxnames)
			break;

		/* didn't have enough memory; allocate more */

		kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(actual * sizeof(mach_port_name_t)) + PAGE_SIZE;
	}

	if (actual == 0) {
		memory = VM_MAP_COPY_NULL;

		kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used;
		vm_size_t vm_size_used;

		size_used = actual * sizeof(mach_port_name_t);
		vm_size_used = round_page_32(size_used);

		/*
		 *	Make used memory pageable and get it into
		 *	copied-in form.  Free any unused memory.
		 */

		kr = vm_map_unwire(ipc_kernel_map,
				     addr, addr + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &memory);
		assert(kr == KERN_SUCCESS);

		if (vm_size_used != size)
			kmem_free(ipc_kernel_map,
				  addr + vm_size_used, size - vm_size_used);
	}

	*members = (mach_port_name_t *) memory;
	*membersCnt = actual;
	return KERN_SUCCESS;
}
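
A small end-to-end user-space exercise of this routine: create a port set, insert one receive right, and read the membership back. Error checking on the setup calls is elided for brevity.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int main(void)
{
	mach_port_t pset, port;
	mach_port_name_array_t members;
	mach_msg_type_number_t count;
	kern_return_t kr;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	mach_port_insert_member(mach_task_self(), port, pset);

	kr = mach_port_get_set_status(mach_task_self(), pset,
				      &members, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_get_set_status: %s\n",
			mach_error_string(kr));
		return 1;
	}

	for (mach_msg_type_number_t i = 0; i < count; i++)
		printf("member 0x%x\n", members[i]);

	/* The member list is out-of-line memory. */
	vm_deallocate(mach_task_self(), (vm_address_t)members,
		      count * sizeof(members[0]));
	return 0;
}
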
Example 14
/*********************************************************************
* IMPORTANT: Once we have done the vm_map_copyout(), we *must* return
* KERN_SUCCESS or the kernel map gets messed up (reason as yet
* unknown). We use op_result to return the real result of our work.
*********************************************************************/
kern_return_t kext_request(
    host_priv_t                             hostPriv,
    /* in only */  uint32_t                 clientLogSpec,
    /* in only */  vm_offset_t              requestIn,
    /* in only */  mach_msg_type_number_t   requestLengthIn,
    /* out only */ vm_offset_t            * responseOut,
    /* out only */ mach_msg_type_number_t * responseLengthOut,
    /* out only */ vm_offset_t            * logDataOut,
    /* out only */ mach_msg_type_number_t * logDataLengthOut,
    /* out only */ kern_return_t          * op_result)
{
    kern_return_t     result          = KERN_FAILURE;
    vm_map_address_t  map_addr        = 0;     // do not free/deallocate
    char            * request         = NULL;  // must vm_deallocate

    mkext2_header   * mkextHeader     = NULL;  // do not release
    bool              isMkext         = false;

    char            * response        = NULL;  // must kmem_free
    uint32_t          responseLength  = 0;
    char            * logData         = NULL;  // must kmem_free
    uint32_t          logDataLength   = 0;

   /* MIG doesn't pass "out" parameters as empty, so clear them immediately
    * just in case, or MIG will try to copy out bogus data.
    */    
    *op_result = KERN_FAILURE;
    *responseOut = NULL;
    *responseLengthOut = 0;
    *logDataOut = NULL;
    *logDataLengthOut = 0;

   /* Check for input. Don't discard what isn't there, though.
    */
    if (!requestLengthIn || !requestIn) {
		OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "Invalid request from user space (no data).");
        *op_result = KERN_INVALID_ARGUMENT;
        goto finish;
    }

   /* Once we have done the vm_map_copyout(), we *must* return KERN_SUCCESS
    * or the kernel map gets messed up (reason as yet unknown). We will use
    * op_result to return the real result of our work.
    */
    result = vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)requestIn);
    if (result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "vm_map_copyout() failed for request from user space.");
        vm_map_copy_discard((vm_map_copy_t)requestIn);
        goto finish;
    }
    request = CAST_DOWN(char *, map_addr);

   /* Check if request is an mkext; this is always a load request
    * and requires root access. If it isn't an mkext, see if it's
    * an XML request, and check the request to see if that requires
    * root access.
    */
    if (requestLengthIn > sizeof(mkext2_header)) {
        mkextHeader = (mkext2_header *)request;
        if (MKEXT_GET_MAGIC(mkextHeader) == MKEXT_MAGIC &&
            MKEXT_GET_SIGNATURE(mkextHeader) == MKEXT_SIGN) {

            isMkext = true;
        }
    }

    if (isMkext) {
#ifdef SECURE_KERNEL
        // xxx - something tells me if we have a secure kernel we don't even
        // xxx - want to log a message here. :-)
        *op_result = KERN_NOT_SUPPORTED;
        goto finish;
#else
        // xxx - can we find out if calling task is kextd?
        // xxx - can we find the name of the calling task?
        if (hostPriv == HOST_PRIV_NULL) {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogLoadFlag | kOSKextLogIPCFlag,
                "Attempt by non-root process to load a kext.");
            *op_result = kOSKextReturnNotPrivileged;
            goto finish;
        }

        *op_result = OSKext::loadFromMkext((OSKextLogSpec)clientLogSpec,
            request, requestLengthIn,
            &logData, &logDataLength);

#endif /* defined(SECURE_KERNEL) */

    } else {

       /* If the request isn't an mkext, then it should be XML. Parse it
        * if possible and hand the request over to OSKext.
        */
        *op_result = OSKext::handleRequest(hostPriv,
            (OSKextLogSpec)clientLogSpec,
            request, requestLengthIn,
            &response, &responseLength,
            &logData, &logDataLength);
    }

    if (response && responseLength > 0) {
        kern_return_t copyin_result;

        copyin_result = vm_map_copyin(kernel_map,
            CAST_USER_ADDR_T(response), responseLength,
            /* src_destroy */ false, (vm_map_copy_t *)responseOut);
        if (copyin_result == KERN_SUCCESS) {
            *responseLengthOut = responseLength;
        } else {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogIPCFlag,
                "Failed to copy response to request from user space.");
            *op_result = copyin_result;  // xxx - should we map to our own code?
            *responseOut = NULL;
            *responseLengthOut = 0;
            goto finish;
        }
    }

    if (logData && logDataLength > 0) {
        kern_return_t copyin_result;

        copyin_result = vm_map_copyin(kernel_map,
            CAST_USER_ADDR_T(logData), logDataLength,
            /* src_destroy */ false, (vm_map_copy_t *)logDataOut);
        if (copyin_result == KERN_SUCCESS) {
            *logDataLengthOut = logDataLength;
        } else {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogIPCFlag,
                "Failed to copy log data for request from user space.");
            *op_result = copyin_result;  // xxx - should we map to our own code?
            *logDataOut = NULL;
            *logDataLengthOut = 0;
            goto finish;
        }
    }

finish:
    if (request) {
        (void)vm_deallocate(kernel_map, (vm_offset_t)request, requestLengthIn);
    }
    if (response) {
        kmem_free(kernel_map, (vm_offset_t)response, responseLength);
    }
    if (logData) {
        kmem_free(kernel_map, (vm_offset_t)logData, logDataLength);
    }

    return result;
}
Example 15
static
load_return_t
load_dylinker(
    struct dylinker_command	*lcp,
    integer_t		archbits,
    vm_map_t		map,
    thread_t	thread,
    int			depth,
    load_result_t		*result,
    boolean_t		is_64bit
)
{
    char			*name;
    char			*p;
    struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
    struct mach_header	header;
    off_t			file_offset = 0; /* set by get_macho_vnode() */
    off_t			macho_size = 0;	/* set by get_macho_vnode() */
    vm_map_t		copy_map;
    load_result_t		myresult;
    kern_return_t		ret;
    vm_map_copy_t	tmp;
    mach_vm_offset_t	dyl_start, map_addr;
    mach_vm_size_t		dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *	Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = load_result_null;

    /*
     *	First try to map dyld in directly.  This should work most of
     *	the time since there shouldn't normally be something already
     *	mapped to its address.
     */

    ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
                         depth, &myresult);

    /*
     *	If it turned out something was in the way, then we'll take
     *	this longer path to map dyld into a temporary map and
     *	copy it into the destination map at a different address.
     */

    if (ret == LOAD_NOSPACE) {

        /*
         *	Load the Mach-O.
         *	Use a temporary map to do the work.
         */
        copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
                                             is_64bit),
                                 get_map_min(map), get_map_max(map), TRUE);
        if (VM_MAP_NULL == copy_map) {
            ret = LOAD_RESOURCE;
            goto out;
        }

        myresult = load_result_null;

        ret = parse_machfile(vp, copy_map, thread, &header,
                             file_offset, macho_size,
                             depth, &myresult);

        if (ret) {
            vm_map_deallocate(copy_map);
            goto out;
        }

        if (get_map_nentries(copy_map) > 0) {

            dyl_start = mach_get_vm_start(copy_map);
            dyl_length = mach_get_vm_end(copy_map) - dyl_start;

            map_addr = dyl_start;
            ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

            if (ret != KERN_SUCCESS) {
                vm_map_deallocate(copy_map);
                ret = LOAD_NOSPACE;
                goto out;

            }

            ret = vm_map_copyin(copy_map,
                                (vm_map_address_t)dyl_start,
                                (vm_map_size_t)dyl_length,
                                TRUE, &tmp);
            if (ret != KERN_SUCCESS) {
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            ret = vm_map_copy_overwrite(map,
                                        (vm_map_address_t)map_addr,
                                        tmp, FALSE);
            if (ret != KERN_SUCCESS) {
                vm_map_copy_discard(tmp);
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            if (map_addr != dyl_start)
                myresult.entry_point += (map_addr - dyl_start);
        } else {
            ret = LOAD_FAILURE;
        }

        vm_map_deallocate(copy_map);
    }

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);
    }
out:
    vnode_put(vp);
    return (ret);

}
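
The vm_map_copyin()/vm_map_copy_overwrite() relocation dance above is kernel-internal, but it has a rough user-level cousin in mach_vm_copy(). A sketch of the analogous move-a-region operation from user space:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_vm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stage data in one region, then replicate it at a second address with
 * mach_vm_copy(), the user-level cousin of the kernel's
 * vm_map_copyin()/vm_map_copy_overwrite() pair used above. */
int main(void)
{
	mach_vm_address_t src = 0, dst = 0;
	mach_vm_size_t size = 4096;
	kern_return_t kr;

	mach_vm_allocate(mach_task_self(), &src, size, VM_FLAGS_ANYWHERE);
	mach_vm_allocate(mach_task_self(), &dst, size, VM_FLAGS_ANYWHERE);
	strcpy((char *)(uintptr_t)src, "relocated");

	kr = mach_vm_copy(mach_task_self(), src, size, dst);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_vm_copy: %s\n", mach_error_string(kr));
		return 1;
	}
	printf("dst says: %s\n", (char *)(uintptr_t)dst);

	mach_vm_deallocate(mach_task_self(), src, size);
	mach_vm_deallocate(mach_task_self(), dst, size);
	return 0;
}
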